Jesper Wilhelmsson 2018-01-05 22:02:20 +01:00
commit fb8bca78ee
237 changed files with 2571 additions and 1130 deletions


@ -257,7 +257,6 @@ SUNWprivate_1.1 {
Java_jdk_internal_reflect_NativeConstructorAccessorImpl_newInstance0;
Java_jdk_internal_reflect_NativeMethodAccessorImpl_invoke0;
Java_jdk_internal_reflect_Reflection_getCallerClass__;
Java_jdk_internal_reflect_Reflection_getCallerClass__I;
Java_jdk_internal_reflect_Reflection_getClassAccessFlags;
Java_jdk_internal_misc_VM_latestUserDefinedLoader0;
Java_jdk_internal_misc_VM_getuid;


@ -27,7 +27,6 @@ text: .text%Java_java_io_FileDescriptor_initIDs;
text: .text%Java_java_io_FileOutputStream_initIDs;
text: .text%Java_java_lang_System_setIn0;
text: .text%Java_sun_reflect_Reflection_getCallerClass__;
text: .text%Java_sun_reflect_Reflection_getCallerClass__I;
text: .text%Java_java_lang_Class_forName0;
text: .text%Java_java_lang_Object_getClass;
text: .text%Java_sun_reflect_Reflection_getClassAccessFlags;


@ -26,7 +26,6 @@ text: .text%Java_java_io_FileDescriptor_initIDs;
text: .text%Java_java_io_FileOutputStream_initIDs;
text: .text%Java_java_lang_System_setIn0;
text: .text%Java_sun_reflect_Reflection_getCallerClass__;
text: .text%Java_sun_reflect_Reflection_getCallerClass__I;
text: .text%Java_java_lang_Class_forName0;
text: .text%Java_java_lang_String_intern;
text: .text%Java_java_lang_StringUTF16_isBigEndian;


@ -27,7 +27,6 @@ text: .text%Java_java_io_FileDescriptor_initIDs;
text: .text%Java_java_io_FileOutputStream_initIDs;
text: .text%Java_java_lang_System_setIn0;
text: .text%Java_sun_reflect_Reflection_getCallerClass__;
text: .text%Java_sun_reflect_Reflection_getCallerClass__I;
text: .text%Java_java_lang_Class_forName0;
text: .text%Java_java_lang_String_intern;
text: .text%Java_java_lang_StringUTF16_isBigEndian;


@ -5844,8 +5844,8 @@ operand immPollPage()
operand immByteMapBase()
%{
// Get base of card map
predicate((jbyte*)n->get_ptr() ==
((CardTableModRefBS*)(Universe::heap()->barrier_set()))->byte_map_base);
predicate(Universe::heap()->barrier_set()->is_a(BarrierSet::CardTableModRef) &&
(jbyte*)n->get_ptr() == ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->byte_map_base);
match(ConP);
op_cost(0);
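
The new predicate adds a dynamic type check before the cast: only when the heap's barrier set really is a card-table barrier may byte_map_base be read through a CardTableModRefBS*. A minimal sketch of the same check-before-cast pattern, with simplified stand-in types (HotSpot uses jbyte* and a richer BarrierSet hierarchy):

#include <cstdint>

enum BarrierSetKind { CardTableModRef, Other };

struct BarrierSet {
  BarrierSetKind kind;
  bool is_a(BarrierSetKind k) const { return kind == k; }
};

struct CardTableModRefBS : BarrierSet {
  int8_t* byte_map_base;   // HotSpot uses jbyte*
};

// Before the fix the predicate cast unconditionally; now it verifies the
// barrier set first, so the cast cannot misread another barrier set's fields.
static int8_t* card_table_base_or_null(BarrierSet* bs) {
  if (!bs->is_a(CardTableModRef)) return nullptr;
  return static_cast<CardTableModRefBS*>(bs)->byte_map_base;
}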


@ -848,7 +848,7 @@ public:
// architecture. In debug mode we shrink it in order to test
// trampolines, but not so small that branches in the interpreter
// are out of range.
static const unsigned long branch_range = INCLUDE_JVMCI ? 128 * M : NOT_DEBUG(128 * M) DEBUG_ONLY(2 * M);
static const unsigned long branch_range = NOT_DEBUG(128 * M) DEBUG_ONLY(2 * M);
static bool reachable_from_branch_at(address branch, address target) {
return uabs(target - branch) < branch_range;
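
Dropping the JVMCI special case means debug builds now shrink branch_range even when JVMCI is included. A compilable sketch of the reachability test under the debug-build constant, with uabs() as a local stand-in for HotSpot's helper:

#include <cstdint>

const uint64_t M = 1024 * 1024;
const uint64_t branch_range = 2 * M;   // DEBUG_ONLY value from the hunk

// Local stand-in for HotSpot's uabs().
static uint64_t uabs(int64_t v) { return v < 0 ? -(uint64_t)v : (uint64_t)v; }

static bool reachable_from_branch_at(uintptr_t branch, uintptr_t target) {
  return uabs((int64_t)target - (int64_t)branch) < branch_range;
}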


@ -71,6 +71,13 @@ int CompiledStaticCall::to_interp_stub_size() {
return 7 * NativeInstruction::instruction_size;
}
int CompiledStaticCall::to_trampoline_stub_size() {
// Somewhat pessimistically, we count 3 instructions here (although
// there are only two) because we sometimes emit an alignment nop.
// Trampoline stubs are always word aligned.
return 3 * NativeInstruction::instruction_size + wordSize;
}
// Relocation entries for call stub, compiled java to interpreter.
int CompiledStaticCall::reloc_to_interp_stub() {
return 4; // 3 in emit_to_interp_stub + 1 in emit_call


@ -109,7 +109,7 @@ void CodeInstaller::pd_relocate_ForeignCall(NativeInstruction* inst, jlong forei
TRACE_jvmci_3("relocating (foreign call) at " PTR_FORMAT, p2i(inst));
}
void CodeInstaller::pd_relocate_JavaMethod(Handle hotspot_method, jint pc_offset, TRAPS) {
void CodeInstaller::pd_relocate_JavaMethod(CodeBuffer &cbuf, Handle hotspot_method, jint pc_offset, TRAPS) {
#ifdef ASSERT
Method* method = NULL;
// we need to check: this might also be an unresolved method
@ -124,22 +124,22 @@ void CodeInstaller::pd_relocate_JavaMethod(Handle hotspot_method, jint pc_offset
case INVOKEINTERFACE: {
assert(method == NULL || !method->is_static(), "cannot call static method with invokeinterface");
NativeCall* call = nativeCall_at(_instructions->start() + pc_offset);
call->set_destination(SharedRuntime::get_resolve_virtual_call_stub());
_instructions->relocate(call->instruction_address(), virtual_call_Relocation::spec(_invoke_mark_pc));
call->trampoline_jump(cbuf, SharedRuntime::get_resolve_virtual_call_stub());
break;
}
case INVOKESTATIC: {
assert(method == NULL || method->is_static(), "cannot call non-static method with invokestatic");
NativeCall* call = nativeCall_at(_instructions->start() + pc_offset);
call->set_destination(SharedRuntime::get_resolve_static_call_stub());
_instructions->relocate(call->instruction_address(), relocInfo::static_call_type);
call->trampoline_jump(cbuf, SharedRuntime::get_resolve_static_call_stub());
break;
}
case INVOKESPECIAL: {
assert(method == NULL || !method->is_static(), "cannot call static method with invokespecial");
NativeCall* call = nativeCall_at(_instructions->start() + pc_offset);
call->set_destination(SharedRuntime::get_resolve_opt_virtual_call_stub());
_instructions->relocate(call->instruction_address(), relocInfo::opt_virtual_call_type);
call->trampoline_jump(cbuf, SharedRuntime::get_resolve_opt_virtual_call_stub());
break;
}
default:


@ -801,7 +801,7 @@ address MacroAssembler::emit_trampoline_stub(int insts_call_instruction_offset,
assert(is_NativeCallTrampolineStub_at(stub_start_addr), "doesn't look like a trampoline");
end_a_stub();
return stub;
return stub_start_addr;
}
address MacroAssembler::ic_call(address entry, jint method_index) {


@ -367,3 +367,24 @@ void NativeCallTrampolineStub::set_destination(address new_destination) {
set_ptr_at(data_offset, new_destination);
OrderAccess::release();
}
// Generate a trampoline for a branch to dest. If there's no need for a
// trampoline, simply patch the call directly to dest.
address NativeCall::trampoline_jump(CodeBuffer &cbuf, address dest) {
MacroAssembler a(&cbuf);
address stub = NULL;
if (a.far_branches()
&& ! is_NativeCallTrampolineStub_at(instruction_address() + displacement())) {
stub = a.emit_trampoline_stub(instruction_address() - cbuf.insts()->start(), dest);
}
if (stub == NULL) {
// If we generated no stub, patch this call directly to dest.
// This will happen if we don't need far branches or if there
// already was a trampoline.
set_destination(dest);
}
return stub;
}


@ -61,7 +61,7 @@ class NativeInstruction VALUE_OBJ_CLASS_SPEC {
return uint_at(0);
}
bool is_blr() const { return (encoding() & 0xfffffc1f) == 0xd63f0000; }
bool is_blr() const { return (encoding() & 0xff9ffc1f) == 0xd61f0000; } // blr(register) or br(register)
bool is_adr_aligned() const { return (encoding() & 0xff000000) == 0x10000000; } // adr Xn, <label>, where label is aligned to 4 bytes (address of instruction).
inline bool is_nop();
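
Widening the mask from 0xfffffc1f to 0xff9ffc1f clears the opcode bits that distinguish the register-branch forms (bit 21 separates BLR from BR), so a single compare against 0xd61f0000 now accepts both, as the new comment says. A small self-check, assuming the standard AArch64 encodings:

#include <cstdint>
#include <cstdio>

// br Xn = 0xd61f0000 | (rn << 5), blr Xn = 0xd63f0000 | (rn << 5); bit 21
// (0x00200000) distinguishes them and is cleared by the widened mask.
static bool is_blr_or_br(uint32_t encoding) {
  return (encoding & 0xff9ffc1f) == 0xd61f0000;
}

int main() {
  uint32_t br_x16  = 0xd61f0000u | (16u << 5); // br  x16
  uint32_t blr_x16 = 0xd63f0000u | (16u << 5); // blr x16
  printf("br:  %d\n", is_blr_or_br(br_x16));   // 1
  printf("blr: %d\n", is_blr_or_br(blr_x16));  // 1
  return 0;
}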
@ -143,8 +143,9 @@ inline NativeInstruction* nativeInstruction_at(uint32_t *address) {
}
inline NativeCall* nativeCall_at(address address);
// The NativeCall is an abstraction for accessing/manipulating native call imm32/rel32off
// instructions (used to manipulate inline caches, primitive & dll calls, etc.).
// The NativeCall is an abstraction for accessing/manipulating native
// call instructions (used to manipulate inline caches, primitive &
// DSO calls, etc.).
class NativeCall: public NativeInstruction {
public:
@ -155,7 +156,6 @@ class NativeCall: public NativeInstruction {
return_address_offset = 4
};
enum { cache_line_size = BytesPerWord }; // conservative estimate!
address instruction_address() const { return addr_at(instruction_offset); }
address next_instruction_address() const { return addr_at(return_address_offset); }
int displacement() const { return (int_at(displacement_offset) << 6) >> 4; }
@ -206,6 +206,7 @@ class NativeCall: public NativeInstruction {
void set_destination_mt_safe(address dest, bool assert_lock = true);
address get_trampoline();
address trampoline_jump(CodeBuffer &cbuf, address dest);
};
inline NativeCall* nativeCall_at(address address) {


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -35,4 +35,10 @@
format_width = 0
};
public:
// This platform has no oops in the code that are not also
// listed in the oop section.
static bool mustIterateImmediateOopsInCode() { return false; }
#endif // CPU_AARCH64_VM_RELOCINFO_AARCH64_HPP
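
The new per-platform constant lets shared code skip scanning instruction streams for immediate oops on platforms that guarantee every such oop is also in the oop section. A hedged sketch of how a caller might branch on it; the iterator shape and names are illustrative, not the real nmethod code:

// Illustrative only: the real nmethod/relocation iteration API differs.
struct relocInfo {
  // false on aarch64 (and arm/ppc/s390/sparc); x86 and zero return true
  // because instructions like loadConP can embed oops directly in code.
  static bool mustIterateImmediateOopsInCode() { return false; }
};

template <typename OopClosure>
void iterate_oops(OopClosure& cl) {
  (void)cl;
  // ... walk the oop section unconditionally ...
  if (relocInfo::mustIterateImmediateOopsInCode()) {
    // ... additionally walk relocations for oops embedded in instructions ...
  }
}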


@ -3404,7 +3404,6 @@ void TemplateTable::_new() {
Label done;
Label initialize_header;
Label initialize_object; // including clearing the fields
Label allocate_shared;
__ get_cpool_and_tags(r4, r0);
// Make sure the class we're about to instantiate has been resolved.
@ -3433,18 +3432,24 @@ void TemplateTable::_new() {
// test to see if it has a finalizer or is malformed in some way
__ tbnz(r3, exact_log2(Klass::_lh_instance_slow_path_bit), slow_case);
// Allocate the instance
// 1) Try to allocate in the TLAB
// 2) if fail and the object is large allocate in the shared Eden
// 3) if the above fails (or is not applicable), go to a slow case
// (creates a new TLAB, etc.)
// Allocate the instance:
// If TLAB is enabled:
// Try to allocate in the TLAB.
// If fails, go to the slow path.
// Else If inline contiguous allocations are enabled:
// Try to allocate in eden.
// If fails due to heap end, go to slow path.
//
// If TLAB is enabled OR inline contiguous is enabled:
// Initialize the allocation.
// Exit.
//
// Go to slow path.
const bool allow_shared_alloc =
Universe::heap()->supports_inline_contig_alloc();
if (UseTLAB) {
__ tlab_allocate(r0, r3, 0, noreg, r1,
allow_shared_alloc ? allocate_shared : slow_case);
__ tlab_allocate(r0, r3, 0, noreg, r1, slow_case);
if (ZeroTLAB) {
// the fields have been already cleared
@ -3453,19 +3458,19 @@ void TemplateTable::_new() {
// initialize both the header and fields
__ b(initialize_object);
}
} else {
// Allocation in the shared Eden, if allowed.
//
// r3: instance size in bytes
if (allow_shared_alloc) {
__ eden_allocate(r0, r3, 0, r10, slow_case);
__ incr_allocated_bytes(rthread, r3, 0, rscratch1);
}
}
// Allocation in the shared Eden, if allowed.
//
// r3: instance size in bytes
if (allow_shared_alloc) {
__ bind(allocate_shared);
__ eden_allocate(r0, r3, 0, r10, slow_case);
__ incr_allocated_bytes(rthread, r3, 0, rscratch1);
}
if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
// If UseTLAB or allow_shared_alloc is true, the object has been created above
// and needs to be initialized. Otherwise, skip and go to the slow path.
if (UseTLAB || allow_shared_alloc) {
// The object is initialized before the header. If the object size is
// zero, go directly to the header initialization.
__ bind(initialize_object);
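
The rewritten comment is the whole policy: a TLAB failure no longer falls back to eden, so the two fast allocation paths are now mutually exclusive. A toy C++ rendering of that control flow, with stand-ins for the macro-assembler helpers:

#include <cstdlib>

// Toy stand-ins for the MacroAssembler allocation paths; only the control
// flow of the rewritten policy comment above is being illustrated.
static void* tlab_allocate(size_t)   { return nullptr; }   // pretend TLAB is full
static void* eden_allocate(size_t n) { return malloc(n); }
static void* slow_path_allocate(size_t n) { return malloc(n); } // InterpreterRuntime::_new
static void  initialize_object(void*, size_t) {}           // header + fields

static void* allocate_instance(size_t size, bool use_tlab, bool allow_shared_alloc) {
  void* obj = nullptr;
  if (use_tlab) {
    obj = tlab_allocate(size);      // note: no eden fallback anymore --
  } else if (allow_shared_alloc) {  // a TLAB failure now goes straight
    obj = eden_allocate(size);      // to the slow path
  }
  if (obj == nullptr) {
    return slow_path_allocate(size); // creates a new TLAB, etc.
  }
  initialize_object(obj, size);
  return obj;
}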


@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -32,4 +32,10 @@
format_width = 0
};
public:
// This platform has no oops in the code that are not also
// listed in the oop section.
static bool mustIterateImmediateOopsInCode() { return false; }
#endif // CPU_ARM_VM_RELOCINFO_ARM_HPP


@ -5601,12 +5601,17 @@ void MacroAssembler::zap_from_to(Register low, int before, Register high, int af
#endif // !PRODUCT
SkipIfEqualZero::SkipIfEqualZero(MacroAssembler* masm, Register temp, const bool* flag_addr) : _masm(masm), _label() {
void SkipIfEqualZero::skip_to_label_if_equal_zero(MacroAssembler* masm, Register temp,
const bool* flag_addr, Label& label) {
int simm16_offset = masm->load_const_optimized(temp, (address)flag_addr, R0, true);
assert(sizeof(bool) == 1, "PowerPC ABI");
masm->lbz(temp, simm16_offset, temp);
masm->cmpwi(CCR0, temp, 0);
masm->beq(CCR0, _label);
masm->beq(CCR0, label);
}
SkipIfEqualZero::SkipIfEqualZero(MacroAssembler* masm, Register temp, const bool* flag_addr) : _masm(masm), _label() {
skip_to_label_if_equal_zero(masm, temp, flag_addr, _label);
}
SkipIfEqualZero::~SkipIfEqualZero() {


@ -979,6 +979,8 @@ class SkipIfEqualZero : public StackObj {
public:
// 'Temp' is a temp register that this object can use (and trash).
explicit SkipIfEqualZero(MacroAssembler*, Register temp, const bool* flag_addr);
static void skip_to_label_if_equal_zero(MacroAssembler*, Register temp,
const bool* flag_addr, Label& label);
~SkipIfEqualZero();
};
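
Extracting the check into a static helper lets a caller supply its own target label instead of the label owned by the RAII object, which the templateTable_ppc change below uses to branch straight to Ldone. A self-contained analog of the two styles, with an ordinary flag test standing in for the emitted compare-and-branch:

#include <cstdio>

static bool DTraceAllocProbes = false;   // stands in for the real flag

// Style 1 (RAII): the destructor binds the skip label at end of scope.
static void style1() {
  if (DTraceAllocProbes) {   // SkipIfEqualZero skip_if(_masm, Rscratch, &flag)
    printf("probe\n");
  }                          // ~SkipIfEqualZero() binds _label here
}

// Style 2 (new static helper): the caller picks the target label, so the
// skip can leave the surrounding sequence entirely, as PPC does with Ldone.
static void style2() {
  if (!DTraceAllocProbes) goto Ldone;  // skip_to_label_if_equal_zero(...)
  printf("probe\n");
Ldone:
  return;
}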


@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2013 SAP SE. All rights reserved.
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -43,4 +43,10 @@
#endif
};
public:
// This platform has no oops in the code that are not also
// listed in the oop section.
static bool mustIterateImmediateOopsInCode() { return false; }
#endif // CPU_PPC_VM_RELOCINFO_PPC_HPP


@ -3634,10 +3634,7 @@ void TemplateTable::_new() {
transition(vtos, atos);
Label Lslow_case,
Ldone,
Linitialize_header,
Lallocate_shared,
Linitialize_object; // Including clearing the fields.
Ldone;
const Register RallocatedObject = R17_tos,
RinstanceKlass = R9_ARG7,
@ -3648,8 +3645,6 @@ void TemplateTable::_new() {
Rtags = R3_ARG1,
Rindex = R5_ARG3;
const bool allow_shared_alloc = Universe::heap()->supports_inline_contig_alloc();
// --------------------------------------------------------------------------
// Check if fast case is possible.
@ -3658,6 +3653,8 @@ void TemplateTable::_new() {
// Load index of constant pool entry.
__ get_2_byte_integer_at_bcp(1, Rindex, InterpreterMacroAssembler::Unsigned);
// Note: compared to other architectures, PPC's implementation always goes
// to the slow path if TLAB is used and fails.
if (UseTLAB) {
// Make sure the class we're about to instantiate has been resolved
// This is done before loading instanceKlass to be consistent with the order
@ -3687,8 +3684,7 @@ void TemplateTable::_new() {
// Fast case:
// Allocate the instance.
// 1) Try to allocate in the TLAB.
// 2) If fail, and the TLAB is not full enough to discard, allocate in the shared Eden.
// 3) If the above fails (or is not applicable), go to a slow case (creates a new TLAB, etc.).
// 2) If the above fails (or is not applicable), go to a slow case (creates a new TLAB, etc.).
Register RoldTopValue = RallocatedObject; // Object will be allocated here if it fits.
Register RnewTopValue = R6_ARG4;
@ -3702,53 +3698,13 @@ void TemplateTable::_new() {
// If there is enough space, we do not CAS and do not clear.
__ cmpld(CCR0, RnewTopValue, RendValue);
__ bgt(CCR0, allow_shared_alloc ? Lallocate_shared : Lslow_case);
__ bgt(CCR0, Lslow_case);
__ std(RnewTopValue, in_bytes(JavaThread::tlab_top_offset()), R16_thread);
if (ZeroTLAB) {
// The fields have already been cleared.
__ b(Linitialize_header);
} else {
// Initialize both the header and fields.
__ b(Linitialize_object);
}
// Fall through: TLAB was too small.
if (allow_shared_alloc) {
Register RtlabWasteLimitValue = R10_ARG8;
Register RfreeValue = RnewTopValue;
__ bind(Lallocate_shared);
// Check if tlab should be discarded (refill_waste_limit >= free).
__ ld(RtlabWasteLimitValue, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), R16_thread);
__ subf(RfreeValue, RoldTopValue, RendValue);
__ srdi(RfreeValue, RfreeValue, LogHeapWordSize); // in dwords
__ cmpld(CCR0, RtlabWasteLimitValue, RfreeValue);
__ bge(CCR0, Lslow_case);
// Increment waste limit to prevent getting stuck on this slow path.
__ add_const_optimized(RtlabWasteLimitValue, RtlabWasteLimitValue, ThreadLocalAllocBuffer::refill_waste_limit_increment());
__ std(RtlabWasteLimitValue, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), R16_thread);
}
// else: No allocation in the shared eden. // fallthru: __ b(Lslow_case);
}
// else: Always go the slow path.
// --------------------------------------------------------------------------
// slow case
__ bind(Lslow_case);
call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), Rcpool, Rindex);
if (UseTLAB) {
__ b(Ldone);
// --------------------------------------------------------------------------
// Init1: Zero out newly allocated memory.
if (!ZeroTLAB || allow_shared_alloc) {
// Clear object fields.
__ bind(Linitialize_object);
if (!ZeroTLAB) {
// --------------------------------------------------------------------------
// Init1: Zero out newly allocated memory.
// Initialize remaining object fields.
Register Rbase = Rtags;
__ addi(Rinstance_size, Rinstance_size, 7 - (int)sizeof(oopDesc));
@ -3757,13 +3713,10 @@ void TemplateTable::_new() {
// Clear out object skipping header. Takes also care of the zero length case.
__ clear_memory_doubleword(Rbase, Rinstance_size);
// fallthru: __ b(Linitialize_header);
}
// --------------------------------------------------------------------------
// Init2: Initialize the header: mark, klass
__ bind(Linitialize_header);
// Init mark.
if (UseBiasedLocking) {
__ ld(Rscratch, in_bytes(Klass::prototype_header_offset()), RinstanceKlass);
@ -3777,14 +3730,19 @@ void TemplateTable::_new() {
__ store_klass(RallocatedObject, RinstanceKlass, Rscratch); // klass (last for cms)
// Check and trigger dtrace event.
{
SkipIfEqualZero skip_if(_masm, Rscratch, &DTraceAllocProbes);
__ push(atos);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc));
__ pop(atos);
}
SkipIfEqualZero::skip_to_label_if_equal_zero(_masm, Rscratch, &DTraceAllocProbes, Ldone);
__ push(atos);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc));
__ pop(atos);
__ b(Ldone);
}
// --------------------------------------------------------------------------
// slow case
__ bind(Lslow_case);
call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), Rcpool, Rindex);
// continue
__ bind(Ldone);


@ -1,6 +1,6 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2018 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -110,6 +110,10 @@
pcrel_data_format = 3 // Relocation is for the target data of a pc-relative instruction.
};
// This platform has no oops in the code that are not also
// listed in the oop section.
static bool mustIterateImmediateOopsInCode() { return false; }
// Store the new target address into an oop_Relocation cell, if any.
// Return indication if update happened.
static bool update_oop_pool(address begin, address end, address newTarget, CodeBlob* cb);


@ -73,6 +73,11 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark)
}
#undef __
int CompiledStaticCall::to_trampoline_stub_size() {
// SPARC doesn't use trampolines.
return 0;
}
int CompiledStaticCall::to_interp_stub_size() {
// This doesn't need to be accurate but it must be larger than or equal to
// the real size of the stub.


@ -115,7 +115,7 @@ void CodeInstaller::pd_relocate_ForeignCall(NativeInstruction* inst, jlong forei
TRACE_jvmci_3("relocating (foreign call) at " PTR_FORMAT, p2i(inst));
}
void CodeInstaller::pd_relocate_JavaMethod(Handle hotspot_method, jint pc_offset, TRAPS) {
void CodeInstaller::pd_relocate_JavaMethod(CodeBuffer &, Handle hotspot_method, jint pc_offset, TRAPS) {
#ifdef ASSERT
Method* method = NULL;
// we need to check: this might also be an unresolved method


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -38,6 +38,11 @@
format_width = 1
};
public:
// This platform has no oops in the code that are not also
// listed in the oop section.
static bool mustIterateImmediateOopsInCode() { return false; }
//Reconciliation History
// 1.3 97/10/15 15:38:36 relocInfo_i486.hpp


@ -3270,11 +3270,19 @@ void TemplateTable::_new() {
__ br(Assembler::notZero, false, Assembler::pn, slow_case);
__ delayed()->nop();
// allocate the instance
// 1) Try to allocate in the TLAB
// 2) if fail, and the TLAB is not full enough to discard, allocate in the shared Eden
// 3) if the above fails (or is not applicable), go to a slow case
// (creates a new TLAB, etc.)
// Allocate the instance:
// If TLAB is enabled:
// Try to allocate in the TLAB.
// If fails, go to the slow path.
// Else If inline contiguous allocations are enabled:
// Try to allocate in eden.
// If fails due to heap end, go to slow path.
//
// If TLAB is enabled OR inline contiguous is enabled:
// Initialize the allocation.
// Exit.
//
// Go to slow path.
const bool allow_shared_alloc =
Universe::heap()->supports_inline_contig_alloc();
@ -3302,61 +3310,43 @@ void TemplateTable::_new() {
}
__ delayed()->st_ptr(RnewTopValue, G2_thread, in_bytes(JavaThread::tlab_top_offset()));
// Allocation does not fit in the TLAB.
__ ba_short(slow_case);
} else {
// Allocation in the shared Eden
if (allow_shared_alloc) {
// Check if tlab should be discarded (refill_waste_limit >= free)
__ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), RtlabWasteLimitValue);
__ sub(RendValue, RoldTopValue, RfreeValue);
__ srlx(RfreeValue, LogHeapWordSize, RfreeValue);
__ cmp_and_brx_short(RtlabWasteLimitValue, RfreeValue, Assembler::greaterEqualUnsigned, Assembler::pt, slow_case); // tlab waste is small
Register RoldTopValue = G1_scratch;
Register RtopAddr = G3_scratch;
Register RnewTopValue = RallocatedObject;
Register RendValue = Rscratch;
// increment waste limit to prevent getting stuck on this slow path
if (Assembler::is_simm13(ThreadLocalAllocBuffer::refill_waste_limit_increment())) {
__ add(RtlabWasteLimitValue, ThreadLocalAllocBuffer::refill_waste_limit_increment(), RtlabWasteLimitValue);
} else {
// set64 does not use the temp register if the given constant is 32 bit. So
// we can just use any register; using G0 results in the upper 32 bits of
// that value being ignored.
__ set64(ThreadLocalAllocBuffer::refill_waste_limit_increment(), G4_scratch, G0);
__ add(RtlabWasteLimitValue, G4_scratch, RtlabWasteLimitValue);
}
__ st_ptr(RtlabWasteLimitValue, G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()));
} else {
// No allocation in the shared eden.
__ ba_short(slow_case);
__ set((intptr_t)Universe::heap()->top_addr(), RtopAddr);
Label retry;
__ bind(retry);
__ set((intptr_t)Universe::heap()->end_addr(), RendValue);
__ ld_ptr(RendValue, 0, RendValue);
__ ld_ptr(RtopAddr, 0, RoldTopValue);
__ add(RoldTopValue, Roffset, RnewTopValue);
// RnewTopValue contains the top address after the new object
// has been allocated.
__ cmp_and_brx_short(RnewTopValue, RendValue, Assembler::greaterUnsigned, Assembler::pn, slow_case);
__ cas_ptr(RtopAddr, RoldTopValue, RnewTopValue);
// if someone beat us on the allocation, try again, otherwise continue
__ cmp_and_brx_short(RoldTopValue, RnewTopValue, Assembler::notEqual, Assembler::pn, retry);
// bump total bytes allocated by this thread
// RoldTopValue and RtopAddr are dead, so can use G1 and G3
__ incr_allocated_bytes(Roffset, G1_scratch, G3_scratch);
}
}
// Allocation in the shared Eden
if (allow_shared_alloc) {
Register RoldTopValue = G1_scratch;
Register RtopAddr = G3_scratch;
Register RnewTopValue = RallocatedObject;
Register RendValue = Rscratch;
__ set((intptr_t)Universe::heap()->top_addr(), RtopAddr);
Label retry;
__ bind(retry);
__ set((intptr_t)Universe::heap()->end_addr(), RendValue);
__ ld_ptr(RendValue, 0, RendValue);
__ ld_ptr(RtopAddr, 0, RoldTopValue);
__ add(RoldTopValue, Roffset, RnewTopValue);
// RnewTopValue contains the top address after the new object
// has been allocated.
__ cmp_and_brx_short(RnewTopValue, RendValue, Assembler::greaterUnsigned, Assembler::pn, slow_case);
__ cas_ptr(RtopAddr, RoldTopValue, RnewTopValue);
// if someone beat us on the allocation, try again, otherwise continue
__ cmp_and_brx_short(RoldTopValue, RnewTopValue, Assembler::notEqual, Assembler::pn, retry);
// bump total bytes allocated by this thread
// RoldTopValue and RtopAddr are dead, so can use G1 and G3
__ incr_allocated_bytes(Roffset, G1_scratch, G3_scratch);
}
if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
// If UseTLAB or allow_shared_alloc is true, the object has been created above
// and needs to be initialized. Otherwise, skip and go to the slow path.
if (UseTLAB || allow_shared_alloc) {
// clear object fields
__ bind(initialize_object);
__ deccc(Roffset, sizeof(oopDesc));
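
The shared-eden path above is a classic bump-the-pointer allocation guarded by CAS, retried when another thread wins the race. The same loop in portable C++, with top/end standing in for Universe::heap()->top_addr()/end_addr() and a nullptr return standing in for the branch to slow_case:

#include <atomic>
#include <cstddef>

static char* eden_allocate(std::atomic<char*>& top, char* end, size_t size) {
  for (;;) {
    char* old_top = top.load();
    char* new_top = old_top + size;
    if (new_top > end) return nullptr;          // past heap end: slow path
    if (top.compare_exchange_strong(old_top, new_top)) {
      return old_top;                           // we own [old_top, new_top)
    }
    // someone beat us on the allocation: try again
  }
}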


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -3167,6 +3167,89 @@ void Assembler::nop(int i) {
return;
}
if (UseAddressNop && VM_Version::is_zx()) {
//
// Using multi-byte nops "0x0F 0x1F [address]" for ZX
// 1: 0x90
// 2: 0x66 0x90
// 3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding)
// 4: 0x0F 0x1F 0x40 0x00
// 5: 0x0F 0x1F 0x44 0x00 0x00
// 6: 0x66 0x0F 0x1F 0x44 0x00 0x00
// 7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
// 8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
// 9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
// 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
// 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
// The rest of the encoding is ZX specific - don't use consecutive address nops
// 12: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
// 13: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
// 14: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
// 15: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
while (i >= 15) {
// For ZX don't generate consecutive address nops (mix with regular nops)
i -= 15;
emit_int8(0x66); // size prefix
emit_int8(0x66); // size prefix
emit_int8(0x66); // size prefix
addr_nop_8();
emit_int8(0x66); // size prefix
emit_int8(0x66); // size prefix
emit_int8(0x66); // size prefix
emit_int8((unsigned char)0x90);
// nop
}
switch (i) {
case 14:
emit_int8(0x66); // size prefix
case 13:
emit_int8(0x66); // size prefix
case 12:
addr_nop_8();
emit_int8(0x66); // size prefix
emit_int8(0x66); // size prefix
emit_int8(0x66); // size prefix
emit_int8((unsigned char)0x90);
// nop
break;
case 11:
emit_int8(0x66); // size prefix
case 10:
emit_int8(0x66); // size prefix
case 9:
emit_int8(0x66); // size prefix
case 8:
addr_nop_8();
break;
case 7:
addr_nop_7();
break;
case 6:
emit_int8(0x66); // size prefix
case 5:
addr_nop_5();
break;
case 4:
addr_nop_4();
break;
case 3:
// Don't use "0x0F 0x1F 0x00" - need patching safe padding
emit_int8(0x66); // size prefix
case 2:
emit_int8(0x66); // size prefix
case 1:
emit_int8((unsigned char)0x90);
// nop
break;
default:
assert(i == 0, " ");
}
return;
}
// Using nops with size prefixes "0x66 0x90".
// From AMD Optimization Guide:
// 1: 0x90


@ -73,6 +73,11 @@ int CompiledStaticCall::to_interp_stub_size() {
LP64_ONLY(15); // movq (1+1+8); jmp (1+4)
}
int CompiledStaticCall::to_trampoline_stub_size() {
// x86 doesn't use trampolines.
return 0;
}
// Relocation entries for call stub, compiled java to interpreter.
int CompiledStaticCall::reloc_to_interp_stub() {
return 4; // 3 in emit_to_interp_stub + 1 in emit_call


@ -144,7 +144,7 @@ void CodeInstaller::pd_relocate_ForeignCall(NativeInstruction* inst, jlong forei
TRACE_jvmci_3("relocating (foreign call) at " PTR_FORMAT, p2i(inst));
}
void CodeInstaller::pd_relocate_JavaMethod(Handle hotspot_method, jint pc_offset, TRAPS) {
void CodeInstaller::pd_relocate_JavaMethod(CodeBuffer &, Handle hotspot_method, jint pc_offset, TRAPS) {
#ifdef ASSERT
Method* method = NULL;
// we need to check: this might also be an unresolved method


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -40,4 +40,10 @@
#endif
};
public:
// Instruct loadConP of x86_64.ad places oops in code that are not also
// listed in the oop section.
static bool mustIterateImmediateOopsInCode() { return true; }
#endif // CPU_X86_VM_RELOCINFO_X86_HPP


@ -433,7 +433,7 @@ class StubGenerator: public StubCodeGenerator {
//----------------------------------------------------------------------------------------------------
// Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest)
// Support for int32_t Atomic::xchg(int32_t exchange_value, volatile int32_t* dest)
//
// xchg exists as far back as 8086, lock needed for MP only
// Stack layout immediately after call:


@ -611,8 +611,8 @@ class StubGenerator: public StubCodeGenerator {
return start;
}
// Support for jbyte atomic::atomic_cmpxchg(jbyte exchange_value, volatile jbyte* dest,
// jbyte compare_value)
// Support for int8_t atomic::atomic_cmpxchg(int8_t exchange_value, volatile int8_t* dest,
// int8_t compare_value)
//
// Arguments :
// c_rarg0: exchange_value
@ -637,9 +637,9 @@ class StubGenerator: public StubCodeGenerator {
return start;
}
// Support for jlong atomic::atomic_cmpxchg(jlong exchange_value,
// volatile jlong* dest,
// jlong compare_value)
// Support for int64_t atomic::atomic_cmpxchg(int64_t exchange_value,
// volatile int64_t* dest,
// int64_t compare_value)
// Arguments :
// c_rarg0: exchange_value
// c_rarg1: dest
@ -694,8 +694,8 @@ class StubGenerator: public StubCodeGenerator {
// Result:
// *dest += add_value
// return *dest;
address generate_atomic_add_ptr() {
StubCodeMark mark(this, "StubRoutines", "atomic_add_ptr");
address generate_atomic_add_long() {
StubCodeMark mark(this, "StubRoutines", "atomic_add_long");
address start = __ pc();
__ movptr(rax, c_rarg0); // Copy to eax we need a return value anyhow
@ -5015,14 +5015,14 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_catch_exception_entry = generate_catch_exception();
// atomic calls
StubRoutines::_atomic_xchg_entry = generate_atomic_xchg();
StubRoutines::_atomic_xchg_long_entry = generate_atomic_xchg_long();
StubRoutines::_atomic_cmpxchg_entry = generate_atomic_cmpxchg();
StubRoutines::_atomic_cmpxchg_byte_entry = generate_atomic_cmpxchg_byte();
StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
StubRoutines::_atomic_add_entry = generate_atomic_add();
StubRoutines::_atomic_add_ptr_entry = generate_atomic_add_ptr();
StubRoutines::_fence_entry = generate_orderaccess_fence();
StubRoutines::_atomic_xchg_entry = generate_atomic_xchg();
StubRoutines::_atomic_xchg_long_entry = generate_atomic_xchg_long();
StubRoutines::_atomic_cmpxchg_entry = generate_atomic_cmpxchg();
StubRoutines::_atomic_cmpxchg_byte_entry = generate_atomic_cmpxchg_byte();
StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
StubRoutines::_atomic_add_entry = generate_atomic_add();
StubRoutines::_atomic_add_long_entry = generate_atomic_add_long();
StubRoutines::_fence_entry = generate_orderaccess_fence();
// platform dependent
StubRoutines::x86::_get_previous_fp_entry = generate_get_previous_fp();
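
The renamed stub keeps the contract stated in the comment above it: *dest += add_value; return *dest, i.e. fetch-add returning the new value, now named for its 64-bit operand. A portable one-function equivalent using the GCC/Clang __atomic builtin (the stub itself is hand-written assembly):

#include <cstdint>

// GCC/Clang builtin equivalent of the renamed atomic_add_long stub's contract:
// *dest += add_value; return *dest;   (fetch-add returning the NEW value)
static int64_t atomic_add_long(int64_t add_value, volatile int64_t* dest) {
  return __atomic_add_fetch(dest, add_value, __ATOMIC_SEQ_CST);
}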


@ -3851,7 +3851,6 @@ void TemplateTable::_new() {
Label done;
Label initialize_header;
Label initialize_object; // including clearing the fields
Label allocate_shared;
__ get_cpool_and_tags(rcx, rax);
@ -3877,12 +3876,19 @@ void TemplateTable::_new() {
__ testl(rdx, Klass::_lh_instance_slow_path_bit);
__ jcc(Assembler::notZero, slow_case);
// Allocate the instance:
// If TLAB is enabled:
// Try to allocate in the TLAB.
// If fails, go to the slow path.
// Else If inline contiguous allocations are enabled:
// Try to allocate in eden.
// If fails due to heap end, go to slow path.
//
// Allocate the instance
// 1) Try to allocate in the TLAB
// 2) if fail and the object is large allocate in the shared Eden
// 3) if the above fails (or is not applicable), go to a slow case
// (creates a new TLAB, etc.)
// If TLAB is enabled OR inline contiguous is enabled:
// Initialize the allocation.
// Exit.
//
// Go to slow path.
const bool allow_shared_alloc =
Universe::heap()->supports_inline_contig_alloc();
@ -3898,7 +3904,7 @@ void TemplateTable::_new() {
__ movptr(rax, Address(thread, in_bytes(JavaThread::tlab_top_offset())));
__ lea(rbx, Address(rax, rdx, Address::times_1));
__ cmpptr(rbx, Address(thread, in_bytes(JavaThread::tlab_end_offset())));
__ jcc(Assembler::above, allow_shared_alloc ? allocate_shared : slow_case);
__ jcc(Assembler::above, slow_case);
__ movptr(Address(thread, in_bytes(JavaThread::tlab_top_offset())), rbx);
if (ZeroTLAB) {
// the fields have been already cleared
@ -3907,40 +3913,40 @@ void TemplateTable::_new() {
// initialize both the header and fields
__ jmp(initialize_object);
}
}
// Allocation in the shared Eden, if allowed.
//
// rdx: instance size in bytes
if (allow_shared_alloc) {
__ bind(allocate_shared);
ExternalAddress heap_top((address)Universe::heap()->top_addr());
ExternalAddress heap_end((address)Universe::heap()->end_addr());
Label retry;
__ bind(retry);
__ movptr(rax, heap_top);
__ lea(rbx, Address(rax, rdx, Address::times_1));
__ cmpptr(rbx, heap_end);
__ jcc(Assembler::above, slow_case);
// Compare rax, with the top addr, and if still equal, store the new
// top addr in rbx, at the address of the top addr pointer. Sets ZF if was
// equal, and clears it otherwise. Use lock prefix for atomicity on MPs.
} else {
// Allocation in the shared Eden, if allowed.
//
// rax,: object begin
// rbx,: object end
// rdx: instance size in bytes
__ locked_cmpxchgptr(rbx, heap_top);
if (allow_shared_alloc) {
ExternalAddress heap_top((address)Universe::heap()->top_addr());
ExternalAddress heap_end((address)Universe::heap()->end_addr());
// if someone beat us on the allocation, try again, otherwise continue
__ jcc(Assembler::notEqual, retry);
Label retry;
__ bind(retry);
__ movptr(rax, heap_top);
__ lea(rbx, Address(rax, rdx, Address::times_1));
__ cmpptr(rbx, heap_end);
__ jcc(Assembler::above, slow_case);
__ incr_allocated_bytes(thread, rdx, 0);
// Compare rax, with the top addr, and if still equal, store the new
// top addr in rbx, at the address of the top addr pointer. Sets ZF if was
// equal, and clears it otherwise. Use lock prefix for atomicity on MPs.
//
// rax,: object begin
// rbx,: object end
// rdx: instance size in bytes
__ locked_cmpxchgptr(rbx, heap_top);
// if someone beat us on the allocation, try again, otherwise continue
__ jcc(Assembler::notEqual, retry);
__ incr_allocated_bytes(thread, rdx, 0);
}
}
if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
// If UseTLAB or allow_shared_alloc is true, the object has been created above
// and needs to be initialized. Otherwise, skip and go to the slow path.
if (UseTLAB || allow_shared_alloc) {
// The object is initialized before the header. If the object size is
// zero, go directly to the header initialization.
__ bind(initialize_object);


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -628,6 +628,11 @@ void VM_Version::get_processor_features() {
if (UseSSE < 1)
_features &= ~CPU_SSE;
// Since AVX instructions are slower than SSE on some ZX cpus, force UseAVX=0.
if (is_zx() && ((cpu_family() == 6) || (cpu_family() == 7))) {
UseAVX = 0;
}
// first try initial setting and detect what we can support
int use_avx_limit = 0;
if (UseAVX > 0) {
@ -1078,6 +1083,66 @@ void VM_Version::get_processor_features() {
// UseXmmRegToRegMoveAll == true --> movaps(xmm, xmm), movapd(xmm, xmm).
// UseXmmRegToRegMoveAll == false --> movss(xmm, xmm), movsd(xmm, xmm).
if (is_zx()) { // ZX cpus specific settings
if (FLAG_IS_DEFAULT(UseStoreImmI16)) {
UseStoreImmI16 = false; // don't use it on ZX cpus
}
if ((cpu_family() == 6) || (cpu_family() == 7)) {
if (FLAG_IS_DEFAULT(UseAddressNop)) {
// Use it on all ZX cpus
UseAddressNop = true;
}
}
if (FLAG_IS_DEFAULT(UseXmmLoadAndClearUpper)) {
UseXmmLoadAndClearUpper = true; // use movsd on all ZX cpus
}
if (FLAG_IS_DEFAULT(UseXmmRegToRegMoveAll)) {
if (supports_sse3()) {
UseXmmRegToRegMoveAll = true; // use movaps, movapd on new ZX cpus
} else {
UseXmmRegToRegMoveAll = false;
}
}
if (((cpu_family() == 6) || (cpu_family() == 7)) && supports_sse3()) { // new ZX cpus
#ifdef COMPILER2
if (FLAG_IS_DEFAULT(MaxLoopPad)) {
// For new ZX cpus apply the following optimization:
// don't align the beginning of a loop if there are enough instructions
// left (NumberOfLoopInstrToAlign defined in c2_globals.hpp)
// in current fetch line (OptoLoopAlignment) or the padding
// is big (> MaxLoopPad).
// Set MaxLoopPad to 11 for new ZX cpus to reduce number of
// generated NOP instructions. 11 is the largest size of one
// address NOP instruction '0F 1F' (see Assembler::nop(i)).
MaxLoopPad = 11;
}
#endif // COMPILER2
if (FLAG_IS_DEFAULT(UseXMMForArrayCopy)) {
UseXMMForArrayCopy = true; // use SSE2 movq on new ZX cpus
}
if (supports_sse4_2()) { // new ZX cpus
if (FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
UseUnalignedLoadStores = true; // use movdqu on newest ZX cpus
}
}
if (supports_sse4_2()) {
if (FLAG_IS_DEFAULT(UseSSE42Intrinsics)) {
FLAG_SET_DEFAULT(UseSSE42Intrinsics, true);
}
} else {
if (UseSSE42Intrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
warning("SSE4.2 intrinsics require SSE4.2 instructions or higher. Intrinsics will be disabled.");
}
FLAG_SET_DEFAULT(UseSSE42Intrinsics, false);
}
}
if (FLAG_IS_DEFAULT(AllocatePrefetchInstr) && supports_3dnow_prefetch()) {
FLAG_SET_DEFAULT(AllocatePrefetchInstr, 3);
}
}
if( is_amd() ) { // AMD cpus specific settings
if( supports_sse2() && FLAG_IS_DEFAULT(UseAddressNop) ) {
// Use it on new AMD cpus starting from Opteron.
@ -1374,6 +1439,14 @@ void VM_Version::get_processor_features() {
#endif
}
if (is_zx() && ((cpu_family() == 6) || (cpu_family() == 7)) && supports_sse4_2()) {
#ifdef COMPILER2
if (FLAG_IS_DEFAULT(UseFPUForSpilling)) {
FLAG_SET_DEFAULT(UseFPUForSpilling, true);
}
#endif
}
#ifdef _LP64
// Prefetch settings


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -305,6 +305,9 @@ protected:
enum Extended_Family {
// AMD
CPU_FAMILY_AMD_11H = 0x11,
// ZX
CPU_FAMILY_ZX_CORE_F6 = 6,
CPU_FAMILY_ZX_CORE_F7 = 7,
// Intel
CPU_FAMILY_INTEL_CORE = 6,
CPU_MODEL_NEHALEM = 0x1e,
@ -549,6 +552,16 @@ protected:
}
}
// ZX features.
if (is_zx()) {
if (_cpuid_info.ext_cpuid1_ecx.bits.lzcnt_intel != 0)
result |= CPU_LZCNT;
// for ZX, ecx.bits.misalignsse bit (bit 8) indicates support for prefetchw
if (_cpuid_info.ext_cpuid1_ecx.bits.misalignsse != 0) {
result |= CPU_3DNOW_PREFETCH;
}
}
return result;
}
@ -657,6 +670,7 @@ public:
static bool is_P6() { return cpu_family() >= 6; }
static bool is_amd() { assert_is_initialized(); return _cpuid_info.std_vendor_name_0 == 0x68747541; } // 'htuA'
static bool is_intel() { assert_is_initialized(); return _cpuid_info.std_vendor_name_0 == 0x756e6547; } // 'uneG'
static bool is_zx() { assert_is_initialized(); return (_cpuid_info.std_vendor_name_0 == 0x746e6543) || (_cpuid_info.std_vendor_name_0 == 0x68532020); } // 'tneC'||'hS '
static bool is_atom_family() { return ((cpu_family() == 0x06) && ((extended_cpu_model() == 0x36) || (extended_cpu_model() == 0x37) || (extended_cpu_model() == 0x4D))); } //Silvermont and Centerton
static bool is_knights_family() { return ((cpu_family() == 0x06) && ((extended_cpu_model() == 0x57) || (extended_cpu_model() == 0x85))); } // Xeon Phi 3200/5200/7200 and Future Xeon Phi
@ -680,6 +694,15 @@ public:
}
} else if (is_amd()) {
result = (_cpuid_info.ext_cpuid8_ecx.bits.cores_per_cpu + 1);
} else if (is_zx()) {
bool supports_topology = supports_processor_topology();
if (supports_topology) {
result = _cpuid_info.tpl_cpuidB1_ebx.bits.logical_cpus /
_cpuid_info.tpl_cpuidB0_ebx.bits.logical_cpus;
}
if (!supports_topology || result == 0) {
result = (_cpuid_info.dcp_cpuid4_eax.bits.cores_per_cpu + 1);
}
}
return result;
}
@ -688,6 +711,8 @@ public:
uint result = 1;
if (is_intel() && supports_processor_topology()) {
result = _cpuid_info.tpl_cpuidB0_ebx.bits.logical_cpus;
} else if (is_zx() && supports_processor_topology()) {
result = _cpuid_info.tpl_cpuidB0_ebx.bits.logical_cpus;
} else if (_cpuid_info.std_cpuid1_edx.bits.ht != 0) {
if (cpu_family() >= 0x17) {
result = _cpuid_info.ext_cpuid1E_ebx.bits.threads_per_core + 1;
@ -705,6 +730,8 @@ public:
result = (_cpuid_info.dcp_cpuid4_ebx.bits.L1_line_size + 1);
} else if (is_amd()) {
result = _cpuid_info.ext_cpuid5_ecx.bits.L1_line_size;
} else if (is_zx()) {
result = (_cpuid_info.dcp_cpuid4_ebx.bits.L1_line_size + 1);
}
if (result < 32) // not defined ?
result = 32; // 32 bytes by default on x86 and other x64
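
is_zx() compares the first four vendor-string bytes as the little-endian word cpuid leaves in EBX: 0x746e6543 is "Cent" (CentaurHauls) and 0x68532020 is "  Sh" ("  Shanghai  "). A self-contained check of that packing, assuming a little-endian host as on x86:

#include <cassert>
#include <cstdint>
#include <cstring>

// Pack the first 4 vendor-string bytes the way cpuid leaves them in EBX,
// matching _cpuid_info.std_vendor_name_0 on a little-endian host.
static uint32_t vendor_word(const char* s) {
  uint32_t w;
  memcpy(&w, s, 4);
  return w;
}

int main() {
  assert(vendor_word("CentaurHauls") == 0x746e6543);  // 'tneC' read as a word
  assert(vendor_word("  Shanghai  ") == 0x68532020);  // 'hS  ' read as a word
  return 0;
}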


@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -34,4 +34,8 @@
format_width = 1
};
public:
static bool mustIterateImmediateOopsInCode() { return true; }
#endif // CPU_ZERO_VM_RELOCINFO_ZERO_HPP


@ -258,7 +258,7 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_atomic_cmpxchg_byte_entry = ShouldNotCallThisStub();
StubRoutines::_atomic_cmpxchg_long_entry = ShouldNotCallThisStub();
StubRoutines::_atomic_add_entry = ShouldNotCallThisStub();
StubRoutines::_atomic_add_ptr_entry = ShouldNotCallThisStub();
StubRoutines::_atomic_add_long_entry = ShouldNotCallThisStub();
StubRoutines::_fence_entry = ShouldNotCallThisStub();
}


@ -132,8 +132,8 @@ inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
extern "C" {
// defined in bsd_x86.s
jlong _Atomic_cmpxchg_long(jlong, volatile jlong*, jlong, bool);
void _Atomic_move_long(const volatile jlong* src, volatile jlong* dst);
int64_t _Atomic_cmpxchg_long(int64_t, volatile int64_t*, int64_t, bool);
void _Atomic_move_long(const volatile int64_t* src, volatile int64_t* dst);
}
template<>
@ -143,15 +143,15 @@ inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
T compare_value,
cmpxchg_memory_order order) const {
STATIC_ASSERT(8 == sizeof(T));
return cmpxchg_using_helper<jlong>(_Atomic_cmpxchg_long, exchange_value, dest, compare_value);
return cmpxchg_using_helper<int64_t>(_Atomic_cmpxchg_long, exchange_value, dest, compare_value);
}
template<>
template<typename T>
inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
STATIC_ASSERT(8 == sizeof(T));
volatile jlong dest;
_Atomic_move_long(reinterpret_cast<const volatile jlong*>(src), reinterpret_cast<volatile jlong*>(&dest));
volatile int64_t dest;
_Atomic_move_long(reinterpret_cast<const volatile int64_t*>(src), reinterpret_cast<volatile int64_t*>(&dest));
return PrimitiveConversions::cast<T>(dest);
}
@ -160,7 +160,7 @@ template<typename T>
inline void Atomic::PlatformStore<8>::operator()(T store_value,
T volatile* dest) const {
STATIC_ASSERT(8 == sizeof(T));
_Atomic_move_long(reinterpret_cast<const volatile jlong*>(&store_value), reinterpret_cast<volatile jlong*>(dest));
_Atomic_move_long(reinterpret_cast<const volatile int64_t*>(&store_value), reinterpret_cast<volatile int64_t*>(dest));
}
#endif // AMD64

View File

@ -633,10 +633,10 @@ mmx_acs_CopyLeft:
ret
# Support for jlong Atomic::cmpxchg(jlong exchange_value,
# volatile jlong* dest,
# jlong compare_value,
# bool is_MP)
# Support for int64_t Atomic::cmpxchg(int64_t exchange_value,
# volatile int64_t* dest,
# int64_t compare_value,
# bool is_MP)
#
.p2align 4,,15
ELF_TYPE(_Atomic_cmpxchg_long,@function)
@ -658,8 +658,8 @@ SYMBOL(_Atomic_cmpxchg_long):
ret
# Support for jlong Atomic::load and Atomic::store.
# void _Atomic_move_long(const volatile jlong* src, volatile jlong* dst)
# Support for int64_t Atomic::load and Atomic::store.
# void _Atomic_move_long(const volatile int64_t* src, volatile int64_t* dst)
.p2align 4,,15
ELF_TYPE(_Atomic_move_long,@function)
SYMBOL(_Atomic_move_long):


@ -265,8 +265,8 @@ template<>
template<typename T>
inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
STATIC_ASSERT(8 == sizeof(T));
volatile jlong dest;
os::atomic_copy64(reinterpret_cast<const volatile jlong*>(src), reinterpret_cast<volatile jlong*>(&dest));
volatile int64_t dest;
os::atomic_copy64(reinterpret_cast<const volatile int64_t*>(src), reinterpret_cast<volatile int64_t*>(&dest));
return PrimitiveConversions::cast<T>(dest);
}
@ -275,7 +275,7 @@ template<typename T>
inline void Atomic::PlatformStore<8>::operator()(T store_value,
T volatile* dest) const {
STATIC_ASSERT(8 == sizeof(T));
os::atomic_copy64(reinterpret_cast<const volatile jlong*>(&store_value), reinterpret_cast<volatile jlong*>(dest));
os::atomic_copy64(reinterpret_cast<const volatile int64_t*>(&store_value), reinterpret_cast<volatile int64_t*>(dest));
}
#endif // OS_CPU_BSD_ZERO_VM_ATOMIC_BSD_ZERO_HPP


@ -50,7 +50,7 @@ template<typename T>
inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
STATIC_ASSERT(8 == sizeof(T));
return PrimitiveConversions::cast<T>(
(*os::atomic_load_long_func)(reinterpret_cast<const volatile jlong*>(src)));
(*os::atomic_load_long_func)(reinterpret_cast<const volatile int64_t*>(src)));
}
template<>
@ -59,7 +59,7 @@ inline void Atomic::PlatformStore<8>::operator()(T store_value,
T volatile* dest) const {
STATIC_ASSERT(8 == sizeof(T));
(*os::atomic_store_long_func)(
PrimitiveConversions::cast<jlong>(store_value), reinterpret_cast<volatile jlong*>(dest));
PrimitiveConversions::cast<int64_t>(store_value), reinterpret_cast<volatile int64_t*>(dest));
}
#endif
@ -103,7 +103,7 @@ inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) co
: "memory");
return val;
#else
return add_using_helper<jint>(os::atomic_add_func, add_value, dest);
return add_using_helper<int32_t>(os::atomic_add_func, add_value, dest);
#endif
}
@ -146,7 +146,7 @@ inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
: "memory");
return old_val;
#else
return xchg_using_helper<jint>(os::atomic_xchg_func, exchange_value, dest);
return xchg_using_helper<int32_t>(os::atomic_xchg_func, exchange_value, dest);
#endif
}
@ -178,17 +178,17 @@ struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {};
#ifndef AARCH64
inline jint reorder_cmpxchg_func(jint exchange_value,
jint volatile* dest,
jint compare_value) {
inline int32_t reorder_cmpxchg_func(int32_t exchange_value,
int32_t volatile* dest,
int32_t compare_value) {
// Warning: Arguments are swapped to avoid moving them for kernel call
return (*os::atomic_cmpxchg_func)(compare_value, exchange_value, dest);
}
inline jlong reorder_cmpxchg_long_func(jlong exchange_value,
jlong volatile* dest,
jlong compare_value) {
assert(VM_Version::supports_cx8(), "Atomic compare and exchange jlong not supported on this architecture!");
inline int64_t reorder_cmpxchg_long_func(int64_t exchange_value,
int64_t volatile* dest,
int64_t compare_value) {
assert(VM_Version::supports_cx8(), "Atomic compare and exchange int64_t not supported on this architecture!");
// Warning: Arguments are swapped to avoid moving them for kernel call
return (*os::atomic_cmpxchg_long_func)(compare_value, exchange_value, dest);
}
@ -221,7 +221,7 @@ inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
: "memory");
return rv;
#else
return cmpxchg_using_helper<jint>(reorder_cmpxchg_func, exchange_value, dest, compare_value);
return cmpxchg_using_helper<int32_t>(reorder_cmpxchg_func, exchange_value, dest, compare_value);
#endif
}
@ -251,7 +251,7 @@ inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
: "memory");
return rv;
#else
return cmpxchg_using_helper<jlong>(reorder_cmpxchg_long_func, exchange_value, dest, compare_value);
return cmpxchg_using_helper<int64_t>(reorder_cmpxchg_long_func, exchange_value, dest, compare_value);
#endif
}


@ -598,11 +598,11 @@ void os::print_register_info(outputStream *st, const void *context) {
#ifndef AARCH64
typedef jlong cmpxchg_long_func_t(jlong, jlong, volatile jlong*);
typedef int64_t cmpxchg_long_func_t(int64_t, int64_t, volatile int64_t*);
cmpxchg_long_func_t* os::atomic_cmpxchg_long_func = os::atomic_cmpxchg_long_bootstrap;
jlong os::atomic_cmpxchg_long_bootstrap(jlong compare_value, jlong exchange_value, volatile jlong* dest) {
int64_t os::atomic_cmpxchg_long_bootstrap(int64_t compare_value, int64_t exchange_value, volatile int64_t* dest) {
// try to use the stub:
cmpxchg_long_func_t* func = CAST_TO_FN_PTR(cmpxchg_long_func_t*, StubRoutines::atomic_cmpxchg_long_entry());
@ -612,16 +612,16 @@ jlong os::atomic_cmpxchg_long_bootstrap(jlong compare_value, jlong exchange_valu
}
assert(Threads::number_of_threads() == 0, "for bootstrap only");
jlong old_value = *dest;
int64_t old_value = *dest;
if (old_value == compare_value)
*dest = exchange_value;
return old_value;
}
typedef jlong load_long_func_t(const volatile jlong*);
typedef int64_t load_long_func_t(const volatile int64_t*);
load_long_func_t* os::atomic_load_long_func = os::atomic_load_long_bootstrap;
jlong os::atomic_load_long_bootstrap(const volatile jlong* src) {
int64_t os::atomic_load_long_bootstrap(const volatile int64_t* src) {
// try to use the stub:
load_long_func_t* func = CAST_TO_FN_PTR(load_long_func_t*, StubRoutines::atomic_load_long_entry());
@ -631,15 +631,15 @@ jlong os::atomic_load_long_bootstrap(const volatile jlong* src) {
}
assert(Threads::number_of_threads() == 0, "for bootstrap only");
jlong old_value = *src;
int64_t old_value = *src;
return old_value;
}
typedef void store_long_func_t(jlong, volatile jlong*);
typedef void store_long_func_t(int64_t, volatile int64_t*);
store_long_func_t* os::atomic_store_long_func = os::atomic_store_long_bootstrap;
void os::atomic_store_long_bootstrap(jlong val, volatile jlong* dest) {
void os::atomic_store_long_bootstrap(int64_t val, volatile int64_t* dest) {
// try to use the stub:
store_long_func_t* func = CAST_TO_FN_PTR(store_long_func_t*, StubRoutines::atomic_store_long_entry());
@ -652,11 +652,11 @@ void os::atomic_store_long_bootstrap(jlong val, volatile jlong* dest) {
*dest = val;
}
typedef jint atomic_add_func_t(jint add_value, volatile jint *dest);
typedef int32_t atomic_add_func_t(int32_t add_value, volatile int32_t *dest);
atomic_add_func_t * os::atomic_add_func = os::atomic_add_bootstrap;
jint os::atomic_add_bootstrap(jint add_value, volatile jint *dest) {
int32_t os::atomic_add_bootstrap(int32_t add_value, volatile int32_t *dest) {
atomic_add_func_t * func = CAST_TO_FN_PTR(atomic_add_func_t*,
StubRoutines::atomic_add_entry());
if (func != NULL) {
@ -664,16 +664,16 @@ jint os::atomic_add_bootstrap(jint add_value, volatile jint *dest) {
return (*func)(add_value, dest);
}
jint old_value = *dest;
int32_t old_value = *dest;
*dest = old_value + add_value;
return (old_value + add_value);
}
typedef jint atomic_xchg_func_t(jint exchange_value, volatile jint *dest);
typedef int32_t atomic_xchg_func_t(int32_t exchange_value, volatile int32_t *dest);
atomic_xchg_func_t * os::atomic_xchg_func = os::atomic_xchg_bootstrap;
jint os::atomic_xchg_bootstrap(jint exchange_value, volatile jint *dest) {
int32_t os::atomic_xchg_bootstrap(int32_t exchange_value, volatile int32_t *dest) {
atomic_xchg_func_t * func = CAST_TO_FN_PTR(atomic_xchg_func_t*,
StubRoutines::atomic_xchg_entry());
if (func != NULL) {
@ -681,16 +681,16 @@ jint os::atomic_xchg_bootstrap(jint exchange_value, volatile jint *dest) {
return (*func)(exchange_value, dest);
}
jint old_value = *dest;
int32_t old_value = *dest;
*dest = exchange_value;
return (old_value);
}
typedef jint cmpxchg_func_t(jint, jint, volatile jint*);
typedef int32_t cmpxchg_func_t(int32_t, int32_t, volatile int32_t*);
cmpxchg_func_t* os::atomic_cmpxchg_func = os::atomic_cmpxchg_bootstrap;
jint os::atomic_cmpxchg_bootstrap(jint compare_value, jint exchange_value, volatile jint* dest) {
int32_t os::atomic_cmpxchg_bootstrap(int32_t compare_value, int32_t exchange_value, volatile int32_t* dest) {
// try to use the stub:
cmpxchg_func_t* func = CAST_TO_FN_PTR(cmpxchg_func_t*, StubRoutines::atomic_cmpxchg_entry());
@ -700,7 +700,7 @@ jint os::atomic_cmpxchg_bootstrap(jint compare_value, jint exchange_value, volat
}
assert(Threads::number_of_threads() == 0, "for bootstrap only");
jint old_value = *dest;
int32_t old_value = *dest;
if (old_value == compare_value)
*dest = exchange_value;
return old_value;
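
Every os::atomic_*_bootstrap function above follows one pattern: delegate to the StubRoutines entry once the stub generator has produced it, otherwise fall back to plain memory operations, which is safe only while startup is single-threaded (the cmpxchg variants assert Threads::number_of_threads() == 0). A generic sketch with hypothetical stand-ins:

#include <cstdint>

// Hypothetical stand-in for StubRoutines::atomic_add_entry(); returns nullptr
// until the stub generator has run, exactly as during early VM startup.
typedef int32_t add_func_t(int32_t add_value, volatile int32_t* dest);
static add_func_t* stub_entry() { return nullptr; }

static int32_t atomic_add_bootstrap(int32_t add_value, volatile int32_t* dest) {
  if (add_func_t* func = stub_entry()) {
    return (*func)(add_value, dest);   // stub available: delegate
  }
  // No stub yet: startup is single-threaded, so a plain read-modify-write
  // cannot race with other Java threads.
  int32_t old_value = *dest;
  *dest = old_value + add_value;
  return old_value + add_value;
}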


@ -45,35 +45,35 @@
static bool register_code_area(char *low, char *high) { return true; }
#ifndef AARCH64
static jlong (*atomic_cmpxchg_long_func)(jlong compare_value,
jlong exchange_value,
volatile jlong *dest);
static int64_t (*atomic_cmpxchg_long_func)(int64_t compare_value,
int64_t exchange_value,
volatile int64_t *dest);
static jlong (*atomic_load_long_func)(const volatile jlong*);
static int64_t (*atomic_load_long_func)(const volatile int64_t*);
static void (*atomic_store_long_func)(jlong, volatile jlong*);
static void (*atomic_store_long_func)(int64_t, volatile int64_t*);
static jint (*atomic_add_func)(jint add_value, volatile jint *dest);
static int32_t (*atomic_add_func)(int32_t add_value, volatile int32_t *dest);
static jint (*atomic_xchg_func)(jint exchange_value, volatile jint *dest);
static int32_t (*atomic_xchg_func)(int32_t exchange_value, volatile int32_t *dest);
static jint (*atomic_cmpxchg_func)(jint compare_value,
jint exchange_value,
volatile jint *dest);
static int32_t (*atomic_cmpxchg_func)(int32_t compare_value,
int32_t exchange_value,
volatile int32_t *dest);
static jlong atomic_cmpxchg_long_bootstrap(jlong, jlong, volatile jlong*);
static int64_t atomic_cmpxchg_long_bootstrap(int64_t, int64_t, volatile int64_t*);
static jlong atomic_load_long_bootstrap(const volatile jlong*);
static int64_t atomic_load_long_bootstrap(const volatile int64_t*);
static void atomic_store_long_bootstrap(jlong, volatile jlong*);
static void atomic_store_long_bootstrap(int64_t, volatile int64_t*);
static jint atomic_add_bootstrap(jint add_value, volatile jint *dest);
static int32_t atomic_add_bootstrap(int32_t add_value, volatile int32_t *dest);
static jint atomic_xchg_bootstrap(jint exchange_value, volatile jint *dest);
static int32_t atomic_xchg_bootstrap(int32_t exchange_value, volatile int32_t *dest);
static jint atomic_cmpxchg_bootstrap(jint compare_value,
jint exchange_value,
volatile jint *dest);
static int32_t atomic_cmpxchg_bootstrap(int32_t compare_value,
int32_t exchange_value,
volatile int32_t *dest);
#endif // !AARCH64
#endif // OS_CPU_LINUX_ARM_VM_OS_LINUX_ARM_HPP


@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,15 +28,15 @@
//
// NOTE: we are back in class os here, not Linux
//
static jint (*atomic_xchg_func) (jint, volatile jint*);
static jint (*atomic_cmpxchg_func) (jint, volatile jint*, jint);
static jlong (*atomic_cmpxchg_long_func)(jlong, volatile jlong*, jlong);
static jint (*atomic_add_func) (jint, volatile jint*);
static int32_t (*atomic_xchg_func) (int32_t, volatile int32_t*);
static int32_t (*atomic_cmpxchg_func) (int32_t, volatile int32_t*, int32_t);
static int64_t (*atomic_cmpxchg_long_func)(int64_t, volatile int64_t*, int64_t);
static int32_t (*atomic_add_func) (int32_t, volatile int32_t*);
static jint atomic_xchg_bootstrap (jint, volatile jint*);
static jint atomic_cmpxchg_bootstrap (jint, volatile jint*, jint);
static jlong atomic_cmpxchg_long_bootstrap(jlong, volatile jlong*, jlong);
static jint atomic_add_bootstrap (jint, volatile jint*);
static int32_t atomic_xchg_bootstrap (int32_t, volatile int32_t*);
static int32_t atomic_cmpxchg_bootstrap (int32_t, volatile int32_t*, int32_t);
static int64_t atomic_cmpxchg_long_bootstrap(int64_t, volatile int64_t*, int64_t);
static int32_t atomic_add_bootstrap (int32_t, volatile int32_t*);
static void setup_fpu() {}

View File

@ -133,8 +133,8 @@ inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
extern "C" {
// defined in linux_x86.s
jlong _Atomic_cmpxchg_long(jlong, volatile jlong*, jlong);
void _Atomic_move_long(const volatile jlong* src, volatile jlong* dst);
int64_t _Atomic_cmpxchg_long(int64_t, volatile int64_t*, int64_t);
void _Atomic_move_long(const volatile int64_t* src, volatile int64_t* dst);
}
template<>
@ -144,15 +144,15 @@ inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
T compare_value,
cmpxchg_memory_order order) const {
STATIC_ASSERT(8 == sizeof(T));
return cmpxchg_using_helper<jlong>(_Atomic_cmpxchg_long, exchange_value, dest, compare_value);
return cmpxchg_using_helper<int64_t>(_Atomic_cmpxchg_long, exchange_value, dest, compare_value);
}
template<>
template<typename T>
inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
STATIC_ASSERT(8 == sizeof(T));
volatile jlong dest;
_Atomic_move_long(reinterpret_cast<const volatile jlong*>(src), reinterpret_cast<volatile jlong*>(&dest));
volatile int64_t dest;
_Atomic_move_long(reinterpret_cast<const volatile int64_t*>(src), reinterpret_cast<volatile int64_t*>(&dest));
return PrimitiveConversions::cast<T>(dest);
}
@ -161,7 +161,7 @@ template<typename T>
inline void Atomic::PlatformStore<8>::operator()(T store_value,
T volatile* dest) const {
STATIC_ASSERT(8 == sizeof(T));
_Atomic_move_long(reinterpret_cast<const volatile jlong*>(&store_value), reinterpret_cast<volatile jlong*>(dest));
_Atomic_move_long(reinterpret_cast<const volatile int64_t*>(&store_value), reinterpret_cast<volatile int64_t*>(dest));
}
#endif // AMD64

View File

@ -30,67 +30,6 @@
// Implementation of class atomic
#ifdef ARM
/*
* __kernel_cmpxchg
*
* Atomically store newval in *ptr if *ptr is equal to oldval for user space.
* Return zero if *ptr was changed or non-zero if no exchange happened.
* The C flag is also set if *ptr was changed to allow for assembly
* optimization in the calling code.
*
*/
typedef int (__kernel_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *) 0xffff0fc0)
/* Perform an atomic compare and swap: if the current value of `*PTR'
is OLDVAL, then write NEWVAL into `*PTR'. Return the contents of
`*PTR' before the operation.*/
static inline int arm_compare_and_swap(int newval,
volatile int *ptr,
int oldval) {
for (;;) {
int prev = *ptr;
if (prev != oldval)
return prev;
if (__kernel_cmpxchg (prev, newval, ptr) == 0)
// Success.
return prev;
// We failed even though prev == oldval. Try again.
}
}
/* Atomically add an int to memory. */
static inline int arm_add_and_fetch(int add_value, volatile int *ptr) {
for (;;) {
// Loop until a __kernel_cmpxchg succeeds.
int prev = *ptr;
if (__kernel_cmpxchg (prev, prev + add_value, ptr) == 0)
return prev + add_value;
}
}
/* Atomically write VALUE into `*PTR' and returns the previous
contents of `*PTR'. */
static inline int arm_lock_test_and_set(int newval, volatile int *ptr) {
for (;;) {
// Loop until a __kernel_cmpxchg succeeds.
int prev = *ptr;
if (__kernel_cmpxchg (prev, newval, ptr) == 0)
return prev;
}
}
#endif // ARM
template<size_t byte_size>
struct Atomic::PlatformAdd
: Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
@ -105,11 +44,7 @@ inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) co
STATIC_ASSERT(4 == sizeof(I));
STATIC_ASSERT(4 == sizeof(D));
#ifdef ARM
return add_using_helper<int>(arm_add_and_fetch, add_value, dest);
#else
return __sync_add_and_fetch(dest, add_value);
#endif // ARM
}
template<>
@ -117,7 +52,6 @@ template<typename I, typename D>
inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
STATIC_ASSERT(8 == sizeof(I));
STATIC_ASSERT(8 == sizeof(D));
return __sync_add_and_fetch(dest, add_value);
}
@ -126,9 +60,6 @@ template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(4 == sizeof(T));
#ifdef ARM
return xchg_using_helper<int>(arm_lock_test_and_set, exchange_value, dest);
#else
// __sync_lock_test_and_set is a bizarrely named atomic exchange
// operation. Note that some platforms only support this with the
// limitation that the only valid value to store is the immediate
@ -140,7 +71,6 @@ inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
// barrier.
__sync_synchronize();
return result;
#endif // ARM
}
template<>
@ -164,11 +94,7 @@ inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
T compare_value,
cmpxchg_memory_order order) const {
STATIC_ASSERT(4 == sizeof(T));
#ifdef ARM
return cmpxchg_using_helper<int>(arm_compare_and_swap, exchange_value, dest, compare_value);
#else
return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
#endif // ARM
}
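
For reference, the __sync primitives the Zero port now relies on unconditionally are the GCC legacy atomic builtins; __sync_lock_test_and_set is an atomic exchange with only acquire semantics, which is why the code above pairs it with __sync_synchronize, a full barrier. A standalone illustration (GCC/Clang only):

#include <cassert>

int main() {
  int v = 1;
  // Atomic exchange: store 2, return previous value. Acquire barrier only,
  // so a full fence follows to match HotSpot's xchg contract.
  int prev = __sync_lock_test_and_set(&v, 2);
  __sync_synchronize();
  assert(prev == 1 && v == 2);

  // Compare-and-swap: store 3 only if the current value is 2; returns old value.
  int old = __sync_val_compare_and_swap(&v, 2, 3);
  assert(old == 2 && v == 3);

  // Atomic add returning the new value.
  int now = __sync_add_and_fetch(&v, 4);
  assert(now == 7);
  return 0;
}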
template<>
@ -185,8 +111,8 @@ template<>
template<typename T>
inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
STATIC_ASSERT(8 == sizeof(T));
volatile jlong dest;
os::atomic_copy64(reinterpret_cast<const volatile jlong*>(src), reinterpret_cast<volatile jlong*>(&dest));
volatile int64_t dest;
os::atomic_copy64(reinterpret_cast<const volatile int64_t*>(src), reinterpret_cast<volatile int64_t*>(&dest));
return PrimitiveConversions::cast<T>(dest);
}
@ -195,7 +121,7 @@ template<typename T>
inline void Atomic::PlatformStore<8>::operator()(T store_value,
T volatile* dest) const {
STATIC_ASSERT(8 == sizeof(T));
os::atomic_copy64(reinterpret_cast<const volatile jlong*>(&store_value), reinterpret_cast<volatile jlong*>(dest));
os::atomic_copy64(reinterpret_cast<const volatile int64_t*>(&store_value), reinterpret_cast<volatile int64_t*>(dest));
}
#endif // OS_CPU_LINUX_ZERO_VM_ATOMIC_LINUX_ZERO_HPP

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,15 +28,15 @@
//
// NOTE: we are back in class os here, not Solaris
//
static jint (*atomic_xchg_func) (jint, volatile jint*);
static jint (*atomic_cmpxchg_func) (jint, volatile jint*, jint);
static jlong (*atomic_cmpxchg_long_func)(jlong, volatile jlong*, jlong);
static jint (*atomic_add_func) (jint, volatile jint*);
static int32_t (*atomic_xchg_func) (int32_t, volatile int32_t*);
static int32_t (*atomic_cmpxchg_func) (int32_t, volatile int32_t*, int32_t);
static int64_t (*atomic_cmpxchg_long_func)(int64_t, volatile int64_t*, int64_t);
static int32_t (*atomic_add_func) (int32_t, volatile int32_t*);
static jint atomic_xchg_bootstrap (jint, volatile jint*);
static jint atomic_cmpxchg_bootstrap (jint, volatile jint*, jint);
static jlong atomic_cmpxchg_long_bootstrap(jlong, volatile jlong*, jlong);
static jint atomic_add_bootstrap (jint, volatile jint*);
static int32_t atomic_xchg_bootstrap (int32_t, volatile int32_t*);
static int32_t atomic_cmpxchg_bootstrap (int32_t, volatile int32_t*, int32_t);
static int64_t atomic_cmpxchg_long_bootstrap(int64_t, volatile int64_t*, int64_t);
static int32_t atomic_add_bootstrap (int32_t, volatile int32_t*);
static void setup_fpu() {}

View File

@ -28,16 +28,16 @@
// For Sun Studio - implementation is in solaris_x86_64.il.
extern "C" {
jint _Atomic_add(jint add_value, volatile jint* dest);
jlong _Atomic_add_long(jlong add_value, volatile jlong* dest);
int32_t _Atomic_add(int32_t add_value, volatile int32_t* dest);
int64_t _Atomic_add_long(int64_t add_value, volatile int64_t* dest);
jint _Atomic_xchg(jint exchange_value, volatile jint* dest);
jbyte _Atomic_cmpxchg_byte(jbyte exchange_value, volatile jbyte* dest,
jbyte compare_value);
jint _Atomic_cmpxchg(jint exchange_value, volatile jint* dest,
jint compare_value);
jlong _Atomic_cmpxchg_long(jlong exchange_value, volatile jlong* dest,
jlong compare_value);
int32_t _Atomic_xchg(int32_t exchange_value, volatile int32_t* dest);
int8_t _Atomic_cmpxchg_byte(int8_t exchange_value, volatile int8_t* dest,
int8_t compare_value);
int32_t _Atomic_cmpxchg(int32_t exchange_value, volatile int32_t* dest,
int32_t compare_value);
int64_t _Atomic_cmpxchg_long(int64_t exchange_value, volatile int64_t* dest,
int64_t compare_value);
}
template<size_t byte_size>
@ -55,8 +55,8 @@ inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) co
STATIC_ASSERT(4 == sizeof(I));
STATIC_ASSERT(4 == sizeof(D));
return PrimitiveConversions::cast<D>(
_Atomic_add(PrimitiveConversions::cast<jint>(add_value),
reinterpret_cast<jint volatile*>(dest)));
_Atomic_add(PrimitiveConversions::cast<int32_t>(add_value),
reinterpret_cast<int32_t volatile*>(dest)));
}
// Not using add_using_helper; see comment for cmpxchg.
@ -66,8 +66,8 @@ inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) co
STATIC_ASSERT(8 == sizeof(I));
STATIC_ASSERT(8 == sizeof(D));
return PrimitiveConversions::cast<D>(
_Atomic_add_long(PrimitiveConversions::cast<jlong>(add_value),
reinterpret_cast<jlong volatile*>(dest)));
_Atomic_add_long(PrimitiveConversions::cast<int64_t>(add_value),
reinterpret_cast<int64_t volatile*>(dest)));
}
template<>
@ -76,11 +76,11 @@ inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(4 == sizeof(T));
return PrimitiveConversions::cast<T>(
_Atomic_xchg(PrimitiveConversions::cast<jint>(exchange_value),
reinterpret_cast<jint volatile*>(dest)));
_Atomic_xchg(PrimitiveConversions::cast<int32_t>(exchange_value),
reinterpret_cast<int32_t volatile*>(dest)));
}
extern "C" jlong _Atomic_xchg_long(jlong exchange_value, volatile jlong* dest);
extern "C" int64_t _Atomic_xchg_long(int64_t exchange_value, volatile int64_t* dest);
template<>
template<typename T>
@ -88,8 +88,8 @@ inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(8 == sizeof(T));
return PrimitiveConversions::cast<T>(
_Atomic_xchg_long(PrimitiveConversions::cast<jlong>(exchange_value),
reinterpret_cast<jlong volatile*>(dest)));
_Atomic_xchg_long(PrimitiveConversions::cast<int64_t>(exchange_value),
reinterpret_cast<int64_t volatile*>(dest)));
}
// Not using cmpxchg_using_helper here, because some configurations of
@ -106,9 +106,9 @@ inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
cmpxchg_memory_order order) const {
STATIC_ASSERT(1 == sizeof(T));
return PrimitiveConversions::cast<T>(
_Atomic_cmpxchg_byte(PrimitiveConversions::cast<jbyte>(exchange_value),
reinterpret_cast<jbyte volatile*>(dest),
PrimitiveConversions::cast<jbyte>(compare_value)));
_Atomic_cmpxchg_byte(PrimitiveConversions::cast<int8_t>(exchange_value),
reinterpret_cast<int8_t volatile*>(dest),
PrimitiveConversions::cast<int8_t>(compare_value)));
}
template<>
@ -119,9 +119,9 @@ inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
cmpxchg_memory_order order) const {
STATIC_ASSERT(4 == sizeof(T));
return PrimitiveConversions::cast<T>(
_Atomic_cmpxchg(PrimitiveConversions::cast<jint>(exchange_value),
reinterpret_cast<jint volatile*>(dest),
PrimitiveConversions::cast<jint>(compare_value)));
_Atomic_cmpxchg(PrimitiveConversions::cast<int32_t>(exchange_value),
reinterpret_cast<int32_t volatile*>(dest),
PrimitiveConversions::cast<int32_t>(compare_value)));
}
template<>
@ -132,9 +132,9 @@ inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
cmpxchg_memory_order order) const {
STATIC_ASSERT(8 == sizeof(T));
return PrimitiveConversions::cast<T>(
_Atomic_cmpxchg_long(PrimitiveConversions::cast<jlong>(exchange_value),
reinterpret_cast<jlong volatile*>(dest),
PrimitiveConversions::cast<jlong>(compare_value)));
_Atomic_cmpxchg_long(PrimitiveConversions::cast<int64_t>(exchange_value),
reinterpret_cast<int64_t volatile*>(dest),
PrimitiveConversions::cast<int64_t>(compare_value)));
}
#endif // OS_CPU_SOLARIS_X86_VM_ATOMIC_SOLARIS_X86_HPP
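
The pattern throughout this file: the templated Atomic operations accept any 4- or 8-byte type T, funnel it through PrimitiveConversions::cast to the fixed-width integer the Sun Studio .il helpers are declared with, and cast the result back. A rough standalone equivalent of that round-trip, with a hypothetical (non-atomic) helper standing in for _Atomic_cmpxchg:

#include <cstdint>
#include <cstring>

// Stand-in for the extern "C" assembly helper; here just a plain CAS, not atomic.
static int32_t helper_cmpxchg(int32_t ex, volatile int32_t* dest, int32_t cmp) {
  int32_t old = *dest;
  if (old == cmp) *dest = ex;
  return old;
}

// Minimal PrimitiveConversions::cast equivalent: a size-checked bit copy.
template<typename To, typename From>
static To bit_cast(From f) {
  static_assert(sizeof(To) == sizeof(From), "sizes must match");
  To t;
  std::memcpy(&t, &f, sizeof(To));
  return t;
}

template<typename T>
T cmpxchg4(T ex, T volatile* dest, T cmp) {
  static_assert(4 == sizeof(T), "4-byte specialization");
  return bit_cast<T>(
      helper_cmpxchg(bit_cast<int32_t>(ex),
                     reinterpret_cast<int32_t volatile*>(dest),
                     bit_cast<int32_t>(cmp)));
}

int main() {
  volatile float slot = 1.0f;          // any 4-byte type works, e.g. float
  cmpxchg4(2.0f, &slot, 1.0f);
  return slot == 2.0f ? 0 : 1;
}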

View File

@ -904,12 +904,12 @@ void os::Solaris::init_thread_fpu_state(void) {
// until initialization is complete.
// TODO - replace with .il implementation when compiler supports it.
typedef jint xchg_func_t (jint, volatile jint*);
typedef jint cmpxchg_func_t (jint, volatile jint*, jint);
typedef jlong cmpxchg_long_func_t(jlong, volatile jlong*, jlong);
typedef jint add_func_t (jint, volatile jint*);
typedef int32_t xchg_func_t (int32_t, volatile int32_t*);
typedef int32_t cmpxchg_func_t (int32_t, volatile int32_t*, int32_t);
typedef int64_t cmpxchg_long_func_t(int64_t, volatile int64_t*, int64_t);
typedef int32_t add_func_t (int32_t, volatile int32_t*);
jint os::atomic_xchg_bootstrap(jint exchange_value, volatile jint* dest) {
int32_t os::atomic_xchg_bootstrap(int32_t exchange_value, volatile int32_t* dest) {
// try to use the stub:
xchg_func_t* func = CAST_TO_FN_PTR(xchg_func_t*, StubRoutines::atomic_xchg_entry());
@ -919,12 +919,12 @@ jint os::atomic_xchg_bootstrap(jint exchange_value, volatile jint* dest) {
}
assert(Threads::number_of_threads() == 0, "for bootstrap only");
jint old_value = *dest;
int32_t old_value = *dest;
*dest = exchange_value;
return old_value;
}
jint os::atomic_cmpxchg_bootstrap(jint exchange_value, volatile jint* dest, jint compare_value) {
int32_t os::atomic_cmpxchg_bootstrap(int32_t exchange_value, volatile int32_t* dest, int32_t compare_value) {
// try to use the stub:
cmpxchg_func_t* func = CAST_TO_FN_PTR(cmpxchg_func_t*, StubRoutines::atomic_cmpxchg_entry());
@ -934,13 +934,13 @@ jint os::atomic_cmpxchg_bootstrap(jint exchange_value, volatile jint* dest, jint
}
assert(Threads::number_of_threads() == 0, "for bootstrap only");
jint old_value = *dest;
int32_t old_value = *dest;
if (old_value == compare_value)
*dest = exchange_value;
return old_value;
}
jlong os::atomic_cmpxchg_long_bootstrap(jlong exchange_value, volatile jlong* dest, jlong compare_value) {
int64_t os::atomic_cmpxchg_long_bootstrap(int64_t exchange_value, volatile int64_t* dest, int64_t compare_value) {
// try to use the stub:
cmpxchg_long_func_t* func = CAST_TO_FN_PTR(cmpxchg_long_func_t*, StubRoutines::atomic_cmpxchg_long_entry());
@ -950,13 +950,13 @@ jlong os::atomic_cmpxchg_long_bootstrap(jlong exchange_value, volatile jlong* de
}
assert(Threads::number_of_threads() == 0, "for bootstrap only");
jlong old_value = *dest;
int64_t old_value = *dest;
if (old_value == compare_value)
*dest = exchange_value;
return old_value;
}
jint os::atomic_add_bootstrap(jint add_value, volatile jint* dest) {
int32_t os::atomic_add_bootstrap(int32_t add_value, volatile int32_t* dest) {
// try to use the stub:
add_func_t* func = CAST_TO_FN_PTR(add_func_t*, StubRoutines::atomic_add_entry());

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -31,15 +31,15 @@
#ifdef AMD64
static void setup_fpu() {}
#else
static jint (*atomic_xchg_func) (jint, volatile jint*);
static jint (*atomic_cmpxchg_func) (jint, volatile jint*, jint);
static jlong (*atomic_cmpxchg_long_func)(jlong, volatile jlong*, jlong);
static jint (*atomic_add_func) (jint, volatile jint*);
static int32_t (*atomic_xchg_func) (int32_t, volatile int32_t*);
static int32_t (*atomic_cmpxchg_func) (int32_t, volatile int32_t*, int32_t);
static int64_t (*atomic_cmpxchg_long_func)(int64_t, volatile int64_t*, int64_t);
static int32_t (*atomic_add_func) (int32_t, volatile int32_t*);
static jint atomic_xchg_bootstrap (jint, volatile jint*);
static jint atomic_cmpxchg_bootstrap (jint, volatile jint*, jint);
static jlong atomic_cmpxchg_long_bootstrap(jlong, volatile jlong*, jlong);
static jint atomic_add_bootstrap (jint, volatile jint*);
static int32_t atomic_xchg_bootstrap (int32_t, volatile int32_t*);
static int32_t atomic_cmpxchg_bootstrap (int32_t, volatile int32_t*, int32_t);
static int64_t atomic_cmpxchg_long_bootstrap(int64_t, volatile int64_t*, int64_t);
static int32_t atomic_add_bootstrap (int32_t, volatile int32_t*);
static void setup_fpu();
#endif // AMD64

View File

@ -54,13 +54,13 @@ struct Atomic::PlatformAdd
template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
return add_using_helper<jint>(os::atomic_add_func, add_value, dest);
return add_using_helper<int32_t>(os::atomic_add_func, add_value, dest);
}
template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
return add_using_helper<intptr_t>(os::atomic_add_ptr_func, add_value, dest);
return add_using_helper<int64_t>(os::atomic_add_long_func, add_value, dest);
}
#define DEFINE_STUB_XCHG(ByteSize, StubType, StubName) \
@ -72,8 +72,8 @@ inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) co
return xchg_using_helper<StubType>(StubName, exchange_value, dest); \
}
DEFINE_STUB_XCHG(4, jint, os::atomic_xchg_func)
DEFINE_STUB_XCHG(8, jlong, os::atomic_xchg_long_func)
DEFINE_STUB_XCHG(4, int32_t, os::atomic_xchg_func)
DEFINE_STUB_XCHG(8, int64_t, os::atomic_xchg_long_func)
#undef DEFINE_STUB_XCHG
@ -88,9 +88,9 @@ DEFINE_STUB_XCHG(8, jlong, os::atomic_xchg_long_func)
return cmpxchg_using_helper<StubType>(StubName, exchange_value, dest, compare_value); \
}
DEFINE_STUB_CMPXCHG(1, jbyte, os::atomic_cmpxchg_byte_func)
DEFINE_STUB_CMPXCHG(4, jint, os::atomic_cmpxchg_func)
DEFINE_STUB_CMPXCHG(8, jlong, os::atomic_cmpxchg_long_func)
DEFINE_STUB_CMPXCHG(1, int8_t, os::atomic_cmpxchg_byte_func)
DEFINE_STUB_CMPXCHG(4, int32_t, os::atomic_cmpxchg_func)
DEFINE_STUB_CMPXCHG(8, int64_t, os::atomic_cmpxchg_long_func)
#undef DEFINE_STUB_CMPXCHG
@ -162,10 +162,10 @@ inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
T compare_value,
cmpxchg_memory_order order) const {
STATIC_ASSERT(8 == sizeof(T));
jint ex_lo = (jint)exchange_value;
jint ex_hi = *( ((jint*)&exchange_value) + 1 );
jint cmp_lo = (jint)compare_value;
jint cmp_hi = *( ((jint*)&compare_value) + 1 );
int32_t ex_lo = (int32_t)exchange_value;
int32_t ex_hi = *( ((int32_t*)&exchange_value) + 1 );
int32_t cmp_lo = (int32_t)compare_value;
int32_t cmp_hi = *( ((int32_t*)&compare_value) + 1 );
__asm {
push ebx
push edi

View File

@ -218,17 +218,17 @@ void os::initialize_thread(Thread* thr) {
// Atomics and Stub Functions
typedef jint xchg_func_t (jint, volatile jint*);
typedef intptr_t xchg_long_func_t (jlong, volatile jlong*);
typedef jint cmpxchg_func_t (jint, volatile jint*, jint);
typedef jbyte cmpxchg_byte_func_t (jbyte, volatile jbyte*, jbyte);
typedef jlong cmpxchg_long_func_t (jlong, volatile jlong*, jlong);
typedef jint add_func_t (jint, volatile jint*);
typedef intptr_t add_ptr_func_t (intptr_t, volatile intptr_t*);
typedef int32_t xchg_func_t (int32_t, volatile int32_t*);
typedef int64_t xchg_long_func_t (int64_t, volatile int64_t*);
typedef int32_t cmpxchg_func_t (int32_t, volatile int32_t*, int32_t);
typedef int8_t cmpxchg_byte_func_t (int8_t, volatile int8_t*, int8_t);
typedef int64_t cmpxchg_long_func_t (int64_t, volatile int64_t*, int64_t);
typedef int32_t add_func_t (int32_t, volatile int32_t*);
typedef int64_t add_long_func_t (int64_t, volatile int64_t*);
#ifdef AMD64
jint os::atomic_xchg_bootstrap(jint exchange_value, volatile jint* dest) {
int32_t os::atomic_xchg_bootstrap(int32_t exchange_value, volatile int32_t* dest) {
// try to use the stub:
xchg_func_t* func = CAST_TO_FN_PTR(xchg_func_t*, StubRoutines::atomic_xchg_entry());
@ -238,12 +238,12 @@ jint os::atomic_xchg_bootstrap(jint exchange_value, volatile jint* dest) {
}
assert(Threads::number_of_threads() == 0, "for bootstrap only");
jint old_value = *dest;
int32_t old_value = *dest;
*dest = exchange_value;
return old_value;
}
intptr_t os::atomic_xchg_long_bootstrap(jlong exchange_value, volatile jlong* dest) {
int64_t os::atomic_xchg_long_bootstrap(int64_t exchange_value, volatile int64_t* dest) {
// try to use the stub:
xchg_long_func_t* func = CAST_TO_FN_PTR(xchg_long_func_t*, StubRoutines::atomic_xchg_long_entry());
@ -253,13 +253,13 @@ intptr_t os::atomic_xchg_long_bootstrap(jlong exchange_value, volatile jlong* de
}
assert(Threads::number_of_threads() == 0, "for bootstrap only");
intptr_t old_value = *dest;
int64_t old_value = *dest;
*dest = exchange_value;
return old_value;
}
jint os::atomic_cmpxchg_bootstrap(jint exchange_value, volatile jint* dest, jint compare_value) {
int32_t os::atomic_cmpxchg_bootstrap(int32_t exchange_value, volatile int32_t* dest, int32_t compare_value) {
// try to use the stub:
cmpxchg_func_t* func = CAST_TO_FN_PTR(cmpxchg_func_t*, StubRoutines::atomic_cmpxchg_entry());
@ -269,13 +269,13 @@ jint os::atomic_cmpxchg_bootstrap(jint exchange_value, volatile jint* dest, jint
}
assert(Threads::number_of_threads() == 0, "for bootstrap only");
jint old_value = *dest;
int32_t old_value = *dest;
if (old_value == compare_value)
*dest = exchange_value;
return old_value;
}
jbyte os::atomic_cmpxchg_byte_bootstrap(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value) {
int8_t os::atomic_cmpxchg_byte_bootstrap(int8_t exchange_value, volatile int8_t* dest, int8_t compare_value) {
// try to use the stub:
cmpxchg_byte_func_t* func = CAST_TO_FN_PTR(cmpxchg_byte_func_t*, StubRoutines::atomic_cmpxchg_byte_entry());
@ -285,7 +285,7 @@ jbyte os::atomic_cmpxchg_byte_bootstrap(jbyte exchange_value, volatile jbyte* de
}
assert(Threads::number_of_threads() == 0, "for bootstrap only");
jbyte old_value = *dest;
int8_t old_value = *dest;
if (old_value == compare_value)
*dest = exchange_value;
return old_value;
@ -293,7 +293,7 @@ jbyte os::atomic_cmpxchg_byte_bootstrap(jbyte exchange_value, volatile jbyte* de
#endif // AMD64
jlong os::atomic_cmpxchg_long_bootstrap(jlong exchange_value, volatile jlong* dest, jlong compare_value) {
int64_t os::atomic_cmpxchg_long_bootstrap(int64_t exchange_value, volatile int64_t* dest, int64_t compare_value) {
// try to use the stub:
cmpxchg_long_func_t* func = CAST_TO_FN_PTR(cmpxchg_long_func_t*, StubRoutines::atomic_cmpxchg_long_entry());
@ -303,7 +303,7 @@ jlong os::atomic_cmpxchg_long_bootstrap(jlong exchange_value, volatile jlong* de
}
assert(Threads::number_of_threads() == 0, "for bootstrap only");
jlong old_value = *dest;
int64_t old_value = *dest;
if (old_value == compare_value)
*dest = exchange_value;
return old_value;
@ -311,7 +311,7 @@ jlong os::atomic_cmpxchg_long_bootstrap(jlong exchange_value, volatile jlong* de
#ifdef AMD64
jint os::atomic_add_bootstrap(jint add_value, volatile jint* dest) {
int32_t os::atomic_add_bootstrap(int32_t add_value, volatile int32_t* dest) {
// try to use the stub:
add_func_t* func = CAST_TO_FN_PTR(add_func_t*, StubRoutines::atomic_add_entry());
@ -324,12 +324,12 @@ jint os::atomic_add_bootstrap(jint add_value, volatile jint* dest) {
return (*dest) += add_value;
}
intptr_t os::atomic_add_ptr_bootstrap(intptr_t add_value, volatile intptr_t* dest) {
int64_t os::atomic_add_long_bootstrap(int64_t add_value, volatile int64_t* dest) {
// try to use the stub:
add_ptr_func_t* func = CAST_TO_FN_PTR(add_ptr_func_t*, StubRoutines::atomic_add_ptr_entry());
add_long_func_t* func = CAST_TO_FN_PTR(add_long_func_t*, StubRoutines::atomic_add_long_entry());
if (func != NULL) {
os::atomic_add_ptr_func = func;
os::atomic_add_long_func = func;
return (*func)(add_value, dest);
}
assert(Threads::number_of_threads() == 0, "for bootstrap only");
@ -342,7 +342,7 @@ xchg_long_func_t* os::atomic_xchg_long_func = os::atomic_xchg_long_bootstr
cmpxchg_func_t* os::atomic_cmpxchg_func = os::atomic_cmpxchg_bootstrap;
cmpxchg_byte_func_t* os::atomic_cmpxchg_byte_func = os::atomic_cmpxchg_byte_bootstrap;
add_func_t* os::atomic_add_func = os::atomic_add_bootstrap;
add_ptr_func_t* os::atomic_add_ptr_func = os::atomic_add_ptr_bootstrap;
add_long_func_t* os::atomic_add_long_func = os::atomic_add_long_bootstrap;
#endif // AMD64

View File

@ -29,32 +29,32 @@
// NOTE: we are back in class os here, not win32
//
#ifdef AMD64
static jint (*atomic_xchg_func) (jint, volatile jint*);
static intptr_t (*atomic_xchg_long_func) (jlong, volatile jlong*);
static int32_t (*atomic_xchg_func) (int32_t, volatile int32_t*);
static int64_t (*atomic_xchg_long_func) (int64_t, volatile int64_t*);
static jint (*atomic_cmpxchg_func) (jint, volatile jint*, jint);
static jbyte (*atomic_cmpxchg_byte_func) (jbyte, volatile jbyte*, jbyte);
static jlong (*atomic_cmpxchg_long_func) (jlong, volatile jlong*, jlong);
static int32_t (*atomic_cmpxchg_func) (int32_t, volatile int32_t*, int32_t);
static int8_t (*atomic_cmpxchg_byte_func) (int8_t, volatile int8_t*, int8_t);
static int64_t (*atomic_cmpxchg_long_func) (int64_t, volatile int64_t*, int64_t);
static jint (*atomic_add_func) (jint, volatile jint*);
static intptr_t (*atomic_add_ptr_func) (intptr_t, volatile intptr_t*);
static int32_t (*atomic_add_func) (int32_t, volatile int32_t*);
static int64_t (*atomic_add_long_func) (int64_t, volatile int64_t*);
static jint atomic_xchg_bootstrap (jint, volatile jint*);
static intptr_t atomic_xchg_long_bootstrap (jlong, volatile jlong*);
static int32_t atomic_xchg_bootstrap (int32_t, volatile int32_t*);
static int64_t atomic_xchg_long_bootstrap (int64_t, volatile int64_t*);
static jint atomic_cmpxchg_bootstrap (jint, volatile jint*, jint);
static jbyte atomic_cmpxchg_byte_bootstrap(jbyte, volatile jbyte*, jbyte);
static int32_t atomic_cmpxchg_bootstrap (int32_t, volatile int32_t*, int32_t);
static int8_t atomic_cmpxchg_byte_bootstrap(int8_t, volatile int8_t*, int8_t);
#else
static jlong (*atomic_cmpxchg_long_func) (jlong, volatile jlong*, jlong);
static int64_t (*atomic_cmpxchg_long_func) (int64_t, volatile int64_t*, int64_t);
#endif // AMD64
static jlong atomic_cmpxchg_long_bootstrap(jlong, volatile jlong*, jlong);
static int64_t atomic_cmpxchg_long_bootstrap(int64_t, volatile int64_t*, int64_t);
#ifdef AMD64
static jint atomic_add_bootstrap (jint, volatile jint*);
static intptr_t atomic_add_ptr_bootstrap (intptr_t, volatile intptr_t*);
static int32_t atomic_add_bootstrap (int32_t, volatile int32_t*);
static int64_t atomic_add_long_bootstrap (int64_t, volatile int64_t*);
#endif // AMD64
static void setup_fpu();

View File

@ -802,6 +802,14 @@ void ClassLoader::setup_boot_search_path(const char *class_path) {
int end = 0;
bool set_base_piece = true;
#if INCLUDE_CDS
if (DumpSharedSpaces) {
if (!Arguments::has_jimage()) {
vm_exit_during_initialization("CDS is not supported in exploded JDK build", NULL);
}
}
#endif
// Iterate over class path entries
for (int start = 0; start < len; start = end) {
while (class_path[end] && class_path[end] != os::path_separator()[0]) {

View File

@ -26,6 +26,7 @@
#include "classfile/bytecodeAssembler.hpp"
#include "classfile/defaultMethods.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.hpp"
@ -683,10 +684,11 @@ class FindMethodsByErasedSig : public HierarchyVisitor<FindMethodsByErasedSig> {
Symbol* _method_name;
Symbol* _method_signature;
StatefulMethodFamily* _family;
bool _cur_class_is_interface;
public:
FindMethodsByErasedSig(Symbol* name, Symbol* signature) :
_method_name(name), _method_signature(signature),
FindMethodsByErasedSig(Symbol* name, Symbol* signature, bool is_interf) :
_method_name(name), _method_signature(signature), _cur_class_is_interface(is_interf),
_family(NULL) {}
void get_discovered_family(MethodFamily** family) {
@ -709,14 +711,17 @@ class FindMethodsByErasedSig : public HierarchyVisitor<FindMethodsByErasedSig> {
InstanceKlass* iklass = current_class();
Method* m = iklass->find_method(_method_name, _method_signature);
// private interface methods are not candidates for default methods
// invokespecial to private interface methods doesn't use default method logic
// private class methods are not candidates for default methods,
// private methods do not override default methods, so need to perform
// default method inheritance without including private methods
// The overpasses are your supertypes' errors, we do not include them
// future: take access controls into account for superclass methods
if (m != NULL && !m->is_static() && !m->is_overpass() && !m->is_private()) {
// Private interface methods are not candidates for default methods.
// invokespecial to private interface methods doesn't use default method logic.
// Private class methods are not candidates for default methods.
// Private methods do not override default methods, so need to perform
// default method inheritance without including private methods.
// The overpasses are your supertypes' errors; we do not include them.
// Non-public methods in java.lang.Object are not candidates for default
// methods.
// Future: take access controls into account for superclass methods
if (m != NULL && !m->is_static() && !m->is_overpass() && !m->is_private() &&
(!_cur_class_is_interface || !SystemDictionary::is_nonpublic_Object_method(m))) {
if (_family == NULL) {
_family = new StatefulMethodFamily();
}
@ -726,8 +731,8 @@ class FindMethodsByErasedSig : public HierarchyVisitor<FindMethodsByErasedSig> {
scope->add_mark(restorer);
} else {
// This is the rule that methods in classes "win" (bad word) over
// methods in interfaces. This works because of single inheritance
// private methods in classes do not "win", they will be found
// methods in interfaces. This works because of single inheritance.
// Private methods in classes do not "win", they will be found
// first on searching, but overriding for invokevirtual needs
// to find default method candidates for the same signature
_family->set_target_if_empty(m);
@ -745,10 +750,10 @@ static void create_defaults_and_exceptions(
static void generate_erased_defaults(
InstanceKlass* klass, GrowableArray<EmptyVtableSlot*>* empty_slots,
EmptyVtableSlot* slot, TRAPS) {
EmptyVtableSlot* slot, bool is_intf, TRAPS) {
// sets up a set of methods with the same exact erased signature
FindMethodsByErasedSig visitor(slot->name(), slot->signature());
FindMethodsByErasedSig visitor(slot->name(), slot->signature(), is_intf);
visitor.run(klass);
MethodFamily* family;
@ -817,7 +822,7 @@ void DefaultMethods::generate_default_methods(
slot->print_on(&ls);
ls.cr();
}
generate_erased_defaults(klass, empty_slots, slot, CHECK);
generate_erased_defaults(klass, empty_slots, slot, klass->is_interface(), CHECK);
}
log_debug(defaultmethods)("Creating defaults and overpasses...");
create_defaults_and_exceptions(empty_slots, klass, CHECK);

View File

@ -649,6 +649,12 @@ public:
static bool is_platform_class_loader(oop class_loader);
static void clear_invoke_method_table();
// Returns TRUE if the method is a non-public member of class java.lang.Object.
static bool is_nonpublic_Object_method(Method* m) {
assert(m != NULL, "Unexpected NULL Method*");
return !m->is_public() && m->method_holder() == SystemDictionary::Object_klass();
}
protected:
static InstanceKlass* find_shared_class(Symbol* class_name);

View File

@ -344,6 +344,7 @@ class CompiledStaticCall : public ResourceObj {
// Code
static address emit_to_interp_stub(CodeBuffer &cbuf, address mark = NULL);
static int to_interp_stub_size();
static int to_trampoline_stub_size();
static int reloc_to_interp_stub();
static void emit_to_aot_stub(CodeBuffer &cbuf, address mark = NULL);
static int to_aot_stub_size();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1485,16 +1485,18 @@ bool nmethod::do_unloading_scopes(BoolObjectClosure* is_alive, bool unloading_oc
bool nmethod::do_unloading_oops(address low_boundary, BoolObjectClosure* is_alive, bool unloading_occurred) {
// Compiled code
{
RelocIterator iter(this, low_boundary);
while (iter.next()) {
if (iter.type() == relocInfo::oop_type) {
if (unload_if_dead_at(&iter, is_alive, unloading_occurred)) {
return true;
// Prevent extra code cache walk for platforms that don't have immediate oops.
if (relocInfo::mustIterateImmediateOopsInCode()) {
RelocIterator iter(this, low_boundary);
while (iter.next()) {
if (iter.type() == relocInfo::oop_type) {
if (unload_if_dead_at(&iter, is_alive, unloading_occurred)) {
return true;
}
}
}
}
}
return do_unloading_scopes(is_alive, unloading_occurred);
}
@ -1584,18 +1586,21 @@ void nmethod::oops_do(OopClosure* f, bool allow_zombie) {
// (See comment above.)
}
RelocIterator iter(this, low_boundary);
// Prevent extra code cache walk for platforms that don't have immediate oops.
if (relocInfo::mustIterateImmediateOopsInCode()) {
RelocIterator iter(this, low_boundary);
while (iter.next()) {
if (iter.type() == relocInfo::oop_type ) {
oop_Relocation* r = iter.oop_reloc();
// In this loop, we must only follow those oops directly embedded in
// the code. Other oops (oop_index>0) are seen as part of scopes_oops.
assert(1 == (r->oop_is_immediate()) +
(r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
"oop must be found in exactly one place");
if (r->oop_is_immediate() && r->oop_value() != NULL) {
f->do_oop(r->oop_addr());
while (iter.next()) {
if (iter.type() == relocInfo::oop_type ) {
oop_Relocation* r = iter.oop_reloc();
// In this loop, we must only follow those oops directly embedded in
// the code. Other oops (oop_index>0) are seen as part of scopes_oops.
assert(1 == (r->oop_is_immediate()) +
(r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
"oop must be found in exactly one place");
if (r->oop_is_immediate() && r->oop_value() != NULL) {
f->do_oop(r->oop_addr());
}
}
}
}
@ -1620,7 +1625,7 @@ bool nmethod::test_set_oops_do_mark() {
assert(nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");
if (_oops_do_mark_link == NULL) {
// Claim this nmethod for this thread to mark.
if (Atomic::cmpxchg(NMETHOD_SENTINEL, &_oops_do_mark_link, (nmethod*)NULL) == NULL) {
if (Atomic::replace_if_null(NMETHOD_SENTINEL, &_oops_do_mark_link)) {
// Atomically append this nmethod (now claimed) to the head of the list:
nmethod* observed_mark_nmethods = _oops_do_mark_nmethods;
for (;;) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -910,6 +910,10 @@ class oop_Relocation : public DataRelocation {
}
// an oop in the instruction stream
static RelocationHolder spec_for_immediate() {
// If no immediate oops are generated, we can skip some walks over nmethods.
// Assert that they don't get generated accidently!
assert(relocInfo::mustIterateImmediateOopsInCode(),
"Must return true so we will search for oops as roots etc. in the code.");
const int oop_index = 0;
const int offset = 0; // if you want an offset, use the oop pool
RelocationHolder rh = newHolder();

View File

@ -77,7 +77,7 @@ GCTaskTimeStamp* GCTaskThread::time_stamp_at(uint index) {
if (_time_stamps == NULL) {
// We allocate the _time_stamps array lazily since logging can be enabled dynamically
GCTaskTimeStamp* time_stamps = NEW_C_HEAP_ARRAY(GCTaskTimeStamp, GCTaskTimeStampEntries, mtGC);
if (Atomic::cmpxchg(time_stamps, &_time_stamps, (GCTaskTimeStamp*)NULL) != NULL) {
if (!Atomic::replace_if_null(time_stamps, &_time_stamps)) {
// Someone already setup the time stamps
FREE_C_HEAP_ARRAY(GCTaskTimeStamp, time_stamps);
}
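
The recurring Atomic::cmpxchg(value, dest, (T*)NULL) == NULL tests in this commit all become Atomic::replace_if_null, which publishes a pointer only if the slot is still NULL and reports whether the caller won the race (the loser here frees its copy). A sketch of the equivalence, using std::atomic for a self-contained illustration:

#include <atomic>
#include <cassert>

// Sketch: replace_if_null(value, dest) is a CAS against NULL that reports success.
template<typename T>
bool replace_if_null(T* value, std::atomic<T*>& dest) {
  T* expected = nullptr;
  return dest.compare_exchange_strong(expected, value);
}

int main() {
  std::atomic<int*> slot{nullptr};
  int x = 42, y = 7;
  assert(replace_if_null(&x, slot));    // slot was NULL: we won, slot == &x
  assert(!replace_if_null(&y, slot));   // already claimed: loser cleans up/retries
  assert(slot.load() == &x);
  return 0;
}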

View File

@ -317,23 +317,18 @@ JVM_NewArray(JNIEnv *env, jclass eltClass, jint length);
JNIEXPORT jobject JNICALL
JVM_NewMultiArray(JNIEnv *env, jclass eltClass, jintArray dim);
/*
* java.lang.Class and java.lang.ClassLoader
*/
#define JVM_CALLER_DEPTH -1
/*
* Returns the immediate caller class of the native method invoking
* JVM_GetCallerClass. The Method.invoke and other frames due to
* reflection machinery are skipped.
*
* The depth parameter must be -1 (JVM_DEPTH). The caller is expected
* to be marked with sun.reflect.CallerSensitive. The JVM will throw
* an error if it is not marked propertly.
* The caller is expected to be marked with
* jdk.internal.reflect.CallerSensitive. The JVM will throw an
* error if it is not marked properly.
*/
JNIEXPORT jclass JNICALL
JVM_GetCallerClass(JNIEnv *env, int depth);
JVM_GetCallerClass(JNIEnv *env);
/*

View File

@ -699,6 +699,7 @@ int CodeInstaller::estimate_stubs_size(TRAPS) {
// Estimate the number of static and aot call stubs that might be emitted.
int static_call_stubs = 0;
int aot_call_stubs = 0;
int trampoline_stubs = 0;
objArrayOop sites = this->sites();
for (int i = 0; i < sites->length(); i++) {
oop site = sites->obj_at(i);
@ -710,8 +711,18 @@ int CodeInstaller::estimate_stubs_size(TRAPS) {
JVMCI_ERROR_0("expected Integer id, got %s", id_obj->klass()->signature_name());
}
jint id = id_obj->int_field(java_lang_boxing_object::value_offset_in_bytes(T_INT));
if (id == INVOKESTATIC || id == INVOKESPECIAL) {
switch (id) {
case INVOKEINTERFACE:
case INVOKEVIRTUAL:
trampoline_stubs++;
break;
case INVOKESTATIC:
case INVOKESPECIAL:
static_call_stubs++;
trampoline_stubs++;
break;
default:
break;
}
}
}
@ -726,6 +737,7 @@ int CodeInstaller::estimate_stubs_size(TRAPS) {
}
}
int size = static_call_stubs * CompiledStaticCall::to_interp_stub_size();
size += trampoline_stubs * CompiledStaticCall::to_trampoline_stub_size();
#if INCLUDE_AOT
size += aot_call_stubs * CompiledStaticCall::to_aot_stub_size();
#endif
@ -1171,7 +1183,7 @@ void CodeInstaller::site_Call(CodeBuffer& buffer, jint pc_offset, Handle site, T
}
TRACE_jvmci_3("method call");
CodeInstaller::pd_relocate_JavaMethod(hotspot_method, pc_offset, CHECK);
CodeInstaller::pd_relocate_JavaMethod(buffer, hotspot_method, pc_offset, CHECK);
if (_next_call_type == INVOKESTATIC || _next_call_type == INVOKESPECIAL) {
// Need a static call stub for transitions from compiled to interpreted.
CompiledStaticCall::emit_to_interp_stub(buffer, _instructions->start() + pc_offset);
@ -1282,4 +1294,3 @@ void CodeInstaller::site_Mark(CodeBuffer& buffer, jint pc_offset, Handle site, T
}
}
}

View File

@ -185,7 +185,7 @@ private:
void pd_patch_MetaspaceConstant(int pc_offset, Handle constant, TRAPS);
void pd_patch_DataSectionReference(int pc_offset, int data_offset, TRAPS);
void pd_relocate_ForeignCall(NativeInstruction* inst, jlong foreign_call_destination, TRAPS);
void pd_relocate_JavaMethod(Handle method, jint pc_offset, TRAPS);
void pd_relocate_JavaMethod(CodeBuffer &cbuf, Handle method, jint pc_offset, TRAPS);
void pd_relocate_poll(address pc, jint mark, TRAPS);
objArrayOop sites() { return (objArrayOop) JNIHandles::resolve(_sites_handle); }

View File

@ -749,8 +749,13 @@ C2V_VMENTRY(jobject, findUniqueConcreteMethod, (JNIEnv *, jobject, jobject jvmci
C2V_END
C2V_VMENTRY(jobject, getImplementor, (JNIEnv *, jobject, jobject jvmci_type))
InstanceKlass* klass = (InstanceKlass*) CompilerToVM::asKlass(jvmci_type);
oop implementor = CompilerToVM::get_jvmci_type(klass->implementor(), CHECK_NULL);
Klass* klass = CompilerToVM::asKlass(jvmci_type);
if (!klass->is_interface()) {
THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(),
err_msg("Expected interface type, got %s", klass->external_name()));
}
InstanceKlass* iklass = InstanceKlass::cast(klass);
oop implementor = CompilerToVM::get_jvmci_type(iklass->implementor(), CHECK_NULL);
return JNIHandles::make_local(THREAD, implementor);
C2V_END
@ -989,8 +994,12 @@ C2V_VMENTRY(jboolean, hasFinalizableSubclass,(JNIEnv *, jobject, jobject jvmci_t
C2V_END
C2V_VMENTRY(jobject, getClassInitializer, (JNIEnv *, jobject, jobject jvmci_type))
InstanceKlass* klass = (InstanceKlass*) CompilerToVM::asKlass(jvmci_type);
oop result = CompilerToVM::get_jvmci_method(klass->class_initializer(), CHECK_NULL);
Klass* klass = CompilerToVM::asKlass(jvmci_type);
if (!klass->is_instance_klass()) {
return NULL;
}
InstanceKlass* iklass = InstanceKlass::cast(klass);
oop result = CompilerToVM::get_jvmci_method(iklass->class_initializer(), CHECK_NULL);
return JNIHandles::make_local(THREAD, result);
C2V_END

View File

@ -177,6 +177,7 @@ void ConstantPoolCacheEntry::set_direct_or_vtable_call(Bytecodes::Code invoke_co
// instruction somehow links to a non-interface method (in Object).
// In that case, the method has no itable index and must be invoked as a virtual.
// Set a flag to keep track of this corner case.
assert(method->is_public(), "Calling non-public method in Object with invokeinterface");
change_to_virtual = true;
// ...and fall through as if we were handling invokevirtual:

View File

@ -86,13 +86,14 @@ void klassVtable::compute_vtable_size_and_num_mirandas(
GrowableArray<Method*> new_mirandas(20);
// compute the number of mirandas methods that must be added to the end
get_mirandas(&new_mirandas, all_mirandas, super, methods, NULL, local_interfaces);
get_mirandas(&new_mirandas, all_mirandas, super, methods, NULL, local_interfaces,
class_flags.is_interface());
*num_new_mirandas = new_mirandas.length();
// Interfaces do not need interface methods in their vtables
// This includes miranda methods and during later processing, default methods
if (!class_flags.is_interface()) {
vtable_length += *num_new_mirandas * vtableEntry::size();
vtable_length += *num_new_mirandas * vtableEntry::size();
}
if (Universe::is_bootstrapping() && vtable_length == 0) {
@ -454,8 +455,13 @@ bool klassVtable::update_inherited_vtable(InstanceKlass* klass, const methodHand
} else {
super_method = method_at(i);
}
// Check if method name matches
if (super_method->name() == name && super_method->signature() == signature) {
// Check if method name matches. Ignore match if klass is an interface and the
// matching method is a non-public java.lang.Object method. (See JVMS 5.4.3.4)
// This is safe because the method at this slot should never get invoked.
// (TBD: put in a method to throw NoSuchMethodError if this slot is ever used.)
if (super_method->name() == name && super_method->signature() == signature &&
(!_klass->is_interface() ||
!SystemDictionary::is_nonpublic_Object_method(super_method))) {
// get super_klass for method_holder for the found method
InstanceKlass* super_klass = super_method->method_holder();
@ -713,7 +719,7 @@ bool klassVtable::is_miranda_entry_at(int i) {
if (mhk->is_interface()) {
assert(m->is_public(), "should be public");
assert(ik()->implements_interface(method_holder) , "this class should implement the interface");
if (is_miranda(m, ik()->methods(), ik()->default_methods(), ik()->super())) {
if (is_miranda(m, ik()->methods(), ik()->default_methods(), ik()->super(), klass()->is_interface())) {
return true;
}
}
@ -738,7 +744,10 @@ bool klassVtable::is_miranda_entry_at(int i) {
// During the first run, the current instanceKlass has not yet been
// created, the superclasses and superinterfaces do have instanceKlasses
// but may not have vtables, the default_methods list is empty, no overpasses.
// This is seen by default method creation.
// Default method generation uses the all_mirandas array as the starter set for
// maximally-specific default method calculation. So, for both classes and
// interfaces, it is necessary that the first pass will find all non-private
// interface instance methods, whether or not they are concrete.
//
// Pass 2: recalculated during vtable initialization: only include abstract methods.
// The goal of pass 2 is to walk through the superinterfaces to see if any of
@ -772,7 +781,8 @@ bool klassVtable::is_miranda_entry_at(int i) {
// Part of the Miranda Rights in the US mean that if you do not have
// an attorney one will be appointed for you.
bool klassVtable::is_miranda(Method* m, Array<Method*>* class_methods,
Array<Method*>* default_methods, const Klass* super) {
Array<Method*>* default_methods, const Klass* super,
bool is_interface) {
if (m->is_static() || m->is_private() || m->is_overpass()) {
return false;
}
@ -800,8 +810,11 @@ bool klassVtable::is_miranda(Method* m, Array<Method*>* class_methods,
for (const Klass* cursuper = super; cursuper != NULL; cursuper = cursuper->super())
{
if (InstanceKlass::cast(cursuper)->find_local_method(name, signature,
Klass::find_overpass, Klass::skip_static, Klass::skip_private) != NULL) {
Method* found_mth = InstanceKlass::cast(cursuper)->find_local_method(name, signature,
Klass::find_overpass, Klass::skip_static, Klass::skip_private);
// Ignore non-public methods in java.lang.Object if klass is an interface.
if (found_mth != NULL && (!is_interface ||
!SystemDictionary::is_nonpublic_Object_method(found_mth))) {
return false;
}
}
@ -820,7 +833,7 @@ bool klassVtable::is_miranda(Method* m, Array<Method*>* class_methods,
void klassVtable::add_new_mirandas_to_lists(
GrowableArray<Method*>* new_mirandas, GrowableArray<Method*>* all_mirandas,
Array<Method*>* current_interface_methods, Array<Method*>* class_methods,
Array<Method*>* default_methods, const Klass* super) {
Array<Method*>* default_methods, const Klass* super, bool is_interface) {
// iterate thru the current interface's methods to see if each is a miranda
int num_methods = current_interface_methods->length();
@ -839,7 +852,7 @@ void klassVtable::add_new_mirandas_to_lists(
}
if (!is_duplicate) { // we don't want duplicate miranda entries in the vtable
if (is_miranda(im, class_methods, default_methods, super)) { // is it a miranda at all?
if (is_miranda(im, class_methods, default_methods, super, is_interface)) { // is it a miranda at all?
const InstanceKlass *sk = InstanceKlass::cast(super);
// check if it is a duplicate of a super's miranda
if (sk->lookup_method_in_all_interfaces(im->name(), im->signature(), Klass::find_defaults) == NULL) {
@ -858,7 +871,8 @@ void klassVtable::get_mirandas(GrowableArray<Method*>* new_mirandas,
const Klass* super,
Array<Method*>* class_methods,
Array<Method*>* default_methods,
Array<Klass*>* local_interfaces) {
Array<Klass*>* local_interfaces,
bool is_interface) {
assert((new_mirandas->length() == 0) , "current mirandas must be 0");
// iterate thru the local interfaces looking for a miranda
@ -867,7 +881,7 @@ void klassVtable::get_mirandas(GrowableArray<Method*>* new_mirandas,
InstanceKlass *ik = InstanceKlass::cast(local_interfaces->at(i));
add_new_mirandas_to_lists(new_mirandas, all_mirandas,
ik->methods(), class_methods,
default_methods, super);
default_methods, super, is_interface);
// iterate thru each local's super interfaces
Array<Klass*>* super_ifs = ik->transitive_interfaces();
int num_super_ifs = super_ifs->length();
@ -875,7 +889,7 @@ void klassVtable::get_mirandas(GrowableArray<Method*>* new_mirandas,
InstanceKlass *sik = InstanceKlass::cast(super_ifs->at(j));
add_new_mirandas_to_lists(new_mirandas, all_mirandas,
sik->methods(), class_methods,
default_methods, super);
default_methods, super, is_interface);
}
}
}
@ -888,7 +902,8 @@ void klassVtable::get_mirandas(GrowableArray<Method*>* new_mirandas,
int klassVtable::fill_in_mirandas(int initialized) {
GrowableArray<Method*> mirandas(20);
get_mirandas(&mirandas, NULL, ik()->super(), ik()->methods(),
ik()->default_methods(), ik()->local_interfaces());
ik()->default_methods(), ik()->local_interfaces(),
klass()->is_interface());
for (int i = 0; i < mirandas.length(); i++) {
if (log_develop_is_enabled(Trace, vtables)) {
Method* meth = mirandas.at(i);

View File

@ -144,21 +144,24 @@ class klassVtable VALUE_OBJ_CLASS_SPEC {
bool is_miranda_entry_at(int i);
int fill_in_mirandas(int initialized);
static bool is_miranda(Method* m, Array<Method*>* class_methods,
Array<Method*>* default_methods, const Klass* super);
Array<Method*>* default_methods, const Klass* super,
bool is_interface);
static void add_new_mirandas_to_lists(
GrowableArray<Method*>* new_mirandas,
GrowableArray<Method*>* all_mirandas,
Array<Method*>* current_interface_methods,
Array<Method*>* class_methods,
Array<Method*>* default_methods,
const Klass* super);
const Klass* super,
bool is_interface);
static void get_mirandas(
GrowableArray<Method*>* new_mirandas,
GrowableArray<Method*>* all_mirandas,
const Klass* super,
Array<Method*>* class_methods,
Array<Method*>* default_methods,
Array<Klass*>* local_interfaces);
Array<Klass*>* local_interfaces,
bool is_interface);
void verify_against(outputStream* st, klassVtable* vt, int index);
inline InstanceKlass* ik() const;
// When loading a class from CDS archive at run time, and no class redefintion

View File

@ -446,7 +446,7 @@ MethodCounters* Method::build_method_counters(Method* m, TRAPS) {
bool Method::init_method_counters(MethodCounters* counters) {
// Try to install a pointer to MethodCounters, return true on success.
return Atomic::cmpxchg(counters, &_method_counters, (MethodCounters*)NULL) == NULL;
return Atomic::replace_if_null(counters, &_method_counters);
}
void Method::cleanup_inline_caches() {

View File

@ -678,17 +678,9 @@ JVM_END
// Misc. class handling ///////////////////////////////////////////////////////////
JVM_ENTRY(jclass, JVM_GetCallerClass(JNIEnv* env, int depth))
JVM_ENTRY(jclass, JVM_GetCallerClass(JNIEnv* env))
JVMWrapper("JVM_GetCallerClass");
// Pre-JDK 8 and early builds of JDK 8 don't have a CallerSensitive annotation; or
// sun.reflect.Reflection.getCallerClass with a depth parameter is provided
// temporarily for existing code to use until a replacement API is defined.
if (SystemDictionary::reflect_CallerSensitive_klass() == NULL || depth != JVM_CALLER_DEPTH) {
Klass* k = thread->security_get_caller_class(depth);
return (k == NULL) ? NULL : (jclass) JNIHandles::make_local(env, k->java_mirror());
}
// Getting the class of the caller frame.
//
// The call stack at this point looks something like this:

View File

@ -127,7 +127,7 @@ JvmtiRawMonitor::is_valid() {
int JvmtiRawMonitor::SimpleEnter (Thread * Self) {
for (;;) {
if (Atomic::cmpxchg(Self, &_owner, (void*)NULL) == NULL) {
if (Atomic::replace_if_null(Self, &_owner)) {
return OS_OK ;
}
@ -139,7 +139,7 @@ int JvmtiRawMonitor::SimpleEnter (Thread * Self) {
Node._next = _EntryList ;
_EntryList = &Node ;
OrderAccess::fence() ;
if (_owner == NULL && Atomic::cmpxchg(Self, &_owner, (void*)NULL) == NULL) {
if (_owner == NULL && Atomic::replace_if_null(Self, &_owner)) {
_EntryList = Node._next ;
RawMonitor_lock->unlock() ;
return OS_OK ;

View File

@ -1727,10 +1727,16 @@ WB_END
WB_ENTRY(jboolean, WB_IsCDSIncludedInVmBuild(JNIEnv* env))
#if INCLUDE_CDS
# ifdef _LP64
if (!UseCompressedOops || !UseCompressedClassPointers) {
// On 64-bit VMs, CDS is supported only with compressed oops/pointers
return false;
}
# endif // _LP64
return true;
#else
return false;
#endif
#endif // INCLUDE_CDS
WB_END

View File

@ -200,7 +200,9 @@ SystemProperty::SystemProperty(const char* key, const char* value, bool writeabl
_writeable = writeable;
}
AgentLibrary::AgentLibrary(const char* name, const char* options, bool is_absolute_path, void* os_lib) {
AgentLibrary::AgentLibrary(const char* name, const char* options,
bool is_absolute_path, void* os_lib,
bool instrument_lib) {
_name = AllocateHeap(strlen(name)+1, mtArguments);
strcpy(_name, name);
if (options == NULL) {
@ -214,6 +216,7 @@ AgentLibrary::AgentLibrary(const char* name, const char* options, bool is_absolu
_next = NULL;
_state = agent_invalid;
_is_static_lib = false;
_is_instrument_lib = instrument_lib;
}
// Check if head of 'option' matches 'name', and sets 'tail' to the remaining
@ -290,6 +293,10 @@ void Arguments::add_init_agent(const char* name, char* options, bool absolute_pa
_agentList.add(new AgentLibrary(name, options, absolute_path, NULL));
}
void Arguments::add_instrument_agent(const char* name, char* options, bool absolute_path) {
_agentList.add(new AgentLibrary(name, options, absolute_path, NULL, true));
}
// Late-binding agents not started via arguments
void Arguments::add_loaded_agent(AgentLibrary *agentLib) {
_agentList.add(agentLib);
@ -2797,7 +2804,7 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args, bool* patch_m
size_t length = strlen(tail) + 1;
char *options = NEW_C_HEAP_ARRAY(char, length, mtArguments);
jio_snprintf(options, length, "%s", tail);
add_init_agent("instrument", options, false);
add_instrument_agent("instrument", options, false);
// java agents need module java.instrument
if (!create_numbered_property("jdk.module.addmods", "java.instrument", addmods_count++)) {
return JNI_ENOMEM;

View File

@ -142,6 +142,7 @@ public:
void* _os_lib;
bool _is_absolute_path;
bool _is_static_lib;
bool _is_instrument_lib;
AgentState _state;
AgentLibrary* _next;
@ -154,13 +155,15 @@ public:
void set_os_lib(void* os_lib) { _os_lib = os_lib; }
AgentLibrary* next() const { return _next; }
bool is_static_lib() const { return _is_static_lib; }
bool is_instrument_lib() const { return _is_instrument_lib; }
void set_static_lib(bool is_static_lib) { _is_static_lib = is_static_lib; }
bool valid() { return (_state == agent_valid); }
void set_valid() { _state = agent_valid; }
void set_invalid() { _state = agent_invalid; }
// Constructor
AgentLibrary(const char* name, const char* options, bool is_absolute_path, void* os_lib);
AgentLibrary(const char* name, const char* options, bool is_absolute_path,
void* os_lib, bool instrument_lib=false);
};
// maintain an order of entry list of AgentLibrary
@ -337,6 +340,7 @@ class Arguments : AllStatic {
// -agentlib and -agentpath arguments
static AgentLibraryList _agentList;
static void add_init_agent(const char* name, char* options, bool absolute_path);
static void add_instrument_agent(const char* name, char* options, bool absolute_path);
// Late-binding agents not started via arguments
static void add_loaded_agent(AgentLibrary *agentLib);

View File

@ -45,8 +45,8 @@ enum cmpxchg_memory_order {
class Atomic : AllStatic {
public:
// Atomic operations on jlong types are not available on all 32-bit
// platforms. If atomic ops on jlongs are defined here they must only
// Atomic operations on int64 types are not available on all 32-bit
// platforms. If atomic ops on int64 are defined here they must only
// be used from code that verifies they are available at runtime and
// can provide an alternative action if not - see supports_cx8() for
// a means to test availability.
@ -639,16 +639,16 @@ struct Atomic::AddImpl<
//
// Use the ATOMIC_SHORT_PAIR macro (see macros.hpp) to get the desired alignment.
template<>
struct Atomic::AddImpl<jshort, jshort> VALUE_OBJ_CLASS_SPEC {
jshort operator()(jshort add_value, jshort volatile* dest) const {
struct Atomic::AddImpl<short, short> VALUE_OBJ_CLASS_SPEC {
short operator()(short add_value, short volatile* dest) const {
#ifdef VM_LITTLE_ENDIAN
assert((intx(dest) & 0x03) == 0x02, "wrong alignment");
jint new_value = Atomic::add(add_value << 16, (volatile jint*)(dest-1));
int new_value = Atomic::add(add_value << 16, (volatile int*)(dest-1));
#else
assert((intx(dest) & 0x03) == 0x00, "wrong alignment");
jint new_value = Atomic::add(add_value << 16, (volatile jint*)(dest));
int new_value = Atomic::add(add_value << 16, (volatile int*)(dest));
#endif
return (jshort)(new_value >> 16); // preserves sign
return (short)(new_value >> 16); // preserves sign
}
};
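
The jshort-to-short AddImpl above encodes the ATOMIC_SHORT_PAIR trick: a lone 16-bit counter cannot be updated atomically on every platform, so it is paired into an aligned 32-bit word and updated with a full-word atomic add. A standalone sketch of why that is safe, with std::atomic standing in for Atomic::add and showing the little-endian case, where the field sits in the high half:

#include <atomic>
#include <cassert>
#include <cstdint>

// The 16-bit field lives in one half of an aligned 32-bit word (its pair
// partner occupies the other half). Adding value * 2^16 with a 32-bit atomic
// add touches only the high half -- the delta's low 16 bits are zero, so no
// carry can enter the partner -- and the arithmetic shift back preserves sign.
static int16_t add_short_in_high_half(std::atomic<int32_t>& pair, int16_t add_value) {
  const int32_t delta = int32_t(add_value) * 65536;   // add_value << 16
  const int32_t new_value = pair.fetch_add(delta) + delta;
  return int16_t(new_value >> 16);
}

int main() {
  std::atomic<int32_t> pair{0};
  assert(add_short_in_high_half(pair, 5) == 5);
  assert(add_short_in_high_half(pair, -7) == -2);  // sign preserved
  assert((pair.load() & 0xFFFF) == 0);             // partner half untouched
  return 0;
}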
@ -807,7 +807,7 @@ inline T Atomic::CmpxchgByteUsingInt::operator()(T exchange_value,
do {
// value to swap in matches current value ...
uint32_t new_value = cur;
// ... except for the one jbyte we want to update
// ... except for the one byte we want to update
reinterpret_cast<uint8_t*>(&new_value)[offset] = canon_exchange_value;
uint32_t res = cmpxchg(new_value, aligned_dest, cur, order);
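
The surrounding CmpxchgByteUsingInt loop (cut off by the hunk) emulates a one-byte CAS with the four-byte CAS: read the aligned word, verify the target byte still equals compare_value, splice in the new byte, and retry the word-wide CAS until it sticks or the byte diverges. A self-contained sketch of that loop, assuming offset addresses the byte within the word the way the real code's address masking does:

#include <atomic>
#include <cstdint>
#include <cstring>

// Sketch: CAS one byte inside an aligned 32-bit word using only a word-wide CAS.
int8_t cmpxchg_byte_using_int(std::atomic<uint32_t>& word, int offset,
                              int8_t exchange_value, int8_t compare_value) {
  uint32_t cur = word.load();
  for (;;) {
    uint8_t cur_byte;
    std::memcpy(&cur_byte, reinterpret_cast<uint8_t*>(&cur) + offset, 1);
    if (int8_t(cur_byte) != compare_value) {
      return int8_t(cur_byte);          // byte changed underneath us: CAS fails
    }
    uint32_t new_value = cur;           // value to swap in matches current ...
    std::memcpy(reinterpret_cast<uint8_t*>(&new_value) + offset,
                &exchange_value, 1);    // ... except for the one byte we update
    if (word.compare_exchange_strong(cur, new_value)) {
      return compare_value;             // success: the old byte was compare_value
    }
    // cur now holds the fresh word value; loop and re-check the byte.
  }
}

int main() {
  std::atomic<uint32_t> word{0x11223344};
  return cmpxchg_byte_using_int(word, 0, 0x55, 0x44) == 0x44 ? 0 : 1;
}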

View File

@ -47,6 +47,7 @@ jobject JNIHandles::make_local(oop obj) {
} else {
Thread* thread = Thread::current();
assert(Universe::heap()->is_in_reserved(obj), "sanity check");
assert(!current_thread_in_native(), "must not be in native");
return thread->active_handles()->allocate_handle(obj);
}
}
@ -59,6 +60,8 @@ jobject JNIHandles::make_local(Thread* thread, oop obj) {
return NULL; // ignore null handles
} else {
assert(Universe::heap()->is_in_reserved(obj), "sanity check");
assert(thread->is_Java_thread(), "not a Java thread");
assert(!current_thread_in_native(), "must not be in native");
return thread->active_handles()->allocate_handle(obj);
}
}
@ -70,6 +73,7 @@ jobject JNIHandles::make_local(JNIEnv* env, oop obj) {
} else {
JavaThread* thread = JavaThread::thread_from_jni_environment(env);
assert(Universe::heap()->is_in_reserved(obj), "sanity check");
assert(!current_thread_in_native(), "must not be in native");
return thread->active_handles()->allocate_handle(obj);
}
}
@@ -77,6 +81,7 @@ jobject JNIHandles::make_local(JNIEnv* env, oop obj) {
jobject JNIHandles::make_global(Handle obj) {
assert(!Universe::heap()->is_gc_active(), "can't extend the root set during GC");
assert(!current_thread_in_native(), "must not be in native");
jobject res = NULL;
if (!obj.is_null()) {
// ignore null handles
@@ -93,6 +98,7 @@ jobject JNIHandles::make_global(Handle obj) {
jobject JNIHandles::make_weak_global(Handle obj) {
assert(!Universe::heap()->is_gc_active(), "can't extend the root set during GC");
assert(!current_thread_in_native(), "must not be in native");
jobject res = NULL;
if (!obj.is_null()) {
// ignore null handles
@@ -265,6 +271,13 @@ void JNIHandles::verify() {
weak_oops_do(&verify_handle);
}
// This method is implemented here to avoid circular includes between
// jniHandles.hpp and thread.hpp.
bool JNIHandles::current_thread_in_native() {
Thread* thread = Thread::current();
return (thread->is_Java_thread() &&
JavaThread::current()->thread_state() == _thread_in_native);
}
void jni_handles_init() {

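The asserts added in this file all enforce one rule: JNI handles must not be created while the calling thread's state is _thread_in_native. A hypothetical caller sketch (ThreadInVMfromNative is HotSpot's existing state-transition helper; the function itself is illustrative only):

// Illustrative only, not part of this change: transition back to the VM
// before allocating handles, otherwise the new asserts fire.
jobject make_handle_from_native(JavaThread* thread, oop obj) {
  ThreadInVMfromNative transition(thread);     // _thread_in_native -> _thread_in_vm
  return JNIHandles::make_local(thread, obj);  // current_thread_in_native() is now false
}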
View File

@@ -48,6 +48,10 @@ class JNIHandles : AllStatic {
template<bool external_guard> inline static oop resolve_impl(jobject handle);
template<bool external_guard> static oop resolve_jweak(jweak handle);
// This method is not inlined in order to avoid circular includes between
// this header file and thread.hpp.
static bool current_thread_in_native();
public:
// Low tag bit in jobject used to distinguish a jweak. jweak is
// type equivalent to jobject, but there are places where we need to
@@ -230,6 +234,7 @@ inline oop JNIHandles::guard_value(oop value) {
template<bool external_guard>
inline oop JNIHandles::resolve_impl(jobject handle) {
assert(handle != NULL, "precondition");
assert(!current_thread_in_native(), "must not be in native");
oop result;
if (is_jweak(handle)) { // Unlikely
result = resolve_jweak<external_guard>(handle);

View File

@@ -467,7 +467,7 @@ void Monitor::ILock(Thread * Self) {
OrderAccess::fence();
// Optional optimization ... try barging on the inner lock
if ((NativeMonitorFlags & 32) && Atomic::cmpxchg(ESelf, &_OnDeck, (ParkEvent*)NULL) == NULL) {
if ((NativeMonitorFlags & 32) && Atomic::replace_if_null(ESelf, &_OnDeck)) {
goto OnDeck_LOOP;
}
@@ -574,7 +574,7 @@ void Monitor::IUnlock(bool RelaxAssert) {
// Unlike a normal lock, however, the exiting thread "locks" OnDeck,
// picks a successor and marks that thread as OnDeck. That successor
// thread will then clear OnDeck once it eventually acquires the outer lock.
if (Atomic::cmpxchg((ParkEvent*)_LBIT, &_OnDeck, (ParkEvent*)NULL) != NULL) {
if (!Atomic::replace_if_null((ParkEvent*)_LBIT, &_OnDeck)) {
return;
}
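This hunk and the ObjectMonitor, ObjectSynchronizer, MallocSiteTable and BitMap hunks below are one mechanical substitution: a cmpxchg against NULL whose result is compared with NULL becomes a boolean-returning helper. A sketch of the presumed shape of the new primitive (illustrative; the authoritative definition lives in atomic.hpp):

// Sketch of the helper's presumed shape: store value into *dest iff *dest
// is currently NULL, and report whether this thread's store won the race.
template<typename T, typename D>
inline bool replace_if_null(T* value, D* volatile* dest) {
  D* expected_null = NULL;
  return expected_null == Atomic::cmpxchg(value, dest, expected_null);
}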

View File

@@ -421,7 +421,7 @@ void ObjectMonitor::enter(TRAPS) {
int ObjectMonitor::TryLock(Thread * Self) {
void * own = _owner;
if (own != NULL) return 0;
if (Atomic::cmpxchg(Self, &_owner, (void*)NULL) == NULL) {
if (Atomic::replace_if_null(Self, &_owner)) {
// Either guarantee _recursions == 0 or set _recursions = 0.
assert(_recursions == 0, "invariant");
assert(_owner == Self, "invariant");
@@ -529,7 +529,7 @@ void ObjectMonitor::EnterI(TRAPS) {
if ((SyncFlags & 16) == 0 && nxt == NULL && _EntryList == NULL) {
// Try to assume the role of responsible thread for the monitor.
// CONSIDER: ST vs CAS vs { if (Responsible==null) Responsible=Self }
Atomic::cmpxchg(Self, &_Responsible, (Thread*)NULL);
Atomic::replace_if_null(Self, &_Responsible);
}
// The lock might have been released while this thread was occupied queueing
@@ -553,7 +553,7 @@ void ObjectMonitor::EnterI(TRAPS) {
assert(_owner != Self, "invariant");
if ((SyncFlags & 2) && _Responsible == NULL) {
Atomic::cmpxchg(Self, &_Responsible, (Thread*)NULL);
Atomic::replace_if_null(Self, &_Responsible);
}
// park self
@@ -1007,7 +1007,7 @@ void ObjectMonitor::exit(bool not_suspended, TRAPS) {
// to reacquire the lock the responsibility for ensuring succession
// falls to the new owner.
//
if (Atomic::cmpxchg(THREAD, &_owner, (void*)NULL) != NULL) {
if (!Atomic::replace_if_null(THREAD, &_owner)) {
return;
}
TEVENT(Exit - Reacquired);
@@ -1032,7 +1032,7 @@ void ObjectMonitor::exit(bool not_suspended, TRAPS) {
// B. If the elements forming the EntryList|cxq are TSM
// we could simply unpark() the lead thread and return
// without having set _succ.
if (Atomic::cmpxchg(THREAD, &_owner, (void*)NULL) != NULL) {
if (!Atomic::replace_if_null(THREAD, &_owner)) {
TEVENT(Inflated exit - reacquired succeeded);
return;
}
@@ -1714,7 +1714,7 @@ void ObjectMonitor::INotify(Thread * Self) {
ObjectWaiter * tail = _cxq;
if (tail == NULL) {
iterator->_next = NULL;
if (Atomic::cmpxchg(iterator, &_cxq, (ObjectWaiter*)NULL) == NULL) {
if (Atomic::replace_if_null(iterator, &_cxq)) {
break;
}
} else {

View File

@@ -103,7 +103,7 @@ void SafepointSynchronize::begin() {
int nof_threads = Threads::number_of_threads();
log_debug(safepoint)("Safepoint synchronization initiated. (%d)", nof_threads);
log_debug(safepoint)("Safepoint synchronization initiated. (%d threads)", nof_threads);
RuntimeService::record_safepoint_begin();
@@ -407,9 +407,7 @@ void SafepointSynchronize::begin() {
// Update the count of active JNI critical regions
GCLocker::set_jni_lock_count(_current_jni_active_count);
if (log_is_enabled(Debug, safepoint)) {
log_debug(safepoint)("Entering safepoint region: %s", VMThread::vm_safepoint_description());
}
log_info(safepoint)("Entering safepoint region: %s", VMThread::vm_safepoint_description());
RuntimeService::record_safepoint_synchronized();
if (PrintSafepointStatistics) {
@@ -496,14 +494,14 @@ void SafepointSynchronize::end() {
cur_state->restart(); // TSS _running
SafepointMechanism::disarm_local_poll(current); // release store, local state -> polling page
}
log_debug(safepoint)("Leaving safepoint region");
log_info(safepoint)("Leaving safepoint region");
} else {
// Set to not synchronized, so the threads will not go into the signal_thread_blocked method
// when they get restarted.
_state = _not_synchronized;
OrderAccess::fence();
log_debug(safepoint)("Leaving safepoint region");
log_info(safepoint)("Leaving safepoint region");
// Start suspended threads
jtiwh.rewind();

View File

@@ -62,12 +62,11 @@ address StubRoutines::_verify_oop_subroutine_entry = NULL;
address StubRoutines::_atomic_xchg_entry = NULL;
address StubRoutines::_atomic_xchg_long_entry = NULL;
address StubRoutines::_atomic_store_entry = NULL;
address StubRoutines::_atomic_store_ptr_entry = NULL;
address StubRoutines::_atomic_cmpxchg_entry = NULL;
address StubRoutines::_atomic_cmpxchg_byte_entry = NULL;
address StubRoutines::_atomic_cmpxchg_long_entry = NULL;
address StubRoutines::_atomic_add_entry = NULL;
address StubRoutines::_atomic_add_ptr_entry = NULL;
address StubRoutines::_atomic_add_long_entry = NULL;
address StubRoutines::_fence_entry = NULL;
address StubRoutines::_d2i_wrapper = NULL;
address StubRoutines::_d2l_wrapper = NULL;

View File

@@ -103,12 +103,11 @@ class StubRoutines: AllStatic {
static address _atomic_xchg_entry;
static address _atomic_xchg_long_entry;
static address _atomic_store_entry;
static address _atomic_store_ptr_entry;
static address _atomic_cmpxchg_entry;
static address _atomic_cmpxchg_byte_entry;
static address _atomic_cmpxchg_long_entry;
static address _atomic_add_entry;
static address _atomic_add_ptr_entry;
static address _atomic_add_long_entry;
static address _fence_entry;
static address _d2i_wrapper;
static address _d2l_wrapper;
@@ -277,12 +276,11 @@ class StubRoutines: AllStatic {
static address atomic_xchg_entry() { return _atomic_xchg_entry; }
static address atomic_xchg_long_entry() { return _atomic_xchg_long_entry; }
static address atomic_store_entry() { return _atomic_store_entry; }
static address atomic_store_ptr_entry() { return _atomic_store_ptr_entry; }
static address atomic_cmpxchg_entry() { return _atomic_cmpxchg_entry; }
static address atomic_cmpxchg_byte_entry() { return _atomic_cmpxchg_byte_entry; }
static address atomic_cmpxchg_long_entry() { return _atomic_cmpxchg_long_entry; }
static address atomic_add_entry() { return _atomic_add_entry; }
static address atomic_add_ptr_entry() { return _atomic_add_ptr_entry; }
static address atomic_add_long_entry() { return _atomic_add_long_entry; }
static address fence_entry() { return _fence_entry; }
static address d2i_wrapper() { return _d2i_wrapper; }

View File

@@ -238,8 +238,7 @@ bool ObjectSynchronizer::quick_enter(oop obj, Thread * Self,
// and last are the inflated Java Monitor (ObjectMonitor) checks.
lock->set_displaced_header(markOopDesc::unused_mark());
if (owner == NULL &&
Atomic::cmpxchg(Self, &(m->_owner), (void*)NULL) == NULL) {
if (owner == NULL && Atomic::replace_if_null(Self, &(m->_owner))) {
assert(m->_recursions == 0, "invariant");
assert(m->_owner == Self, "invariant");
return true;

View File

@@ -4039,9 +4039,16 @@ static OnLoadEntry_t lookup_on_load(AgentLibrary* agent,
}
if (library == NULL) {
const char *sub_msg = " on the library path, with error: ";
size_t len = strlen(msg) + strlen(name) + strlen(sub_msg) + strlen(ebuf) + 1;
const char *sub_msg2 = "\nModule java.instrument may be missing from runtime image.";
size_t len = strlen(msg) + strlen(name) + strlen(sub_msg) +
strlen(ebuf) + strlen(sub_msg2) + 1;
char *buf = NEW_C_HEAP_ARRAY(char, len, mtThread);
jio_snprintf(buf, len, "%s%s%s%s", msg, name, sub_msg, ebuf);
if (!agent->is_instrument_lib()) {
jio_snprintf(buf, len, "%s%s%s%s", msg, name, sub_msg, ebuf);
} else {
jio_snprintf(buf, len, "%s%s%s%s%s", msg, name, sub_msg, ebuf, sub_msg2);
}
// If we can't find the agent, exit.
vm_exit_during_initialization(buf, NULL);
FREE_C_HEAP_ARRAY(char, buf);
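The enlarged buffer above encodes a simple sizing rule: len must cover the longest message variant plus the trailing NUL, or the formatted text is truncated. The same rule in a reduced, self-contained form (the fragment values are hypothetical placeholders, not the real messages):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void) {
  const char* msg      = "Could not find agent library ";  // hypothetical
  const char* name     = "instrument";                     // hypothetical
  const char* sub_msg  = " on the library path, with error: ";
  const char* ebuf     = "no such file";                   // hypothetical
  const char* sub_msg2 = "\nModule java.instrument may be missing from runtime image.";
  size_t len = strlen(msg) + strlen(name) + strlen(sub_msg) +
               strlen(ebuf) + strlen(sub_msg2) + 1;        // +1 for the NUL
  char* buf = (char*)malloc(len);
  snprintf(buf, len, "%s%s%s%s%s", msg, name, sub_msg, ebuf, sub_msg2);
  puts(buf);
  free(buf);
  return 0;
}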

View File

@@ -147,7 +147,7 @@ MallocSite* MallocSiteTable::lookup_or_add(const NativeCallStack& key, size_t* b
if (entry == NULL) return NULL;
// swap in the head
if (Atomic::cmpxchg(entry, &_table[index], (MallocSiteHashtableEntry*)NULL) == NULL) {
if (Atomic::replace_if_null(entry, &_table[index])) {
return entry->data();
}
@@ -259,5 +259,5 @@ void MallocSiteTable::AccessLock::exclusiveLock() {
}
bool MallocSiteHashtableEntry::atomic_insert(MallocSiteHashtableEntry* entry) {
return Atomic::cmpxchg(entry, &_next, (MallocSiteHashtableEntry*)NULL) == NULL;
return Atomic::replace_if_null(entry, &_next);
}
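atomic_insert above is the per-link step of a lock-free chain append: CAS the new entry into the first NULL _next pointer, and move forward whenever another thread linked first. The calling pattern, sketched (simplified; the real loop in lookup_or_add also compares keys, and next() is assumed to be the accessor for _next):

// Simplified sketch of the append loop built on atomic_insert above.
void append_entry(MallocSiteHashtableEntry* head, MallocSiteHashtableEntry* entry) {
  MallocSiteHashtableEntry* cur = head;
  while (!cur->atomic_insert(entry)) {
    cur = cur->next();  // lost the race at this link; walk forward and retry
  }
}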

View File

@@ -628,7 +628,7 @@ void BitMap::init_pop_count_table() {
table[i] = num_set_bits(i);
}
if (Atomic::cmpxchg(table, &_pop_count_table, (BitMap::idx_t*)NULL) != NULL) {
if (!Atomic::replace_if_null(table, &_pop_count_table)) {
guarantee(_pop_count_table != NULL, "invariant");
FREE_C_HEAP_ARRAY(idx_t, table);
}
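The enclosing function is the publish-or-discard initialization idiom: every racing thread builds a private table, exactly one CAS publishes it, and the losers free their copies. Stripped to its skeleton (illustrative, not BitMap code; the element values stand in for num_set_bits(i)):

// Stripped-down sketch of the publish-or-discard idiom used above.
static int* volatile _shared_table = NULL;

void init_shared_table() {
  int* table = NEW_C_HEAP_ARRAY(int, 256, mtInternal);
  for (int i = 0; i < 256; i++) {
    table[i] = i;  // stand-in for num_set_bits(i)
  }
  if (!Atomic::replace_if_null(table, &_shared_table)) {
    guarantee(_shared_table != NULL, "invariant");
    FREE_C_HEAP_ARRAY(int, table);  // another thread published first
  }
}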

View File

@@ -64,14 +64,6 @@ public class Reflection {
@HotSpotIntrinsicCandidate
public static native Class<?> getCallerClass();
/**
* @deprecated This method will be removed.
* This method is a private JDK API and retained temporarily to
* simplify the implementation of sun.misc.Reflection.getCallerClass.
*/
@Deprecated(forRemoval=true)
public static native Class<?> getCallerClass(int depth);
/** Retrieves the access flags written to the class file. For
inner classes these flags may differ from those returned by
Class.getModifiers(), which searches the InnerClasses

View File

@@ -30,13 +30,7 @@
JNIEXPORT jclass JNICALL
Java_jdk_internal_reflect_Reflection_getCallerClass__(JNIEnv *env, jclass unused)
{
return JVM_GetCallerClass(env, JVM_CALLER_DEPTH);
}
JNIEXPORT jclass JNICALL
Java_jdk_internal_reflect_Reflection_getCallerClass__I(JNIEnv *env, jclass unused, jint depth)
{
return JVM_GetCallerClass(env, depth);
return JVM_GetCallerClass(env);
}
JNIEXPORT jint JNICALL

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -46,9 +46,7 @@ public class VirtualMachineImpl extends HotSpotVirtualMachine {
// will not be able to find all Hotspot processes.
// Any changes to this need to be synchronized with HotSpot.
private static final String tmpdir = "/tmp";
// The path to the socket file created by the target VM
String path;
String socket_path;
/**
* Attaches to the target VM
@@ -69,8 +67,9 @@ public class VirtualMachineImpl extends HotSpotVirtualMachine {
// Find the socket file. If not found then we attempt to start the
// attach mechanism in the target VM by sending it a QUIT signal.
// Then we attempt to find the socket file again.
path = findSocketFile(pid);
if (path == null) {
File socket_file = new File(tmpdir, ".java_pid" + pid);
socket_path = socket_file.getPath();
if (!socket_file.exists()) {
File f = createAttachFile(pid);
try {
sendQuitTo(pid);
@@ -86,19 +85,19 @@ public class VirtualMachineImpl extends HotSpotVirtualMachine {
try {
Thread.sleep(delay);
} catch (InterruptedException x) { }
path = findSocketFile(pid);
time_spend += delay;
if (time_spend > timeout/2 && path == null) {
if (time_spend > timeout/2 && !socket_file.exists()) {
// Send QUIT again to give target VM the last chance to react
sendQuitTo(pid);
}
} while (time_spend <= timeout && path == null);
if (path == null) {
} while (time_spend <= timeout && !socket_file.exists());
if (!socket_file.exists()) {
throw new AttachNotSupportedException(
String.format("Unable to open socket file %s: " +
"target process %d doesn't respond within %dms " +
"or HotSpot VM not loaded", f.getPath(), pid, time_spend));
"or HotSpot VM not loaded", socket_path, pid,
time_spend));
}
} finally {
f.delete();
@@ -107,14 +106,14 @@ public class VirtualMachineImpl extends HotSpotVirtualMachine {
// Check the file owner/permissions to avoid attaching to
// a bogus process
checkPermissions(path);
checkPermissions(socket_path);
// Check that we can connect to the process
// - this ensures we throw the permission denied error now rather than
// later when we attempt to enqueue a command.
int s = socket();
try {
connect(s, path);
connect(s, socket_path);
} finally {
close(s);
}
@@ -125,8 +124,8 @@ public class VirtualMachineImpl extends HotSpotVirtualMachine {
*/
public void detach() throws IOException {
synchronized (this) {
if (this.path != null) {
this.path = null;
if (socket_path != null) {
socket_path = null;
}
}
}
@@ -144,12 +143,10 @@ public class VirtualMachineImpl extends HotSpotVirtualMachine {
assert args.length <= 3; // includes null
// did we detach?
String p;
synchronized (this) {
if (this.path == null) {
if (socket_path == null) {
throw new IOException("Detached from target VM");
}
p = this.path;
}
// create UNIX socket
@@ -157,7 +154,7 @@ public class VirtualMachineImpl extends HotSpotVirtualMachine {
// connect to target VM
try {
connect(s, p);
connect(s, socket_path);
} catch (IOException x) {
close(s);
throw x;
@@ -264,15 +261,6 @@ public class VirtualMachineImpl extends HotSpotVirtualMachine {
}
}
// Return the socket file for the given process.
private String findSocketFile(int pid) {
File f = new File(tmpdir, ".java_pid" + pid);
if (!f.exists()) {
return null;
}
return f.getPath();
}
// On Solaris/Linux/Aix a simple handshake is used to start the attach mechanism
// if not already started. The client creates a .attach_pid<pid> file in the
// target VM's working directory (or temp directory), and the SIGQUIT handler

View File

@@ -47,10 +47,7 @@ public class VirtualMachineImpl extends HotSpotVirtualMachine {
// will not be able to find all Hotspot processes.
// Any changes to this need to be synchronized with HotSpot.
private static final String tmpdir = "/tmp";
// The path to the socket file created by the target VM
String path;
String socket_path;
/**
* Attaches to the target VM
*/
@@ -73,8 +70,9 @@ public class VirtualMachineImpl extends HotSpotVirtualMachine {
// Find the socket file. If not found then we attempt to start the
// attach mechanism in the target VM by sending it a QUIT signal.
// Then we attempt to find the socket file again.
path = findSocketFile(pid, ns_pid);
if (path == null) {
File socket_file = findSocketFile(pid, ns_pid);
socket_path = socket_file.getPath();
if (!socket_file.exists()) {
File f = createAttachFile(pid, ns_pid);
try {
sendQuitTo(pid);
@@ -90,19 +88,19 @@ public class VirtualMachineImpl extends HotSpotVirtualMachine {
try {
Thread.sleep(delay);
} catch (InterruptedException x) { }
path = findSocketFile(pid, ns_pid);
time_spend += delay;
if (time_spend > timeout/2 && path == null) {
if (time_spend > timeout/2 && !socket_file.exists()) {
// Send QUIT again to give target VM the last chance to react
sendQuitTo(pid);
}
} while (time_spend <= timeout && path == null);
if (path == null) {
} while (time_spend <= timeout && !socket_file.exists());
if (!socket_file.exists()) {
throw new AttachNotSupportedException(
String.format("Unable to open socket file %s: " +
"target process %d doesn't respond within %dms " +
"or HotSpot VM not loaded", f.getPath(), pid, time_spend));
"or HotSpot VM not loaded", socket_path, pid,
time_spend));
}
} finally {
f.delete();
@@ -111,14 +109,14 @@ public class VirtualMachineImpl extends HotSpotVirtualMachine {
// Check the file owner/permissions to avoid attaching to
// a bogus process
checkPermissions(path);
checkPermissions(socket_path);
// Check that we can connect to the process
// - this ensures we throw the permission denied error now rather than
// later when we attempt to enqueue a command.
int s = socket();
try {
connect(s, path);
connect(s, socket_path);
} finally {
close(s);
}
@@ -129,8 +127,8 @@ public class VirtualMachineImpl extends HotSpotVirtualMachine {
*/
public void detach() throws IOException {
synchronized (this) {
if (this.path != null) {
this.path = null;
if (socket_path != null) {
socket_path = null;
}
}
}
@@ -148,12 +146,10 @@ public class VirtualMachineImpl extends HotSpotVirtualMachine {
assert args.length <= 3; // includes null
// did we detach?
String p;
synchronized (this) {
if (this.path == null) {
if (socket_path == null) {
throw new IOException("Detached from target VM");
}
p = this.path;
}
// create UNIX socket
@@ -161,7 +157,7 @@ public class VirtualMachineImpl extends HotSpotVirtualMachine {
// connect to target VM
try {
connect(s, p);
connect(s, socket_path);
} catch (IOException x) {
close(s);
throw x;
@@ -257,8 +253,9 @@ public class VirtualMachineImpl extends HotSpotVirtualMachine {
if ((off < 0) || (off > bs.length) || (len < 0) ||
((off + len) > bs.length) || ((off + len) < 0)) {
throw new IndexOutOfBoundsException();
} else if (len == 0)
} else if (len == 0) {
return 0;
}
return VirtualMachineImpl.read(s, bs, off, len);
}
@@ -269,16 +266,12 @@ public class VirtualMachineImpl extends HotSpotVirtualMachine {
}
// Return the socket file for the given process.
private String findSocketFile(int pid, int ns_pid) {
private File findSocketFile(int pid, int ns_pid) {
// A process may not exist in the same mount namespace as the caller.
// Instead, attach relative to the target root filesystem as exposed by
// procfs regardless of namespaces.
String root = "/proc/" + pid + "/root/" + tmpdir;
File f = new File(root, ".java_pid" + ns_pid);
if (!f.exists()) {
return null;
}
return f.getPath();
return new File(root, ".java_pid" + ns_pid);
}
// On Solaris/Linux a simple handshake is used to start the attach mechanism

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -45,9 +45,7 @@ public class VirtualMachineImpl extends HotSpotVirtualMachine {
// the latter can be changed by the user.
// Any changes to this need to be synchronized with HotSpot.
private static final String tmpdir;
// The path to the socket file created by the target VM
String path;
String socket_path;
/**
* Attaches to the target VM
@@ -68,8 +66,9 @@ public class VirtualMachineImpl extends HotSpotVirtualMachine {
// Find the socket file. If not found then we attempt to start the
// attach mechanism in the target VM by sending it a QUIT signal.
// Then we attempt to find the socket file again.
path = findSocketFile(pid);
if (path == null) {
File socket_file = new File(tmpdir, ".java_pid" + pid);
socket_path = socket_file.getPath();
if (!socket_file.exists()) {
File f = createAttachFile(pid);
try {
sendQuitTo(pid);
@@ -85,19 +84,19 @@ public class VirtualMachineImpl extends HotSpotVirtualMachine {
try {
Thread.sleep(delay);
} catch (InterruptedException x) { }
path = findSocketFile(pid);
time_spend += delay;
if (time_spend > timeout/2 && path == null) {
if (time_spend > timeout/2 && !socket_file.exists()) {
// Send QUIT again to give target VM the last chance to react
sendQuitTo(pid);
}
} while (time_spend <= timeout && path == null);
if (path == null) {
} while (time_spend <= timeout && !socket_file.exists());
if (!socket_file.exists()) {
throw new AttachNotSupportedException(
String.format("Unable to open socket file %s: " +
"target process %d doesn't respond within %dms " +
"or HotSpot VM not loaded", f.getPath(), pid, time_spend));
"target process %d doesn't respond within %dms " +
"or HotSpot VM not loaded", socket_path,
pid, time_spend));
}
} finally {
f.delete();
@@ -106,14 +105,14 @@ public class VirtualMachineImpl extends HotSpotVirtualMachine {
// Check the file owner/permissions to avoid attaching to
// a bogus process
checkPermissions(path);
checkPermissions(socket_path);
// Check that we can connect to the process
// - this ensures we throw the permission denied error now rather than
// later when we attempt to enqueue a command.
int s = socket();
try {
connect(s, path);
connect(s, socket_path);
} finally {
close(s);
}
@@ -124,8 +123,8 @@ public class VirtualMachineImpl extends HotSpotVirtualMachine {
*/
public void detach() throws IOException {
synchronized (this) {
if (this.path != null) {
this.path = null;
if (socket_path != null) {
socket_path = null;
}
}
}
@@ -143,12 +142,10 @@ public class VirtualMachineImpl extends HotSpotVirtualMachine {
assert args.length <= 3; // includes null
// did we detach?
String p;
synchronized (this) {
if (this.path == null) {
if (socket_path == null) {
throw new IOException("Detached from target VM");
}
p = this.path;
}
// create UNIX socket
@@ -156,7 +153,7 @@ public class VirtualMachineImpl extends HotSpotVirtualMachine {
// connect to target VM
try {
connect(s, p);
connect(s, socket_path);
} catch (IOException x) {
close(s);
throw x;
@@ -264,14 +261,6 @@ public class VirtualMachineImpl extends HotSpotVirtualMachine {
}
}
// Return the socket file for the given process.
// Checks temp directory for .java_pid<pid>.
private String findSocketFile(int pid) {
String fn = ".java_pid" + pid;
File f = new File(tmpdir, fn);
return f.exists() ? f.getPath() : null;
}
/*
* Writes/sends the given string to the target VM. The string is transmitted in
* UTF-8 encoding.
@@ -282,7 +271,7 @@ public class VirtualMachineImpl extends HotSpotVirtualMachine {
try {
b = s.getBytes("UTF-8");
} catch (java.io.UnsupportedEncodingException x) {
throw new InternalError();
throw new InternalError(x);
}
VirtualMachineImpl.write(fd, b, 0, b.length);
}

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -47,6 +47,7 @@ public class VirtualMachineImpl extends HotSpotVirtualMachine {
// door descriptor
private int fd = -1;
String socket_path;
/**
* Attaches to the target VM
@@ -60,7 +61,7 @@ public class VirtualMachineImpl extends HotSpotVirtualMachine {
try {
pid = Integer.parseInt(vmid);
} catch (NumberFormatException x) {
throw new AttachNotSupportedException("invalid process identifier");
throw new AttachNotSupportedException("Invalid process identifier");
}
// Opens the door file to the target VM. If the file is not
@@ -100,7 +101,7 @@ public class VirtualMachineImpl extends HotSpotVirtualMachine {
throw new AttachNotSupportedException(
String.format("Unable to open door %s: " +
"target process %d doesn't respond within %dms " +
"or HotSpot VM not loaded", f.getPath(), pid, time_spend));
"or HotSpot VM not loaded", socket_path, pid, time_spend));
}
} finally {
f.delete();
@@ -210,13 +211,13 @@ public class VirtualMachineImpl extends HotSpotVirtualMachine {
// The door is attached to .java_pid<pid> in the temporary directory.
private int openDoor(int pid) throws IOException {
String path = tmpdir + "/.java_pid" + pid;
fd = open(path);
socket_path = tmpdir + "/.java_pid" + pid;
fd = open(socket_path);
// Check the file owner/permissions to avoid attaching to
// a bogus process
try {
checkPermissions(path);
checkPermissions(socket_path);
} catch (IOException ioe) {
close(fd);
throw ioe;

View File

@@ -135,8 +135,9 @@ final class CompilerToVM {
/**
* Gets the implementor for the interface class {@code type}.
*
* @return the implementor if there is a single implementor, 0 if there is no implementor, or
* {@code type} itself if there is more than one implementor
* @return the implementor if there is a single implementor, {@code null} if there is no
* implementor, or {@code type} itself if there is more than one implementor
* @throws IllegalArgumentException if type is not an interface type
*/
native HotSpotResolvedObjectTypeImpl getImplementor(HotSpotResolvedObjectTypeImpl type);
@@ -256,14 +257,13 @@ final class CompilerToVM {
native void resolveInvokeHandleInPool(HotSpotConstantPool constantPool, int cpi);
/**
* If {@code cpi} denotes an entry representing a resolved dynamic adapter
* (see {@code resolveInvokeDynamicInPool} and {@code resolveInvokeHandleInPool}),
* return the opcode of the instruction for which the resolution was performed
* ({@code invokedynamic} or {@code invokevirtual}), or {@code -1} otherwise.
* If {@code cpi} denotes an entry representing a resolved dynamic adapter (see
* {@code resolveInvokeDynamicInPool} and {@code resolveInvokeHandleInPool}), return the opcode
* of the instruction for which the resolution was performed ({@code invokedynamic} or
* {@code invokevirtual}), or {@code -1} otherwise.
*/
native int isResolvedInvokeHandleInPool(HotSpotConstantPool constantPool, int cpi);
/**
* Gets the list of type names (in the format of {@link JavaType#getName()}) denoting the
* classes that define signature polymorphic methods.
@@ -388,7 +388,7 @@ final class CompilerToVM {
/**
* Gets the static initializer of {@code type}.
*
* @return 0 if {@code type} has no static initializer
* @return {@code null} if {@code type} has no static initializer
*/
native HotSpotResolvedJavaMethodImpl getClassInitializer(HotSpotResolvedObjectTypeImpl type);
@@ -468,7 +468,8 @@ final class CompilerToVM {
native long getLocalVariableTableStart(HotSpotResolvedJavaMethodImpl method);
/**
* Sets flags on {@code method} indicating that it should never be inlined or compiled by the VM.
* Sets flags on {@code method} indicating that it should never be inlined or compiled by the
* VM.
*/
native void setNotInlinableOrCompilable(HotSpotResolvedJavaMethodImpl method);

View File

@@ -922,7 +922,10 @@ final class HotSpotResolvedObjectTypeImpl extends HotSpotResolvedJavaType implem
}
public ResolvedJavaMethod getClassInitializer() {
return compilerToVM().getClassInitializer(this);
if (!isArray()) {
return compilerToVM().getClassInitializer(this);
}
return null;
}
@Override

View File

@@ -103,6 +103,14 @@ public class GetImplementorTest {
HotSpotResolvedObjectType resolvedIface = CompilerToVMHelper
.lookupTypeHelper(Utils.toJVMTypeSignature(tcase.anInterface),
getClass(), /* resolve = */ true);
if (!resolvedIface.isInterface()) {
try {
CompilerToVMHelper.getImplementor(resolvedIface);
Asserts.fail("Expected " + IllegalArgumentException.class.getName());
} catch (IllegalArgumentException e) {
}
return;
}
HotSpotResolvedObjectType resolvedImplementer = CompilerToVMHelper
.getImplementor(resolvedIface);
HotSpotResolvedObjectType resolvedExpected = null;

View File

@@ -473,6 +473,20 @@ public class TestResolvedJavaType extends TypeUniverse {
metaAccess.lookupJavaType(ConcreteTransitiveImplementor1.class);
metaAccess.lookupJavaType(ConcreteTransitiveImplementor2.class);
assertEquals(aSai2, iSai2.getSingleImplementor());
for (Class<?> c : classes) {
ResolvedJavaType type = metaAccess.lookupJavaType(c);
try {
type.getSingleImplementor();
if (!c.isInterface()) {
throw new AssertionError("Expected exception for calling getSingleImplementor on " + c.getName());
}
} catch (JVMCIError e) {
if (c.isInterface()) {
throw new AssertionError("Unexpected exception", e);
}
}
}
}
@Test(expected = JVMCIError.class)
@@ -830,6 +844,10 @@ public class TestResolvedJavaType extends TypeUniverse {
assertNull(metaAccess.lookupJavaType(C.class).getClassInitializer());
assertNull(metaAccess.lookupJavaType(int.class).getClassInitializer());
assertNull(metaAccess.lookupJavaType(void.class).getClassInitializer());
for (Class<?> c : classes) {
ResolvedJavaType type = metaAccess.lookupJavaType(c);
type.getClassInitializer();
}
}
@Test

View File

@@ -28,7 +28,7 @@
* attempting to use CDS archive. JVM should exit gracefully
* when sharing mode is ON, and continue w/o sharing if sharing
* mode is AUTO.
* @requires (vm.opt.UseCompressedOops == null) | (vm.opt.UseCompressedOops == true)
* @requires vm.cds
* @library /test/lib
* @modules java.base/jdk.internal.misc
* java.management

View File

@@ -25,7 +25,7 @@
* @test
* @requires vm.cds & !vm.graal.enabled
* @summary Testing -Xbootclasspath/a support for CDS
* @requires (vm.opt.UseCompressedOops == null) | (vm.opt.UseCompressedOops == true)
* @requires vm.cds
* @library /test/lib
* @modules java.base/jdk.internal.misc
* java.management

View File

@@ -26,7 +26,7 @@
* @requires vm.cds
* @summary CDS (class data sharing) requires the same -XX:[+-]CompactStrings
* setting between archive creation time and load time.
* @requires (vm.opt.UseCompressedOops == null) | (vm.opt.UseCompressedOops == true)
* @requires vm.cds
* @library /test/lib
* @modules java.base/jdk.internal.misc
* java.management

View File

@@ -29,7 +29,7 @@
* This is a negative test; using object alignment for loading that
* is different from object alignment for creating a CDS file
* should fail when loading.
* @requires (vm.opt.UseCompressedOops == null) | (vm.opt.UseCompressedOops == true)
* @requires vm.cds
* @library /test/lib
* @bug 8025642
* @modules java.base/jdk.internal.misc

View File

@@ -26,7 +26,7 @@
* @requires vm.cds
* @summary Testing CDS (class data sharing) using varying object alignment.
* Using same object alignment for each dump/load pair
* @requires (vm.opt.UseCompressedOops == null) | (vm.opt.UseCompressedOops == true)
* @requires vm.cds
* @library /test/lib
* @modules java.base/jdk.internal.misc
* java.management

View File

@@ -24,7 +24,7 @@
/*
* @test DefaultUseWithClient
* @summary Test default behavior of sharing with -client
* @requires (vm.opt.UseCompressedOops == null) | (vm.opt.UseCompressedOops == true)
* @requires vm.cds
* @library /test/lib
* @modules java.base/jdk.internal.misc
* java.management

View File

@@ -28,7 +28,7 @@
* Feature support: compressed oops/kptrs, 64-bit os, not on windows
* @requires vm.cds
* @requires (sun.arch.data.model != "32") & (os.family != "windows")
* @requires (vm.opt.UseCompressedOops == null) | (vm.opt.UseCompressedOops == true)
* @requires vm.cds
* @library /test/lib
* @modules java.base/jdk.internal.misc
* @run main DumpSharedDictionary

View File

@@ -24,7 +24,7 @@
/*
* @test NonBootLoaderClasses
* @summary Test to ensure platform and app classes are not being archived
* @requires (vm.opt.UseCompressedOops == null) | (vm.opt.UseCompressedOops == true)
* @requires vm.cds
* @library /test/lib
* @modules java.base/jdk.internal.misc
* java.management

Some files were not shown because too many files have changed in this diff