Jesper Wilhelmsson 2018-02-25 04:59:43 +01:00
commit 1739105160
285 changed files with 4287 additions and 4218 deletions

View File

@ -423,7 +423,7 @@ _sequence-do = \
################################################################################
MAX_PARAMS := 35
MAX_PARAMS := 36
PARAM_SEQUENCE := $(call sequence, 2, $(MAX_PARAMS))
# Template for creating a macro taking named parameters. To use it, assign the

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -87,6 +87,7 @@ $(eval $(call SetupNativeCompilation, BUILD_GTEST_LIBJVM, \
DISABLED_WARNINGS_clang := undef switch format-nonliteral \
tautological-undefined-compare $(BUILD_LIBJVM_DISABLED_WARNINGS_clang), \
DISABLED_WARNINGS_solstudio := identexpected, \
DISABLED_WARNINGS_CXX_microsoft := 4996, \
LDFLAGS := $(JVM_LDFLAGS), \
LDFLAGS_solaris := -library=stlport4 $(call SET_SHARED_LIBRARY_ORIGIN), \
LDFLAGS_aix := -bbigtoc, \

View File

@ -43,7 +43,6 @@ $(eval $(call SetupNativeCompilation,BUILD_LIBDT_SOCKET, \
OPTIMIZATION := LOW, \
CFLAGS := $(CFLAGS_JDKLIB) -DUSE_MMAP \
$(LIBDT_SOCKET_CPPFLAGS), \
DISABLED_WARNINGS_gcc := shift-negative-value, \
MAPFILE := $(TOPDIR)/make/mapfiles/libdt_socket/mapfile-vers, \
LDFLAGS := $(LDFLAGS_JDKLIB) \
$(call SET_SHARED_LIBRARY_ORIGIN), \

View File

@ -261,7 +261,6 @@ jdk.jshell
jdk.jshell.execution
jdk.jshell.spi
jdk.jshell.tool
jdk.management.cmm
jdk.management.jfr
jdk.management.resource
jdk.nashorn.api.scripting

View File

@ -684,14 +684,14 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
}
if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) &&
UseTLAB && FastTLABRefill) {
UseTLAB && Universe::heap()->supports_inline_contig_alloc()) {
Label slow_path;
Register obj_size = r2;
Register t1 = r19;
Register t2 = r4;
assert_different_registers(klass, obj, obj_size, t1, t2);
__ stp(r5, r19, Address(__ pre(sp, -2 * wordSize)));
__ stp(r19, zr, Address(__ pre(sp, -2 * wordSize)));
if (id == fast_new_instance_init_check_id) {
// make sure the klass is initialized
@ -716,24 +716,6 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
}
#endif // ASSERT
// if we got here then the TLAB allocation failed, so try
// refilling the TLAB or allocating directly from eden.
Label retry_tlab, try_eden;
__ tlab_refill(retry_tlab, try_eden, slow_path); // does not destroy r3 (klass), returns r5
__ bind(retry_tlab);
// get the instance size (size is postive so movl is fine for 64bit)
__ ldrw(obj_size, Address(klass, Klass::layout_helper_offset()));
__ tlab_allocate(obj, obj_size, 0, t1, t2, slow_path);
__ initialize_object(obj, klass, obj_size, 0, t1, t2, /* is_tlab_allocated */ true);
__ verify_oop(obj);
__ ldp(r5, r19, Address(__ post(sp, 2 * wordSize)));
__ ret(lr);
__ bind(try_eden);
// get the instance size (size is postive so movl is fine for 64bit)
__ ldrw(obj_size, Address(klass, Klass::layout_helper_offset()));
@ -742,11 +724,11 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ initialize_object(obj, klass, obj_size, 0, t1, t2, /* is_tlab_allocated */ false);
__ verify_oop(obj);
__ ldp(r5, r19, Address(__ post(sp, 2 * wordSize)));
__ ldp(r19, zr, Address(__ post(sp, 2 * wordSize)));
__ ret(lr);
__ bind(slow_path);
__ ldp(r5, r19, Address(__ post(sp, 2 * wordSize)));
__ ldp(r19, zr, Address(__ post(sp, 2 * wordSize)));
}
__ enter();
@ -814,7 +796,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
}
#endif // ASSERT
if (UseTLAB && FastTLABRefill) {
if (UseTLAB && Universe::heap()->supports_inline_contig_alloc()) {
Register arr_size = r4;
Register t1 = r2;
Register t2 = r5;
@ -826,45 +808,10 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ cmpw(length, rscratch1);
__ br(Assembler::HI, slow_path);
// if we got here then the TLAB allocation failed, so try
// refilling the TLAB or allocating directly from eden.
Label retry_tlab, try_eden;
const Register thread =
__ tlab_refill(retry_tlab, try_eden, slow_path); // preserves r19 & r3, returns rthread
__ bind(retry_tlab);
// get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
// since size is positive ldrw does right thing on 64bit
__ ldrw(t1, Address(klass, Klass::layout_helper_offset()));
__ lslvw(arr_size, length, t1);
__ ubfx(t1, t1, Klass::_lh_header_size_shift,
exact_log2(Klass::_lh_header_size_mask + 1));
__ add(arr_size, arr_size, t1);
__ add(arr_size, arr_size, MinObjAlignmentInBytesMask); // align up
__ andr(arr_size, arr_size, ~MinObjAlignmentInBytesMask);
__ tlab_allocate(obj, arr_size, 0, t1, t2, slow_path); // preserves arr_size
__ initialize_header(obj, klass, length, t1, t2);
__ ldrb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte)));
assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
__ andr(t1, t1, Klass::_lh_header_size_mask);
__ sub(arr_size, arr_size, t1); // body length
__ add(t1, t1, obj); // body start
if (!ZeroTLAB) {
__ initialize_body(t1, arr_size, 0, t2);
}
__ verify_oop(obj);
__ ret(lr);
__ bind(try_eden);
// get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
// since size is positive ldrw does right thing on 64bit
__ ldrw(t1, Address(klass, Klass::layout_helper_offset()));
// since size is postive movw does right thing on 64bit
// since size is positive movw does right thing on 64bit
__ movw(arr_size, length);
__ lslvw(arr_size, length, t1);
__ ubfx(t1, t1, Klass::_lh_header_size_shift,
@ -874,7 +821,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ andr(arr_size, arr_size, ~MinObjAlignmentInBytesMask);
__ eden_allocate(obj, arr_size, 0, t1, slow_path); // preserves arr_size
__ incr_allocated_bytes(thread, arr_size, 0, rscratch1);
__ incr_allocated_bytes(rthread, arr_size, 0, rscratch1);
__ initialize_header(obj, klass, length, t1, t2);
__ ldrb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte)));

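The change above drops the FastTLABRefill path: when the TLAB fast path fails, the stub no longer refills the TLAB in compiled code but falls through to a direct eden allocation, and that path is only generated when the heap supports inline contiguous allocation. A minimal standalone C++ sketch of what such a bump-pointer eden allocation amounts to (SharedEden and try_inline_alloc are illustrative names, not HotSpot code):

#include <atomic>
#include <cstddef>
#include <cstdint>

struct SharedEden {
  std::atomic<uint8_t*> top;   // shared allocation pointer ("inline contiguous" region)
  uint8_t*              end;   // end of the region

  // Returns storage for size_in_bytes, or nullptr to send the caller to the
  // slow path (a runtime call that may trigger a GC), like slow_path above.
  void* try_inline_alloc(size_t size_in_bytes) {
    uint8_t* old_top = top.load(std::memory_order_relaxed);
    for (;;) {
      if (end - old_top < static_cast<ptrdiff_t>(size_in_bytes)) {
        return nullptr;                                  // eden exhausted
      }
      uint8_t* new_top = old_top + size_in_bytes;
      // The CAS plays the role of eden_allocate()'s atomic top update.
      if (top.compare_exchange_weak(old_top, new_top, std::memory_order_relaxed)) {
        return old_top;                                  // object starts at the old top
      }
      // old_top was refreshed by the failed CAS; retry.
    }
  }
};
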
View File

@ -4096,131 +4096,6 @@ void MacroAssembler::tlab_allocate(Register obj,
// verify_tlab();
}
// Preserves r19, and r3.
Register MacroAssembler::tlab_refill(Label& retry,
Label& try_eden,
Label& slow_case) {
Register top = r0;
Register t1 = r2;
Register t2 = r4;
assert_different_registers(top, rthread, t1, t2, /* preserve: */ r19, r3);
Label do_refill, discard_tlab;
if (!Universe::heap()->supports_inline_contig_alloc()) {
// No allocation in the shared eden.
b(slow_case);
}
ldr(top, Address(rthread, in_bytes(JavaThread::tlab_top_offset())));
ldr(t1, Address(rthread, in_bytes(JavaThread::tlab_end_offset())));
// calculate amount of free space
sub(t1, t1, top);
lsr(t1, t1, LogHeapWordSize);
// Retain tlab and allocate object in shared space if
// the amount free in the tlab is too large to discard.
ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_refill_waste_limit_offset())));
cmp(t1, rscratch1);
br(Assembler::LE, discard_tlab);
// Retain
// ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_refill_waste_limit_offset())));
mov(t2, (int32_t) ThreadLocalAllocBuffer::refill_waste_limit_increment());
add(rscratch1, rscratch1, t2);
str(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_refill_waste_limit_offset())));
if (TLABStats) {
// increment number of slow_allocations
addmw(Address(rthread, in_bytes(JavaThread::tlab_slow_allocations_offset())),
1, rscratch1);
}
b(try_eden);
bind(discard_tlab);
if (TLABStats) {
// increment number of refills
addmw(Address(rthread, in_bytes(JavaThread::tlab_number_of_refills_offset())), 1,
rscratch1);
// accumulate wastage -- t1 is amount free in tlab
addmw(Address(rthread, in_bytes(JavaThread::tlab_fast_refill_waste_offset())), t1,
rscratch1);
}
// if tlab is currently allocated (top or end != null) then
// fill [top, end + alignment_reserve) with array object
cbz(top, do_refill);
// set up the mark word
mov(rscratch1, (intptr_t)markOopDesc::prototype()->copy_set_hash(0x2));
str(rscratch1, Address(top, oopDesc::mark_offset_in_bytes()));
// set the length to the remaining space
sub(t1, t1, typeArrayOopDesc::header_size(T_INT));
add(t1, t1, (int32_t)ThreadLocalAllocBuffer::alignment_reserve());
lsl(t1, t1, log2_intptr(HeapWordSize/sizeof(jint)));
strw(t1, Address(top, arrayOopDesc::length_offset_in_bytes()));
// set klass to intArrayKlass
{
unsigned long offset;
// dubious reloc why not an oop reloc?
adrp(rscratch1, ExternalAddress((address)Universe::intArrayKlassObj_addr()),
offset);
ldr(t1, Address(rscratch1, offset));
}
// store klass last. concurrent gcs assumes klass length is valid if
// klass field is not null.
store_klass(top, t1);
mov(t1, top);
ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_start_offset())));
sub(t1, t1, rscratch1);
incr_allocated_bytes(rthread, t1, 0, rscratch1);
// refill the tlab with an eden allocation
bind(do_refill);
ldr(t1, Address(rthread, in_bytes(JavaThread::tlab_size_offset())));
lsl(t1, t1, LogHeapWordSize);
// allocate new tlab, address returned in top
eden_allocate(top, t1, 0, t2, slow_case);
// Check that t1 was preserved in eden_allocate.
#ifdef ASSERT
if (UseTLAB) {
Label ok;
Register tsize = r4;
assert_different_registers(tsize, rthread, t1);
str(tsize, Address(pre(sp, -16)));
ldr(tsize, Address(rthread, in_bytes(JavaThread::tlab_size_offset())));
lsl(tsize, tsize, LogHeapWordSize);
cmp(t1, tsize);
br(Assembler::EQ, ok);
STOP("assert(t1 != tlab size)");
should_not_reach_here();
bind(ok);
ldr(tsize, Address(post(sp, 16)));
}
#endif
str(top, Address(rthread, in_bytes(JavaThread::tlab_start_offset())));
str(top, Address(rthread, in_bytes(JavaThread::tlab_top_offset())));
add(top, top, t1);
sub(top, top, (int32_t)ThreadLocalAllocBuffer::alignment_reserve_in_bytes());
str(top, Address(rthread, in_bytes(JavaThread::tlab_end_offset())));
if (ZeroTLAB) {
// This is a fast TLAB refill, therefore the GC is not notified of it.
// So compiled code must fill the new TLAB with zeroes.
ldr(top, Address(rthread, in_bytes(JavaThread::tlab_start_offset())));
zero_memory(top,t1,t2);
}
verify_tlab();
b(retry);
return rthread; // for use by caller
}
// Zero words; len is in bytes
// Destroys all registers except addr
// len must be a nonzero multiple of wordSize

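For reference, the deleted tlab_refill() implemented the refill-or-discard policy sketched below: keep a TLAB whose free space is still above the refill waste limit (bumping the limit so this slow path is not hit repeatedly, and serving the current request from the shared eden), otherwise discard the TLAB and take a fresh one from eden. A hedged C++ restatement; the Tlab and Outcome types are invented for illustration:

#include <cstddef>

struct Tlab {
  size_t free_words;             // (end - top) >> LogHeapWordSize
  size_t refill_waste_limit;     // max words we are willing to throw away
  size_t waste_limit_increment;  // ThreadLocalAllocBuffer::refill_waste_limit_increment()
};

enum class Outcome { AllocateInSharedEden, DiscardAndRefill };

Outcome refill_decision(Tlab& t) {
  if (t.free_words > t.refill_waste_limit) {
    // Too much space left to discard: retain the TLAB, raise the limit,
    // and satisfy this one allocation directly from eden (the try_eden path).
    t.refill_waste_limit += t.waste_limit_increment;
    return Outcome::AllocateInSharedEden;
  }
  // Cheap enough to waste: plug the remainder with a filler int[] so the heap
  // stays parseable, then refill the TLAB from eden (the do_refill path).
  return Outcome::DiscardAndRefill;
}
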
View File

@ -861,7 +861,6 @@ public:
Register t2, // temp register
Label& slow_case // continuation point if fast allocation fails
);
Register tlab_refill(Label& retry_tlab, Label& try_eden, Label& slow_case); // returns TLS address
void zero_memory(Register addr, Register len, Register t1);
void verify_tlab();

View File

@ -722,10 +722,10 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
const Register result = R0;
const Register klass = R1;
if (UseTLAB && FastTLABRefill && id != new_instance_id) {
if (UseTLAB && Universe::heap()->supports_inline_contig_alloc() && id != new_instance_id) {
// We come here when TLAB allocation failed.
// In this case we either refill TLAB or allocate directly from eden.
Label retry_tlab, try_eden, slow_case, slow_case_no_pop;
// In this case we try to allocate directly from eden.
Label slow_case, slow_case_no_pop;
// Make sure the class is fully initialized
if (id == fast_new_instance_init_check_id) {
@ -742,17 +742,6 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ raw_push(R4, R5, LR);
__ tlab_refill(result, obj_size, tmp1, tmp2, obj_end, try_eden, slow_case);
__ bind(retry_tlab);
__ ldr_u32(obj_size, Address(klass, Klass::layout_helper_offset()));
__ tlab_allocate(result, obj_end, tmp1, obj_size, slow_case); // initializes result and obj_end
__ initialize_object(result, obj_end, klass, noreg /* len */, tmp1, tmp2,
instanceOopDesc::header_size() * HeapWordSize, -1,
/* is_tlab_allocated */ true);
__ raw_pop_and_ret(R4, R5);
__ bind(try_eden);
__ ldr_u32(obj_size, Address(klass, Klass::layout_helper_offset()));
__ eden_allocate(result, obj_end, tmp1, tmp2, obj_size, slow_case); // initializes result and obj_end
__ incr_allocated_bytes(obj_size, tmp2);
@ -803,10 +792,10 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
const Register klass = R1;
const Register length = R2;
if (UseTLAB && FastTLABRefill) {
if (UseTLAB && Universe::heap()->supports_inline_contig_alloc()) {
// We come here when TLAB allocation failed.
// In this case we either refill TLAB or allocate directly from eden.
Label retry_tlab, try_eden, slow_case, slow_case_no_pop;
// In this case we try to allocate directly from eden.
Label slow_case, slow_case_no_pop;
#ifdef AARCH64
__ mov_slow(Rtemp, C1_MacroAssembler::max_array_allocation_length);
@ -825,40 +814,6 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ raw_push(R4, R5, LR);
__ tlab_refill(result, arr_size, tmp1, tmp2, tmp3, try_eden, slow_case);
__ bind(retry_tlab);
// Get the allocation size: round_up((length << (layout_helper & 0xff)) + header_size)
__ ldr_u32(tmp1, Address(klass, Klass::layout_helper_offset()));
__ mov(arr_size, MinObjAlignmentInBytesMask);
__ and_32(tmp2, tmp1, (unsigned int)(Klass::_lh_header_size_mask << Klass::_lh_header_size_shift));
#ifdef AARCH64
__ lslv_w(tmp3, length, tmp1);
__ add(arr_size, arr_size, tmp3);
#else
__ add(arr_size, arr_size, AsmOperand(length, lsl, tmp1));
#endif // AARCH64
__ add(arr_size, arr_size, AsmOperand(tmp2, lsr, Klass::_lh_header_size_shift));
__ align_reg(arr_size, arr_size, MinObjAlignmentInBytes);
// tlab_allocate initializes result and obj_end, and preserves tmp2 which contains header_size
__ tlab_allocate(result, obj_end, tmp1, arr_size, slow_case);
assert_different_registers(result, obj_end, klass, length, tmp1, tmp2);
__ initialize_header(result, klass, length, tmp1);
__ add(tmp2, result, AsmOperand(tmp2, lsr, Klass::_lh_header_size_shift));
if (!ZeroTLAB) {
__ initialize_body(tmp2, obj_end, tmp1);
}
__ membar(MacroAssembler::StoreStore, tmp1);
__ raw_pop_and_ret(R4, R5);
__ bind(try_eden);
// Get the allocation size: round_up((length << (layout_helper & 0xff)) + header_size)
__ ldr_u32(tmp1, Address(klass, Klass::layout_helper_offset()));
__ mov(arr_size, MinObjAlignmentInBytesMask);

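Both the deleted TLAB path and the surviving eden path compute the array allocation size from the klass layout helper. A standalone model of that arithmetic; the field positions (shift count taken from the low bits, header size in bits 16..23) and the 8-byte object alignment are assumptions based on Klass::layout_helper, so treat the constants as illustrative:

#include <cstdint>

uint64_t array_allocation_size(uint32_t layout_helper, uint32_t length) {
  const uint32_t log2_element_size = layout_helper & 0x1f;          // used as the shift count
  const uint32_t header_size       = (layout_helper >> 16) & 0xff;  // header size in bytes
  const uint64_t align_mask        = 8 - 1;                         // MinObjAlignmentInBytesMask

  // round_up(header_size + (length << log2_element_size), MinObjAlignmentInBytes)
  uint64_t size = header_size + (static_cast<uint64_t>(length) << log2_element_size);
  return (size + align_mask) & ~align_mask;
}
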
View File

@ -1316,98 +1316,6 @@ void MacroAssembler::tlab_allocate(Register obj, Register obj_end, Register tmp1
str(obj_end, Address(Rthread, JavaThread::tlab_top_offset()));
}
void MacroAssembler::tlab_refill(Register top, Register tmp1, Register tmp2,
Register tmp3, Register tmp4,
Label& try_eden, Label& slow_case) {
if (!Universe::heap()->supports_inline_contig_alloc()) {
b(slow_case);
return;
}
InlinedAddress intArrayKlass_addr((address)Universe::intArrayKlassObj_addr());
Label discard_tlab, do_refill;
ldr(top, Address(Rthread, JavaThread::tlab_top_offset()));
ldr(tmp1, Address(Rthread, JavaThread::tlab_end_offset()));
ldr(tmp2, Address(Rthread, JavaThread::tlab_refill_waste_limit_offset()));
// Calculate amount of free space
sub(tmp1, tmp1, top);
// Retain tlab and allocate in shared space
// if the amount of free space in tlab is too large to discard
cmp(tmp2, AsmOperand(tmp1, lsr, LogHeapWordSize));
b(discard_tlab, ge);
// Increment waste limit to prevent getting stuck on this slow path
mov_slow(tmp3, ThreadLocalAllocBuffer::refill_waste_limit_increment());
add(tmp2, tmp2, tmp3);
str(tmp2, Address(Rthread, JavaThread::tlab_refill_waste_limit_offset()));
if (TLABStats) {
ldr_u32(tmp2, Address(Rthread, JavaThread::tlab_slow_allocations_offset()));
add_32(tmp2, tmp2, 1);
str_32(tmp2, Address(Rthread, JavaThread::tlab_slow_allocations_offset()));
}
b(try_eden);
bind_literal(intArrayKlass_addr);
bind(discard_tlab);
if (TLABStats) {
ldr_u32(tmp2, Address(Rthread, JavaThread::tlab_number_of_refills_offset()));
ldr_u32(tmp3, Address(Rthread, JavaThread::tlab_fast_refill_waste_offset()));
add_32(tmp2, tmp2, 1);
add_32(tmp3, tmp3, AsmOperand(tmp1, lsr, LogHeapWordSize));
str_32(tmp2, Address(Rthread, JavaThread::tlab_number_of_refills_offset()));
str_32(tmp3, Address(Rthread, JavaThread::tlab_fast_refill_waste_offset()));
}
// If tlab is currently allocated (top or end != null)
// then fill [top, end + alignment_reserve) with array object
cbz(top, do_refill);
// Set up the mark word
mov_slow(tmp2, (intptr_t)markOopDesc::prototype()->copy_set_hash(0x2));
str(tmp2, Address(top, oopDesc::mark_offset_in_bytes()));
// Set klass to intArrayKlass and the length to the remaining space
ldr_literal(tmp2, intArrayKlass_addr);
add(tmp1, tmp1, ThreadLocalAllocBuffer::alignment_reserve_in_bytes() -
typeArrayOopDesc::header_size(T_INT) * HeapWordSize);
Register klass = tmp2;
ldr(klass, Address(tmp2));
logical_shift_right(tmp1, tmp1, LogBytesPerInt); // divide by sizeof(jint)
str_32(tmp1, Address(top, arrayOopDesc::length_offset_in_bytes()));
store_klass(klass, top); // blows klass:
klass = noreg;
ldr(tmp1, Address(Rthread, JavaThread::tlab_start_offset()));
sub(tmp1, top, tmp1); // size of tlab's allocated portion
incr_allocated_bytes(tmp1, tmp2);
bind(do_refill);
// Refill the tlab with an eden allocation
ldr(tmp1, Address(Rthread, JavaThread::tlab_size_offset()));
logical_shift_left(tmp4, tmp1, LogHeapWordSize);
eden_allocate(top, tmp1, tmp2, tmp3, tmp4, slow_case);
str(top, Address(Rthread, JavaThread::tlab_start_offset()));
str(top, Address(Rthread, JavaThread::tlab_top_offset()));
#ifdef ASSERT
// Verify that tmp1 contains tlab_end
ldr(tmp2, Address(Rthread, JavaThread::tlab_size_offset()));
add(tmp2, top, AsmOperand(tmp2, lsl, LogHeapWordSize));
cmp(tmp1, tmp2);
breakpoint(ne);
#endif
sub(tmp1, tmp1, ThreadLocalAllocBuffer::alignment_reserve_in_bytes());
str(tmp1, Address(Rthread, JavaThread::tlab_end_offset()));
if (ZeroTLAB) {
// clobbers start and tmp
// top must be preserved!
add(tmp1, tmp1, ThreadLocalAllocBuffer::alignment_reserve_in_bytes());
ldr(tmp2, Address(Rthread, JavaThread::tlab_start_offset()));
zero_memory(tmp2, tmp1, tmp3);
}
}
// Fills memory regions [start..end] with zeroes. Clobbers `start` and `tmp` registers.
void MacroAssembler::zero_memory(Register start, Register end, Register tmp) {
Label loop;

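One job of the deleted ARM tlab_refill(), like its AArch64 counterpart above, was to plug the abandoned TLAB tail with an int[] filler so the heap stays linearly parseable for the GC. A plain C++ restatement of that filler-length computation, assuming 64-bit heap words; the names are illustrative:

#include <cstddef>
#include <cstdint>

size_t filler_int_array_length(size_t free_words,              // (end - top) in heap words
                               size_t alignment_reserve_words, // words kept past 'end'
                               size_t int_array_header_words) {
  const size_t heap_word_size = 8;                                  // HeapWordSize on LP64
  const size_t jints_per_word = heap_word_size / sizeof(int32_t);   // == 2
  // Words available for the int[] body, expressed as a jint element count.
  return (free_words - int_array_header_words + alignment_reserve_words) * jints_per_word;
}
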
View File

@ -359,8 +359,6 @@ public:
void tlab_allocate(Register obj, Register obj_end, Register tmp1,
RegisterOrConstant size_expression, Label& slow_case);
void tlab_refill(Register top, Register tmp1, Register tmp2, Register tmp3, Register tmp4,
Label& try_eden, Label& slow_case);
void zero_memory(Register start, Register end, Register tmp);
void incr_allocated_bytes(RegisterOrConstant size_in_bytes, Register tmp);

View File

@ -413,34 +413,9 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
assert(id == fast_new_instance_init_check_id, "bad StubID");
__ set_info("fast new_instance init check", dont_gc_arguments);
}
// We don't support eden allocation.
// if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) &&
// UseTLAB && FastTLABRefill) {
// if (id == fast_new_instance_init_check_id) {
// // make sure the klass is initialized
// __ lbz(R0, in_bytes(InstanceKlass::init_state_offset()), R3_ARG1);
// __ cmpwi(CCR0, R0, InstanceKlass::fully_initialized);
// __ bne(CCR0, slow_path);
// }
//#ifdef ASSERT
// // assert object can be fast path allocated
// {
// Label ok, not_ok;
// __ lwz(R0, in_bytes(Klass::layout_helper_offset()), R3_ARG1);
// // make sure it's an instance (LH > 0)
// __ cmpwi(CCR0, R0, 0);
// __ ble(CCR0, not_ok);
// __ testbitdi(CCR0, R0, R0, Klass::_lh_instance_slow_path_bit);
// __ beq(CCR0, ok);
//
// __ bind(not_ok);
// __ stop("assert(can be fast path allocated)");
// __ bind(ok);
// }
//#endif // ASSERT
// // We don't support eden allocation.
// __ bind(slow_path);
// }
oop_maps = generate_stub_call(sasm, R3_RET, CAST_FROM_FN_PTR(address, new_instance), R4_ARG2);
}
break;

View File

@ -2336,9 +2336,6 @@ void MacroAssembler::tlab_allocate(
std(new_top, in_bytes(JavaThread::tlab_top_offset()), R16_thread);
//verify_tlab(); not implemented
}
void MacroAssembler::tlab_refill(Label& retry_tlab, Label& try_eden, Label& slow_case) {
unimplemented("tlab_refill");
}
void MacroAssembler::incr_allocated_bytes(RegisterOrConstant size_in_bytes, Register t1, Register t2) {
unimplemented("incr_allocated_bytes");
}

View File

@ -602,7 +602,6 @@ class MacroAssembler: public Assembler {
Register t1, // temp register
Label& slow_case // continuation point if fast allocation fails
);
void tlab_refill(Label& retry_tlab, Label& try_eden, Label& slow_case);
void incr_allocated_bytes(RegisterOrConstant size_in_bytes, Register t1, Register t2);
enum { trampoline_stub_size = 6 * 4 };

View File

@ -346,11 +346,6 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ set_info("fast new_instance init check", dont_gc_arguments);
}
if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) &&
UseTLAB && FastTLABRefill) {
// Sapjvm: must call RT to generate allocation events.
}
OopMap* map = save_live_registers_except_r2(sasm);
int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
oop_maps = new OopMapSet();
@ -411,10 +406,6 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
}
#endif // ASSERT
if (UseTLAB && FastTLABRefill) {
// sapjvm: must call RT to generate allocation events.
}
OopMap* map = save_live_registers_except_r2(sasm);
int call_offset;
if (id == new_type_array_id) {

View File

@ -389,7 +389,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
}
if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) &&
UseTLAB && FastTLABRefill) {
UseTLAB && Universe::heap()->supports_inline_contig_alloc()) {
Label slow_path;
Register G1_obj_size = G1;
Register G3_t1 = G3;
@ -424,25 +424,8 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ bind(ok);
}
#endif // ASSERT
// if we got here then the TLAB allocation failed, so try
// refilling the TLAB or allocating directly from eden.
Label retry_tlab, try_eden;
__ tlab_refill(retry_tlab, try_eden, slow_path); // preserves G5_klass
__ bind(retry_tlab);
// get the instance size
__ ld(G5_klass, in_bytes(Klass::layout_helper_offset()), G1_obj_size);
__ tlab_allocate(O0_obj, G1_obj_size, 0, G3_t1, slow_path);
__ initialize_object(O0_obj, G5_klass, G1_obj_size, 0, G3_t1, G4_t2, /* is_tlab_allocated */ true);
__ verify_oop(O0_obj);
__ mov(O0, I0);
__ ret();
__ delayed()->restore();
__ bind(try_eden);
// If we got here then the TLAB allocation failed, so try allocating directly from eden.
// get the instance size
__ ld(G5_klass, in_bytes(Klass::layout_helper_offset()), G1_obj_size);
__ eden_allocate(O0_obj, G1_obj_size, 0, G3_t1, G4_t2, slow_path);
@ -508,73 +491,6 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
}
#endif // ASSERT
if (UseTLAB && FastTLABRefill) {
Label slow_path;
Register G1_arr_size = G1;
Register G3_t1 = G3;
Register O1_t2 = O1;
assert_different_registers(G5_klass, G4_length, G1_arr_size, G3_t1, O1_t2);
// check that array length is small enough for fast path
__ set(C1_MacroAssembler::max_array_allocation_length, G3_t1);
__ cmp(G4_length, G3_t1);
__ br(Assembler::greaterUnsigned, false, Assembler::pn, slow_path);
__ delayed()->nop();
// if we got here then the TLAB allocation failed, so try
// refilling the TLAB or allocating directly from eden.
Label retry_tlab, try_eden;
__ tlab_refill(retry_tlab, try_eden, slow_path); // preserves G4_length and G5_klass
__ bind(retry_tlab);
// get the allocation size: (length << (layout_helper & 0x1F)) + header_size
__ ld(klass_lh, G3_t1);
__ sll(G4_length, G3_t1, G1_arr_size);
__ srl(G3_t1, Klass::_lh_header_size_shift, G3_t1);
__ and3(G3_t1, Klass::_lh_header_size_mask, G3_t1);
__ add(G1_arr_size, G3_t1, G1_arr_size);
__ add(G1_arr_size, MinObjAlignmentInBytesMask, G1_arr_size); // align up
__ and3(G1_arr_size, ~MinObjAlignmentInBytesMask, G1_arr_size);
__ tlab_allocate(O0_obj, G1_arr_size, 0, G3_t1, slow_path); // preserves G1_arr_size
__ initialize_header(O0_obj, G5_klass, G4_length, G3_t1, O1_t2);
__ ldub(klass_lh, G3_t1, klass_lh_header_size_offset);
__ sub(G1_arr_size, G3_t1, O1_t2); // body length
__ add(O0_obj, G3_t1, G3_t1); // body start
if (!ZeroTLAB) {
__ initialize_body(G3_t1, O1_t2);
}
__ verify_oop(O0_obj);
__ retl();
__ delayed()->nop();
__ bind(try_eden);
// get the allocation size: (length << (layout_helper & 0x1F)) + header_size
__ ld(klass_lh, G3_t1);
__ sll(G4_length, G3_t1, G1_arr_size);
__ srl(G3_t1, Klass::_lh_header_size_shift, G3_t1);
__ and3(G3_t1, Klass::_lh_header_size_mask, G3_t1);
__ add(G1_arr_size, G3_t1, G1_arr_size);
__ add(G1_arr_size, MinObjAlignmentInBytesMask, G1_arr_size);
__ and3(G1_arr_size, ~MinObjAlignmentInBytesMask, G1_arr_size);
__ eden_allocate(O0_obj, G1_arr_size, 0, G3_t1, O1_t2, slow_path); // preserves G1_arr_size
__ incr_allocated_bytes(G1_arr_size, G3_t1, O1_t2);
__ initialize_header(O0_obj, G5_klass, G4_length, G3_t1, O1_t2);
__ ldub(klass_lh, G3_t1, klass_lh_header_size_offset);
__ sub(G1_arr_size, G3_t1, O1_t2); // body length
__ add(O0_obj, G3_t1, G3_t1); // body start
__ initialize_body(G3_t1, O1_t2);
__ verify_oop(O0_obj);
__ retl();
__ delayed()->nop();
__ bind(slow_path);
}
if (id == new_type_array_id) {
oop_maps = generate_stub_call(sasm, I0, CAST_FROM_FN_PTR(address, new_type_array), G5_klass, G4_length);
} else {

View File

@ -3242,127 +3242,6 @@ void MacroAssembler::tlab_allocate(
verify_tlab();
}
void MacroAssembler::tlab_refill(Label& retry, Label& try_eden, Label& slow_case) {
Register top = O0;
Register t1 = G1;
Register t2 = G3;
Register t3 = O1;
assert_different_registers(top, t1, t2, t3, G4, G5 /* preserve G4 and G5 */);
Label do_refill, discard_tlab;
if (!Universe::heap()->supports_inline_contig_alloc()) {
// No allocation in the shared eden.
ba(slow_case);
delayed()->nop();
}
ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), top);
ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), t1);
ld_ptr(G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), t2);
// calculate amount of free space
sub(t1, top, t1);
srl_ptr(t1, LogHeapWordSize, t1);
// Retain tlab and allocate object in shared space if
// the amount free in the tlab is too large to discard.
cmp(t1, t2);
brx(Assembler::lessEqual, false, Assembler::pt, discard_tlab);
// increment waste limit to prevent getting stuck on this slow path
if (Assembler::is_simm13(ThreadLocalAllocBuffer::refill_waste_limit_increment())) {
delayed()->add(t2, ThreadLocalAllocBuffer::refill_waste_limit_increment(), t2);
} else {
delayed()->nop();
// set64 does not use the temp register if the given constant is 32 bit. So
// we can just use any register; using G0 results in ignoring of the upper 32 bit
// of that value.
set64(ThreadLocalAllocBuffer::refill_waste_limit_increment(), t3, G0);
add(t2, t3, t2);
}
st_ptr(t2, G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()));
if (TLABStats) {
// increment number of slow_allocations
ld(G2_thread, in_bytes(JavaThread::tlab_slow_allocations_offset()), t2);
add(t2, 1, t2);
stw(t2, G2_thread, in_bytes(JavaThread::tlab_slow_allocations_offset()));
}
ba(try_eden);
delayed()->nop();
bind(discard_tlab);
if (TLABStats) {
// increment number of refills
ld(G2_thread, in_bytes(JavaThread::tlab_number_of_refills_offset()), t2);
add(t2, 1, t2);
stw(t2, G2_thread, in_bytes(JavaThread::tlab_number_of_refills_offset()));
// accumulate wastage
ld(G2_thread, in_bytes(JavaThread::tlab_fast_refill_waste_offset()), t2);
add(t2, t1, t2);
stw(t2, G2_thread, in_bytes(JavaThread::tlab_fast_refill_waste_offset()));
}
// if tlab is currently allocated (top or end != null) then
// fill [top, end + alignment_reserve) with array object
br_null_short(top, Assembler::pn, do_refill);
set((intptr_t)markOopDesc::prototype()->copy_set_hash(0x2), t2);
st_ptr(t2, top, oopDesc::mark_offset_in_bytes()); // set up the mark word
// set klass to intArrayKlass
sub(t1, typeArrayOopDesc::header_size(T_INT), t1);
add(t1, ThreadLocalAllocBuffer::alignment_reserve(), t1);
sll_ptr(t1, log2_intptr(HeapWordSize/sizeof(jint)), t1);
st(t1, top, arrayOopDesc::length_offset_in_bytes());
set((intptr_t)Universe::intArrayKlassObj_addr(), t2);
ld_ptr(t2, 0, t2);
// store klass last. concurrent gcs assumes klass length is valid if
// klass field is not null.
store_klass(t2, top);
verify_oop(top);
ld_ptr(G2_thread, in_bytes(JavaThread::tlab_start_offset()), t1);
sub(top, t1, t1); // size of tlab's allocated portion
incr_allocated_bytes(t1, t2, t3);
// refill the tlab with an eden allocation
bind(do_refill);
ld_ptr(G2_thread, in_bytes(JavaThread::tlab_size_offset()), t1);
sll_ptr(t1, LogHeapWordSize, t1);
// allocate new tlab, address returned in top
eden_allocate(top, t1, 0, t2, t3, slow_case);
st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_start_offset()));
st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_top_offset()));
#ifdef ASSERT
// check that tlab_size (t1) is still valid
{
Label ok;
ld_ptr(G2_thread, in_bytes(JavaThread::tlab_size_offset()), t2);
sll_ptr(t2, LogHeapWordSize, t2);
cmp_and_br_short(t1, t2, Assembler::equal, Assembler::pt, ok);
STOP("assert(t1 == tlab_size)");
should_not_reach_here();
bind(ok);
}
#endif // ASSERT
add(top, t1, top); // t1 is tlab_size
sub(top, ThreadLocalAllocBuffer::alignment_reserve_in_bytes(), top);
st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_end_offset()));
if (ZeroTLAB) {
// This is a fast TLAB refill, therefore the GC is not notified of it.
// So compiled code must fill the new TLAB with zeroes.
ld_ptr(G2_thread, in_bytes(JavaThread::tlab_start_offset()), t2);
zero_memory(t2, t1);
}
verify_tlab();
ba(retry);
delayed()->nop();
}
void MacroAssembler::zero_memory(Register base, Register index) {
assert_different_registers(base, index);
Label loop;

View File

@ -1266,7 +1266,6 @@ public:
Register t1, // temp register
Label& slow_case // continuation point if fast allocation fails
);
void tlab_refill(Label& retry_tlab, Label& try_eden, Label& slow_case);
void zero_memory(Register base, Register index);
void incr_allocated_bytes(RegisterOrConstant size_in_bytes,
Register t1, Register t2);

View File

@ -529,12 +529,16 @@ void LIR_Assembler::return_op(LIR_Opr result) {
if (SafepointMechanism::uses_thread_local_poll()) {
#ifdef _LP64
__ movptr(rscratch1, Address(r15_thread, Thread::polling_page_offset()));
__ relocate(relocInfo::poll_return_type);
__ testl(rax, Address(rscratch1, 0));
const Register poll_addr = rscratch1;
__ movptr(poll_addr, Address(r15_thread, Thread::polling_page_offset()));
#else
ShouldNotReachHere();
const Register poll_addr = rbx;
assert(FrameMap::is_caller_save_register(poll_addr), "will overwrite");
__ get_thread(poll_addr);
__ movptr(poll_addr, Address(poll_addr, Thread::polling_page_offset()));
#endif
__ relocate(relocInfo::poll_return_type);
__ testl(rax, Address(poll_addr, 0));
} else {
AddressLiteral polling_page(os::get_polling_page(), relocInfo::poll_return_type);
@ -555,16 +559,20 @@ int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
int offset = __ offset();
if (SafepointMechanism::uses_thread_local_poll()) {
#ifdef _LP64
__ movptr(rscratch1, Address(r15_thread, Thread::polling_page_offset()));
const Register poll_addr = rscratch1;
__ movptr(poll_addr, Address(r15_thread, Thread::polling_page_offset()));
#else
assert(tmp->is_cpu_register(), "needed");
const Register poll_addr = tmp->as_register();
__ get_thread(poll_addr);
__ movptr(poll_addr, Address(poll_addr, in_bytes(Thread::polling_page_offset())));
#endif
add_debug_info_for_branch(info);
__ relocate(relocInfo::poll_type);
address pre_pc = __ pc();
__ testl(rax, Address(rscratch1, 0));
__ testl(rax, Address(poll_addr, 0));
address post_pc = __ pc();
guarantee(pointer_delta(post_pc, pre_pc, 1) == 3, "must be exact length");
#else
ShouldNotReachHere();
#endif
guarantee(pointer_delta(post_pc, pre_pc, 1) == 2 LP64_ONLY(+1), "must be exact length");
} else {
AddressLiteral polling_page(os::get_polling_page(), relocInfo::poll_type);
if (Assembler::is_polling_page_far()) {

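On 64-bit the current thread lives in r15, so the poll address can be loaded from it directly; the new 32-bit branch must first materialize the thread with get_thread() into a caller-saved register. A rough model of the data flow behind such a thread-local return poll; Thread, get_current_thread and return_poll are stand-ins to show the idea, not JDK APIs:

#include <cstdint>

struct Thread {
  volatile uintptr_t* polling_page;  // what Thread::polling_page_offset() locates
};

inline Thread* get_current_thread() {
  static uintptr_t safe_page[1] = {0};
  static Thread self = { safe_page };
  return &self;                      // stand-in for get_thread() / r15_thread
}

inline void return_poll() {
  volatile uintptr_t* poll_addr = get_current_thread()->polling_page;
  // Corresponds to 'testl rax, [poll_addr]': a harmless read normally, a fault
  // (and thus entry into the VM) once the polling page has been armed.
  (void)*poll_addr;
}
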
View File

@ -143,6 +143,7 @@ bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const {
LIR_Opr LIRGenerator::safepoint_poll_register() {
NOT_LP64( if (SafepointMechanism::uses_thread_local_poll()) { return new_register(T_ADDRESS); } )
return LIR_OprFact::illegalOpr;
}
@ -1515,7 +1516,7 @@ void LIRGenerator::do_If(If* x) {
if (x->is_safepoint()) {
// increment backedge counter if needed
increment_backedge_counter(state_for(x, x->state_before()), x->profiled_bci());
__ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
__ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
}
set_no_result(x);

View File

@ -994,8 +994,8 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ set_info("fast new_instance init check", dont_gc_arguments);
}
if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) &&
UseTLAB && FastTLABRefill) {
if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) && UseTLAB
&& Universe::heap()->supports_inline_contig_alloc()) {
Label slow_path;
Register obj_size = rcx;
Register t1 = rbx;
@ -1030,21 +1030,8 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
// if we got here then the TLAB allocation failed, so try
// refilling the TLAB or allocating directly from eden.
Label retry_tlab, try_eden;
const Register thread =
__ tlab_refill(retry_tlab, try_eden, slow_path); // does not destroy rdx (klass), returns rdi
__ bind(retry_tlab);
// get the instance size (size is postive so movl is fine for 64bit)
__ movl(obj_size, Address(klass, Klass::layout_helper_offset()));
__ tlab_allocate(obj, obj_size, 0, t1, t2, slow_path);
__ initialize_object(obj, klass, obj_size, 0, t1, t2, /* is_tlab_allocated */ true);
__ verify_oop(obj);
__ pop(rbx);
__ pop(rdi);
__ ret(0);
const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);
NOT_LP64(__ get_thread(thread));
__ bind(try_eden);
// get the instance size (size is postive so movl is fine for 64bit)
@ -1128,54 +1115,14 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
}
#endif // ASSERT
if (UseTLAB && FastTLABRefill) {
// If we got here, the TLAB allocation failed, so try allocating from
// eden if inline contiguous allocations are supported.
if (UseTLAB && Universe::heap()->supports_inline_contig_alloc()) {
Register arr_size = rsi;
Register t1 = rcx; // must be rcx for use as shift count
Register t2 = rdi;
Label slow_path;
assert_different_registers(length, klass, obj, arr_size, t1, t2);
// check that array length is small enough for fast path.
__ cmpl(length, C1_MacroAssembler::max_array_allocation_length);
__ jcc(Assembler::above, slow_path);
// if we got here then the TLAB allocation failed, so try
// refilling the TLAB or allocating directly from eden.
Label retry_tlab, try_eden;
const Register thread =
__ tlab_refill(retry_tlab, try_eden, slow_path); // preserves rbx & rdx, returns rdi
__ bind(retry_tlab);
// get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
// since size is positive movl does right thing on 64bit
__ movl(t1, Address(klass, Klass::layout_helper_offset()));
// since size is postive movl does right thing on 64bit
__ movl(arr_size, length);
assert(t1 == rcx, "fixed register usage");
__ shlptr(arr_size /* by t1=rcx, mod 32 */);
__ shrptr(t1, Klass::_lh_header_size_shift);
__ andptr(t1, Klass::_lh_header_size_mask);
__ addptr(arr_size, t1);
__ addptr(arr_size, MinObjAlignmentInBytesMask); // align up
__ andptr(arr_size, ~MinObjAlignmentInBytesMask);
__ tlab_allocate(obj, arr_size, 0, t1, t2, slow_path); // preserves arr_size
__ initialize_header(obj, klass, length, t1, t2);
__ movb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte)));
assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
__ andptr(t1, Klass::_lh_header_size_mask);
__ subptr(arr_size, t1); // body length
__ addptr(t1, obj); // body start
if (!ZeroTLAB) {
__ initialize_body(t1, arr_size, 0, t2);
}
__ verify_oop(obj);
__ ret(0);
__ bind(try_eden);
// get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
// since size is positive movl does right thing on 64bit
__ movl(t1, Address(klass, Klass::layout_helper_offset()));
@ -1190,6 +1137,10 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ andptr(arr_size, ~MinObjAlignmentInBytesMask);
__ eden_allocate(obj, arr_size, 0, t1, slow_path); // preserves arr_size
// Using t2 for non 64-bit.
const Register thread = NOT_LP64(t2) LP64_ONLY(r15_thread);
NOT_LP64(__ get_thread(thread));
__ incr_allocated_bytes(thread, arr_size, 0);
__ initialize_header(obj, klass, length, t1, t2);

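The NOT_LP64()/LP64_ONLY() pair used to pick the thread register above selects code per word size at preprocessing time. A minimal equivalent of HotSpot's macros, shown only to make the expansion explicit:

#ifdef _LP64
  #define LP64_ONLY(code) code
  #define NOT_LP64(code)
#else
  #define LP64_ONLY(code)
  #define NOT_LP64(code) code
#endif

// So 'const Register thread = NOT_LP64(t2) LP64_ONLY(r15_thread);' expands to
// 'const Register thread = t2;' on 32-bit and to
// 'const Register thread = r15_thread;' on 64-bit builds.
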
View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -65,9 +65,6 @@ const bool CCallingConventionRequiresIntsAsLongs = false;
#define SUPPORT_RESERVED_STACK_AREA
#endif
#ifdef _LP64
// X64 have implemented the local polling
#define THREAD_LOCAL_POLL
#endif
#endif // CPU_X86_VM_GLOBALDEFINITIONS_X86_HPP

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -97,9 +97,10 @@ define_pd_global(bool, PreserveFramePointer, false);
define_pd_global(intx, InitArrayShortSize, 8*BytesPerLong);
#ifdef _LP64
#if defined(_LP64) || defined(_WINDOWS)
define_pd_global(bool, ThreadLocalHandshakes, true);
#else
// get_thread() is slow on linux 32 bit, therefore off by default
define_pd_global(bool, ThreadLocalHandshakes, false);
#endif

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -830,13 +830,12 @@ void InterpreterMacroAssembler::dispatch_base(TosState state,
if (verifyoop) {
verify_oop(rax, state);
}
#ifdef _LP64
Label no_safepoint, dispatch;
address* const safepoint_table = Interpreter::safept_table(state);
#ifdef _LP64
Label no_safepoint, dispatch;
if (SafepointMechanism::uses_thread_local_poll() && table != safepoint_table && generate_poll) {
NOT_PRODUCT(block_comment("Thread-local Safepoint poll"));
testb(Address(r15_thread, Thread::polling_page_offset()), SafepointMechanism::poll_bit());
jccb(Assembler::zero, no_safepoint);
@ -851,9 +850,23 @@ void InterpreterMacroAssembler::dispatch_base(TosState state,
#else
Address index(noreg, rbx, Address::times_ptr);
ExternalAddress tbl((address)table);
ArrayAddress dispatch(tbl, index);
jump(dispatch);
if (SafepointMechanism::uses_thread_local_poll() && table != safepoint_table && generate_poll) {
NOT_PRODUCT(block_comment("Thread-local Safepoint poll"));
Label no_safepoint;
const Register thread = rcx;
get_thread(thread);
testb(Address(thread, Thread::polling_page_offset()), SafepointMechanism::poll_bit());
jccb(Assembler::zero, no_safepoint);
ArrayAddress dispatch_addr(ExternalAddress((address)safepoint_table), index);
jump(dispatch_addr);
bind(no_safepoint);
}
{
ArrayAddress dispatch_addr(ExternalAddress((address)table), index);
jump(dispatch_addr);
}
#endif // _LP64
}

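The interpreter change mirrors the existing 64-bit logic: when this dispatch site is allowed to poll and the thread's poll bit is set, dispatch goes through the parallel safepoint table instead of the normal table. A small C++ sketch of that selection; the handler and table types are invented for illustration:

#include <cstddef>
#include <cstdint>

using BytecodeHandler = void (*)();

struct Thread {
  uintptr_t polling_word;            // low bit acts as the poll bit when armed
};

constexpr uintptr_t kPollBit = 1;

const BytecodeHandler* select_dispatch_table(const Thread& self,
                                             const BytecodeHandler* table,
                                             const BytecodeHandler* safepoint_table,
                                             bool generate_poll) {
  if (generate_poll && table != safepoint_table &&
      (self.polling_word & kPollBit) != 0) {
    return safepoint_table;          // entries reach a safepoint before dispatching
  }
  return table;                      // normal per-bytecode entry points
}
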
View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -3767,10 +3767,17 @@ void MacroAssembler::serialize_memory(Register thread, Register tmp) {
movl(as_Address(ArrayAddress(page, index)), tmp);
}
#ifdef _LP64
void MacroAssembler::safepoint_poll(Label& slow_path, Register thread_reg, Register temp_reg) {
if (SafepointMechanism::uses_thread_local_poll()) {
testb(Address(r15_thread, Thread::polling_page_offset()), SafepointMechanism::poll_bit());
#ifdef _LP64
assert(thread_reg == r15_thread, "should be");
#else
if (thread_reg == noreg) {
thread_reg = temp_reg;
get_thread(thread_reg);
}
#endif
testb(Address(thread_reg, Thread::polling_page_offset()), SafepointMechanism::poll_bit());
jcc(Assembler::notZero, slow_path); // handshake bit set implies poll
} else {
cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
@ -3778,13 +3785,6 @@ void MacroAssembler::safepoint_poll(Label& slow_path, Register thread_reg, Regis
jcc(Assembler::notEqual, slow_path);
}
}
#else
void MacroAssembler::safepoint_poll(Label& slow_path) {
cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
SafepointSynchronize::_not_synchronized);
jcc(Assembler::notEqual, slow_path);
}
#endif
// Calls to C land
//
@ -5604,121 +5604,6 @@ void MacroAssembler::tlab_allocate(Register obj,
verify_tlab();
}
// Preserves rbx, and rdx.
Register MacroAssembler::tlab_refill(Label& retry,
Label& try_eden,
Label& slow_case) {
Register top = rax;
Register t1 = rcx; // object size
Register t2 = rsi;
Register thread_reg = NOT_LP64(rdi) LP64_ONLY(r15_thread);
assert_different_registers(top, thread_reg, t1, t2, /* preserve: */ rbx, rdx);
Label do_refill, discard_tlab;
if (!Universe::heap()->supports_inline_contig_alloc()) {
// No allocation in the shared eden.
jmp(slow_case);
}
NOT_LP64(get_thread(thread_reg));
movptr(top, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())));
movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())));
// calculate amount of free space
subptr(t1, top);
shrptr(t1, LogHeapWordSize);
// Retain tlab and allocate object in shared space if
// the amount free in the tlab is too large to discard.
cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_refill_waste_limit_offset())));
jcc(Assembler::lessEqual, discard_tlab);
// Retain
// %%% yuck as movptr...
movptr(t2, (int32_t) ThreadLocalAllocBuffer::refill_waste_limit_increment());
addptr(Address(thread_reg, in_bytes(JavaThread::tlab_refill_waste_limit_offset())), t2);
if (TLABStats) {
// increment number of slow_allocations
addl(Address(thread_reg, in_bytes(JavaThread::tlab_slow_allocations_offset())), 1);
}
jmp(try_eden);
bind(discard_tlab);
if (TLABStats) {
// increment number of refills
addl(Address(thread_reg, in_bytes(JavaThread::tlab_number_of_refills_offset())), 1);
// accumulate wastage -- t1 is amount free in tlab
addl(Address(thread_reg, in_bytes(JavaThread::tlab_fast_refill_waste_offset())), t1);
}
// if tlab is currently allocated (top or end != null) then
// fill [top, end + alignment_reserve) with array object
testptr(top, top);
jcc(Assembler::zero, do_refill);
// set up the mark word
movptr(Address(top, oopDesc::mark_offset_in_bytes()), (intptr_t)markOopDesc::prototype()->copy_set_hash(0x2));
// set the length to the remaining space
subptr(t1, typeArrayOopDesc::header_size(T_INT));
addptr(t1, (int32_t)ThreadLocalAllocBuffer::alignment_reserve());
shlptr(t1, log2_intptr(HeapWordSize/sizeof(jint)));
movl(Address(top, arrayOopDesc::length_offset_in_bytes()), t1);
// set klass to intArrayKlass
// dubious reloc why not an oop reloc?
movptr(t1, ExternalAddress((address)Universe::intArrayKlassObj_addr()));
// store klass last. concurrent gcs assumes klass length is valid if
// klass field is not null.
store_klass(top, t1);
movptr(t1, top);
subptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_start_offset())));
incr_allocated_bytes(thread_reg, t1, 0);
// refill the tlab with an eden allocation
bind(do_refill);
movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_size_offset())));
shlptr(t1, LogHeapWordSize);
// allocate new tlab, address returned in top
eden_allocate(top, t1, 0, t2, slow_case);
// Check that t1 was preserved in eden_allocate.
#ifdef ASSERT
if (UseTLAB) {
Label ok;
Register tsize = rsi;
assert_different_registers(tsize, thread_reg, t1);
push(tsize);
movptr(tsize, Address(thread_reg, in_bytes(JavaThread::tlab_size_offset())));
shlptr(tsize, LogHeapWordSize);
cmpptr(t1, tsize);
jcc(Assembler::equal, ok);
STOP("assert(t1 != tlab size)");
should_not_reach_here();
bind(ok);
pop(tsize);
}
#endif
movptr(Address(thread_reg, in_bytes(JavaThread::tlab_start_offset())), top);
movptr(Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())), top);
addptr(top, t1);
subptr(top, (int32_t)ThreadLocalAllocBuffer::alignment_reserve_in_bytes());
movptr(Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())), top);
if (ZeroTLAB) {
// This is a fast TLAB refill, therefore the GC is not notified of it.
// So compiled code must fill the new TLAB with zeroes.
movptr(top, Address(thread_reg, in_bytes(JavaThread::tlab_start_offset())));
zero_memory(top, t1, 0, t2);
}
verify_tlab();
jmp(retry);
return thread_reg; // for use by caller
}
// Preserves the contents of address, destroys the contents length_in_bytes and temp.
void MacroAssembler::zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp) {
assert(address != length_in_bytes && address != temp && temp != length_in_bytes, "registers must be different");

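The reworked safepoint_poll() takes the thread register on both 32- and 64-bit (a 32-bit caller may pass noreg and let the helper call get_thread() into the temp register) and keeps the legacy global-state check when thread-local polling is off. A simplified C++ restatement, with every name treated as a stand-in:

#include <cstdint>

struct Thread {
  uintptr_t polling_word;                   // low bit doubles as the poll bit
};

enum SafepointState { kNotSynchronized = 0, kSynchronizing = 1 };

bool must_take_slow_path(const Thread& self,
                         bool uses_thread_local_poll,
                         SafepointState global_state) {
  if (uses_thread_local_poll) {
    return (self.polling_word & 1u) != 0;   // SafepointMechanism::poll_bit() analogue
  }
  return global_state != kNotSynchronized;  // legacy global poll
}
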
View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -531,7 +531,6 @@ class MacroAssembler: public Assembler {
Register t2, // temp register
Label& slow_case // continuation point if fast allocation fails
);
Register tlab_refill(Label& retry_tlab, Label& try_eden, Label& slow_case); // returns TLS address
void zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp);
void incr_allocated_bytes(Register thread,
@ -657,11 +656,9 @@ class MacroAssembler: public Assembler {
// Support for serializing memory accesses between threads
void serialize_memory(Register thread, Register tmp);
#ifdef _LP64
// If thread_reg is != noreg the code assumes the register passed contains
// the thread (required on 64 bit).
void safepoint_poll(Label& slow_path, Register thread_reg, Register temp_reg);
#else
void safepoint_poll(Label& slow_path);
#endif
void verify_tlab();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -704,14 +704,18 @@ inline bool NativeInstruction::is_far_jump() { return is_mov_literal64(); }
inline bool NativeInstruction::is_cond_jump() { return (int_at(0) & 0xF0FF) == 0x800F /* long jump */ ||
(ubyte_at(0) & 0xF0) == 0x70; /* short jump */ }
inline bool NativeInstruction::is_safepoint_poll() {
#ifdef AMD64
if (SafepointMechanism::uses_thread_local_poll()) {
#ifdef AMD64
const bool has_rex_prefix = ubyte_at(0) == NativeTstRegMem::instruction_rex_b_prefix;
const int test_offset = has_rex_prefix ? 1 : 0;
#else
const int test_offset = 0;
#endif
const bool is_test_opcode = ubyte_at(test_offset) == NativeTstRegMem::instruction_code_memXregl;
const bool is_rax_target = (ubyte_at(test_offset + 1) & NativeTstRegMem::modrm_mask) == NativeTstRegMem::modrm_reg;
return is_test_opcode && is_rax_target;
}
#ifdef AMD64
// Try decoding a near safepoint first:
if (ubyte_at(0) == NativeTstRegMem::instruction_code_memXregl &&
ubyte_at(1) == 0x05) { // 00 rax 101

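is_safepoint_poll() now recognizes the thread-local poll, test %eax,(reg), on 32-bit as well. A standalone restatement of that check; the encoding constants (opcode 0x85, REX.B prefix 0x41, ModRM reg-field mask 0x38) are assumptions matching the names used above:

#include <cstdint>

bool looks_like_thread_local_poll(const uint8_t* insn, bool is_64_bit) {
  const uint8_t kTestOpcode   = 0x85;  // NativeTstRegMem::instruction_code_memXregl
  const uint8_t kRexBPrefix   = 0x41;  // present when the base register is r8..r15
  const uint8_t kModrmRegMask = 0x38;  // reg field of the ModRM byte

  int test_offset = 0;
  if (is_64_bit && insn[0] == kRexBPrefix) {
    test_offset = 1;                   // skip the REX.B prefix
  }
  const bool is_test     = insn[test_offset] == kTestOpcode;
  const bool targets_eax = (insn[test_offset + 1] & kModrmRegMask) == 0x00;  // reg == eax/rax
  return is_test && targets_eax;
}
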
View File

@ -2111,16 +2111,13 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
Label after_transition;
// check for safepoint operation in progress and/or pending suspend requests
{ Label Continue;
{ Label Continue, slow_path;
__ cmp32(ExternalAddress((address)SafepointSynchronize::address_of_state()),
SafepointSynchronize::_not_synchronized);
__ safepoint_poll(slow_path, thread, noreg);
Label L;
__ jcc(Assembler::notEqual, L);
__ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0);
__ jcc(Assembler::equal, Continue);
__ bind(L);
__ bind(slow_path);
// Don't use call_VM as it will see a possible pending exception and forward it
// and never return here preventing us from clearing _last_native_pc down below.
@ -2996,8 +2993,11 @@ SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_t
// if this was not a poll_return then we need to correct the return address now.
if (!cause_return) {
__ movptr(rax, Address(java_thread, JavaThread::saved_exception_pc_offset()));
__ movptr(Address(rbp, wordSize), rax);
// Get the return pc saved by the signal handler and stash it in its appropriate place on the stack.
// Additionally, rbx is a callee saved register and we can look at it later to determine
// if someone changed the return address for us!
__ movptr(rbx, Address(java_thread, JavaThread::saved_exception_pc_offset()));
__ movptr(Address(rbp, wordSize), rbx);
}
// do the call
@ -3029,11 +3029,63 @@ SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_t
__ bind(noException);
Label no_adjust, bail, not_special;
if (SafepointMechanism::uses_thread_local_poll() && !cause_return) {
// If our stashed return pc was modified by the runtime we avoid touching it
__ cmpptr(rbx, Address(rbp, wordSize));
__ jccb(Assembler::notEqual, no_adjust);
// Skip over the poll instruction.
// See NativeInstruction::is_safepoint_poll()
// Possible encodings:
// 85 00 test %eax,(%rax)
// 85 01 test %eax,(%rcx)
// 85 02 test %eax,(%rdx)
// 85 03 test %eax,(%rbx)
// 85 06 test %eax,(%rsi)
// 85 07 test %eax,(%rdi)
//
// 85 04 24 test %eax,(%rsp)
// 85 45 00 test %eax,0x0(%rbp)
#ifdef ASSERT
__ movptr(rax, rbx); // remember where 0x85 should be, for verification below
#endif
// rsp/rbp base encoding takes 3 bytes with the following register values:
// rsp 0x04
// rbp 0x05
__ movzbl(rcx, Address(rbx, 1));
__ andptr(rcx, 0x07); // looking for 0x04 .. 0x05
__ subptr(rcx, 4); // looking for 0x00 .. 0x01
__ cmpptr(rcx, 1);
__ jcc(Assembler::above, not_special);
__ addptr(rbx, 1);
__ bind(not_special);
#ifdef ASSERT
// Verify the correct encoding of the poll we're about to skip.
__ cmpb(Address(rax, 0), NativeTstRegMem::instruction_code_memXregl);
__ jcc(Assembler::notEqual, bail);
// Mask out the modrm bits
__ testb(Address(rax, 1), NativeTstRegMem::modrm_mask);
// rax encodes to 0, so if the bits are nonzero it's incorrect
__ jcc(Assembler::notZero, bail);
#endif
// Adjust return pc forward to step over the safepoint poll instruction
__ addptr(rbx, 2);
__ movptr(Address(rbp, wordSize), rbx);
}
__ bind(no_adjust);
// Normal exit, register restoring and exit
RegisterSaver::restore_live_registers(masm, save_vectors);
__ ret(0);
#ifdef ASSERT
__ bind(bail);
__ stop("Attempting to adjust pc to skip safepoint poll but the return point is not what we expected");
#endif
// make sure all code is generated
masm->flush();

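The arithmetic added above advances the stashed return pc past the poll instruction: test %eax,(reg) is two bytes, except when the base register is esp or ebp (rsp/rbp on 64-bit), which need a third byte for the SIB or disp8. The same computation in plain C++:

#include <cstdint>

int poll_instruction_length(const uint8_t* insn) {
  // insn[0] is expected to be the 0x85 test opcode (verified under ASSERT above).
  const uint8_t base = insn[1] & 0x07;            // low 3 bits of ModRM = base register
  return (base == 0x04 || base == 0x05) ? 3 : 2;  // esp/ebp encodings take 3 bytes
}

uintptr_t skip_poll(uintptr_t saved_return_pc) {
  const uint8_t* insn = reinterpret_cast<const uint8_t*>(saved_return_pc);
  return saved_return_pc + poll_instruction_length(insn);
}
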
View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -303,45 +303,45 @@ ALIGNED_(64) juint StubRoutines::x86::_k256_W[2*sizeof(StubRoutines::x86::_k256)
// used in MacroAssembler::sha512_AVX2
ALIGNED_(64) julong StubRoutines::x86::_k512_W[] =
{
0x428a2f98d728ae22LL, 0x7137449123ef65cdLL,
0xb5c0fbcfec4d3b2fLL, 0xe9b5dba58189dbbcLL,
0x3956c25bf348b538LL, 0x59f111f1b605d019LL,
0x923f82a4af194f9bLL, 0xab1c5ed5da6d8118LL,
0xd807aa98a3030242LL, 0x12835b0145706fbeLL,
0x243185be4ee4b28cLL, 0x550c7dc3d5ffb4e2LL,
0x72be5d74f27b896fLL, 0x80deb1fe3b1696b1LL,
0x9bdc06a725c71235LL, 0xc19bf174cf692694LL,
0xe49b69c19ef14ad2LL, 0xefbe4786384f25e3LL,
0x0fc19dc68b8cd5b5LL, 0x240ca1cc77ac9c65LL,
0x2de92c6f592b0275LL, 0x4a7484aa6ea6e483LL,
0x5cb0a9dcbd41fbd4LL, 0x76f988da831153b5LL,
0x983e5152ee66dfabLL, 0xa831c66d2db43210LL,
0xb00327c898fb213fLL, 0xbf597fc7beef0ee4LL,
0xc6e00bf33da88fc2LL, 0xd5a79147930aa725LL,
0x06ca6351e003826fLL, 0x142929670a0e6e70LL,
0x27b70a8546d22ffcLL, 0x2e1b21385c26c926LL,
0x4d2c6dfc5ac42aedLL, 0x53380d139d95b3dfLL,
0x650a73548baf63deLL, 0x766a0abb3c77b2a8LL,
0x81c2c92e47edaee6LL, 0x92722c851482353bLL,
0xa2bfe8a14cf10364LL, 0xa81a664bbc423001LL,
0xc24b8b70d0f89791LL, 0xc76c51a30654be30LL,
0xd192e819d6ef5218LL, 0xd69906245565a910LL,
0xf40e35855771202aLL, 0x106aa07032bbd1b8LL,
0x19a4c116b8d2d0c8LL, 0x1e376c085141ab53LL,
0x2748774cdf8eeb99LL, 0x34b0bcb5e19b48a8LL,
0x391c0cb3c5c95a63LL, 0x4ed8aa4ae3418acbLL,
0x5b9cca4f7763e373LL, 0x682e6ff3d6b2b8a3LL,
0x748f82ee5defb2fcLL, 0x78a5636f43172f60LL,
0x84c87814a1f0ab72LL, 0x8cc702081a6439ecLL,
0x90befffa23631e28LL, 0xa4506cebde82bde9LL,
0xbef9a3f7b2c67915LL, 0xc67178f2e372532bLL,
0xca273eceea26619cLL, 0xd186b8c721c0c207LL,
0xeada7dd6cde0eb1eLL, 0xf57d4f7fee6ed178LL,
0x06f067aa72176fbaLL, 0x0a637dc5a2c898a6LL,
0x113f9804bef90daeLL, 0x1b710b35131c471bLL,
0x28db77f523047d84LL, 0x32caab7b40c72493LL,
0x3c9ebe0a15c9bebcLL, 0x431d67c49c100d4cLL,
0x4cc5d4becb3e42b6LL, 0x597f299cfc657e2aLL,
0x5fcb6fab3ad6faecLL, 0x6c44198c4a475817LL,
0x428a2f98d728ae22ULL, 0x7137449123ef65cdULL,
0xb5c0fbcfec4d3b2fULL, 0xe9b5dba58189dbbcULL,
0x3956c25bf348b538ULL, 0x59f111f1b605d019ULL,
0x923f82a4af194f9bULL, 0xab1c5ed5da6d8118ULL,
0xd807aa98a3030242ULL, 0x12835b0145706fbeULL,
0x243185be4ee4b28cULL, 0x550c7dc3d5ffb4e2ULL,
0x72be5d74f27b896fULL, 0x80deb1fe3b1696b1ULL,
0x9bdc06a725c71235ULL, 0xc19bf174cf692694ULL,
0xe49b69c19ef14ad2ULL, 0xefbe4786384f25e3ULL,
0x0fc19dc68b8cd5b5ULL, 0x240ca1cc77ac9c65ULL,
0x2de92c6f592b0275ULL, 0x4a7484aa6ea6e483ULL,
0x5cb0a9dcbd41fbd4ULL, 0x76f988da831153b5ULL,
0x983e5152ee66dfabULL, 0xa831c66d2db43210ULL,
0xb00327c898fb213fULL, 0xbf597fc7beef0ee4ULL,
0xc6e00bf33da88fc2ULL, 0xd5a79147930aa725ULL,
0x06ca6351e003826fULL, 0x142929670a0e6e70ULL,
0x27b70a8546d22ffcULL, 0x2e1b21385c26c926ULL,
0x4d2c6dfc5ac42aedULL, 0x53380d139d95b3dfULL,
0x650a73548baf63deULL, 0x766a0abb3c77b2a8ULL,
0x81c2c92e47edaee6ULL, 0x92722c851482353bULL,
0xa2bfe8a14cf10364ULL, 0xa81a664bbc423001ULL,
0xc24b8b70d0f89791ULL, 0xc76c51a30654be30ULL,
0xd192e819d6ef5218ULL, 0xd69906245565a910ULL,
0xf40e35855771202aULL, 0x106aa07032bbd1b8ULL,
0x19a4c116b8d2d0c8ULL, 0x1e376c085141ab53ULL,
0x2748774cdf8eeb99ULL, 0x34b0bcb5e19b48a8ULL,
0x391c0cb3c5c95a63ULL, 0x4ed8aa4ae3418acbULL,
0x5b9cca4f7763e373ULL, 0x682e6ff3d6b2b8a3ULL,
0x748f82ee5defb2fcULL, 0x78a5636f43172f60ULL,
0x84c87814a1f0ab72ULL, 0x8cc702081a6439ecULL,
0x90befffa23631e28ULL, 0xa4506cebde82bde9ULL,
0xbef9a3f7b2c67915ULL, 0xc67178f2e372532bULL,
0xca273eceea26619cULL, 0xd186b8c721c0c207ULL,
0xeada7dd6cde0eb1eULL, 0xf57d4f7fee6ed178ULL,
0x06f067aa72176fbaULL, 0x0a637dc5a2c898a6ULL,
0x113f9804bef90daeULL, 0x1b710b35131c471bULL,
0x28db77f523047d84ULL, 0x32caab7b40c72493ULL,
0x3c9ebe0a15c9bebcULL, 0x431d67c49c100d4cULL,
0x4cc5d4becb3e42b6ULL, 0x597f299cfc657e2aULL,
0x5fcb6fab3ad6faecULL, 0x6c44198c4a475817ULL,
};
#endif

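About half of these SHA-512 round constants have the top bit set, so they exceed LLONG_MAX; the ULL suffix spells out that they are unsigned 64-bit values (the presumed motivation is avoiding implicitly-unsigned-literal warnings on some compilers, which is an assumption here). A minimal illustration using uint64_t in place of HotSpot's julong:

#include <climits>
#include <cstdint>

constexpr uint64_t k512_sample[] = {
  0x428a2f98d728ae22ULL,  // also fits in a signed 64-bit value
  0xb5c0fbcfec4d3b2fULL,  // top bit set: only representable as unsigned 64-bit
};

static_assert(k512_sample[1] > static_cast<uint64_t>(LLONG_MAX),
              "needs an unsigned 64-bit literal");
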
View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1148,7 +1148,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
Label slow_path;
#ifndef _LP64
__ safepoint_poll(slow_path);
__ safepoint_poll(slow_path, thread, noreg);
#else
__ safepoint_poll(slow_path, r15_thread, rscratch1);
#endif

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -61,10 +61,7 @@ address TemplateInterpreterGenerator::generate_CRC32_update_entry() {
Label slow_path;
// If we need a safepoint check, generate full interpreter entry.
ExternalAddress state(SafepointSynchronize::address_of_state());
__ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
SafepointSynchronize::_not_synchronized);
__ jcc(Assembler::notEqual, slow_path);
__ safepoint_poll(slow_path, noreg, rdi);
// We don't generate local frame and don't align stack because
// we call stub code and there is no safepoint on this path.
@ -113,10 +110,7 @@ address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractI
Label slow_path;
// If we need a safepoint check, generate full interpreter entry.
ExternalAddress state(SafepointSynchronize::address_of_state());
__ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
SafepointSynchronize::_not_synchronized);
__ jcc(Assembler::notEqual, slow_path);
__ safepoint_poll(slow_path, noreg, rdi);
// We don't generate local frame and don't align stack because
// we call stub code and there is no safepoint on this path.

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -2692,11 +2692,16 @@ void TemplateTable::_return(TosState state) {
__ bind(skip_register_finalizer);
}
#ifdef _LP64
if (SafepointMechanism::uses_thread_local_poll() && _desc->bytecode() != Bytecodes::_return_register_finalizer) {
Label no_safepoint;
NOT_PRODUCT(__ block_comment("Thread-local Safepoint poll"));
#ifdef _LP64
__ testb(Address(r15_thread, Thread::polling_page_offset()), SafepointMechanism::poll_bit());
#else
const Register thread = rdi;
__ get_thread(thread);
__ testb(Address(thread, Thread::polling_page_offset()), SafepointMechanism::poll_bit());
#endif
__ jcc(Assembler::zero, no_safepoint);
__ push(state);
__ call_VM(noreg, CAST_FROM_FN_PTR(address,
@ -2704,7 +2709,6 @@ void TemplateTable::_return(TosState state) {
__ pop(state);
__ bind(no_safepoint);
}
#endif
// Narrow result if state is itos but result type is smaller.
// Need to narrow in the return bytecode rather than in generate_return_entry

View File

@ -317,7 +317,7 @@ int MachCallRuntimeNode::ret_addr_offset() {
// Indicate if the safepoint node needs the polling page as an input.
// Since x86 does have absolute addressing, it doesn't.
bool SafePointNode::needs_polling_address_input() {
return false;
return SafepointMechanism::uses_thread_local_poll();
}
//
@ -706,34 +706,25 @@ void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
}
if (do_polling() && C->is_method_compilation()) {
cbuf.relocate(cbuf.insts_end(), relocInfo::poll_return_type, 0);
emit_opcode(cbuf,0x85);
emit_rm(cbuf, 0x0, EAX_enc, 0x5); // EAX
emit_d32(cbuf, (intptr_t)os::get_polling_page());
if (SafepointMechanism::uses_thread_local_poll()) {
Register pollReg = as_Register(EBX_enc);
MacroAssembler masm(&cbuf);
masm.get_thread(pollReg);
masm.movl(pollReg, Address(pollReg, in_bytes(Thread::polling_page_offset())));
masm.relocate(relocInfo::poll_return_type);
masm.testl(rax, Address(pollReg, 0));
} else {
cbuf.relocate(cbuf.insts_end(), relocInfo::poll_return_type, 0);
emit_opcode(cbuf,0x85);
emit_rm(cbuf, 0x0, EAX_enc, 0x5); // EAX
emit_d32(cbuf, (intptr_t)os::get_polling_page());
}
}
}
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
Compile *C = ra_->C;
// If method set FPU control word, restore to standard control word
int size = C->in_24_bit_fp_mode() ? 6 : 0;
if (C->max_vector_size() > 16) size += 3; // vzeroupper
if (do_polling() && C->is_method_compilation()) size += 6;
int framesize = C->frame_size_in_bytes();
assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
// Remove two words for return addr and rbp,
framesize -= 2*wordSize;
size++; // popl rbp,
if (framesize >= 128) {
size += 6;
} else {
size += framesize ? 3 : 0;
}
size += 64; // added to support ReservedStackAccess
return size;
return MachNode::size(ra_); // too many variables; just compute it
// the hard way
}
int MachEpilogNode::reloc() const {
@ -13336,6 +13327,7 @@ instruct cmpFastUnlock(eFlagsReg cr, eRegP object, eAXRegP box, eRegP tmp ) %{
// ============================================================================
// Safepoint Instruction
instruct safePoint_poll(eFlagsReg cr) %{
predicate(SafepointMechanism::uses_global_page_poll());
match(SafePoint);
effect(KILL cr);
@ -13354,6 +13346,25 @@ instruct safePoint_poll(eFlagsReg cr) %{
ins_pipe( ialu_reg_mem );
%}
instruct safePoint_poll_tls(eFlagsReg cr, eRegP_no_EBP poll) %{
predicate(SafepointMechanism::uses_thread_local_poll());
match(SafePoint poll);
effect(KILL cr, USE poll);
format %{ "TSTL #EAX,[$poll]\t! Safepoint: poll for GC" %}
ins_cost(125);
// EBP would need size(3)
size(2); /* setting an explicit size will cause debug builds to assert if size is incorrect */
ins_encode %{
__ relocate(relocInfo::poll_type);
address pre_pc = __ pc();
__ testl(rax, Address($poll$$Register, 0));
address post_pc = __ pc();
guarantee(pre_pc[0] == 0x85, "must emit test-ax [reg]");
%}
ins_pipe(ialu_reg_mem);
%}
// ============================================================================
// This name is KNOWN by the ADLC and cannot be changed.

View File

@ -27,6 +27,7 @@
#ifdef __APPLE__
#include "jvm.h"
#include "decoder_machO.hpp"
#include "memory/allocation.inline.hpp"
#include <cxxabi.h>
#include <mach-o/loader.h>

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -62,6 +62,11 @@
product(bool, UseContainerSupport, true, \
"Enable detection and runtime container configuration support") \
\
product(bool, PreferContainerQuotaForCPUCount, true, \
"Calculate the container CPU availability based on the value" \
" of quotas (if set), when true. Otherwise, use the CPU" \
" shares value, provided it is less than quota.") \
\
diagnostic(bool, UseCpuAllocPath, false, \
"Use CPU_ALLOC code path in os::active_processor_count ")

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -499,11 +499,11 @@ jlong OSContainer::memory_max_usage_in_bytes() {
/* active_processor_count
*
* Calculate an appropriate number of active processors for the
* VM to use based on these three cgroup options.
* VM to use based on these three inputs.
*
* cpu affinity
* cpu quota & cpu period
* cpu shares
* cgroup cpu quota & cpu period
* cgroup cpu shares
*
* Algorithm:
*
@ -513,42 +513,61 @@ jlong OSContainer::memory_max_usage_in_bytes() {
* required CPUs by dividing quota by period.
*
* If shares are in effect (shares != -1), calculate the number
* of cpus required for the shares by dividing the share value
* of CPUs required for the shares by dividing the share value
* by PER_CPU_SHARES.
*
* All results of division are rounded up to the next whole number.
*
* Return the smaller number from the three different settings.
* If neither shares nor quotas have been specified, return the
* number of active processors in the system.
*
* If both shares and quotas have been specified, the result is
* based on the flag PreferContainerQuotaForCPUCount. If true,
* return the quota value. If false, return the smaller of the
* shares and quota values.
*
* If shares and/or quotas have been specified, the resulting number
* returned will never exceed the number of active processors.
*
* return:
* number of cpus
* OSCONTAINER_ERROR if failure occured during extract of cpuset info
* number of CPUs
*/
int OSContainer::active_processor_count() {
int cpu_count, share_count, quota_count;
int share, quota, period;
int quota_count = 0, share_count = 0;
int cpu_count, limit_count;
int result;
cpu_count = os::Linux::active_processor_count();
cpu_count = limit_count = os::Linux::active_processor_count();
int quota = cpu_quota();
int period = cpu_period();
int share = cpu_shares();
share = cpu_shares();
if (share > -1) {
share_count = ceilf((float)share / (float)PER_CPU_SHARES);
log_trace(os, container)("cpu_share count: %d", share_count);
} else {
share_count = cpu_count;
}
quota = cpu_quota();
period = cpu_period();
if (quota > -1 && period > 0) {
quota_count = ceilf((float)quota / (float)period);
log_trace(os, container)("quota_count: %d", quota_count);
} else {
quota_count = cpu_count;
log_trace(os, container)("CPU Quota count based on quota/period: %d", quota_count);
}
if (share > -1) {
share_count = ceilf((float)share / (float)PER_CPU_SHARES);
log_trace(os, container)("CPU Share count based on shares: %d", share_count);
}
result = MIN2(cpu_count, MIN2(share_count, quota_count));
// If both shares and quotas have been set up, the result depends
// on the flag PreferContainerQuotaForCPUCount.
// If true, limit the CPU count to the quota value.
// If false, use the minimum of shares and quotas.
if (quota_count != 0 && share_count != 0) {
if (PreferContainerQuotaForCPUCount) {
limit_count = quota_count;
} else {
limit_count = MIN2(quota_count, share_count);
}
} else if (quota_count != 0) {
limit_count = quota_count;
} else if (share_count != 0) {
limit_count = share_count;
}
result = MIN2(cpu_count, limit_count);
log_trace(os, container)("OSContainer::active_processor_count: %d", result);
return result;
}
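// A minimal standalone sketch of the selection logic described in the comment
// block above, assuming the usual cgroup convention of 1024 shares per CPU;
// prefer_quota stands in for the PreferContainerQuotaForCPUCount flag, so
// running with -XX:-PreferContainerQuotaForCPUCount corresponds to
// prefer_quota == false here. Names are invented for illustration only.
#include <algorithm>
#include <cmath>

static const int PER_CPU_SHARES = 1024;

static int container_cpu_count(int host_cpus, int quota, int period,
                               int shares, bool prefer_quota) {
  int quota_count = 0, share_count = 0;
  int limit_count = host_cpus;

  if (quota > -1 && period > 0) {
    quota_count = (int) std::ceil((double) quota / (double) period);
  }
  if (shares > -1) {
    share_count = (int) std::ceil((double) shares / (double) PER_CPU_SHARES);
  }

  if (quota_count != 0 && share_count != 0) {
    limit_count = prefer_quota ? quota_count : std::min(quota_count, share_count);
  } else if (quota_count != 0) {
    limit_count = quota_count;
  } else if (share_count != 0) {
    limit_count = share_count;
  }
  // Whatever the limits say, never report more CPUs than the host provides.
  return std::min(host_cpus, limit_count);
}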

View File

@ -979,11 +979,6 @@ void os::shutdown() {
}
static BOOL (WINAPI *_MiniDumpWriteDump)(HANDLE, DWORD, HANDLE, MINIDUMP_TYPE,
PMINIDUMP_EXCEPTION_INFORMATION,
PMINIDUMP_USER_STREAM_INFORMATION,
PMINIDUMP_CALLBACK_INFORMATION);
static HANDLE dumpFile = NULL;
// Check if dump file can be created.

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,7 +27,7 @@
#include "memory/allocation.hpp"
#include <windef.h>
#include <windows.h>
class WindowsSemaphore : public CHeapObj<mtInternal> {
HANDLE _semaphore;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
#include "runtime/sharedRuntime.hpp"
#ifdef _WIN64
// These are copied defines from fdlibm.h, this allows us to keep the code

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,6 +26,7 @@
#include "utilities/globalDefinitions.hpp"
#include "symbolengine.hpp"
#include "utilities/debug.hpp"
#include "utilities/ostream.hpp"
#include "windbghelp.hpp"
#include <windows.h>

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -22,8 +22,8 @@
*
*/
#ifndef OS_WINDOWS_VM_DBGHELPLOADER_HPP
#define OS_WINDOWS_VM_DBGHELPLOADER_HPP
#ifndef OS_WINDOWS_WINDBGHELP_HPP
#define OS_WINDOWS_WINDBGHELP_HPP
#include <windows.h>
#include <imagehlp.h>
@ -71,6 +71,5 @@ namespace WindowsDbgHelp {
};
#endif // OS_WINDOWS_VM_DBGHELPLOADER_HPP
#endif // OS_WINDOWS_WINDBGHELP_HPP

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -69,6 +69,11 @@ class AllStatic {
//------------------------------Chunk------------------------------------------
// Linked list of raw memory chunks
class Chunk: public CHeapObj {
private:
// This ordinary operator delete is needed even though not used, so the
// below two-argument operator delete will be treated as a placement
// delete rather than an ordinary sized delete; see C++14 3.7.4.2/p2.
void operator delete(void* p);
public:
void* operator new(size_t size, size_t length) throw();
void operator delete(void* p, size_t length);
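// A minimal sketch (invented names, not the HotSpot declaration above) of why
// the extra one-argument operator delete matters: with only the
// (void*, size_t) form declared, C++14 3.7.4.2/p2 treats it as a usual sized
// delete; declaring a one-parameter form as well lets the two-argument form
// act as the placement delete that pairs with operator new(size_t, size_t).
#include <cstddef>
#include <cstdlib>

struct ChunkSketch {
  void* operator new(std::size_t size, std::size_t length) throw() {
    return std::malloc(size + length);          // toy backing store
  }
  // Placement delete: the compiler calls this only if the constructor throws
  // during the placement new above; here it simply releases the storage.
  void operator delete(void* p, std::size_t /*length*/) { std::free(p); }
 private:
  // Ordinary operator delete, declared (not defined) so the two-argument
  // overload above is a placement delete rather than a sized delete.
  void operator delete(void* p);
};

int main() {
  ChunkSketch* c = new (64) ChunkSketch();      // 64 bytes of payload space
  ChunkSketch::operator delete(c, 64);          // explicit release, as chunk pools do
  return 0;
}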

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -194,7 +194,7 @@ void AOTLib::verify_config() {
}
AOTLib::~AOTLib() {
free((void*) _name);
os::free((void*) _name);
}
AOTCodeHeap::~AOTCodeHeap() {
@ -207,7 +207,7 @@ AOTCodeHeap::~AOTCodeHeap() {
}
AOTLib::AOTLib(void* handle, const char* name, int dso_id) : _valid(true), _dl_handle(handle), _dso_id(dso_id) {
_name = (const char*) strdup(name);
_name = (const char*) os::strdup(name);
// Verify that VM runs with the same parameters as AOT tool.
_config = (AOTConfiguration*) load_symbol("A.config");

View File

@ -47,6 +47,7 @@
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "oops/access.inline.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"

View File

@ -30,7 +30,7 @@
#include "ci/ciUtilities.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/typeArrayOop.hpp"
#include "oops/typeArrayOop.inline.hpp"
// ciArray
//

View File

@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "ci/ciTypeArray.hpp"
#include "ci/ciUtilities.hpp"
#include "oops/typeArrayOop.inline.hpp"
// ciTypeArray
//

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -137,7 +137,6 @@ PerfCounter* ClassLoader::_sync_JVMFindLoadedClassLockFreeCounter = NULL;
PerfCounter* ClassLoader::_sync_JVMDefineClassLockFreeCounter = NULL;
PerfCounter* ClassLoader::_sync_JNIDefineClassLockFreeCounter = NULL;
PerfCounter* ClassLoader::_unsafe_defineClassCallCounter = NULL;
PerfCounter* ClassLoader::_isUnsyncloadClass = NULL;
PerfCounter* ClassLoader::_load_instance_class_failCounter = NULL;
GrowableArray<ModuleClassPathList*>* ClassLoader::_patch_mod_entries = NULL;
@ -1642,9 +1641,6 @@ void ClassLoader::initialize() {
// of the bug fix of 6365597. They are mainly focused on finding out
// the behavior of system & user-defined classloader lock, whether
// ClassLoader.loadClass/findClass is being called synchronized or not.
// Also two additional counters are created to see whether 'UnsyncloadClass'
// flag is being set or not and how many times load_instance_class call
// fails with linkageError etc.
NEWPERFEVENTCOUNTER(_sync_systemLoaderLockContentionRate, SUN_CLS,
"systemLoaderLockContentionRate");
NEWPERFEVENTCOUNTER(_sync_nonSystemLoaderLockContentionRate, SUN_CLS,
@ -1660,14 +1656,8 @@ void ClassLoader::initialize() {
NEWPERFEVENTCOUNTER(_unsafe_defineClassCallCounter, SUN_CLS,
"unsafeDefineClassCalls");
NEWPERFEVENTCOUNTER(_isUnsyncloadClass, SUN_CLS, "isUnsyncloadClassSet");
NEWPERFEVENTCOUNTER(_load_instance_class_failCounter, SUN_CLS,
"loadInstanceClassFailRate");
// increment the isUnsyncloadClass counter if UnsyncloadClass is set.
if (UnsyncloadClass) {
_isUnsyncloadClass->inc();
}
}
// lookup zip library entry points

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -207,7 +207,6 @@ class ClassLoader: AllStatic {
static PerfCounter* _sync_JNIDefineClassLockFreeCounter;
static PerfCounter* _unsafe_defineClassCallCounter;
static PerfCounter* _isUnsyncloadClass;
static PerfCounter* _load_instance_class_failCounter;
// The boot class path consists of 3 ordered pieces:

View File

@ -1001,9 +1001,8 @@ ClassLoaderData* ClassLoaderDataGraph::add(Handle loader, bool is_anonymous, TRA
if (!is_anonymous) {
ClassLoaderData** cld_addr = java_lang_ClassLoader::loader_data_addr(loader());
// First, Atomically set it
ClassLoaderData* old = Atomic::cmpxchg(cld, cld_addr, (ClassLoaderData*)NULL);
ClassLoaderData* old = java_lang_ClassLoader::cmpxchg_loader_data(cld, loader(), NULL);
if (old != NULL) {
delete cld;
// Returns the data.
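// A short sketch in plain C++ (invented names, not the VM sources) of the
// publish-or-discard pattern the hunk above switches to: build the object,
// try to install it atomically, and delete the local copy if another thread
// won the race.
#include <atomic>

struct LoaderDataSketch { /* payload elided */ };

LoaderDataSketch* get_or_create(std::atomic<LoaderDataSketch*>& slot) {
  LoaderDataSketch* fresh = new LoaderDataSketch();
  LoaderDataSketch* expected = nullptr;
  if (slot.compare_exchange_strong(expected, fresh)) {
    return fresh;       // we published our copy
  }
  delete fresh;         // another thread installed its copy first
  return expected;      // compare_exchange_strong loaded the winner into 'expected'
}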

View File

@ -31,6 +31,7 @@
#include "memory/oopFactory.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/typeArrayOop.inline.hpp"
#include "runtime/handles.inline.hpp"
bool JavaAssertions::_userDefault = false;

View File

@ -46,7 +46,7 @@
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/symbol.hpp"
#include "oops/typeArrayOop.hpp"
#include "oops/typeArrayOop.inline.hpp"
#include "prims/resolvedMethodTable.hpp"
#include "runtime/fieldDescriptor.hpp"
#include "runtime/handles.inline.hpp"
@ -3403,7 +3403,7 @@ void java_lang_invoke_MethodHandleNatives_CallSiteContext::compute_offsets() {
DependencyContext java_lang_invoke_MethodHandleNatives_CallSiteContext::vmdependencies(oop call_site) {
assert(java_lang_invoke_MethodHandleNatives_CallSiteContext::is_instance(call_site), "");
intptr_t* vmdeps_addr = (intptr_t*)call_site->address_field_addr(_vmdependencies_offset);
intptr_t* vmdeps_addr = (intptr_t*)call_site->field_addr(_vmdependencies_offset);
DependencyContext dep_ctx(vmdeps_addr);
return dep_ctx;
}
@ -3458,13 +3458,14 @@ int java_lang_ClassLoader::parallelCapable_offset = -1;
int java_lang_ClassLoader::name_offset = -1;
int java_lang_ClassLoader::unnamedModule_offset = -1;
ClassLoaderData** java_lang_ClassLoader::loader_data_addr(oop loader) {
assert(loader != NULL && oopDesc::is_oop(loader), "loader must be oop");
return (ClassLoaderData**) loader->address_field_addr(_loader_data_offset);
ClassLoaderData* java_lang_ClassLoader::loader_data(oop loader) {
assert(loader != NULL && oopDesc::is_oop(loader), "loader must be oop");
return HeapAccess<>::load_at(loader, _loader_data_offset);
}
ClassLoaderData* java_lang_ClassLoader::loader_data(oop loader) {
return *java_lang_ClassLoader::loader_data_addr(loader);
ClassLoaderData* java_lang_ClassLoader::cmpxchg_loader_data(ClassLoaderData* new_data, oop loader, ClassLoaderData* expected_data) {
assert(loader != NULL && oopDesc::is_oop(loader), "loader must be oop");
return HeapAccess<>::atomic_cmpxchg_at(new_data, loader, _loader_data_offset, expected_data);
}
void java_lang_ClassLoader::compute_offsets() {

View File

@ -881,15 +881,15 @@ class java_lang_ref_Reference: AllStatic {
static inline oop referent(oop ref);
static inline void set_referent(oop ref, oop value);
static inline void set_referent_raw(oop ref, oop value);
static inline HeapWord* referent_addr(oop ref);
static inline HeapWord* referent_addr_raw(oop ref);
static inline oop next(oop ref);
static inline void set_next(oop ref, oop value);
static inline void set_next_raw(oop ref, oop value);
static inline HeapWord* next_addr(oop ref);
static inline HeapWord* next_addr_raw(oop ref);
static inline oop discovered(oop ref);
static inline void set_discovered(oop ref, oop value);
static inline void set_discovered_raw(oop ref, oop value);
static inline HeapWord* discovered_addr(oop ref);
static inline HeapWord* discovered_addr_raw(oop ref);
static bool is_referent_field(oop obj, ptrdiff_t offset);
static inline bool is_phantom(oop ref);
};
@ -1229,8 +1229,8 @@ class java_lang_ClassLoader : AllStatic {
public:
static void compute_offsets();
static ClassLoaderData** loader_data_addr(oop loader);
static ClassLoaderData* loader_data(oop loader);
static ClassLoaderData* cmpxchg_loader_data(ClassLoaderData* new_data, oop loader, ClassLoaderData* expected_data);
static oop parent(oop loader);
static oop name(oop loader);

View File

@ -100,8 +100,8 @@ void java_lang_ref_Reference::set_referent(oop ref, oop value) {
void java_lang_ref_Reference::set_referent_raw(oop ref, oop value) {
ref->obj_field_put_raw(referent_offset, value);
}
HeapWord* java_lang_ref_Reference::referent_addr(oop ref) {
return ref->obj_field_addr<HeapWord>(referent_offset);
HeapWord* java_lang_ref_Reference::referent_addr_raw(oop ref) {
return ref->obj_field_addr_raw<HeapWord>(referent_offset);
}
oop java_lang_ref_Reference::next(oop ref) {
return ref->obj_field(next_offset);
@ -112,8 +112,8 @@ void java_lang_ref_Reference::set_next(oop ref, oop value) {
void java_lang_ref_Reference::set_next_raw(oop ref, oop value) {
ref->obj_field_put_raw(next_offset, value);
}
HeapWord* java_lang_ref_Reference::next_addr(oop ref) {
return ref->obj_field_addr<HeapWord>(next_offset);
HeapWord* java_lang_ref_Reference::next_addr_raw(oop ref) {
return ref->obj_field_addr_raw<HeapWord>(next_offset);
}
oop java_lang_ref_Reference::discovered(oop ref) {
return ref->obj_field(discovered_offset);
@ -124,8 +124,8 @@ void java_lang_ref_Reference::set_discovered(oop ref, oop value) {
void java_lang_ref_Reference::set_discovered_raw(oop ref, oop value) {
ref->obj_field_put_raw(discovered_offset, value);
}
HeapWord* java_lang_ref_Reference::discovered_addr(oop ref) {
return ref->obj_field_addr<HeapWord>(discovered_offset);
HeapWord* java_lang_ref_Reference::discovered_addr_raw(oop ref) {
return ref->obj_field_addr_raw<HeapWord>(discovered_offset);
}
bool java_lang_ref_Reference::is_phantom(oop ref) {
return InstanceKlass::cast(ref->klass())->reference_type() == REF_PHANTOM;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -37,6 +37,7 @@
#include "memory/resourceArea.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/typeArrayOop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/mutexLocker.hpp"
#include "services/diagnosticCommand.hpp"

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -106,7 +106,6 @@ InstanceKlass* SystemDictionary::_box_klasses[T_VOID+1] = { NULL /*,
oop SystemDictionary::_java_system_loader = NULL;
oop SystemDictionary::_java_platform_loader = NULL;
bool SystemDictionary::_has_loadClassInternal = false;
bool SystemDictionary::_has_checkPackageAccess = false;
// lazily initialized klass variables
@ -159,7 +158,7 @@ ClassLoaderData* SystemDictionary::register_loader(Handle class_loader, TRAPS) {
// Parallel class loading check
bool SystemDictionary::is_parallelCapable(Handle class_loader) {
if (UnsyncloadClass || class_loader.is_null()) return true;
if (class_loader.is_null()) return true;
if (AlwaysLockClassLoader) return false;
return java_lang_ClassLoader::parallelCapable(class_loader());
}
@ -503,8 +502,7 @@ void SystemDictionary::validate_protection_domain(InstanceKlass* klass,
//
// We only get here if
// 1) custom classLoader, i.e. not bootstrap classloader
// 2) UnsyncloadClass not set
// 3) custom classLoader has broken the class loader objectLock
// 2) custom classLoader has broken the class loader objectLock
// so another thread got here in parallel
//
// lockObject must be held.
@ -594,7 +592,6 @@ InstanceKlass* SystemDictionary::handle_parallel_super_load(
} else {
placeholder = placeholders()->get_entry(p_index, p_hash, name, loader_data);
if (placeholder && placeholder->super_load_in_progress() ){
// Before UnsyncloadClass:
// We only get here if the application has released the
// classloader lock when another thread was in the middle of loading a
// superclass/superinterface for this class, and now
@ -687,9 +684,9 @@ Klass* SystemDictionary::resolve_instance_class_or_null(Symbol* name,
// defining the class in parallel by accident.
// This lock must be acquired here so the waiter will find
// any successful result in the SystemDictionary and not attempt
// the define
// ParallelCapable Classloaders and the bootstrap classloader,
// or all classloaders with UnsyncloadClass do not acquire lock here
// the define.
// ParallelCapable Classloaders and the bootstrap classloader
// do not acquire lock here.
bool DoObjectLock = true;
if (is_parallelCapable(class_loader)) {
DoObjectLock = false;
@ -765,14 +762,11 @@ Klass* SystemDictionary::resolve_instance_class_or_null(Symbol* name,
// and that lock is still held when calling classloader's loadClass.
// For these classloaders, we ensure that the first requestor
// completes the load and other requestors wait for completion.
// case 3. UnsyncloadClass - don't use objectLocker
// With this flag, we allow parallel classloading of a
// class/classloader pair
// case4. Bootstrap classloader - don't own objectLocker
// case 3. Bootstrap classloader - don't own objectLocker
// This classloader supports parallelism at the classloader level,
// but only allows a single load of a class/classloader pair.
// No performance benefit and no deadlock issues.
// case 5. parallelCapable user level classloaders - without objectLocker
// case 4. parallelCapable user level classloaders - without objectLocker
// Allow parallel classloading of a class/classloader pair
{
@ -788,7 +782,7 @@ Klass* SystemDictionary::resolve_instance_class_or_null(Symbol* name,
// case 1: traditional: should never see load_in_progress.
while (!class_has_been_loaded && oldprobe && oldprobe->instance_load_in_progress()) {
// case 4: bootstrap classloader: prevent futile classloading,
// case 3: bootstrap classloader: prevent futile classloading,
// wait on first requestor
if (class_loader.is_null()) {
SystemDictionary_lock->wait();
@ -811,7 +805,7 @@ Klass* SystemDictionary::resolve_instance_class_or_null(Symbol* name,
}
}
// All cases: add LOAD_INSTANCE holding SystemDictionary_lock
// case 3: UnsyncloadClass || case 5: parallelCapable: allow competing threads to try
// case 4: parallelCapable: allow competing threads to try
// LOAD_INSTANCE in parallel
if (!throw_circularity_error && !class_has_been_loaded) {
@ -844,28 +838,6 @@ Klass* SystemDictionary::resolve_instance_class_or_null(Symbol* name,
// Do actual loading
k = load_instance_class(name, class_loader, THREAD);
// For UnsyncloadClass only
// If they got a linkageError, check if a parallel class load succeeded.
// If it did, then for bytecode resolution the specification requires
// that we return the same result we did for the other thread, i.e. the
// successfully loaded InstanceKlass
// Should not get here for classloaders that support parallelism
// with the new cleaner mechanism, even with AllowParallelDefineClass
// Bootstrap goes through here to allow for an extra guarantee check
if (UnsyncloadClass || (class_loader.is_null())) {
if (k == NULL && HAS_PENDING_EXCEPTION
&& PENDING_EXCEPTION->is_a(SystemDictionary::LinkageError_klass())) {
MutexLocker mu(SystemDictionary_lock, THREAD);
InstanceKlass* check = find_class(d_hash, name, dictionary);
if (check != NULL) {
// Klass is already loaded, so just use it
k = check;
CLEAR_PENDING_EXCEPTION;
guarantee((!class_loader.is_null()), "dup definition for bootstrap loader?");
}
}
}
// If everything was OK (no exceptions, no null return value), and
// class_loader is NOT the defining loader, do a little more bookkeeping.
if (!HAS_PENDING_EXCEPTION && k != NULL &&
@ -1097,7 +1069,7 @@ InstanceKlass* SystemDictionary::resolve_from_stream(Symbol* class_name,
HandleMark hm(THREAD);
// Classloaders that support parallelism, e.g. bootstrap classloader,
// or all classloaders with UnsyncloadClass do not acquire lock here
// do not acquire lock here
bool DoObjectLock = true;
if (is_parallelCapable(class_loader)) {
DoObjectLock = false;
@ -1556,40 +1528,17 @@ InstanceKlass* SystemDictionary::load_instance_class(Symbol* class_name, Handle
InstanceKlass* spec_klass = SystemDictionary::ClassLoader_klass();
// Call public unsynchronized loadClass(String) directly for all class loaders
// for parallelCapable class loaders. JDK >=7, loadClass(String, boolean) will
// Call public unsynchronized loadClass(String) directly for all class loaders.
// For parallelCapable class loaders, JDK >=7, loadClass(String, boolean) will
// acquire a class-name based lock rather than the class loader object lock.
// JDK < 7 already acquire the class loader lock in loadClass(String, boolean),
// so the call to loadClassInternal() was not required.
//
// UnsyncloadClass flag means both call loadClass(String) and do
// not acquire the class loader lock even for class loaders that are
// not parallelCapable. This was a risky transitional
// flag for diagnostic purposes only. It is risky to call
// custom class loaders without synchronization.
// WARNING If a custom class loader does NOT synchronizer findClass, or callers of
// findClass, the UnsyncloadClass flag risks unexpected timing bugs in the field.
// Do NOT assume this will be supported in future releases.
//
// Added MustCallLoadClassInternal in case we discover in the field
// a customer that counts on this call
if (MustCallLoadClassInternal && has_loadClassInternal()) {
JavaCalls::call_special(&result,
class_loader,
spec_klass,
vmSymbols::loadClassInternal_name(),
vmSymbols::string_class_signature(),
string,
CHECK_NULL);
} else {
JavaCalls::call_virtual(&result,
class_loader,
spec_klass,
vmSymbols::loadClass_name(),
vmSymbols::string_class_signature(),
string,
CHECK_NULL);
}
// JDK < 7 already acquires the class loader lock in loadClass(String, boolean).
JavaCalls::call_virtual(&result,
class_loader,
spec_klass,
vmSymbols::loadClass_name(),
vmSymbols::string_class_signature(),
string,
CHECK_NULL);
assert(result.get_type() == T_OBJECT, "just checking");
oop obj = (oop) result.get_jobject();
@ -1718,7 +1667,7 @@ InstanceKlass* SystemDictionary::find_or_define_instance_class(Symbol* class_nam
{
MutexLocker mu(SystemDictionary_lock, THREAD);
// First check if class already defined
if (UnsyncloadClass || (is_parallelDefine(class_loader))) {
if (is_parallelDefine(class_loader)) {
InstanceKlass* check = find_class(d_hash, name_h, dictionary);
if (check != NULL) {
return check;
@ -1737,7 +1686,7 @@ InstanceKlass* SystemDictionary::find_or_define_instance_class(Symbol* class_nam
// Only special cases allow parallel defines and can use other thread's results
// Other cases fall through, and may run into duplicate defines
// caught by finding an entry in the SystemDictionary
if ((UnsyncloadClass || is_parallelDefine(class_loader)) && (probe->instance_klass() != NULL)) {
if (is_parallelDefine(class_loader) && (probe->instance_klass() != NULL)) {
placeholders()->find_and_remove(p_index, p_hash, name_h, loader_data, PlaceholderTable::DEFINE_CLASS, THREAD);
SystemDictionary_lock->notify_all();
#ifdef ASSERT
@ -2174,10 +2123,6 @@ void SystemDictionary::initialize_preloaded_classes(TRAPS) {
//_box_klasses[T_OBJECT] = WK_KLASS(object_klass);
//_box_klasses[T_ARRAY] = WK_KLASS(object_klass);
{ // Compute whether we should use loadClass or loadClassInternal when loading classes.
Method* method = InstanceKlass::cast(ClassLoader_klass())->find_method(vmSymbols::loadClassInternal_name(), vmSymbols::string_class_signature());
_has_loadClassInternal = (method != NULL);
}
{ // Compute whether we should use checkPackageAccess or NOT
Method* method = InstanceKlass::cast(ClassLoader_klass())->find_method(vmSymbols::checkPackageAccess_name(), vmSymbols::class_protectiondomain_signature());
_has_checkPackageAccess = (method != NULL);

View File

@ -467,9 +467,6 @@ public:
static void load_abstract_ownable_synchronizer_klass(TRAPS);
protected:
// Tells whether ClassLoader.loadClassInternal is present
static bool has_loadClassInternal() { return _has_loadClassInternal; }
// Returns the class loader data to be used when looking up/updating the
// system dictionary.
static ClassLoaderData *class_loader_data(Handle class_loader) {
@ -746,7 +743,6 @@ protected:
static oop _java_system_loader;
static oop _java_platform_loader;
static bool _has_loadClassInternal;
static bool _has_checkPackageAccess;
};

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -48,6 +48,7 @@
#include "oops/klass.inline.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/typeArrayOop.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"

View File

@ -360,7 +360,6 @@
template(run_finalization_name, "runFinalization") \
template(dispatchUncaughtException_name, "dispatchUncaughtException") \
template(loadClass_name, "loadClass") \
template(loadClassInternal_name, "loadClassInternal") \
template(get_name, "get") \
template(put_name, "put") \
template(type_name, "type") \

View File

@ -51,26 +51,3 @@ void ConcurrentMarkSweepPolicy::initialize_alignments() {
_space_alignment = _gen_alignment = (uintx)Generation::GenGrain;
_heap_alignment = compute_heap_alignment();
}
void ConcurrentMarkSweepPolicy::initialize_generations() {
_young_gen_spec = new GenerationSpec(Generation::ParNew, _initial_young_size,
_max_young_size, _gen_alignment);
_old_gen_spec = new GenerationSpec(Generation::ConcurrentMarkSweep,
_initial_old_size, _max_old_size, _gen_alignment);
}
void ConcurrentMarkSweepPolicy::initialize_size_policy(size_t init_eden_size,
size_t init_promo_size,
size_t init_survivor_size) {
double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
_size_policy = new AdaptiveSizePolicy(init_eden_size,
init_promo_size,
init_survivor_size,
max_gc_pause_sec,
GCTimeRatio);
}
void ConcurrentMarkSweepPolicy::initialize_gc_policy_counters() {
// initialize the policy counters - 2 collectors, 2 generations
_gc_policy_counters = new GCPolicyCounters("ParNew:CMS", 2, 2);
}

View File

@ -30,18 +30,9 @@
class ConcurrentMarkSweepPolicy : public GenCollectorPolicy {
protected:
void initialize_alignments();
void initialize_generations();
public:
ConcurrentMarkSweepPolicy() {}
ConcurrentMarkSweepPolicy* as_concurrent_mark_sweep_policy() { return this; }
void initialize_gc_policy_counters();
virtual void initialize_size_policy(size_t init_eden_size,
size_t init_promo_size,
size_t init_survivor_size);
};
#endif // SHARE_VM_GC_CMS_CMSCOLLECTORPOLICY_HPP

View File

@ -64,7 +64,13 @@ public:
};
CMSHeap::CMSHeap(GenCollectorPolicy *policy) :
GenCollectedHeap(policy), _eden_pool(NULL), _survivor_pool(NULL), _old_pool(NULL) {
GenCollectedHeap(policy,
Generation::ParNew,
Generation::ConcurrentMarkSweep,
"ParNew::CMS"),
_eden_pool(NULL),
_survivor_pool(NULL),
_old_pool(NULL) {
_workers = new WorkGang("GC Thread", ParallelGCThreads,
/* are_GC_task_threads */true,
/* are_ConcurrentGC_threads */false);
@ -77,7 +83,6 @@ jint CMSHeap::initialize() {
// If we are running CMS, create the collector responsible
// for collecting the CMS generations.
assert(collector_policy()->is_concurrent_mark_sweep_policy(), "must be CMS policy");
if (!create_cms_collector()) {
return JNI_ENOMEM;
}
@ -152,11 +157,10 @@ void CMSHeap::print_on_error(outputStream* st) const {
bool CMSHeap::create_cms_collector() {
assert(old_gen()->kind() == Generation::ConcurrentMarkSweep,
"Unexpected generation kinds");
assert(gen_policy()->is_concurrent_mark_sweep_policy(), "Unexpected policy type");
CMSCollector* collector =
new CMSCollector((ConcurrentMarkSweepGeneration*) old_gen(),
rem_set(),
gen_policy()->as_concurrent_mark_sweep_policy());
(ConcurrentMarkSweepPolicy*) gen_policy());
if (collector == NULL || !collector->completed_initialization()) {
if (collector) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -300,8 +300,7 @@ void CMSCollector::ref_processor_init() {
}
AdaptiveSizePolicy* CMSCollector::size_policy() {
CMSHeap* heap = CMSHeap::heap();
return heap->gen_policy()->size_policy();
return CMSHeap::heap()->size_policy();
}
void ConcurrentMarkSweepGeneration::initialize_performance_counters() {
@ -1182,8 +1181,6 @@ bool CMSCollector::shouldConcurrentCollect() {
// this is not likely to be productive in practice because it's probably too
// late anyway.
CMSHeap* heap = CMSHeap::heap();
assert(heap->collector_policy()->is_generation_policy(),
"You may want to check the correctness of the following");
if (heap->incremental_collection_will_fail(true /* consult_young */)) {
log.print("CMSCollector: collect because incremental collection will fail ");
return true;
@ -1498,7 +1495,7 @@ void CMSCollector::acquire_control_and_collect(bool full,
max_eden_size,
full,
gc_cause,
heap->collector_policy());
heap->soft_ref_policy());
// Reset the expansion cause, now that we just completed
// a collection cycle.
@ -1890,7 +1887,7 @@ void CMSCollector::collect_in_background(GCCause::Cause cause) {
}
// Should this be in gc_epilogue?
collector_policy()->counters()->update_counters();
heap->counters()->update_counters();
{
// Clear _foregroundGCShouldWait and, in the event that the
@ -5551,7 +5548,7 @@ void CMSCollector::reset_stw() {
// already have the lock
assert(_collectorState == Resetting, "just checking");
assert_lock_strong(bitMapLock());
GCIdMarkAndRestore gc_id_mark(_cmsThread->gc_id());
GCIdMark gc_id_mark(_cmsThread->gc_id());
_markBitMap.clear_all();
_collectorState = Idling;
register_gc_end();

View File

@ -889,7 +889,7 @@ void ParNewGeneration::collect(bool full,
_gc_timer->register_gc_start();
AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
AdaptiveSizePolicy* size_policy = gch->size_policy();
WorkGang* workers = gch->workers();
assert(workers != NULL, "Need workgang for parallel work");
uint active_workers =
@ -1490,4 +1490,3 @@ void ParNewGeneration::restore_preserved_marks() {
SharedRestorePreservedMarksTaskExecutor task_executor(CMSHeap::heap()->workers());
_preserved_marks_set.restore(&task_executor);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -268,8 +268,6 @@ void ConcurrentMarkThread::run_service() {
cm()->concurrent_cycle_start();
assert(GCId::current() != GCId::undefined(), "GC id should have been set up by the initial mark GC.");
GCTraceConcTime(Info, gc) tt("Concurrent Cycle");
{
ResourceMark rm;

View File

@ -41,12 +41,4 @@ public:
}
};
class AllocationContextStats: public StackObj {
public:
inline void clear() { }
inline void update(bool full_gc) { }
inline void update_after_mark() { }
inline bool available() { return false; }
};
#endif // SHARE_VM_GC_G1_G1ALLOCATIONCONTEXT_HPP

View File

@ -40,9 +40,6 @@ size_t G1Arguments::conservative_max_heap_alignment() {
void G1Arguments::initialize_flags() {
GCArguments::initialize_flags();
assert(UseG1GC, "Error");
#if defined(COMPILER1) || INCLUDE_JVMCI
FastTLABRefill = false;
#endif
FLAG_SET_DEFAULT(ParallelGCThreads, Abstract_VM_Version::parallel_worker_threads());
if (ParallelGCThreads == 0) {
assert(!FLAG_IS_DEFAULT(ParallelGCThreads), "The default value for ParallelGCThreads should not be 0.");

View File

@ -59,6 +59,7 @@
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/g1/vm_operations_g1.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcLocker.inline.hpp"
@ -1168,7 +1169,7 @@ bool G1CollectedHeap::do_full_collection(bool explicit_gc,
}
const bool do_clear_all_soft_refs = clear_all_soft_refs ||
collector_policy()->should_clear_all_soft_refs();
soft_ref_policy()->should_clear_all_soft_refs();
G1FullCollector collector(this, &_full_gc_memory_manager, explicit_gc, do_clear_all_soft_refs);
GCTraceTime(Info, gc) tm("Pause Full", NULL, gc_cause(), true);
@ -1343,7 +1344,7 @@ HeapWord* G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
return result;
}
assert(!collector_policy()->should_clear_all_soft_refs(),
assert(!soft_ref_policy()->should_clear_all_soft_refs(),
"Flag should have been handled and cleared prior to this point");
// What else? We might try synchronous finalization later. If the total
@ -1463,6 +1464,7 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* collector_policy) :
CollectedHeap(),
_young_gen_sampling_thread(NULL),
_collector_policy(collector_policy),
_soft_ref_policy(),
_memory_manager("G1 Young Generation", "end of minor GC"),
_full_gc_memory_manager("G1 Old Generation", "end of major GC"),
_eden_pool(NULL),
@ -1893,6 +1895,10 @@ CollectorPolicy* G1CollectedHeap::collector_policy() const {
return _collector_policy;
}
SoftRefPolicy* G1CollectedHeap::soft_ref_policy() {
return &_soft_ref_policy;
}
size_t G1CollectedHeap::capacity() const {
return _hrm.length() * HeapRegion::GrainBytes;
}
@ -1989,7 +1995,6 @@ bool G1CollectedHeap::is_user_requested_concurrent_full_gc(GCCause::Cause cause
switch (cause) {
case GCCause::_java_lang_system_gc: return ExplicitGCInvokesConcurrent;
case GCCause::_dcmd_gc_run: return ExplicitGCInvokesConcurrent;
case GCCause::_update_allocation_context_stats_inc: return true;
case GCCause::_wb_conc_mark: return true;
default : return false;
}
@ -2542,8 +2547,6 @@ void G1CollectedHeap::gc_epilogue(bool full) {
resize_all_tlabs();
g1_policy()->phase_times()->record_resize_tlab_time_ms((os::elapsedTime() - start) * 1000.0);
allocation_context_stats().update(full);
MemoryService::track_memory_usage();
// We have just completed a GC. Update the soft reference
// policy with the new heap occupancy

View File

@ -49,6 +49,7 @@
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/plab.hpp"
#include "gc/shared/preservedMarks.hpp"
#include "gc/shared/softRefPolicy.hpp"
#include "memory/memRegion.hpp"
#include "services/memoryManager.hpp"
#include "utilities/stack.hpp"
@ -150,6 +151,8 @@ private:
WorkGang* _workers;
G1CollectorPolicy* _collector_policy;
SoftRefPolicy _soft_ref_policy;
GCMemoryManager _memory_manager;
GCMemoryManager _full_gc_memory_manager;
@ -222,9 +225,6 @@ private:
// Class that handles archive allocation ranges.
G1ArchiveAllocator* _archive_allocator;
// Statistics for each allocation context
AllocationContextStats _allocation_context_stats;
// GC allocation statistics policy for survivors.
G1EvacStats _survivor_evac_stats;
@ -277,8 +277,7 @@ private:
// (b) cause == _g1_humongous_allocation
// (c) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent.
// (d) cause == _dcmd_gc_run and +ExplicitGCInvokesConcurrent.
// (e) cause == _update_allocation_context_stats_inc
// (f) cause == _wb_conc_mark
// (e) cause == _wb_conc_mark
bool should_do_concurrent_full_gc(GCCause::Cause cause);
// indicates whether we are in young or mixed GC mode
@ -580,8 +579,6 @@ public:
// Determines PLAB size for a given destination.
inline size_t desired_plab_sz(InCSetState dest);
inline AllocationContextStats& allocation_context_stats();
// Do anything common to GC's.
void gc_prologue(bool full);
void gc_epilogue(bool full);
@ -998,8 +995,7 @@ public:
virtual CollectorPolicy* collector_policy() const;
// Adaptive size policy. No such thing for g1.
virtual AdaptiveSizePolicy* size_policy() { return NULL; }
virtual SoftRefPolicy* soft_ref_policy();
virtual GrowableArray<GCMemoryManager*> memory_managers();
virtual GrowableArray<MemoryPool*> memory_pools();
@ -1130,11 +1126,6 @@ public:
// "CollectedHeap" supports.
virtual void collect(GCCause::Cause cause);
virtual bool copy_allocation_context_stats(const jint* contexts,
jlong* totals,
jbyte* accuracy,
jint len);
// True iff an evacuation has failed in the most-recent collection.
bool evacuation_failed() { return _evacuation_failed; }

View File

@ -57,10 +57,6 @@ size_t G1CollectedHeap::desired_plab_sz(InCSetState dest) {
// Inline functions for G1CollectedHeap
inline AllocationContextStats& G1CollectedHeap::allocation_context_stats() {
return _allocation_context_stats;
}
// Return the region with the given index. It assumes the index is valid.
inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrm.at(index); }

View File

@ -30,13 +30,6 @@
class STWGCTimer;
bool G1CollectedHeap::copy_allocation_context_stats(const jint* contexts,
jlong* totals,
jbyte* accuracy,
jint len) {
return false;
}
G1Policy* G1CollectedHeap::create_g1_policy(STWGCTimer* gc_timer) {
return new G1DefaultPolicy(gc_timer);
}

View File

@ -38,6 +38,7 @@
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
@ -1275,7 +1276,6 @@ void G1ConcurrentMark::cleanup() {
// We reclaimed old regions so we should calculate the sizes to make
// sure we update the old gen/space data.
g1h->g1mm()->update_sizes();
g1h->allocation_context_stats().update_after_mark();
}
void G1ConcurrentMark::complete_cleanup() {

View File

@ -35,7 +35,7 @@ G1FullGCScope::G1FullGCScope(GCMemoryManager* memory_manager, bool explicit_gc,
_tracer(),
_active(),
_cpu_time(),
_soft_refs(clear_soft, _g1h->collector_policy()),
_soft_refs(clear_soft, _g1h->soft_ref_policy()),
_memory_stats(memory_manager, _g1h->gc_cause()),
_collector_stats(_g1h->g1mm()->full_collection_counters()),
_heap_transition(_g1h) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -235,13 +235,7 @@ private:
size_t const _page_size;
public:
G1PretouchTask(char* start_address, char* end_address, size_t page_size) :
AbstractGangTask("G1 PreTouch",
Universe::is_fully_initialized() &&
Thread::current()->is_Named_thread() ? GCId::current_raw() :
// During VM initialization there is
// no GC cycle that this task can be
// associated with.
GCId::undefined()),
AbstractGangTask("G1 PreTouch"),
_cur_addr(start_address),
_start_addr(start_address),
_end_addr(end_address),

View File

@ -220,8 +220,39 @@ bool G1SATBCardTableModRefBS::is_in_young(oop obj) const {
return *p == g1_young_card_val();
}
void G1SATBCardTableLoggingModRefBS::flush_deferred_barriers(JavaThread* thread) {
CardTableModRefBS::flush_deferred_barriers(thread);
void G1SATBCardTableLoggingModRefBS::on_thread_attach(JavaThread* thread) {
// This method initializes the SATB and dirty card queues before a
// JavaThread is added to the Java thread list. Right now, we don't
// have to do anything to the dirty card queue (it should have been
// activated when the thread was created), but we have to activate
// the SATB queue if the thread is created while a marking cycle is
// in progress. The activation / de-activation of the SATB queues at
// the beginning / end of a marking cycle is done during safepoints
// so we have to make sure this method is called outside one to be
// able to safely read the active field of the SATB queue set. Right
// now, it is called just before the thread is added to the Java
// thread list in the Threads::add() method. That method is holding
// the Threads_lock which ensures we are outside a safepoint. We
// cannot do the obvious and set the active field of the SATB queue
// when the thread is created given that, in some cases, safepoints
// might happen between the JavaThread constructor being called and the
// thread being added to the Java thread list (an example of this is
// when the structure for the DestroyJavaVM thread is created).
assert(!SafepointSynchronize::is_at_safepoint(), "We should not be at a safepoint");
assert(!thread->satb_mark_queue().is_active(), "SATB queue should not be active");
assert(thread->satb_mark_queue().is_empty(), "SATB queue should be empty");
assert(thread->dirty_card_queue().is_active(), "Dirty card queue should be active");
// If we are creating the thread during a marking cycle, we should
// set the active field of the SATB queue to true.
if (thread->satb_mark_queue_set().is_active()) {
thread->satb_mark_queue().set_active(true);
}
}
void G1SATBCardTableLoggingModRefBS::on_thread_detach(JavaThread* thread) {
// Flush any deferred card marks, SATB buffers and dirty card queue buffers
CardTableModRefBS::on_thread_detach(thread);
thread->satb_mark_queue().flush();
thread->dirty_card_queue().flush();
}
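// A minimal sketch (invented names, not the G1 sources) of the attach/detach
// contract implemented above: queue activation is decided outside a safepoint
// while the thread is being added to the thread list, and per-thread buffers
// are flushed before the thread goes away.
#include <vector>

struct QueueSketch {
  std::vector<void*> buffer;
  bool active = false;
  void flush() { buffer.clear(); }   // real code hands buffers to a global set
};

struct ThreadSketch {
  QueueSketch satb_queue;
  QueueSketch dirty_card_queue;
};

struct BarrierSetSketch {
  bool marking_in_progress = false;  // stands in for the SATB queue set's active flag

  void on_thread_attach(ThreadSketch* t) {
    // Caller holds the lock that serializes thread-list updates, so no
    // safepoint can toggle marking_in_progress while we read it.
    t->satb_queue.active = marking_in_progress;
  }

  void on_thread_detach(ThreadSketch* t) {
    t->satb_queue.flush();
    t->dirty_card_queue.flush();
  }
};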

View File

@ -154,7 +154,8 @@ class G1SATBCardTableLoggingModRefBS: public G1SATBCardTableModRefBS {
void write_ref_field_post(T* field, oop new_val);
void write_ref_field_post_slow(volatile jbyte* byte);
virtual void flush_deferred_barriers(JavaThread* thread);
virtual void on_thread_attach(JavaThread* thread);
virtual void on_thread_detach(JavaThread* thread);
virtual bool card_mark_must_follow_store() const {
return true;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -32,6 +32,7 @@
#include "gc/shared/gcLocker.hpp"
#include "logging/log.hpp"
#include "memory/padded.inline.hpp"
#include "oops/arrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/typeArrayOop.hpp"
#include "runtime/mutexLocker.hpp"

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -40,7 +40,8 @@ SATBMarkQueue::SATBMarkQueue(SATBMarkQueueSet* qset, bool permanent) :
// them with their active field set to false. If a thread is
// created during a cycle and its SATB queue needs to be activated
// before the thread starts running, we'll need to set its active
// field to true. This is done in JavaThread::initialize_queues().
// field to true. This is done in G1SATBCardTableLoggingModRefBS::
// on_thread_attach().
PtrQueue(qset, permanent, false /* active */)
{ }

View File

@ -41,11 +41,5 @@ class GenerationSizer : public GenCollectorPolicy {
void initialize_alignments();
void initialize_flags();
void initialize_size_info();
public:
// We don't have associated counters and complain if this is invoked.
void initialize_gc_policy_counters() {
ShouldNotReachHere();
}
};
#endif // SHARE_VM_GC_PARALLEL_GENERATIONSIZER_HPP

View File

@ -331,7 +331,7 @@ HeapWord* ParallelScavengeHeap::mem_allocate(
// excesses). Fill op.result() with a filler object so that the
// heap remains parsable.
const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
const bool softrefs_clear = collector_policy()->all_soft_refs_clear();
const bool softrefs_clear = soft_ref_policy()->all_soft_refs_clear();
if (limit_exceeded && softrefs_clear) {
*gc_overhead_limit_was_exceeded = true;

View File

@ -34,6 +34,7 @@
#include "gc/shared/collectorPolicy.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "gc/shared/gcWhen.hpp"
#include "gc/shared/softRefPolicy.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "memory/metaspace.hpp"
#include "utilities/growableArray.hpp"
@ -59,6 +60,8 @@ class ParallelScavengeHeap : public CollectedHeap {
GenerationSizer* _collector_policy;
SoftRefPolicy _soft_ref_policy;
// Collection of generations that are adjacent in the
// space reserved for the heap.
AdjoiningGenerations* _gens;
@ -106,6 +109,8 @@ class ParallelScavengeHeap : public CollectedHeap {
virtual CollectorPolicy* collector_policy() const { return _collector_policy; }
virtual SoftRefPolicy* soft_ref_policy() { return &_soft_ref_policy; }
virtual GrowableArray<GCMemoryManager*> memory_managers();
virtual GrowableArray<MemoryPool*> memory_pools();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -181,7 +181,7 @@ void InstanceClassLoaderKlass::oop_pc_follow_contents(oop obj, ParCompactionMana
template <class T>
static void oop_pc_follow_contents_specialized(InstanceRefKlass* klass, oop obj, ParCompactionManager* cm) {
T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
T* referent_addr = (T*)java_lang_ref_Reference::referent_addr_raw(obj);
T heap_oop = oopDesc::load_heap_oop(referent_addr);
log_develop_trace(gc, ref)("InstanceRefKlass::oop_pc_follow_contents " PTR_FORMAT, p2i(obj));
if (!oopDesc::is_null(heap_oop)) {
@ -198,12 +198,12 @@ static void oop_pc_follow_contents_specialized(InstanceRefKlass* klass, oop obj,
cm->mark_and_push(referent_addr);
}
}
T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
T* next_addr = (T*)java_lang_ref_Reference::next_addr_raw(obj);
// Treat discovered as normal oop, if ref is not "active",
// i.e. if next is non-NULL.
T next_oop = oopDesc::load_heap_oop(next_addr);
if (!oopDesc::is_null(next_oop)) { // i.e. ref is not "active"
T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr_raw(obj);
log_develop_trace(gc, ref)(" Process discovered as normal " PTR_FORMAT, p2i(discovered_addr));
cm->mark_and_push(discovered_addr);
}

View File

@ -29,7 +29,8 @@
#include "gc/parallel/psCompactionManager.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/arrayOop.inline.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
@ -117,7 +118,7 @@ inline void oop_pc_follow_contents_specialized(objArrayOop obj, int index, ParCo
const size_t stride = MIN2(len - beg_index, ObjArrayMarkingStride);
const size_t end_index = beg_index + stride;
T* const base = (T*)obj->base();
T* const base = (T*)obj->base_raw();
T* const beg = base + beg_index;
T* const end = base + end_index;

View File

@ -98,7 +98,7 @@ void PSMarkSweep::invoke(bool maximum_heap_compaction) {
}
const bool clear_all_soft_refs =
heap->collector_policy()->should_clear_all_soft_refs();
heap->soft_ref_policy()->should_clear_all_soft_refs();
uint count = maximum_heap_compaction ? 1 : MarkSweepAlwaysCompactCount;
UIntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
@ -126,7 +126,7 @@ bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
// The scope of casr should end after code that can change
// CollectorPolicy::_should_clear_all_soft_refs.
ClearedAllSoftRefs casr(clear_all_softrefs, heap->collector_policy());
ClearedAllSoftRefs casr(clear_all_softrefs, heap->soft_ref_policy());
PSYoungGen* young_gen = heap->young_gen();
PSOldGen* old_gen = heap->old_gen();
@@ -320,7 +320,7 @@ bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
max_eden_size,
true /* full gc*/,
gc_cause,
heap->collector_policy());
heap->soft_ref_policy());
size_policy->decay_supplemental_growth(true /* full gc*/);

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1707,7 +1707,7 @@ void PSParallelCompact::invoke(bool maximum_heap_compaction) {
}
const bool clear_all_soft_refs =
heap->collector_policy()->should_clear_all_soft_refs();
heap->soft_ref_policy()->should_clear_all_soft_refs();
PSParallelCompact::invoke_no_policy(clear_all_soft_refs ||
maximum_heap_compaction);
@@ -1741,7 +1741,7 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
// The scope of casr should end after code that can change
// CollectorPolicy::_should_clear_all_soft_refs.
ClearedAllSoftRefs casr(maximum_heap_compaction,
heap->collector_policy());
heap->soft_ref_policy());
if (ZapUnusedHeapArea) {
// Save information needed to minimize mangling
@@ -1869,7 +1869,7 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
max_eden_size,
true /* full gc*/,
gc_cause,
heap->collector_policy());
heap->soft_ref_policy());
size_policy->decay_supplemental_growth(true /* full gc*/);
@@ -3087,11 +3087,11 @@ template <class T> static void trace_reference_gc(const char *s, oop obj,
template <class T>
static void oop_pc_update_pointers_specialized(oop obj, ParCompactionManager* cm) {
T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
T* referent_addr = (T*)java_lang_ref_Reference::referent_addr_raw(obj);
PSParallelCompact::adjust_pointer(referent_addr, cm);
T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
T* next_addr = (T*)java_lang_ref_Reference::next_addr_raw(obj);
PSParallelCompact::adjust_pointer(next_addr, cm);
T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr_raw(obj);
PSParallelCompact::adjust_pointer(discovered_addr, cm);
debug_only(trace_reference_gc("InstanceRefKlass::oop_update_ptrs", obj,
referent_addr, next_addr, discovered_addr);)

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -38,9 +38,11 @@
#include "memory/memRegion.hpp"
#include "memory/padded.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/arrayOop.inline.hpp"
#include "oops/instanceKlass.inline.hpp"
#include "oops/instanceMirrorKlass.inline.hpp"
#include "oops/objArrayKlass.inline.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
PaddedEnd<PSPromotionManager>* PSPromotionManager::_manager_array = NULL;
@@ -434,7 +436,7 @@ void InstanceClassLoaderKlass::oop_ps_push_contents(oop obj, PSPromotionManager*
template <class T>
static void oop_ps_push_contents_specialized(oop obj, InstanceRefKlass *klass, PSPromotionManager* pm) {
T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
T* referent_addr = (T*)java_lang_ref_Reference::referent_addr_raw(obj);
if (PSScavenge::should_scavenge(referent_addr)) {
ReferenceProcessor* rp = PSScavenge::reference_processor();
if (rp->discover_reference(obj, klass->reference_type())) {
@@ -448,10 +450,10 @@ static void oop_ps_push_contents_specialized(oop obj, InstanceRefKlass *klass, P
}
// Treat discovered as normal oop, if ref is not "active",
// i.e. if next is non-NULL.
T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
T* next_addr = (T*)java_lang_ref_Reference::next_addr_raw(obj);
T next_oop = oopDesc::load_heap_oop(next_addr);
if (!oopDesc::is_null(next_oop)) { // i.e. ref is not "active"
T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr_raw(obj);
log_develop_trace(gc, ref)(" Process discovered as normal " PTR_FORMAT, p2i(discovered_addr));
if (PSScavenge::should_scavenge(discovered_addr)) {
pm->claim_or_forward_depth(discovered_addr);

View File

@@ -228,8 +228,8 @@ bool PSScavenge::invoke() {
if (need_full_gc) {
GCCauseSetter gccs(heap, GCCause::_adaptive_size_policy);
CollectorPolicy* cp = heap->collector_policy();
const bool clear_all_softrefs = cp->should_clear_all_soft_refs();
SoftRefPolicy* srp = heap->soft_ref_policy();
const bool clear_all_softrefs = srp->should_clear_all_soft_refs();
if (UseParallelOldGC) {
full_gc_done = PSParallelCompact::invoke_no_policy(clear_all_softrefs);
@@ -569,7 +569,7 @@ bool PSScavenge::invoke_no_policy() {
max_eden_size,
false /* not full gc*/,
gc_cause,
heap->collector_policy());
heap->soft_ref_policy());
size_policy->decay_supplemental_growth(false /* not full gc*/);
}

View File

@@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "gc/serial/defNewGeneration.inline.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/ageTable.inline.hpp"
#include "gc/shared/cardTableRS.hpp"
#include "gc/shared/collectorCounters.hpp"
@@ -564,7 +565,7 @@ void DefNewGeneration::adjust_desired_tenuring_threshold() {
_tenuring_threshold = age_table()->compute_tenuring_threshold(desired_survivor_size);
if (UsePerfData) {
GCPolicyCounters* gc_counters = GenCollectedHeap::heap()->gen_policy()->counters();
GCPolicyCounters* gc_counters = GenCollectedHeap::heap()->counters();
gc_counters->tenuring_threshold()->set_value(_tenuring_threshold);
gc_counters->desired_survivor_size()->set_value(desired_survivor_size * oopSize);
}
@@ -616,9 +617,6 @@ void DefNewGeneration::collect(bool full,
assert(gch->no_allocs_since_save_marks(),
"save marks have not been newly set.");
// Not very pretty.
CollectorPolicy* cp = gch->collector_policy();
FastScanClosure fsc_with_no_gc_barrier(this, false);
FastScanClosure fsc_with_gc_barrier(this, true);
@@ -688,7 +686,7 @@ void DefNewGeneration::collect(bool full,
// A successful scavenge should restart the GC time limit count which is
// for full GC's.
AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
AdaptiveSizePolicy* size_policy = gch->size_policy();
size_policy->reset_gc_overhead_limit_count();
assert(!gch->incremental_collection_failed(), "Should be clear");
} else {
@@ -953,7 +951,7 @@ void DefNewGeneration::gc_epilogue(bool full) {
// update the generation and space performance counters
update_counters();
gch->gen_policy()->counters()->update_counters();
gch->counters()->update_counters();
}
void DefNewGeneration::record_spaces_top() {
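The call-site changes in this file (gch->counters() and gch->size_policy() replacing the gen_policy() indirection) imply that GenCollectedHeap now exposes the policy counters and the adaptive size policy directly. A minimal declaration-level sketch of the assumed accessors, inferred only from these call sites and from the fields removed from GenCollectorPolicy later in this diff; the member names are assumptions, not taken from the commit itself:

// Sketch only -- inferred from the gch->size_policy()/gch->counters() call sites above.
class AdaptiveSizePolicy;
class GCPolicyCounters;

class GenCollectedHeap : public CollectedHeap {
  AdaptiveSizePolicy* _size_policy;         // assumed: was GenCollectorPolicy::_size_policy
  GCPolicyCounters*   _gc_policy_counters;  // assumed: was GenCollectorPolicy::_gc_policy_counters
public:
  AdaptiveSizePolicy* size_policy() { return _size_policy; }
  GCPolicyCounters*   counters()    { return _gc_policy_counters; }
};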

View File

@@ -60,7 +60,7 @@ void GenMarkSweep::invoke_at_safepoint(ReferenceProcessor* rp, bool clear_all_so
GenCollectedHeap* gch = GenCollectedHeap::heap();
#ifdef ASSERT
if (gch->collector_policy()->should_clear_all_soft_refs()) {
if (gch->soft_ref_policy()->should_clear_all_soft_refs()) {
assert(clear_all_softrefs, "Policy should have been checked earlier");
}
#endif

View File

@@ -29,7 +29,13 @@
#include "services/memoryManager.hpp"
SerialHeap::SerialHeap(GenCollectorPolicy* policy) :
GenCollectedHeap(policy), _eden_pool(NULL), _survivor_pool(NULL), _old_pool(NULL) {
GenCollectedHeap(policy,
Generation::DefNew,
Generation::MarkSweepCompact,
"Copy:MSC"),
_eden_pool(NULL),
_survivor_pool(NULL),
_old_pool(NULL) {
_young_manager = new GCMemoryManager("Copy", "end of minor GC");
_old_manager = new GCMemoryManager("MarkSweepCompact", "end of major GC");
}
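The widened base-class call above passes the generation kinds and the policy-counter name straight to GenCollectedHeap, work that MarkSweepPolicy::initialize_generations() and initialize_gc_policy_counters() used to do (both are removed later in this diff). A declaration-level sketch of the constructor this call site presumably targets; the parameter names are assumptions:

// Sketch (assumption): signature inferred from the SerialHeap constructor call above.
GenCollectedHeap(GenCollectorPolicy* policy,
                 Generation::Name    young_gen_kind,
                 Generation::Name    old_gen_kind,
                 const char*         policy_counters_name);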

View File

@@ -27,10 +27,12 @@
#include "gc/shared/collectorPolicy.hpp"
#include "gc/shared/gcCause.hpp"
#include "gc/shared/gcUtil.inline.hpp"
#include "gc/shared/softRefPolicy.hpp"
#include "gc/shared/workgroup.hpp"
#include "logging/log.hpp"
#include "runtime/timer.hpp"
#include "utilities/ostream.hpp"
elapsedTimer AdaptiveSizePolicy::_minor_timer;
elapsedTimer AdaptiveSizePolicy::_major_timer;
bool AdaptiveSizePolicy::_debug_perturbation = false;
@@ -409,7 +411,7 @@ void AdaptiveSizePolicy::check_gc_overhead_limit(
size_t max_eden_size,
bool is_full_gc,
GCCause::Cause gc_cause,
CollectorPolicy* collector_policy) {
SoftRefPolicy* soft_ref_policy) {
// Ignore explicit GC's. Exiting here does not set the flag and
// does not reset the count. Updating of the averages for system
@@ -506,7 +508,7 @@ void AdaptiveSizePolicy::check_gc_overhead_limit(
// The clearing will be done on the next GC.
bool near_limit = gc_overhead_limit_near();
if (near_limit) {
collector_policy->set_should_clear_all_soft_refs(true);
soft_ref_policy->set_should_clear_all_soft_refs(true);
log_trace(gc, ergo)("Nearing GC overhead limit, will be clearing all SoftReference");
}
}

View File

@@ -37,7 +37,7 @@
// Forward decls
class elapsedTimer;
class CollectorPolicy;
class SoftRefPolicy;
class AdaptiveSizePolicy : public CHeapObj<mtGC> {
friend class GCAdaptivePolicyCounters;
@@ -486,7 +486,7 @@ class AdaptiveSizePolicy : public CHeapObj<mtGC> {
size_t max_eden_size,
bool is_full_gc,
GCCause::Cause gc_cause,
CollectorPolicy* collector_policy);
SoftRefPolicy* soft_ref_policy);
static bool should_update_promo_stats(GCCause::Cause cause) {
return ((GCCause::is_user_requested_gc(cause) &&

View File

@@ -115,7 +115,8 @@ public:
// is redone until it succeeds. This can e.g. prevent allocations from the slow path
// to be in old.
virtual void on_slowpath_allocation_exit(JavaThread* thread, oop new_obj) {}
virtual void flush_deferred_barriers(JavaThread* thread) {}
virtual void on_thread_attach(JavaThread* thread) {}
virtual void on_thread_detach(JavaThread* thread) {}
virtual void make_parsable(JavaThread* thread) {}
protected:
@@ -272,6 +273,10 @@ public:
static void clone_in_heap(oop src, oop dst, size_t size) {
Raw::clone(src, dst, size);
}
static oop resolve(oop obj) {
return Raw::resolve(obj);
}
};
};

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -52,9 +52,17 @@
// To enable runtime-resolution of GC barriers on primitives, please
// define SUPPORT_BARRIER_ON_PRIMITIVES.
#ifdef SUPPORT_BARRIER_ON_PRIMITIVES
#define BT_BUILDTIME_DECORATORS INTERNAL_BT_BARRIER_ON_PRIMITIVES
#define ACCESS_PRIMITIVE_SUPPORT INTERNAL_BT_BARRIER_ON_PRIMITIVES
#else
#define BT_BUILDTIME_DECORATORS INTERNAL_EMPTY
#define ACCESS_PRIMITIVE_SUPPORT INTERNAL_EMPTY
#endif
#ifdef SUPPORT_NOT_TO_SPACE_INVARIANT
#define ACCESS_TO_SPACE_INVARIANT_SUPPORT INTERNAL_EMPTY
#else
#define ACCESS_TO_SPACE_INVARIANT_SUPPORT INTERNAL_BT_TO_SPACE_INVARIANT
#endif
#define BT_BUILDTIME_DECORATORS (ACCESS_PRIMITIVE_SUPPORT | ACCESS_TO_SPACE_INVARIANT_SUPPORT)
#endif // SHARE_VM_GC_SHARED_BARRIERSETCONFIG_HPP
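The two #ifdef blocks now contribute independent pieces that the final #define ORs together. As a worked illustration (a hypothetical build configuration, not taken from this diff), defining SUPPORT_BARRIER_ON_PRIMITIVES while leaving SUPPORT_NOT_TO_SPACE_INVARIANT undefined yields:

// Hypothetical expansion for that configuration:
//   ACCESS_PRIMITIVE_SUPPORT          -> INTERNAL_BT_BARRIER_ON_PRIMITIVES
//   ACCESS_TO_SPACE_INVARIANT_SUPPORT -> INTERNAL_BT_TO_SPACE_INVARIANT
//   BT_BUILDTIME_DECORATORS           -> (INTERNAL_BT_BARRIER_ON_PRIMITIVES |
//                                         INTERNAL_BT_TO_SPACE_INVARIANT)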

View File

@@ -627,7 +627,7 @@ void CardTableModRefBS::flush_deferred_card_mark_barrier(JavaThread* thread) {
#endif
}
void CardTableModRefBS::flush_deferred_barriers(JavaThread* thread) {
void CardTableModRefBS::on_thread_detach(JavaThread* thread) {
// The deferred store barriers must all have been flushed to the
// card-table (or other remembered set structure) before GC starts
// processing the card-table (or other remembered set).

View File

@@ -357,7 +357,7 @@ class CardTableModRefBS: public ModRefBarrierSet {
virtual bool is_in_young(oop obj) const = 0;
virtual void on_slowpath_allocation_exit(JavaThread* thread, oop new_obj);
virtual void flush_deferred_barriers(JavaThread* thread);
virtual void on_thread_detach(JavaThread* thread);
virtual void make_parsable(JavaThread* thread) { flush_deferred_card_mark_barrier(thread); }

View File

@@ -28,6 +28,7 @@
#include "gc/shared/barrierSet.inline.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
@@ -41,9 +42,11 @@
#include "runtime/init.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/vmThread.hpp"
#include "services/heapDumper.hpp"
#include "utilities/align.hpp"
class ClassLoaderData;
#ifdef ASSERT
int CollectedHeap::_fire_out_of_memory_count = 0;
@@ -233,6 +236,80 @@ void CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
}
}
MetaWord* CollectedHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
size_t word_size,
Metaspace::MetadataType mdtype) {
uint loop_count = 0;
uint gc_count = 0;
uint full_gc_count = 0;
assert(!Heap_lock->owned_by_self(), "Should not be holding the Heap_lock");
do {
MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
if (result != NULL) {
return result;
}
if (GCLocker::is_active_and_needs_gc()) {
// If the GCLocker is active, just expand and allocate.
// If that does not succeed, wait if this thread is not
// in a critical section itself.
result = loader_data->metaspace_non_null()->expand_and_allocate(word_size, mdtype);
if (result != NULL) {
return result;
}
JavaThread* jthr = JavaThread::current();
if (!jthr->in_critical()) {
// Wait for JNI critical section to be exited
GCLocker::stall_until_clear();
// The GC invoked by the last thread leaving the critical
// section will be a young collection and a full collection
// is (currently) needed for unloading classes so continue
// to the next iteration to get a full GC.
continue;
} else {
if (CheckJNICalls) {
fatal("Possible deadlock due to allocating while"
" in jni critical section");
}
return NULL;
}
}
{ // Need lock to get self consistent gc_count's
MutexLocker ml(Heap_lock);
gc_count = Universe::heap()->total_collections();
full_gc_count = Universe::heap()->total_full_collections();
}
// Generate a VM operation
VM_CollectForMetadataAllocation op(loader_data,
word_size,
mdtype,
gc_count,
full_gc_count,
GCCause::_metadata_GC_threshold);
VMThread::execute(&op);
// If GC was locked out, try again. Check before checking success because the
// prologue could have succeeded and the GC still have been locked out.
if (op.gc_locked()) {
continue;
}
if (op.prologue_succeeded()) {
return op.result();
}
loop_count++;
if ((QueuedAllocationWarningCount > 0) &&
(loop_count % QueuedAllocationWarningCount == 0)) {
log_warning(gc, ergo)("satisfy_failed_metadata_allocation() retries %d times,"
" size=" SIZE_FORMAT, loop_count, word_size);
}
} while (true); // Until a GC is done
}
void CollectedHeap::set_barrier_set(BarrierSet* barrier_set) {
_barrier_set = barrier_set;
BarrierSet::set_bs(barrier_set);

View File

@@ -50,6 +50,7 @@ class GCTracer;
class GCMemoryManager;
class MemoryPool;
class MetaspaceSummary;
class SoftRefPolicy;
class Thread;
class ThreadClosure;
class VirtualSpaceSummary;
@@ -411,6 +412,10 @@ class CollectedHeap : public CHeapObj<mtInternal> {
// the context of the vm thread.
virtual void collect_as_vm_thread(GCCause::Cause cause);
virtual MetaWord* satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
size_t size,
Metaspace::MetadataType mdtype);
// Returns the barrier set for this heap
BarrierSet* barrier_set() { return _barrier_set; }
void set_barrier_set(BarrierSet* barrier_set);
@@ -438,6 +443,9 @@ class CollectedHeap : public CHeapObj<mtInternal> {
// Return the CollectorPolicy for the heap
virtual CollectorPolicy* collector_policy() const = 0;
// Return the SoftRefPolicy for the heap;
virtual SoftRefPolicy* soft_ref_policy() = 0;
virtual GrowableArray<GCMemoryManager*> memory_managers() = 0;
virtual GrowableArray<MemoryPool*> memory_pools() = 0;
@@ -492,7 +500,7 @@ class CollectedHeap : public CHeapObj<mtInternal> {
void pre_full_gc_dump(GCTimer* timer);
void post_full_gc_dump(GCTimer* timer);
VirtualSpaceSummary create_heap_space_summary();
virtual VirtualSpaceSummary create_heap_space_summary();
GCHeapSummary create_heap_summary();
MetaspaceSummary create_metaspace_summary();
@@ -599,20 +607,6 @@ class CollectedHeap : public CHeapObj<mtInternal> {
return (CIFireOOMAt > 1 && _fire_out_of_memory_count >= CIFireOOMAt);
}
#endif
public:
// Copy the current allocation context statistics for the specified contexts.
// For each context in contexts, set the corresponding entries in the totals
// and accuracy arrays to the current values held by the statistics. Each
// array should be of length len.
// Returns true if there are more stats available.
virtual bool copy_allocation_context_stats(const jint* contexts,
jlong* totals,
jbyte* accuracy,
jint len) {
return false;
}
};
// Class to set and reset the GC cause for a CollectedHeap.
@@ -622,16 +616,12 @@ class GCCauseSetter : StackObj {
GCCause::Cause _previous_cause;
public:
GCCauseSetter(CollectedHeap* heap, GCCause::Cause cause) {
assert(SafepointSynchronize::is_at_safepoint(),
"This method manipulates heap state without locking");
_heap = heap;
_previous_cause = _heap->gc_cause();
_heap->set_gc_cause(cause);
}
~GCCauseSetter() {
assert(SafepointSynchronize::is_at_safepoint(),
"This method manipulates heap state without locking");
_heap->set_gc_cause(_previous_cause);
}
};

View File

@@ -50,9 +50,7 @@ CollectorPolicy::CollectorPolicy() :
_heap_alignment(0),
_initial_heap_byte_size(InitialHeapSize),
_max_heap_byte_size(MaxHeapSize),
_min_heap_byte_size(Arguments::min_heap_size()),
_should_clear_all_soft_refs(false),
_all_soft_refs_clear(false)
_min_heap_byte_size(Arguments::min_heap_size())
{}
#ifdef ASSERT
@@ -145,20 +143,6 @@ void CollectorPolicy::initialize_size_info() {
DEBUG_ONLY(CollectorPolicy::assert_size_info();)
}
bool CollectorPolicy::use_should_clear_all_soft_refs(bool v) {
bool result = _should_clear_all_soft_refs;
set_should_clear_all_soft_refs(false);
return result;
}
CardTableRS* CollectorPolicy::create_rem_set(MemRegion whole_heap) {
return new CardTableRS(whole_heap);
}
void CollectorPolicy::cleared_all_soft_refs() {
_all_soft_refs_clear = true;
}
size_t CollectorPolicy::compute_heap_alignment() {
// The card marking array and the offset arrays for old generations are
// committed in os pages as well. Make sure they are entirely full (to
@@ -186,10 +170,7 @@ GenCollectorPolicy::GenCollectorPolicy() :
_min_old_size(0),
_initial_old_size(0),
_max_old_size(0),
_gen_alignment(0),
_young_gen_spec(NULL),
_old_gen_spec(NULL),
_size_policy(NULL)
_gen_alignment(0)
{}
size_t GenCollectorPolicy::scale_by_NewRatio_aligned(size_t base_size) {
@@ -202,29 +183,6 @@ size_t GenCollectorPolicy::bound_minus_alignment(size_t desired_size,
return desired_size < max_minus ? desired_size : max_minus;
}
void GenCollectorPolicy::initialize_size_policy(size_t init_eden_size,
size_t init_promo_size,
size_t init_survivor_size) {
const double max_gc_pause_sec = ((double) MaxGCPauseMillis) / 1000.0;
_size_policy = new AdaptiveSizePolicy(init_eden_size,
init_promo_size,
init_survivor_size,
max_gc_pause_sec,
GCTimeRatio);
}
void GenCollectorPolicy::cleared_all_soft_refs() {
// If near gc overhead limit, continue to clear SoftRefs. SoftRefs may
// have been cleared in the last collection but if the gc overhead
// limit continues to be near, SoftRefs should still be cleared.
if (size_policy() != NULL) {
_should_clear_all_soft_refs = size_policy()->gc_overhead_limit_near();
}
CollectorPolicy::cleared_all_soft_refs();
}
size_t GenCollectorPolicy::young_gen_size_lower_bound() {
// The young generation must be aligned and have room for eden + two survivors
return align_up(3 * _space_alignment, _gen_alignment);
@@ -580,322 +538,6 @@ void GenCollectorPolicy::initialize_size_info() {
DEBUG_ONLY(GenCollectorPolicy::assert_size_info();)
}
HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
bool is_tlab,
bool* gc_overhead_limit_was_exceeded) {
GenCollectedHeap *gch = GenCollectedHeap::heap();
debug_only(gch->check_for_valid_allocation_state());
assert(gch->no_gc_in_progress(), "Allocation during gc not allowed");
// In general gc_overhead_limit_was_exceeded should be false so
// set it so here and reset it to true only if the gc time
// limit is being exceeded as checked below.
*gc_overhead_limit_was_exceeded = false;
HeapWord* result = NULL;
// Loop until the allocation is satisfied, or unsatisfied after GC.
for (uint try_count = 1, gclocker_stalled_count = 0; /* return or throw */; try_count += 1) {
HandleMark hm; // Discard any handles allocated in each iteration.
// First allocation attempt is lock-free.
Generation *young = gch->young_gen();
assert(young->supports_inline_contig_alloc(),
"Otherwise, must do alloc within heap lock");
if (young->should_allocate(size, is_tlab)) {
result = young->par_allocate(size, is_tlab);
if (result != NULL) {
assert(gch->is_in_reserved(result), "result not in heap");
return result;
}
}
uint gc_count_before; // Read inside the Heap_lock locked region.
{
MutexLocker ml(Heap_lock);
log_trace(gc, alloc)("GenCollectorPolicy::mem_allocate_work: attempting locked slow path allocation");
// Note that only large objects get a shot at being
// allocated in later generations.
bool first_only = ! should_try_older_generation_allocation(size);
result = gch->attempt_allocation(size, is_tlab, first_only);
if (result != NULL) {
assert(gch->is_in_reserved(result), "result not in heap");
return result;
}
if (GCLocker::is_active_and_needs_gc()) {
if (is_tlab) {
return NULL; // Caller will retry allocating individual object.
}
if (!gch->is_maximal_no_gc()) {
// Try and expand heap to satisfy request.
result = expand_heap_and_allocate(size, is_tlab);
// Result could be null if we are out of space.
if (result != NULL) {
return result;
}
}
if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
return NULL; // We didn't get to do a GC and we didn't get any memory.
}
// If this thread is not in a jni critical section, we stall
// the requestor until the critical section has cleared and
// GC allowed. When the critical section clears, a GC is
// initiated by the last thread exiting the critical section; so
// we retry the allocation sequence from the beginning of the loop,
// rather than causing more, now probably unnecessary, GC attempts.
JavaThread* jthr = JavaThread::current();
if (!jthr->in_critical()) {
MutexUnlocker mul(Heap_lock);
// Wait for JNI critical section to be exited
GCLocker::stall_until_clear();
gclocker_stalled_count += 1;
continue;
} else {
if (CheckJNICalls) {
fatal("Possible deadlock due to allocating while"
" in jni critical section");
}
return NULL;
}
}
// Read the gc count while the heap lock is held.
gc_count_before = gch->total_collections();
}
VM_GenCollectForAllocation op(size, is_tlab, gc_count_before);
VMThread::execute(&op);
if (op.prologue_succeeded()) {
result = op.result();
if (op.gc_locked()) {
assert(result == NULL, "must be NULL if gc_locked() is true");
continue; // Retry and/or stall as necessary.
}
// Allocation has failed and a collection
// has been done. If the gc time limit was exceeded
// this time, return NULL so that an out-of-memory
// will be thrown. Clear gc_overhead_limit_exceeded
// so that the overhead exceeded does not persist.
const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
const bool softrefs_clear = all_soft_refs_clear();
if (limit_exceeded && softrefs_clear) {
*gc_overhead_limit_was_exceeded = true;
size_policy()->set_gc_overhead_limit_exceeded(false);
if (op.result() != NULL) {
CollectedHeap::fill_with_object(op.result(), size);
}
return NULL;
}
assert(result == NULL || gch->is_in_reserved(result),
"result not in heap");
return result;
}
// Give a warning if we seem to be looping forever.
if ((QueuedAllocationWarningCount > 0) &&
(try_count % QueuedAllocationWarningCount == 0)) {
log_warning(gc, ergo)("GenCollectorPolicy::mem_allocate_work retries %d times,"
" size=" SIZE_FORMAT " %s", try_count, size, is_tlab ? "(TLAB)" : "");
}
}
}
HeapWord* GenCollectorPolicy::expand_heap_and_allocate(size_t size,
bool is_tlab) {
GenCollectedHeap *gch = GenCollectedHeap::heap();
HeapWord* result = NULL;
Generation *old = gch->old_gen();
if (old->should_allocate(size, is_tlab)) {
result = old->expand_and_allocate(size, is_tlab);
}
if (result == NULL) {
Generation *young = gch->young_gen();
if (young->should_allocate(size, is_tlab)) {
result = young->expand_and_allocate(size, is_tlab);
}
}
assert(result == NULL || gch->is_in_reserved(result), "result not in heap");
return result;
}
HeapWord* GenCollectorPolicy::satisfy_failed_allocation(size_t size,
bool is_tlab) {
GenCollectedHeap *gch = GenCollectedHeap::heap();
GCCauseSetter x(gch, GCCause::_allocation_failure);
HeapWord* result = NULL;
assert(size != 0, "Precondition violated");
if (GCLocker::is_active_and_needs_gc()) {
// GC locker is active; instead of a collection we will attempt
// to expand the heap, if there's room for expansion.
if (!gch->is_maximal_no_gc()) {
result = expand_heap_and_allocate(size, is_tlab);
}
return result; // Could be null if we are out of space.
} else if (!gch->incremental_collection_will_fail(false /* don't consult_young */)) {
// Do an incremental collection.
gch->do_collection(false, // full
false, // clear_all_soft_refs
size, // size
is_tlab, // is_tlab
GenCollectedHeap::OldGen); // max_generation
} else {
log_trace(gc)(" :: Trying full because partial may fail :: ");
// Try a full collection; see delta for bug id 6266275
// for the original code and why this has been simplified
// with from-space allocation criteria modified and
// such allocation moved out of the safepoint path.
gch->do_collection(true, // full
false, // clear_all_soft_refs
size, // size
is_tlab, // is_tlab
GenCollectedHeap::OldGen); // max_generation
}
result = gch->attempt_allocation(size, is_tlab, false /*first_only*/);
if (result != NULL) {
assert(gch->is_in_reserved(result), "result not in heap");
return result;
}
// OK, collection failed, try expansion.
result = expand_heap_and_allocate(size, is_tlab);
if (result != NULL) {
return result;
}
// If we reach this point, we're really out of memory. Try every trick
// we can to reclaim memory. Force collection of soft references. Force
// a complete compaction of the heap. Any additional methods for finding
// free memory should be here, especially if they are expensive. If this
// attempt fails, an OOM exception will be thrown.
{
UIntFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted
gch->do_collection(true, // full
true, // clear_all_soft_refs
size, // size
is_tlab, // is_tlab
GenCollectedHeap::OldGen); // max_generation
}
result = gch->attempt_allocation(size, is_tlab, false /* first_only */);
if (result != NULL) {
assert(gch->is_in_reserved(result), "result not in heap");
return result;
}
assert(!should_clear_all_soft_refs(),
"Flag should have been handled and cleared prior to this point");
// What else? We might try synchronous finalization later. If the total
// space available is large enough for the allocation, then a more
// complete compaction phase than we've tried so far might be
// appropriate.
return NULL;
}
MetaWord* CollectorPolicy::satisfy_failed_metadata_allocation(
ClassLoaderData* loader_data,
size_t word_size,
Metaspace::MetadataType mdtype) {
uint loop_count = 0;
uint gc_count = 0;
uint full_gc_count = 0;
assert(!Heap_lock->owned_by_self(), "Should not be holding the Heap_lock");
do {
MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
if (result != NULL) {
return result;
}
if (GCLocker::is_active_and_needs_gc()) {
// If the GCLocker is active, just expand and allocate.
// If that does not succeed, wait if this thread is not
// in a critical section itself.
result =
loader_data->metaspace_non_null()->expand_and_allocate(word_size,
mdtype);
if (result != NULL) {
return result;
}
JavaThread* jthr = JavaThread::current();
if (!jthr->in_critical()) {
// Wait for JNI critical section to be exited
GCLocker::stall_until_clear();
// The GC invoked by the last thread leaving the critical
// section will be a young collection and a full collection
// is (currently) needed for unloading classes so continue
// to the next iteration to get a full GC.
continue;
} else {
if (CheckJNICalls) {
fatal("Possible deadlock due to allocating while"
" in jni critical section");
}
return NULL;
}
}
{ // Need lock to get self consistent gc_count's
MutexLocker ml(Heap_lock);
gc_count = Universe::heap()->total_collections();
full_gc_count = Universe::heap()->total_full_collections();
}
// Generate a VM operation
VM_CollectForMetadataAllocation op(loader_data,
word_size,
mdtype,
gc_count,
full_gc_count,
GCCause::_metadata_GC_threshold);
VMThread::execute(&op);
// If GC was locked out, try again. Check before checking success because the
// prologue could have succeeded and the GC still have been locked out.
if (op.gc_locked()) {
continue;
}
if (op.prologue_succeeded()) {
return op.result();
}
loop_count++;
if ((QueuedAllocationWarningCount > 0) &&
(loop_count % QueuedAllocationWarningCount == 0)) {
log_warning(gc, ergo)("satisfy_failed_metadata_allocation() retries %d times,"
" size=" SIZE_FORMAT, loop_count, word_size);
}
} while (true); // Until a GC is done
}
// Return true if any of the following is true:
// . the allocation won't fit into the current young gen heap
// . gc locker is occupied (jni critical section)
// . heap memory is tight -- the most recent previous collection
// was a full collection because a partial collection (would
// have) failed and is likely to fail again
bool GenCollectorPolicy::should_try_older_generation_allocation(
size_t word_size) const {
GenCollectedHeap* gch = GenCollectedHeap::heap();
size_t young_capacity = gch->young_gen()->capacity_before_gc();
return (word_size > heap_word_size(young_capacity))
|| GCLocker::is_active_and_needs_gc()
|| gch->incremental_collection_failed();
}
//
// MarkSweepPolicy methods
//
@@ -904,14 +546,3 @@ void MarkSweepPolicy::initialize_alignments() {
_space_alignment = _gen_alignment = (size_t)Generation::GenGrain;
_heap_alignment = compute_heap_alignment();
}
void MarkSweepPolicy::initialize_generations() {
_young_gen_spec = new GenerationSpec(Generation::DefNew, _initial_young_size, _max_young_size, _gen_alignment);
_old_gen_spec = new GenerationSpec(Generation::MarkSweepCompact, _initial_old_size, _max_old_size, _gen_alignment);
}
void MarkSweepPolicy::initialize_gc_policy_counters() {
// Initialize the policy counters - 2 collectors, 2 generations.
_gc_policy_counters = new GCPolicyCounters("Copy:MSC", 2, 2);
}
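Net effect of the removals in this file: CollectorPolicy no longer carries allocation slow paths or generation setup. The metadata path reappears verbatim as CollectedHeap::satisfy_failed_metadata_allocation earlier in this diff; the object-allocation paths are presumably re-homed on GenCollectedHeap in files not shown here. A declaration-level sketch of that assumed new home (signatures copied from the removed definitions, the placement itself is an assumption):

// Sketch (assumption): allocation slow paths relocated from GenCollectorPolicy.
class GenCollectedHeap : public CollectedHeap {
  // ...
  HeapWord* mem_allocate_work(size_t size, bool is_tlab, bool* gc_overhead_limit_was_exceeded);
  HeapWord* expand_heap_and_allocate(size_t size, bool is_tlab);
  HeapWord* satisfy_failed_allocation(size_t size, bool is_tlab);
  bool      should_try_older_generation_allocation(size_t word_size) const;
};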

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -53,7 +53,6 @@ class ConcurrentMarkSweepPolicy;
class G1CollectorPolicy;
#endif // INCLUDE_ALL_GCS
class GCPolicyCounters;
class MarkSweepPolicy;
class CollectorPolicy : public CHeapObj<mtGC> {
@@ -72,21 +71,10 @@ class CollectorPolicy : public CHeapObj<mtGC> {
size_t _space_alignment;
size_t _heap_alignment;
// Set to true when policy wants soft refs cleared.
// Reset to false by gc after it clears all soft refs.
bool _should_clear_all_soft_refs;
// Set to true by the GC if the just-completed gc cleared all
// softrefs. This is set to true whenever a gc clears all softrefs, and
// set to false each time gc returns to the mutator. For example, in the
// ParallelScavengeHeap case the latter would be done toward the end of
// mem_allocate() where it returns op.result()
bool _all_soft_refs_clear;
CollectorPolicy();
public:
virtual void initialize_all() {
void initialize_all() {
initialize_alignments();
initialize_flags();
initialize_size_info();
@@ -101,58 +89,6 @@ class CollectorPolicy : public CHeapObj<mtGC> {
size_t initial_heap_byte_size() { return _initial_heap_byte_size; }
size_t max_heap_byte_size() { return _max_heap_byte_size; }
size_t min_heap_byte_size() { return _min_heap_byte_size; }
bool should_clear_all_soft_refs() { return _should_clear_all_soft_refs; }
void set_should_clear_all_soft_refs(bool v) { _should_clear_all_soft_refs = v; }
// Returns the current value of _should_clear_all_soft_refs.
// _should_clear_all_soft_refs is set to false as a side effect.
bool use_should_clear_all_soft_refs(bool v);
bool all_soft_refs_clear() { return _all_soft_refs_clear; }
void set_all_soft_refs_clear(bool v) { _all_soft_refs_clear = v; }
// Called by the GC after Soft Refs have been cleared to indicate
// that the request in _should_clear_all_soft_refs has been fulfilled.
virtual void cleared_all_soft_refs();
// Identification methods.
virtual GenCollectorPolicy* as_generation_policy() { return NULL; }
virtual MarkSweepPolicy* as_mark_sweep_policy() { return NULL; }
#if INCLUDE_ALL_GCS
virtual ConcurrentMarkSweepPolicy* as_concurrent_mark_sweep_policy() { return NULL; }
#endif // INCLUDE_ALL_GCS
// Note that these are not virtual.
bool is_generation_policy() { return as_generation_policy() != NULL; }
bool is_mark_sweep_policy() { return as_mark_sweep_policy() != NULL; }
#if INCLUDE_ALL_GCS
bool is_concurrent_mark_sweep_policy() { return as_concurrent_mark_sweep_policy() != NULL; }
#else // INCLUDE_ALL_GCS
bool is_concurrent_mark_sweep_policy() { return false; }
#endif // INCLUDE_ALL_GCS
virtual CardTableRS* create_rem_set(MemRegion reserved);
MetaWord* satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
size_t size,
Metaspace::MetadataType mdtype);
};
class ClearedAllSoftRefs : public StackObj {
bool _clear_all_soft_refs;
CollectorPolicy* _collector_policy;
public:
ClearedAllSoftRefs(bool clear_all_soft_refs,
CollectorPolicy* collector_policy) :
_clear_all_soft_refs(clear_all_soft_refs),
_collector_policy(collector_policy) {}
~ClearedAllSoftRefs() {
if (_clear_all_soft_refs) {
_collector_policy->cleared_all_soft_refs();
}
}
bool should_clear() { return _clear_all_soft_refs; }
};
class GenCollectorPolicy : public CollectorPolicy {
@@ -171,27 +107,12 @@ protected:
// time. When using large pages they can differ.
size_t _gen_alignment;
GenerationSpec* _young_gen_spec;
GenerationSpec* _old_gen_spec;
GCPolicyCounters* _gc_policy_counters;
// The sizing of the heap is controlled by a sizing policy.
AdaptiveSizePolicy* _size_policy;
// Return true if an allocation should be attempted in the older generation
// if it fails in the younger generation. Return false, otherwise.
virtual bool should_try_older_generation_allocation(size_t word_size) const;
void initialize_flags();
void initialize_size_info();
DEBUG_ONLY(void assert_flags();)
DEBUG_ONLY(void assert_size_info();)
// Try to allocate space by expanding the heap.
virtual HeapWord* expand_heap_and_allocate(size_t size, bool is_tlab);
// Compute max heap alignment.
size_t compute_max_alignment();
@@ -215,63 +136,17 @@ protected:
size_t initial_old_size() { return _initial_old_size; }
size_t max_old_size() { return _max_old_size; }
GenerationSpec* young_gen_spec() const {
assert(_young_gen_spec != NULL, "_young_gen_spec should have been initialized");
return _young_gen_spec;
}
GenerationSpec* old_gen_spec() const {
assert(_old_gen_spec != NULL, "_old_gen_spec should have been initialized");
return _old_gen_spec;
}
// Performance Counter support
GCPolicyCounters* counters() { return _gc_policy_counters; }
// Create the jstat counters for the GC policy.
virtual void initialize_gc_policy_counters() = 0;
virtual GenCollectorPolicy* as_generation_policy() { return this; }
virtual void initialize_generations() { };
virtual void initialize_all() {
CollectorPolicy::initialize_all();
initialize_generations();
}
size_t young_gen_size_lower_bound();
size_t old_gen_size_lower_bound();
HeapWord* mem_allocate_work(size_t size,
bool is_tlab,
bool* gc_overhead_limit_was_exceeded);
HeapWord *satisfy_failed_allocation(size_t size, bool is_tlab);
// Adaptive size policy
AdaptiveSizePolicy* size_policy() { return _size_policy; }
virtual void initialize_size_policy(size_t init_eden_size,
size_t init_promo_size,
size_t init_survivor_size);
virtual void cleared_all_soft_refs();
};
class MarkSweepPolicy : public GenCollectorPolicy {
protected:
void initialize_alignments();
void initialize_generations();
public:
MarkSweepPolicy() {}
MarkSweepPolicy* as_mark_sweep_policy() { return this; }
void initialize_gc_policy_counters();
};
#endif // SHARE_VM_GC_SHARED_COLLECTORPOLICY_HPP

View File

@@ -60,10 +60,6 @@ const char* GCCause::to_string(GCCause::Cause cause) {
case _wb_full_gc:
return "WhiteBox Initiated Full GC";
case _update_allocation_context_stats_inc:
case _update_allocation_context_stats_full:
return "Update Allocation Context Stats";
case _no_gc:
return "No GC";

View File

@@ -52,8 +52,6 @@ class GCCause : public AllStatic {
_wb_young_gc,
_wb_conc_mark,
_wb_full_gc,
_update_allocation_context_stats_inc,
_update_allocation_context_stats_full,
/* implementation independent, but reserved for GC use */
_no_gc,

Some files were not shown because too many files have changed in this diff.