Jesper Wilhelmsson 2018-02-14 13:29:45 +01:00
commit b8689741ec
140 changed files with 4309 additions and 977 deletions

View File

@ -277,6 +277,8 @@ void InterpreterMacroAssembler::load_resolved_reference_at_index(
// Add in the index
add(result, result, tmp);
load_heap_oop(result, Address(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
// The resulting oop is null if the reference is not yet resolved.
// It is Universe::the_null_sentinel() if the reference resolved to NULL via condy.
}
void InterpreterMacroAssembler::load_resolved_klass_at_offset(
@ -399,6 +401,13 @@ void InterpreterMacroAssembler::store_ptr(int n, Register val) {
str(val, Address(esp, Interpreter::expr_offset_in_bytes(n)));
}
void InterpreterMacroAssembler::load_float(Address src) {
ldrs(v0, src);
}
void InterpreterMacroAssembler::load_double(Address src) {
ldrd(v0, src);
}
void InterpreterMacroAssembler::prepare_to_jump_from_interpreted() {
// set sender sp

View File

@ -158,6 +158,10 @@ class InterpreterMacroAssembler: public MacroAssembler {
void load_ptr(int n, Register val);
void store_ptr(int n, Register val);
// Load float/double value from 'address'. The value is loaded into FPU register v0.
void load_float(Address src);
void load_double(Address src);
// Generate a subtype check: branch to ok_is_subtype if sub_klass is
// a subtype of super_klass.
void gen_subtype_check( Register sub_klass, Label &ok_is_subtype );

View File

@ -2056,9 +2056,14 @@ void MacroAssembler::stop(const char* msg) {
}
void MacroAssembler::unimplemented(const char* what) {
char* b = new char[1024];
jio_snprintf(b, 1024, "unimplemented: %s", what);
stop(b);
const char* buf = NULL;
{
ResourceMark rm;
stringStream ss;
ss.print("unimplemented: %s", what);
buf = code_string(ss.as_string());
}
stop(buf);
}
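The rewritten unimplemented() avoids leaking the 1024-byte buffer the old code allocated with new on every call: the message is built in a resource-allocated stringStream and copied out via code_string() before the ResourceMark releases it, so that stop() sees text that outlives the resource scope. A minimal standalone sketch of the same pattern in plain C++; copy_message() is a hypothetical stand-in for code_string():
#include <cstdio>
#include <cstring>

// Hypothetical stand-in for code_string(): copy a scratch message into storage
// that outlives the scope in which it was formatted.
static const char* copy_message(const char* tmp) {
  char* p = new char[strlen(tmp) + 1];
  strcpy(p, tmp);
  return p;
}

static const char* format_unimplemented(const char* what) {
  char tmp[1024];                                        // scratch buffer, gone when the scope ends
  snprintf(tmp, sizeof(tmp), "unimplemented: %s", what);
  return copy_message(tmp);                              // the copy stays valid for the caller
}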
// If a constant does not fit in an immediate field, generate some

View File

@ -370,7 +370,7 @@ void TemplateTable::sipush()
void TemplateTable::ldc(bool wide)
{
transition(vtos, vtos);
Label call_ldc, notFloat, notClass, Done;
Label call_ldc, notFloat, notClass, notInt, Done;
if (wide) {
__ get_unsigned_2_byte_index_at_bcp(r1, 1);
@ -417,20 +417,19 @@ void TemplateTable::ldc(bool wide)
__ b(Done);
__ bind(notFloat);
#ifdef ASSERT
{
Label L;
__ cmp(r3, JVM_CONSTANT_Integer);
__ br(Assembler::EQ, L);
// String and Object are rewritten to fast_aldc
__ stop("unexpected tag type in ldc");
__ bind(L);
}
#endif
// itos JVM_CONSTANT_Integer only
__ cmp(r3, JVM_CONSTANT_Integer);
__ br(Assembler::NE, notInt);
// itos
__ adds(r1, r2, r1, Assembler::LSL, 3);
__ ldrw(r0, Address(r1, base_offset));
__ push_i(r0);
__ b(Done);
__ bind(notInt);
condy_helper(Done);
__ bind(Done);
}
@ -441,6 +440,8 @@ void TemplateTable::fast_aldc(bool wide)
Register result = r0;
Register tmp = r1;
Register rarg = r2;
int index_size = wide ? sizeof(u2) : sizeof(u1);
Label resolved;
@ -455,12 +456,27 @@ void TemplateTable::fast_aldc(bool wide)
address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
// first time invocation - must resolve first
__ mov(tmp, (int)bytecode());
__ call_VM(result, entry, tmp);
__ mov(rarg, (int)bytecode());
__ call_VM(result, entry, rarg);
__ bind(resolved);
{ // Check for the null sentinel.
// If we just called the VM, that already did the mapping for us,
// but it's harmless to retry.
Label notNull;
// Stash null_sentinel address to get its value later
__ movptr(rarg, (uintptr_t)Universe::the_null_sentinel_addr());
__ ldr(tmp, Address(rarg));
__ cmp(result, tmp);
__ br(Assembler::NE, notNull);
__ mov(result, 0); // NULL object reference
__ bind(notNull);
}
if (VerifyOops) {
// Safe to call with 0 result
__ verify_oop(result);
}
}
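The resolved-references array now carries three states for an entry: NULL means not yet resolved, Universe::the_null_sentinel() means a condy constant that resolved to NULL, and anything else is the resolved oop. The code above maps the sentinel back to a real NULL before pushing the result. A rough C++ rendering of that logic, assuming HotSpot-internal declarations; resolve_ldc_in_vm() is a hypothetical stand-in for the call_VM into InterpreterRuntime::resolve_ldc:
// Illustrative sketch only, not the generated assembly.
oop fast_aldc_value(objArrayOop resolved_references, int index) {
  oop result = resolved_references->obj_at(index);
  if (result == NULL) {
    result = resolve_ldc_in_vm(index);          // first execution: ask the VM to resolve
  }
  if (result == Universe::the_null_sentinel()) {
    result = NULL;                              // condy constant whose resolved value is null
  }
  return result;
}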
@ -468,7 +484,7 @@ void TemplateTable::fast_aldc(bool wide)
void TemplateTable::ldc2_w()
{
transition(vtos, vtos);
Label Long, Done;
Label notDouble, notLong, Done;
__ get_unsigned_2_byte_index_at_bcp(r0, 1);
__ get_cpool_and_tags(r1, r2);
@ -479,22 +495,143 @@ void TemplateTable::ldc2_w()
__ lea(r2, Address(r2, r0, Address::lsl(0)));
__ load_unsigned_byte(r2, Address(r2, tags_offset));
__ cmpw(r2, (int)JVM_CONSTANT_Double);
__ br(Assembler::NE, Long);
__ br(Assembler::NE, notDouble);
// dtos
__ lea (r2, Address(r1, r0, Address::lsl(3)));
__ ldrd(v0, Address(r2, base_offset));
__ push_d();
__ b(Done);
__ bind(Long);
__ bind(notDouble);
__ cmpw(r2, (int)JVM_CONSTANT_Long);
__ br(Assembler::NE, notLong);
// ltos
__ lea(r0, Address(r1, r0, Address::lsl(3)));
__ ldr(r0, Address(r0, base_offset));
__ push_l();
__ b(Done);
__ bind(notLong);
condy_helper(Done);
__ bind(Done);
}
void TemplateTable::condy_helper(Label& Done)
{
Register obj = r0;
Register rarg = r1;
Register flags = r2;
Register off = r3;
address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
__ mov(rarg, (int) bytecode());
__ call_VM(obj, entry, rarg);
__ get_vm_result_2(flags, rthread);
// VMr = obj = base address to find primitive value to push
// VMr2 = flags = (tos, off) using format of CPCE::_flags
__ mov(off, flags);
__ andw(off, off, ConstantPoolCacheEntry::field_index_mask);
const Address field(obj, off);
// What sort of thing are we loading?
// x86 uses a shift and mask, or wings it with a shift plus an assert that
// the mask is not needed. AArch64 just uses a bitfield extract.
__ ubfxw(flags, flags, ConstantPoolCacheEntry::tos_state_shift,
ConstantPoolCacheEntry::tos_state_bits);
switch (bytecode()) {
case Bytecodes::_ldc:
case Bytecodes::_ldc_w:
{
// tos in (itos, ftos, stos, btos, ctos, ztos)
Label notInt, notFloat, notShort, notByte, notChar, notBool;
__ cmpw(flags, itos);
__ br(Assembler::NE, notInt);
// itos
__ ldrw(r0, field);
__ push(itos);
__ b(Done);
__ bind(notInt);
__ cmpw(flags, ftos);
__ br(Assembler::NE, notFloat);
// ftos
__ load_float(field);
__ push(ftos);
__ b(Done);
__ bind(notFloat);
__ cmpw(flags, stos);
__ br(Assembler::NE, notShort);
// stos
__ load_signed_short(r0, field);
__ push(stos);
__ b(Done);
__ bind(notShort);
__ cmpw(flags, btos);
__ br(Assembler::NE, notByte);
// btos
__ load_signed_byte(r0, field);
__ push(btos);
__ b(Done);
__ bind(notByte);
__ cmpw(flags, ctos);
__ br(Assembler::NE, notChar);
// ctos
__ load_unsigned_short(r0, field);
__ push(ctos);
__ b(Done);
__ bind(notChar);
__ cmpw(flags, ztos);
__ br(Assembler::NE, notBool);
// ztos
__ load_signed_byte(r0, field);
__ push(ztos);
__ b(Done);
__ bind(notBool);
break;
}
case Bytecodes::_ldc2_w:
{
Label notLong, notDouble;
__ cmpw(flags, ltos);
__ br(Assembler::NE, notLong);
// ltos
__ ldr(r0, field);
__ push(ltos);
__ b(Done);
__ bind(notLong);
__ cmpw(flags, dtos);
__ br(Assembler::NE, notDouble);
// dtos
__ load_double(field);
__ push(dtos);
__ b(Done);
__ bind(notDouble);
break;
}
default:
ShouldNotReachHere();
}
__ stop("bad ldc/condy");
}
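condy_helper relies on InterpreterRuntime::resolve_ldc returning the base address of the resolved value and, as the second VM result, a flags word in the CPCE::_flags format: the field offset in the low field_index_mask bits and the tos state in the tos_state bits above tos_state_shift. The andw/ubfxw pair above performs this decode; a sketch of it in C++, assuming the ConstantPoolCacheEntry constants referenced in the code:
// Illustrative decode of the condy flags word (not VM code).
struct CondyFlags {
  unsigned offset;     // byte offset of the value from the returned base address
  unsigned tos_state;  // one of itos, ftos, stos, btos, ctos, ztos, ltos, dtos
};

static CondyFlags decode_condy_flags(uintptr_t flags) {
  CondyFlags f;
  f.offset    = (unsigned)(flags & ConstantPoolCacheEntry::field_index_mask);
  f.tos_state = (unsigned)((flags >> ConstantPoolCacheEntry::tos_state_shift) &
                           ((1u << ConstantPoolCacheEntry::tos_state_bits) - 1));
  return f;
}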
void TemplateTable::locals_index(Register reg, int offset)
{
__ ldrb(reg, at_bcp(offset));

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2013 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -32,11 +32,11 @@
// Inline functions for memory copy and fill.
static void pd_conjoint_words(HeapWord* from, HeapWord* to, size_t count) {
static void pd_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) {
(void)memmove(to, from, count * HeapWordSize);
}
static void pd_disjoint_words(HeapWord* from, HeapWord* to, size_t count) {
static void pd_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) {
switch (count) {
case 8: to[7] = from[7];
case 7: to[6] = from[6];
@ -52,7 +52,7 @@ static void pd_disjoint_words(HeapWord* from, HeapWord* to, size_t count) {
}
}
static void pd_disjoint_words_atomic(HeapWord* from, HeapWord* to, size_t count) {
static void pd_disjoint_words_atomic(const HeapWord* from, HeapWord* to, size_t count) {
switch (count) {
case 8: to[7] = from[7];
case 7: to[6] = from[6];
@ -70,25 +70,25 @@ static void pd_disjoint_words_atomic(HeapWord* from, HeapWord* to, size_t count)
}
}
static void pd_aligned_conjoint_words(HeapWord* from, HeapWord* to, size_t count) {
static void pd_aligned_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) {
(void)memmove(to, from, count * HeapWordSize);
}
static void pd_aligned_disjoint_words(HeapWord* from, HeapWord* to, size_t count) {
static void pd_aligned_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) {
pd_disjoint_words(from, to, count);
}
static void pd_conjoint_bytes(void* from, void* to, size_t count) {
static void pd_conjoint_bytes(const void* from, void* to, size_t count) {
(void)memmove(to, from, count);
}
static void pd_conjoint_bytes_atomic(void* from, void* to, size_t count) {
static void pd_conjoint_bytes_atomic(const void* from, void* to, size_t count) {
(void)memmove(to, from, count);
}
// Template for atomic, element-wise copy.
template <class T>
static void copy_conjoint_atomic(T* from, T* to, size_t count) {
static void copy_conjoint_atomic(const T* from, T* to, size_t count) {
if (from > to) {
while (count-- > 0) {
// Copy forwards
@ -104,44 +104,44 @@ static void copy_conjoint_atomic(T* from, T* to, size_t count) {
}
}
static void pd_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) {
static void pd_conjoint_jshorts_atomic(const jshort* from, jshort* to, size_t count) {
// TODO: contribute optimized version.
copy_conjoint_atomic<jshort>(from, to, count);
}
static void pd_conjoint_jints_atomic(jint* from, jint* to, size_t count) {
static void pd_conjoint_jints_atomic(const jint* from, jint* to, size_t count) {
// TODO: contribute optimized version.
copy_conjoint_atomic<jint>(from, to, count);
}
static void pd_conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count) {
static void pd_conjoint_jlongs_atomic(const jlong* from, jlong* to, size_t count) {
copy_conjoint_atomic<jlong>(from, to, count);
}
static void pd_conjoint_oops_atomic(oop* from, oop* to, size_t count) {
static void pd_conjoint_oops_atomic(const oop* from, oop* to, size_t count) {
copy_conjoint_atomic<oop>(from, to, count);
}
static void pd_arrayof_conjoint_bytes(HeapWord* from, HeapWord* to, size_t count) {
static void pd_arrayof_conjoint_bytes(const HeapWord* from, HeapWord* to, size_t count) {
pd_conjoint_bytes_atomic(from, to, count);
}
static void pd_arrayof_conjoint_jshorts(HeapWord* from, HeapWord* to, size_t count) {
static void pd_arrayof_conjoint_jshorts(const HeapWord* from, HeapWord* to, size_t count) {
// TODO: contribute optimized version.
pd_conjoint_jshorts_atomic((jshort*)from, (jshort*)to, count);
pd_conjoint_jshorts_atomic((const jshort*)from, (jshort*)to, count);
}
static void pd_arrayof_conjoint_jints(HeapWord* from, HeapWord* to, size_t count) {
static void pd_arrayof_conjoint_jints(const HeapWord* from, HeapWord* to, size_t count) {
// TODO: contribute optimized version.
pd_conjoint_jints_atomic((jint*)from, (jint*)to, count);
pd_conjoint_jints_atomic((const jint*)from, (jint*)to, count);
}
static void pd_arrayof_conjoint_jlongs(HeapWord* from, HeapWord* to, size_t count) {
pd_conjoint_jlongs_atomic((jlong*)from, (jlong*)to, count);
static void pd_arrayof_conjoint_jlongs(const HeapWord* from, HeapWord* to, size_t count) {
pd_conjoint_jlongs_atomic((const jlong*)from, (jlong*)to, count);
}
static void pd_arrayof_conjoint_oops(HeapWord* from, HeapWord* to, size_t count) {
pd_conjoint_oops_atomic((oop*)from, (oop*)to, count);
static void pd_arrayof_conjoint_oops(const HeapWord* from, HeapWord* to, size_t count) {
pd_conjoint_oops_atomic((const oop*)from, (oop*)to, count);
}
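These copy routines only read from the source, so the commit makes the from parameters const throughout and, where the element type is reinterpreted, casts to a pointer-to-const as well instead of silently dropping the qualifier. A self-contained illustration of the pattern with simplified types (jshort here stands in for the HotSpot typedef, void for HeapWord):
#include <cstddef>
#include <cstring>

typedef short jshort;  // simplified stand-in for the HotSpot typedef

// The source is read-only, so 'from' is const, and any element-typed view of
// it must be const as well.
static void conjoint_jshorts(const jshort* from, jshort* to, size_t count) {
  memmove(to, from, count * sizeof(jshort));
}

static void arrayof_conjoint_jshorts(const void* from, void* to, size_t count) {
  // static_cast<jshort*>(from) would not compile here: it would drop const.
  conjoint_jshorts(static_cast<const jshort*>(from),
                   static_cast<jshort*>(to), count);
}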
static void pd_fill_to_words(HeapWord* tohw, size_t count, juint value) {

View File

@ -492,6 +492,8 @@ void InterpreterMacroAssembler::load_resolved_reference_at_index(Register result
// Add in the index.
add(result, tmp, result);
load_heap_oop(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT), result, is_null);
// The resulting oop is null if the reference is not yet resolved.
// It is Universe::the_null_sentinel() if the reference resolved to NULL via condy.
}
// load cpool->resolved_klass_at(index)

View File

@ -314,7 +314,7 @@ void TemplateTable::ldc(bool wide) {
Rcpool = R3_ARG1;
transition(vtos, vtos);
Label notInt, notClass, exit;
Label notInt, notFloat, notClass, exit;
__ get_cpool_and_tags(Rcpool, Rscratch2); // Set Rscratch2 = &tags.
if (wide) { // Read index.
@ -356,13 +356,16 @@ void TemplateTable::ldc(bool wide) {
__ align(32, 12);
__ bind(notInt);
#ifdef ASSERT
// String and Object are rewritten to fast_aldc
__ cmpdi(CCR0, Rscratch2, JVM_CONSTANT_Float);
__ asm_assert_eq("unexpected type", 0x8765);
#endif
__ bne(CCR0, notFloat);
__ lfsx(F15_ftos, Rcpool, Rscratch1);
__ push(ftos);
__ b(exit);
__ align(32, 12);
// assume the tag is for condy; if not, the VM runtime will tell us
__ bind(notFloat);
condy_helper(exit);
__ align(32, 12);
__ bind(exit);
@ -380,6 +383,19 @@ void TemplateTable::fast_aldc(bool wide) {
// non-null object (CallSite, etc.)
__ get_cache_index_at_bcp(Rscratch, 1, index_size); // Load index.
__ load_resolved_reference_at_index(R17_tos, Rscratch, &is_null);
// Convert null sentinel to NULL.
int simm16_rest = __ load_const_optimized(Rscratch, Universe::the_null_sentinel_addr(), R0, true);
__ ld(Rscratch, simm16_rest, Rscratch);
__ cmpld(CCR0, R17_tos, Rscratch);
if (VM_Version::has_isel()) {
__ isel_0(R17_tos, CCR0, Assembler::equal);
} else {
Label not_sentinel;
__ bne(CCR0, not_sentinel);
__ li(R17_tos, 0);
__ bind(not_sentinel);
}
__ verify_oop(R17_tos);
__ dispatch_epilog(atos, Bytecodes::length_for(bytecode()));
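On PPC the sentinel-to-NULL mapping is done branchlessly when the processor supports isel: isel_0 effectively writes zero into R17_tos when the preceding compare set the equal bit and leaves it unchanged otherwise, while older processors fall back to the compare-and-branch sequence. In C terms the select is simply the following (a sketch; whether it becomes a conditional-select instruction is up to the compiler or hand-written assembly):
// Branchless view of the sentinel check (illustrative only).
static inline void* map_null_sentinel(void* value, void* null_sentinel) {
  return (value == null_sentinel) ? NULL : value;
}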
@ -395,7 +411,7 @@ void TemplateTable::fast_aldc(bool wide) {
void TemplateTable::ldc2_w() {
transition(vtos, vtos);
Label Llong, Lexit;
Label not_double, not_long, exit;
Register Rindex = R11_scratch1,
Rcpool = R12_scratch2,
@ -410,23 +426,129 @@ void TemplateTable::ldc2_w() {
__ addi(Rtag, Rtag, tags_offset);
__ lbzx(Rtag, Rtag, Rindex);
__ sldi(Rindex, Rindex, LogBytesPerWord);
__ cmpdi(CCR0, Rtag, JVM_CONSTANT_Double);
__ bne(CCR0, Llong);
// A double can be placed at word-aligned locations in the constant pool.
// Check out Conversions.java for an example.
// Also ConstantPool::header_size() is 20, which makes it very difficult
// to double-align a double in the constant pool. SG, 11/7/97
__ bne(CCR0, not_double);
__ lfdx(F15_ftos, Rcpool, Rindex);
__ push(dtos);
__ b(Lexit);
__ b(exit);
__ bind(Llong);
__ bind(not_double);
__ cmpdi(CCR0, Rtag, JVM_CONSTANT_Long);
__ bne(CCR0, not_long);
__ ldx(R17_tos, Rcpool, Rindex);
__ push(ltos);
__ b(exit);
__ bind(Lexit);
__ bind(not_long);
condy_helper(exit);
__ align(32, 12);
__ bind(exit);
}
void TemplateTable::condy_helper(Label& Done) {
const Register obj = R31;
const Register off = R11_scratch1;
const Register flags = R12_scratch2;
const Register rarg = R4_ARG2;
__ li(rarg, (int)bytecode());
call_VM(obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc), rarg);
__ get_vm_result_2(flags);
// VMr = obj = base address to find primitive value to push
// VMr2 = flags = (tos, off) using format of CPCE::_flags
__ andi(off, flags, ConstantPoolCacheEntry::field_index_mask);
// What sort of thing are we loading?
__ rldicl(flags, flags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
switch (bytecode()) {
case Bytecodes::_ldc:
case Bytecodes::_ldc_w:
{
// tos in (itos, ftos, stos, btos, ctos, ztos)
Label notInt, notFloat, notShort, notByte, notChar, notBool;
__ cmplwi(CCR0, flags, itos);
__ bne(CCR0, notInt);
// itos
__ lwax(R17_tos, obj, off);
__ push(itos);
__ b(Done);
__ bind(notInt);
__ cmplwi(CCR0, flags, ftos);
__ bne(CCR0, notFloat);
// ftos
__ lfsx(F15_ftos, obj, off);
__ push(ftos);
__ b(Done);
__ bind(notFloat);
__ cmplwi(CCR0, flags, stos);
__ bne(CCR0, notShort);
// stos
__ lhax(R17_tos, obj, off);
__ push(stos);
__ b(Done);
__ bind(notShort);
__ cmplwi(CCR0, flags, btos);
__ bne(CCR0, notByte);
// btos
__ lbzx(R17_tos, obj, off);
__ extsb(R17_tos, R17_tos);
__ push(btos);
__ b(Done);
__ bind(notByte);
__ cmplwi(CCR0, flags, ctos);
__ bne(CCR0, notChar);
// ctos
__ lhzx(R17_tos, obj, off);
__ push(ctos);
__ b(Done);
__ bind(notChar);
__ cmplwi(CCR0, flags, ztos);
__ bne(CCR0, notBool);
// ztos
__ lbzx(R17_tos, obj, off);
__ push(ztos);
__ b(Done);
__ bind(notBool);
break;
}
case Bytecodes::_ldc2_w:
{
Label notLong, notDouble;
__ cmplwi(CCR0, flags, ltos);
__ bne(CCR0, notLong);
// ltos
__ ldx(R17_tos, obj, off);
__ push(ltos);
__ b(Done);
__ bind(notLong);
__ cmplwi(CCR0, flags, dtos);
__ bne(CCR0, notDouble);
// dtos
__ lfdx(F15_ftos, obj, off);
__ push(dtos);
__ b(Done);
__ bind(notDouble);
break;
}
default:
ShouldNotReachHere();
}
__ stop("bad ldc/condy");
}
// Get the locals index located in the bytecode stream at bcp + offset.

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -73,7 +73,7 @@
#undef USE_INLINE_ASM
static void copy_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) {
static void copy_conjoint_jshorts_atomic(const jshort* from, jshort* to, size_t count) {
if (from > to) {
while (count-- > 0) {
// Copy forwards
@ -89,7 +89,7 @@ static void copy_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count)
}
}
static void copy_conjoint_jints_atomic(jint* from, jint* to, size_t count) {
static void copy_conjoint_jints_atomic(const jint* from, jint* to, size_t count) {
if (from > to) {
while (count-- > 0) {
// Copy forwards
@ -105,7 +105,7 @@ static void copy_conjoint_jints_atomic(jint* from, jint* to, size_t count) {
}
}
static bool has_destructive_overlap(char* from, char* to, size_t byte_count) {
static bool has_destructive_overlap(const char* from, char* to, size_t byte_count) {
return (from < to) && ((to-from) < (ptrdiff_t)byte_count);
}
@ -662,7 +662,7 @@ static bool has_destructive_overlap(char* from, char* to, size_t byte_count) {
// D I S J O I N T C O P Y I N G //
//*************************************//
static void pd_aligned_disjoint_words(HeapWord* from, HeapWord* to, size_t count) {
static void pd_aligned_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) {
// JVM2008: very frequent, some tests frequent.
// Copy HeapWord (=DW) aligned storage. Use MVCLE in inline-asm code.
@ -740,13 +740,13 @@ static void pd_aligned_disjoint_words(HeapWord* from, HeapWord* to, size_t count
#endif
}
static void pd_disjoint_words_atomic(HeapWord* from, HeapWord* to, size_t count) {
static void pd_disjoint_words_atomic(const HeapWord* from, HeapWord* to, size_t count) {
// JVM2008: < 4k calls.
assert(((((size_t)from) & 0x07L) | (((size_t)to) & 0x07L)) == 0, "No atomic copy w/o aligned data");
pd_aligned_disjoint_words(from, to, count); // Rare calls -> just delegate.
}
static void pd_disjoint_words(HeapWord* from, HeapWord* to, size_t count) {
static void pd_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) {
// JVM2008: very rare.
pd_aligned_disjoint_words(from, to, count); // Rare calls -> just delegate.
}
@ -756,7 +756,7 @@ static void pd_disjoint_words(HeapWord* from, HeapWord* to, size_t count) {
// C O N J O I N T C O P Y I N G //
//*************************************//
static void pd_aligned_conjoint_words(HeapWord* from, HeapWord* to, size_t count) {
static void pd_aligned_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) {
// JVM2008: between some and lower end of frequent.
#ifdef USE_INLINE_ASM
@ -836,13 +836,13 @@ static void pd_aligned_conjoint_words(HeapWord* from, HeapWord* to, size_t count
#endif
}
static void pd_conjoint_words(HeapWord* from, HeapWord* to, size_t count) {
static void pd_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) {
// Just delegate. HeapWords are optimally aligned anyway.
pd_aligned_conjoint_words(from, to, count);
}
static void pd_conjoint_bytes(void* from, void* to, size_t count) {
static void pd_conjoint_bytes(const void* from, void* to, size_t count) {
#ifdef USE_INLINE_ASM
size_t count_in = count;
@ -866,16 +866,16 @@ static void pd_conjoint_bytes(void* from, void* to, size_t count) {
// C O N J O I N T A T O M I C C O P Y I N G //
//**************************************************//
static void pd_conjoint_bytes_atomic(void* from, void* to, size_t count) {
static void pd_conjoint_bytes_atomic(const void* from, void* to, size_t count) {
// Call arraycopy stubs to do the job.
pd_conjoint_bytes(from, to, count); // bytes are always accessed atomically.
}
static void pd_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) {
static void pd_conjoint_jshorts_atomic(const jshort* from, jshort* to, size_t count) {
#ifdef USE_INLINE_ASM
size_t count_in = count;
if (has_destructive_overlap((char*)from, (char*)to, count_in*BytesPerShort)) {
if (has_destructive_overlap((const char*)from, (char*)to, count_in*BytesPerShort)) {
// Use optimizations from shared code where no z-specific optimization exists.
copy_conjoint_jshorts_atomic(from, to, count);
} else {
@ -890,11 +890,11 @@ static void pd_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) {
#endif
}
static void pd_conjoint_jints_atomic(jint* from, jint* to, size_t count) {
static void pd_conjoint_jints_atomic(const jint* from, jint* to, size_t count) {
#ifdef USE_INLINE_ASM
size_t count_in = count;
if (has_destructive_overlap((char*)from, (char*)to, count_in*BytesPerInt)) {
if (has_destructive_overlap((const char*)from, (char*)to, count_in*BytesPerInt)) {
switch (count_in) {
case 4: COPY4_ATOMIC_4(to,from)
return;
@ -922,7 +922,7 @@ static void pd_conjoint_jints_atomic(jint* from, jint* to, size_t count) {
#endif
}
static void pd_conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count) {
static void pd_conjoint_jlongs_atomic(const jlong* from, jlong* to, size_t count) {
#ifdef USE_INLINE_ASM
size_t count_in = count;
@ -970,11 +970,11 @@ static void pd_conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count) {
}
}
else
pd_aligned_disjoint_words((HeapWord*)from, (HeapWord*)to, count_in); // rare calls -> just delegate.
pd_aligned_disjoint_words((const HeapWord*)from, (HeapWord*)to, count_in); // rare calls -> just delegate.
#endif
}
static void pd_conjoint_oops_atomic(oop* from, oop* to, size_t count) {
static void pd_conjoint_oops_atomic(const oop* from, oop* to, size_t count) {
#ifdef USE_INLINE_ASM
size_t count_in = count;
@ -1011,24 +1011,24 @@ static void pd_conjoint_oops_atomic(oop* from, oop* to, size_t count) {
#endif
}
static void pd_arrayof_conjoint_bytes(HeapWord* from, HeapWord* to, size_t count) {
static void pd_arrayof_conjoint_bytes(const HeapWord* from, HeapWord* to, size_t count) {
pd_conjoint_bytes_atomic(from, to, count);
}
static void pd_arrayof_conjoint_jshorts(HeapWord* from, HeapWord* to, size_t count) {
pd_conjoint_jshorts_atomic((jshort*)from, (jshort*)to, count);
static void pd_arrayof_conjoint_jshorts(const HeapWord* from, HeapWord* to, size_t count) {
pd_conjoint_jshorts_atomic((const jshort*)from, (jshort*)to, count);
}
static void pd_arrayof_conjoint_jints(HeapWord* from, HeapWord* to, size_t count) {
pd_conjoint_jints_atomic((jint*)from, (jint*)to, count);
static void pd_arrayof_conjoint_jints(const HeapWord* from, HeapWord* to, size_t count) {
pd_conjoint_jints_atomic((const jint*)from, (jint*)to, count);
}
static void pd_arrayof_conjoint_jlongs(HeapWord* from, HeapWord* to, size_t count) {
pd_conjoint_jlongs_atomic((jlong*)from, (jlong*)to, count);
static void pd_arrayof_conjoint_jlongs(const HeapWord* from, HeapWord* to, size_t count) {
pd_conjoint_jlongs_atomic((const jlong*)from, (jlong*)to, count);
}
static void pd_arrayof_conjoint_oops(HeapWord* from, HeapWord* to, size_t count) {
pd_conjoint_oops_atomic((oop*)from, (oop*)to, count);
static void pd_arrayof_conjoint_oops(const HeapWord* from, HeapWord* to, size_t count) {
pd_conjoint_oops_atomic((const oop*)from, (oop*)to, count);
}
//**********************************************//

View File

@ -389,6 +389,8 @@ void InterpreterMacroAssembler::load_resolved_reference_at_index(Register result
#endif
z_agr(result, index); // Address of indexed array element.
load_heap_oop(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT), result);
// The resulting oop is null if the reference is not yet resolved.
// It is Universe::the_null_sentinel() if the reference resolved to NULL via condy.
}
// load cpool->resolved_klass_at(index)

View File

@ -450,7 +450,7 @@ void TemplateTable::sipush() {
void TemplateTable::ldc(bool wide) {
transition(vtos, vtos);
Label call_ldc, notFloat, notClass, Done;
Label call_ldc, notFloat, notClass, notInt, Done;
const Register RcpIndex = Z_tmp_1;
const Register Rtags = Z_ARG2;
@ -500,22 +500,17 @@ void TemplateTable::ldc(bool wide) {
__ z_bru(Done);
__ bind(notFloat);
#ifdef ASSERT
{
Label L;
__ z_cli(0, Raddr_type, JVM_CONSTANT_Integer);
__ z_bre(L);
// String and Object are rewritten to fast_aldc.
__ stop("unexpected tag type in ldc");
__ bind(L);
}
#endif
__ z_cli(0, Raddr_type, JVM_CONSTANT_Integer);
__ z_brne(notInt);
// itos
__ mem2reg_opt(Z_tos, Address(Z_tmp_2, RcpOffset, base_offset), false);
__ push_i(Z_tos);
__ z_bru(Done);
// assume the tag is for condy; if not, the VM runtime will tell us
__ bind(notInt);
condy_helper(Done);
__ bind(Done);
}
@ -528,15 +523,23 @@ void TemplateTable::fast_aldc(bool wide) {
const Register index = Z_tmp_2;
int index_size = wide ? sizeof(u2) : sizeof(u1);
Label L_resolved;
Label L_do_resolve, L_resolved;
// We are resolved if the resolved reference cache entry contains a
// non-null object (CallSite, etc.).
__ get_cache_index_at_bcp(index, 1, index_size); // Load index.
__ load_resolved_reference_at_index(Z_tos, index);
__ z_ltgr(Z_tos, Z_tos);
__ z_brne(L_resolved);
__ z_bre(L_do_resolve);
// Convert null sentinel to NULL.
__ load_const_optimized(Z_R1_scratch, (intptr_t)Universe::the_null_sentinel_addr());
__ z_cg(Z_tos, Address(Z_R1_scratch));
__ z_brne(L_resolved);
__ clear_reg(Z_tos);
__ z_bru(L_resolved);
__ bind(L_do_resolve);
// First time invocation - must resolve first.
address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
__ load_const_optimized(Z_ARG1, (int)bytecode());
@ -548,7 +551,7 @@ void TemplateTable::fast_aldc(bool wide) {
void TemplateTable::ldc2_w() {
transition(vtos, vtos);
Label Long, Done;
Label notDouble, notLong, Done;
// Z_tmp_1 = index of cp entry
__ get_2_byte_integer_at_bcp(Z_tmp_1, 1, InterpreterMacroAssembler::Unsigned);
@ -566,21 +569,132 @@ void TemplateTable::ldc2_w() {
// Check type.
__ z_cli(0, Z_tos, JVM_CONSTANT_Double);
__ z_brne(Long);
__ z_brne(notDouble);
// dtos
__ mem2freg_opt(Z_ftos, Address(Z_tmp_2, Z_tmp_1, base_offset));
__ push_d();
__ z_bru(Done);
__ bind(Long);
__ bind(notDouble);
__ z_cli(0, Z_tos, JVM_CONSTANT_Long);
__ z_brne(notLong);
// ltos
__ mem2reg_opt(Z_tos, Address(Z_tmp_2, Z_tmp_1, base_offset));
__ push_l();
__ z_bru(Done);
__ bind(notLong);
condy_helper(Done);
__ bind(Done);
}
void TemplateTable::condy_helper(Label& Done) {
const Register obj = Z_tmp_1;
const Register off = Z_tmp_2;
const Register flags = Z_ARG1;
const Register rarg = Z_ARG2;
__ load_const_optimized(rarg, (int)bytecode());
call_VM(obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc), rarg);
__ get_vm_result_2(flags);
// VMr = obj = base address to find primitive value to push
// VMr2 = flags = (tos, off) using format of CPCE::_flags
assert(ConstantPoolCacheEntry::field_index_mask == 0xffff, "or use other instructions");
__ z_llghr(off, flags);
const Address field(obj, off);
// What sort of thing are we loading?
__ z_srl(flags, ConstantPoolCacheEntry::tos_state_shift);
// Make sure we don't need to mask flags for tos_state after the above shift.
ConstantPoolCacheEntry::verify_tos_state_shift();
switch (bytecode()) {
case Bytecodes::_ldc:
case Bytecodes::_ldc_w:
{
// tos in (itos, ftos, stos, btos, ctos, ztos)
Label notInt, notFloat, notShort, notByte, notChar, notBool;
__ z_cghi(flags, itos);
__ z_brne(notInt);
// itos
__ z_l(Z_tos, field);
__ push(itos);
__ z_bru(Done);
__ bind(notInt);
__ z_cghi(flags, ftos);
__ z_brne(notFloat);
// ftos
__ z_le(Z_ftos, field);
__ push(ftos);
__ z_bru(Done);
__ bind(notFloat);
__ z_cghi(flags, stos);
__ z_brne(notShort);
// stos
__ z_lh(Z_tos, field);
__ push(stos);
__ z_bru(Done);
__ bind(notShort);
__ z_cghi(flags, btos);
__ z_brne(notByte);
// btos
__ z_lb(Z_tos, field);
__ push(btos);
__ z_bru(Done);
__ bind(notByte);
__ z_cghi(flags, ctos);
__ z_brne(notChar);
// ctos
__ z_llh(Z_tos, field);
__ push(ctos);
__ z_bru(Done);
__ bind(notChar);
__ z_cghi(flags, ztos);
__ z_brne(notBool);
// ztos
__ z_lb(Z_tos, field);
__ push(ztos);
__ z_bru(Done);
__ bind(notBool);
break;
}
case Bytecodes::_ldc2_w:
{
Label notLong, notDouble;
__ z_cghi(flags, ltos);
__ z_brne(notLong);
// ltos
__ z_lg(Z_tos, field);
__ push(ltos);
__ z_bru(Done);
__ bind(notLong);
__ z_cghi(flags, dtos);
__ z_brne(notDouble);
// dtos
__ z_ld(Z_ftos, field);
__ push(dtos);
__ z_bru(Done);
__ bind(notDouble);
break;
}
default:
ShouldNotReachHere();
}
__ stop("bad ldc/condy");
}
void TemplateTable::locals_index(Register reg, int offset) {
__ z_llgc(reg, at_bcp(offset));
__ z_lcgr(reg);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,11 +27,11 @@
// Inline functions for memory copy and fill.
static void pd_conjoint_words(HeapWord* from, HeapWord* to, size_t count) {
static void pd_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) {
(void)memmove(to, from, count * HeapWordSize);
}
static void pd_disjoint_words(HeapWord* from, HeapWord* to, size_t count) {
static void pd_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) {
switch (count) {
case 8: to[7] = from[7];
case 7: to[6] = from[6];
@ -47,7 +47,7 @@ static void pd_disjoint_words(HeapWord* from, HeapWord* to, size_t count) {
}
}
static void pd_disjoint_words_atomic(HeapWord* from, HeapWord* to, size_t count) {
static void pd_disjoint_words_atomic(const HeapWord* from, HeapWord* to, size_t count) {
switch (count) {
case 8: to[7] = from[7];
case 7: to[6] = from[6];
@ -65,23 +65,23 @@ static void pd_disjoint_words_atomic(HeapWord* from, HeapWord* to, size_t count)
}
}
static void pd_aligned_conjoint_words(HeapWord* from, HeapWord* to, size_t count) {
static void pd_aligned_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) {
(void)memmove(to, from, count * HeapWordSize);
}
static void pd_aligned_disjoint_words(HeapWord* from, HeapWord* to, size_t count) {
static void pd_aligned_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) {
pd_disjoint_words(from, to, count);
}
static void pd_conjoint_bytes(void* from, void* to, size_t count) {
static void pd_conjoint_bytes(const void* from, void* to, size_t count) {
(void)memmove(to, from, count);
}
static void pd_conjoint_bytes_atomic(void* from, void* to, size_t count) {
static void pd_conjoint_bytes_atomic(const void* from, void* to, size_t count) {
(void)memmove(to, from, count);
}
static void pd_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) {
static void pd_conjoint_jshorts_atomic(const jshort* from, jshort* to, size_t count) {
if (from > to) {
while (count-- > 0) {
// Copy forwards
@ -97,7 +97,7 @@ static void pd_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) {
}
}
static void pd_conjoint_jints_atomic(jint* from, jint* to, size_t count) {
static void pd_conjoint_jints_atomic(const jint* from, jint* to, size_t count) {
if (from > to) {
while (count-- > 0) {
// Copy forwards
@ -113,12 +113,12 @@ static void pd_conjoint_jints_atomic(jint* from, jint* to, size_t count) {
}
}
static void pd_conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count) {
static void pd_conjoint_jlongs_atomic(const jlong* from, jlong* to, size_t count) {
assert(BytesPerLong == BytesPerOop, "jlongs and oops must be the same size");
pd_conjoint_oops_atomic((oop*)from, (oop*)to, count);
pd_conjoint_oops_atomic((const oop*)from, (oop*)to, count);
}
static void pd_conjoint_oops_atomic(oop* from, oop* to, size_t count) {
static void pd_conjoint_oops_atomic(const oop* from, oop* to, size_t count) {
// Do better than this: inline memmove body NEEDS CLEANUP
if (from > to) {
while (count-- > 0) {
@ -135,24 +135,24 @@ static void pd_conjoint_oops_atomic(oop* from, oop* to, size_t count) {
}
}
static void pd_arrayof_conjoint_bytes(HeapWord* from, HeapWord* to, size_t count) {
static void pd_arrayof_conjoint_bytes(const HeapWord* from, HeapWord* to, size_t count) {
pd_conjoint_bytes_atomic(from, to, count);
}
static void pd_arrayof_conjoint_jshorts(HeapWord* from, HeapWord* to, size_t count) {
pd_conjoint_jshorts_atomic((jshort*)from, (jshort*)to, count);
static void pd_arrayof_conjoint_jshorts(const HeapWord* from, HeapWord* to, size_t count) {
pd_conjoint_jshorts_atomic((const jshort*)from, (jshort*)to, count);
}
static void pd_arrayof_conjoint_jints(HeapWord* from, HeapWord* to, size_t count) {
pd_conjoint_jints_atomic((jint*)from, (jint*)to, count);
static void pd_arrayof_conjoint_jints(const HeapWord* from, HeapWord* to, size_t count) {
pd_conjoint_jints_atomic((const jint*)from, (jint*)to, count);
}
static void pd_arrayof_conjoint_jlongs(HeapWord* from, HeapWord* to, size_t count) {
pd_conjoint_jlongs_atomic((jlong*)from, (jlong*)to, count);
static void pd_arrayof_conjoint_jlongs(const HeapWord* from, HeapWord* to, size_t count) {
pd_conjoint_jlongs_atomic((const jlong*)from, (jlong*)to, count);
}
static void pd_arrayof_conjoint_oops(HeapWord* from, HeapWord* to, size_t count) {
pd_conjoint_oops_atomic((oop*)from, (oop*)to, count);
static void pd_arrayof_conjoint_oops(const HeapWord* from, HeapWord* to, size_t count) {
pd_conjoint_oops_atomic((const oop*)from, (oop*)to, count);
}
static void pd_fill_to_words(HeapWord* tohw, size_t count, juint value) {

View File

@ -1411,9 +1411,14 @@ void MacroAssembler::untested(const char* what) {
void MacroAssembler::unimplemented(const char* what) {
char* b = new char[1024];
jio_snprintf(b, 1024, "unimplemented: %s", what);
stop(b);
const char* buf = NULL;
{
ResourceMark rm;
stringStream ss;
ss.print("unimplemented: %s", what);
buf = code_string(ss.as_string());
}
stop(buf);
}

View File

@ -3660,9 +3660,14 @@ void MacroAssembler::os_breakpoint() {
}
void MacroAssembler::unimplemented(const char* what) {
char* b = new char[1024];
jio_snprintf(b, 1024, "unimplemented: %s", what);
stop(b);
const char* buf = NULL;
{
ResourceMark rm;
stringStream ss;
ss.print("unimplemented: %s", what);
buf = code_string(ss.as_string());
}
stop(buf);
}
#ifdef _LP64

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -28,11 +28,11 @@
// Inline functions for memory copy and fill.
static void pd_conjoint_words(HeapWord* from, HeapWord* to, size_t count) {
static void pd_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) {
memmove(to, from, count * HeapWordSize);
}
static void pd_disjoint_words(HeapWord* from, HeapWord* to, size_t count) {
static void pd_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) {
switch (count) {
case 8: to[7] = from[7];
case 7: to[6] = from[6];
@ -49,7 +49,7 @@ static void pd_disjoint_words(HeapWord* from, HeapWord* to, size_t count) {
}
}
static void pd_disjoint_words_atomic(HeapWord* from,
static void pd_disjoint_words_atomic(const HeapWord* from,
HeapWord* to,
size_t count) {
switch (count) {
@ -70,73 +70,73 @@ static void pd_disjoint_words_atomic(HeapWord* from,
}
}
static void pd_aligned_conjoint_words(HeapWord* from,
static void pd_aligned_conjoint_words(const HeapWord* from,
HeapWord* to,
size_t count) {
memmove(to, from, count * HeapWordSize);
}
static void pd_aligned_disjoint_words(HeapWord* from,
static void pd_aligned_disjoint_words(const HeapWord* from,
HeapWord* to,
size_t count) {
pd_disjoint_words(from, to, count);
}
static void pd_conjoint_bytes(void* from, void* to, size_t count) {
static void pd_conjoint_bytes(const void* from, void* to, size_t count) {
memmove(to, from, count);
}
static void pd_conjoint_bytes_atomic(void* from, void* to, size_t count) {
static void pd_conjoint_bytes_atomic(const void* from, void* to, size_t count) {
memmove(to, from, count);
}
static void pd_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) {
static void pd_conjoint_jshorts_atomic(const jshort* from, jshort* to, size_t count) {
_Copy_conjoint_jshorts_atomic(from, to, count);
}
static void pd_conjoint_jints_atomic(jint* from, jint* to, size_t count) {
static void pd_conjoint_jints_atomic(const jint* from, jint* to, size_t count) {
_Copy_conjoint_jints_atomic(from, to, count);
}
static void pd_conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count) {
static void pd_conjoint_jlongs_atomic(const jlong* from, jlong* to, size_t count) {
_Copy_conjoint_jlongs_atomic(from, to, count);
}
static void pd_conjoint_oops_atomic(oop* from, oop* to, size_t count) {
static void pd_conjoint_oops_atomic(const oop* from, oop* to, size_t count) {
#ifdef _LP64
assert(BytesPerLong == BytesPerOop, "jlongs and oops must be the same size");
_Copy_conjoint_jlongs_atomic((jlong*)from, (jlong*)to, count);
_Copy_conjoint_jlongs_atomic((const jlong*)from, (jlong*)to, count);
#else
assert(BytesPerInt == BytesPerOop, "jints and oops must be the same size");
_Copy_conjoint_jints_atomic((jint*)from, (jint*)to, count);
_Copy_conjoint_jints_atomic((const jint*)from, (jint*)to, count);
#endif // _LP64
}
static void pd_arrayof_conjoint_bytes(HeapWord* from,
static void pd_arrayof_conjoint_bytes(const HeapWord* from,
HeapWord* to,
size_t count) {
_Copy_arrayof_conjoint_bytes(from, to, count);
}
static void pd_arrayof_conjoint_jshorts(HeapWord* from,
static void pd_arrayof_conjoint_jshorts(const HeapWord* from,
HeapWord* to,
size_t count) {
_Copy_arrayof_conjoint_jshorts(from, to, count);
}
static void pd_arrayof_conjoint_jints(HeapWord* from,
static void pd_arrayof_conjoint_jints(const HeapWord* from,
HeapWord* to,
size_t count) {
_Copy_arrayof_conjoint_jints(from, to, count);
}
static void pd_arrayof_conjoint_jlongs(HeapWord* from,
static void pd_arrayof_conjoint_jlongs(const HeapWord* from,
HeapWord* to,
size_t count) {
_Copy_arrayof_conjoint_jlongs(from, to, count);
}
static void pd_arrayof_conjoint_oops(HeapWord* from,
static void pd_arrayof_conjoint_oops(const HeapWord* from,
HeapWord* to,
size_t count) {
#ifdef _LP64

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -37,10 +37,7 @@
notproduct, \
range, \
constraint, \
writeable) \
\
product(bool, UseUTCFileTimestamp, true, \
"Adjust the timestamp returned from stat() to be UTC")
writeable)
//

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,7 +25,7 @@
#ifndef OS_CPU_BSD_X86_VM_COPY_BSD_X86_INLINE_HPP
#define OS_CPU_BSD_X86_VM_COPY_BSD_X86_INLINE_HPP
static void pd_conjoint_words(HeapWord* from, HeapWord* to, size_t count) {
static void pd_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) {
#ifdef AMD64
(void)memmove(to, from, count * HeapWordSize);
#else
@ -70,7 +70,7 @@ static void pd_conjoint_words(HeapWord* from, HeapWord* to, size_t count) {
#endif // AMD64
}
static void pd_disjoint_words(HeapWord* from, HeapWord* to, size_t count) {
static void pd_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) {
#ifdef AMD64
switch (count) {
case 8: to[7] = from[7];
@ -108,7 +108,7 @@ static void pd_disjoint_words(HeapWord* from, HeapWord* to, size_t count) {
#endif // AMD64
}
static void pd_disjoint_words_atomic(HeapWord* from, HeapWord* to, size_t count) {
static void pd_disjoint_words_atomic(const HeapWord* from, HeapWord* to, size_t count) {
#ifdef AMD64
switch (count) {
case 8: to[7] = from[7];
@ -132,15 +132,15 @@ static void pd_disjoint_words_atomic(HeapWord* from, HeapWord* to, size_t count)
#endif // AMD64
}
static void pd_aligned_conjoint_words(HeapWord* from, HeapWord* to, size_t count) {
static void pd_aligned_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) {
pd_conjoint_words(from, to, count);
}
static void pd_aligned_disjoint_words(HeapWord* from, HeapWord* to, size_t count) {
static void pd_aligned_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) {
pd_disjoint_words(from, to, count);
}
static void pd_conjoint_bytes(void* from, void* to, size_t count) {
static void pd_conjoint_bytes(const void* from, void* to, size_t count) {
#ifdef AMD64
(void)memmove(to, from, count);
#else
@ -219,25 +219,25 @@ static void pd_conjoint_bytes(void* from, void* to, size_t count) {
#endif // AMD64
}
static void pd_conjoint_bytes_atomic(void* from, void* to, size_t count) {
static void pd_conjoint_bytes_atomic(const void* from, void* to, size_t count) {
pd_conjoint_bytes(from, to, count);
}
static void pd_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) {
static void pd_conjoint_jshorts_atomic(const jshort* from, jshort* to, size_t count) {
_Copy_conjoint_jshorts_atomic(from, to, count);
}
static void pd_conjoint_jints_atomic(jint* from, jint* to, size_t count) {
static void pd_conjoint_jints_atomic(const jint* from, jint* to, size_t count) {
#ifdef AMD64
_Copy_conjoint_jints_atomic(from, to, count);
#else
assert(HeapWordSize == BytesPerInt, "heapwords and jints must be the same size");
// pd_conjoint_words is word-atomic in this implementation.
pd_conjoint_words((HeapWord*)from, (HeapWord*)to, count);
pd_conjoint_words((const HeapWord*)from, (HeapWord*)to, count);
#endif // AMD64
}
static void pd_conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count) {
static void pd_conjoint_jlongs_atomic(const jlong* from, jlong* to, size_t count) {
#ifdef AMD64
_Copy_conjoint_jlongs_atomic(from, to, count);
#else
@ -262,47 +262,47 @@ static void pd_conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count) {
#endif // AMD64
}
static void pd_conjoint_oops_atomic(oop* from, oop* to, size_t count) {
static void pd_conjoint_oops_atomic(const oop* from, oop* to, size_t count) {
#ifdef AMD64
assert(BytesPerLong == BytesPerOop, "jlongs and oops must be the same size");
_Copy_conjoint_jlongs_atomic((jlong*)from, (jlong*)to, count);
_Copy_conjoint_jlongs_atomic((const jlong*)from, (jlong*)to, count);
#else
assert(HeapWordSize == BytesPerOop, "heapwords and oops must be the same size");
// pd_conjoint_words is word-atomic in this implementation.
pd_conjoint_words((HeapWord*)from, (HeapWord*)to, count);
pd_conjoint_words((const HeapWord*)from, (HeapWord*)to, count);
#endif // AMD64
}
static void pd_arrayof_conjoint_bytes(HeapWord* from, HeapWord* to, size_t count) {
static void pd_arrayof_conjoint_bytes(const HeapWord* from, HeapWord* to, size_t count) {
_Copy_arrayof_conjoint_bytes(from, to, count);
}
static void pd_arrayof_conjoint_jshorts(HeapWord* from, HeapWord* to, size_t count) {
static void pd_arrayof_conjoint_jshorts(const HeapWord* from, HeapWord* to, size_t count) {
_Copy_arrayof_conjoint_jshorts(from, to, count);
}
static void pd_arrayof_conjoint_jints(HeapWord* from, HeapWord* to, size_t count) {
static void pd_arrayof_conjoint_jints(const HeapWord* from, HeapWord* to, size_t count) {
#ifdef AMD64
_Copy_arrayof_conjoint_jints(from, to, count);
#else
pd_conjoint_jints_atomic((jint*)from, (jint*)to, count);
pd_conjoint_jints_atomic((const jint*)from, (jint*)to, count);
#endif // AMD64
}
static void pd_arrayof_conjoint_jlongs(HeapWord* from, HeapWord* to, size_t count) {
static void pd_arrayof_conjoint_jlongs(const HeapWord* from, HeapWord* to, size_t count) {
#ifdef AMD64
_Copy_arrayof_conjoint_jlongs(from, to, count);
#else
pd_conjoint_jlongs_atomic((jlong*)from, (jlong*)to, count);
pd_conjoint_jlongs_atomic((const jlong*)from, (jlong*)to, count);
#endif // AMD64
}
static void pd_arrayof_conjoint_oops(HeapWord* from, HeapWord* to, size_t count) {
static void pd_arrayof_conjoint_oops(const HeapWord* from, HeapWord* to, size_t count) {
#ifdef AMD64
assert(BytesPerLong == BytesPerOop, "jlongs and oops must be the same size");
_Copy_arrayof_conjoint_jlongs(from, to, count);
#else
pd_conjoint_oops_atomic((oop*)from, (oop*)to, count);
pd_conjoint_oops_atomic((const oop*)from, (oop*)to, count);
#endif // AMD64
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -99,7 +99,7 @@
: "memory", "cc"); \
}
static void pd_conjoint_words(HeapWord* from, HeapWord* to, size_t count) {
static void pd_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) {
__asm volatile( "prfm pldl1strm, [%[s], #0];" :: [s]"r"(from) : "memory");
if (__builtin_expect(count <= 8, 1)) {
COPY_SMALL(from, to, count);
@ -108,7 +108,7 @@ static void pd_conjoint_words(HeapWord* from, HeapWord* to, size_t count) {
_Copy_conjoint_words(from, to, count);
}
static void pd_disjoint_words(HeapWord* from, HeapWord* to, size_t count) {
static void pd_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) {
if (__builtin_constant_p(count)) {
memcpy(to, from, count * sizeof(HeapWord));
return;
@ -121,7 +121,7 @@ static void pd_disjoint_words(HeapWord* from, HeapWord* to, size_t count) {
_Copy_disjoint_words(from, to, count);
}
static void pd_disjoint_words_atomic(HeapWord* from, HeapWord* to, size_t count) {
static void pd_disjoint_words_atomic(const HeapWord* from, HeapWord* to, size_t count) {
__asm volatile( "prfm pldl1strm, [%[s], #0];" :: [s]"r"(from) : "memory");
if (__builtin_expect(count <= 8, 1)) {
COPY_SMALL(from, to, count);
@ -130,56 +130,56 @@ static void pd_disjoint_words_atomic(HeapWord* from, HeapWord* to, size_t count)
_Copy_disjoint_words(from, to, count);
}
static void pd_aligned_conjoint_words(HeapWord* from, HeapWord* to, size_t count) {
static void pd_aligned_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) {
pd_conjoint_words(from, to, count);
}
static void pd_aligned_disjoint_words(HeapWord* from, HeapWord* to, size_t count) {
static void pd_aligned_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) {
pd_disjoint_words(from, to, count);
}
static void pd_conjoint_bytes(void* from, void* to, size_t count) {
static void pd_conjoint_bytes(const void* from, void* to, size_t count) {
(void)memmove(to, from, count);
}
static void pd_conjoint_bytes_atomic(void* from, void* to, size_t count) {
static void pd_conjoint_bytes_atomic(const void* from, void* to, size_t count) {
pd_conjoint_bytes(from, to, count);
}
static void pd_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) {
static void pd_conjoint_jshorts_atomic(const jshort* from, jshort* to, size_t count) {
_Copy_conjoint_jshorts_atomic(from, to, count);
}
static void pd_conjoint_jints_atomic(jint* from, jint* to, size_t count) {
static void pd_conjoint_jints_atomic(const jint* from, jint* to, size_t count) {
_Copy_conjoint_jints_atomic(from, to, count);
}
static void pd_conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count) {
static void pd_conjoint_jlongs_atomic(const jlong* from, jlong* to, size_t count) {
_Copy_conjoint_jlongs_atomic(from, to, count);
}
static void pd_conjoint_oops_atomic(oop* from, oop* to, size_t count) {
static void pd_conjoint_oops_atomic(const oop* from, oop* to, size_t count) {
assert(BytesPerLong == BytesPerOop, "jlongs and oops must be the same size");
_Copy_conjoint_jlongs_atomic((jlong*)from, (jlong*)to, count);
_Copy_conjoint_jlongs_atomic((const jlong*)from, (jlong*)to, count);
}
static void pd_arrayof_conjoint_bytes(HeapWord* from, HeapWord* to, size_t count) {
static void pd_arrayof_conjoint_bytes(const HeapWord* from, HeapWord* to, size_t count) {
_Copy_arrayof_conjoint_bytes(from, to, count);
}
static void pd_arrayof_conjoint_jshorts(HeapWord* from, HeapWord* to, size_t count) {
static void pd_arrayof_conjoint_jshorts(const HeapWord* from, HeapWord* to, size_t count) {
_Copy_arrayof_conjoint_jshorts(from, to, count);
}
static void pd_arrayof_conjoint_jints(HeapWord* from, HeapWord* to, size_t count) {
static void pd_arrayof_conjoint_jints(const HeapWord* from, HeapWord* to, size_t count) {
_Copy_arrayof_conjoint_jints(from, to, count);
}
static void pd_arrayof_conjoint_jlongs(HeapWord* from, HeapWord* to, size_t count) {
static void pd_arrayof_conjoint_jlongs(const HeapWord* from, HeapWord* to, size_t count) {
_Copy_arrayof_conjoint_jlongs(from, to, count);
}
static void pd_arrayof_conjoint_oops(HeapWord* from, HeapWord* to, size_t count) {
static void pd_arrayof_conjoint_oops(const HeapWord* from, HeapWord* to, size_t count) {
assert(!UseCompressedOops, "foo!");
assert(BytesPerLong == BytesPerOop, "jlongs and oops must be the same size");
_Copy_arrayof_conjoint_jlongs(from, to, count);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,7 +25,7 @@
#ifndef OS_CPU_LINUX_ARM_VM_COPY_LINUX_ARM_INLINE_HPP
#define OS_CPU_LINUX_ARM_VM_COPY_LINUX_ARM_INLINE_HPP
static void pd_conjoint_words(HeapWord* from, HeapWord* to, size_t count) {
static void pd_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) {
#ifdef AARCH64
_Copy_conjoint_words(from, to, count * HeapWordSize);
#else
@ -34,7 +34,7 @@ static void pd_conjoint_words(HeapWord* from, HeapWord* to, size_t count) {
#endif
}
static void pd_disjoint_words(HeapWord* from, HeapWord* to, size_t count) {
static void pd_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) {
#ifdef AARCH64
_Copy_disjoint_words(from, to, count * HeapWordSize);
#else
@ -42,27 +42,27 @@ static void pd_disjoint_words(HeapWord* from, HeapWord* to, size_t count) {
#endif // AARCH64
}
static void pd_disjoint_words_atomic(HeapWord* from, HeapWord* to, size_t count) {
static void pd_disjoint_words_atomic(const HeapWord* from, HeapWord* to, size_t count) {
pd_disjoint_words(from, to, count);
}
static void pd_aligned_conjoint_words(HeapWord* from, HeapWord* to, size_t count) {
static void pd_aligned_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) {
pd_conjoint_words(from, to, count);
}
static void pd_aligned_disjoint_words(HeapWord* from, HeapWord* to, size_t count) {
static void pd_aligned_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) {
pd_disjoint_words(from, to, count);
}
static void pd_conjoint_bytes(void* from, void* to, size_t count) {
static void pd_conjoint_bytes(const void* from, void* to, size_t count) {
memmove(to, from, count);
}
static void pd_conjoint_bytes_atomic(void* from, void* to, size_t count) {
static void pd_conjoint_bytes_atomic(const void* from, void* to, size_t count) {
pd_conjoint_bytes(from, to, count);
}
static void pd_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) {
static void pd_conjoint_jshorts_atomic(const jshort* from, jshort* to, size_t count) {
#ifdef AARCH64
_Copy_conjoint_jshorts_atomic(from, to, count * BytesPerShort);
#else
@ -70,58 +70,58 @@ static void pd_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) {
#endif
}
static void pd_conjoint_jints_atomic(jint* from, jint* to, size_t count) {
static void pd_conjoint_jints_atomic(const jint* from, jint* to, size_t count) {
#ifdef AARCH64
_Copy_conjoint_jints_atomic(from, to, count * BytesPerInt);
#else
assert(HeapWordSize == BytesPerInt, "heapwords and jints must be the same size");
// pd_conjoint_words is word-atomic in this implementation.
pd_conjoint_words((HeapWord*)from, (HeapWord*)to, count);
pd_conjoint_words((const HeapWord*)from, (HeapWord*)to, count);
#endif
}
static void pd_conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count) {
static void pd_conjoint_jlongs_atomic(const jlong* from, jlong* to, size_t count) {
#ifdef AARCH64
assert(HeapWordSize == BytesPerLong, "64-bit architecture");
pd_conjoint_words((HeapWord*)from, (HeapWord*)to, count);
pd_conjoint_words((const HeapWord*)from, (HeapWord*)to, count);
#else
_Copy_conjoint_jlongs_atomic(to, from, count * BytesPerLong);
#endif
}
static void pd_conjoint_oops_atomic(oop* from, oop* to, size_t count) {
static void pd_conjoint_oops_atomic(const oop* from, oop* to, size_t count) {
#ifdef AARCH64
if (UseCompressedOops) {
assert(BytesPerHeapOop == BytesPerInt, "compressed oops");
pd_conjoint_jints_atomic((jint*)from, (jint*)to, count);
pd_conjoint_jints_atomic((const jint*)from, (jint*)to, count);
} else {
assert(BytesPerHeapOop == BytesPerLong, "64-bit architecture");
pd_conjoint_jlongs_atomic((jlong*)from, (jlong*)to, count);
pd_conjoint_jlongs_atomic((const jlong*)from, (jlong*)to, count);
}
#else
assert(BytesPerHeapOop == BytesPerInt, "32-bit architecture");
pd_conjoint_jints_atomic((jint*)from, (jint*)to, count);
pd_conjoint_jints_atomic((const jint*)from, (jint*)to, count);
#endif
}
static void pd_arrayof_conjoint_bytes(HeapWord* from, HeapWord* to, size_t count) {
pd_conjoint_bytes_atomic((void*)from, (void*)to, count);
static void pd_arrayof_conjoint_bytes(const HeapWord* from, HeapWord* to, size_t count) {
pd_conjoint_bytes_atomic((const void*)from, (void*)to, count);
}
static void pd_arrayof_conjoint_jshorts(HeapWord* from, HeapWord* to, size_t count) {
pd_conjoint_jshorts_atomic((jshort*)from, (jshort*)to, count);
static void pd_arrayof_conjoint_jshorts(const HeapWord* from, HeapWord* to, size_t count) {
pd_conjoint_jshorts_atomic((const jshort*)from, (jshort*)to, count);
}
static void pd_arrayof_conjoint_jints(HeapWord* from, HeapWord* to, size_t count) {
pd_conjoint_jints_atomic((jint*)from, (jint*)to, count);
static void pd_arrayof_conjoint_jints(const HeapWord* from, HeapWord* to, size_t count) {
pd_conjoint_jints_atomic((const jint*)from, (jint*)to, count);
}
static void pd_arrayof_conjoint_jlongs(HeapWord* from, HeapWord* to, size_t count) {
pd_conjoint_jlongs_atomic((jlong*)from, (jlong*)to, count);
static void pd_arrayof_conjoint_jlongs(const HeapWord* from, HeapWord* to, size_t count) {
pd_conjoint_jlongs_atomic((const jlong*)from, (jlong*)to, count);
}
static void pd_arrayof_conjoint_oops(HeapWord* from, HeapWord* to, size_t count) {
pd_conjoint_oops_atomic((oop*)from, (oop*)to, count);
static void pd_arrayof_conjoint_oops(const HeapWord* from, HeapWord* to, size_t count) {
pd_conjoint_oops_atomic((const oop*)from, (oop*)to, count);
}
#endif // OS_CPU_LINUX_ARM_VM_COPY_LINUX_ARM_INLINE_HPP

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,7 +25,7 @@
#ifndef OS_CPU_LINUX_X86_VM_COPY_LINUX_X86_INLINE_HPP
#define OS_CPU_LINUX_X86_VM_COPY_LINUX_X86_INLINE_HPP
static void pd_conjoint_words(HeapWord* from, HeapWord* to, size_t count) {
static void pd_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) {
#ifdef AMD64
(void)memmove(to, from, count * HeapWordSize);
#else
@ -70,7 +70,7 @@ static void pd_conjoint_words(HeapWord* from, HeapWord* to, size_t count) {
#endif // AMD64
}
static void pd_disjoint_words(HeapWord* from, HeapWord* to, size_t count) {
static void pd_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) {
#ifdef AMD64
switch (count) {
case 8: to[7] = from[7];
@ -108,7 +108,7 @@ static void pd_disjoint_words(HeapWord* from, HeapWord* to, size_t count) {
#endif // AMD64
}
static void pd_disjoint_words_atomic(HeapWord* from, HeapWord* to, size_t count) {
static void pd_disjoint_words_atomic(const HeapWord* from, HeapWord* to, size_t count) {
#ifdef AMD64
switch (count) {
case 8: to[7] = from[7];
@ -132,15 +132,15 @@ static void pd_disjoint_words_atomic(HeapWord* from, HeapWord* to, size_t count)
#endif // AMD64
}
static void pd_aligned_conjoint_words(HeapWord* from, HeapWord* to, size_t count) {
static void pd_aligned_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) {
pd_conjoint_words(from, to, count);
}
static void pd_aligned_disjoint_words(HeapWord* from, HeapWord* to, size_t count) {
static void pd_aligned_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) {
pd_disjoint_words(from, to, count);
}
static void pd_conjoint_bytes(void* from, void* to, size_t count) {
static void pd_conjoint_bytes(const void* from, void* to, size_t count) {
#ifdef AMD64
(void)memmove(to, from, count);
#else
@ -219,25 +219,25 @@ static void pd_conjoint_bytes(void* from, void* to, size_t count) {
#endif // AMD64
}
static void pd_conjoint_bytes_atomic(void* from, void* to, size_t count) {
static void pd_conjoint_bytes_atomic(const void* from, void* to, size_t count) {
pd_conjoint_bytes(from, to, count);
}
static void pd_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) {
static void pd_conjoint_jshorts_atomic(const jshort* from, jshort* to, size_t count) {
_Copy_conjoint_jshorts_atomic(from, to, count);
}
static void pd_conjoint_jints_atomic(jint* from, jint* to, size_t count) {
static void pd_conjoint_jints_atomic(const jint* from, jint* to, size_t count) {
#ifdef AMD64
_Copy_conjoint_jints_atomic(from, to, count);
#else
assert(HeapWordSize == BytesPerInt, "heapwords and jints must be the same size");
// pd_conjoint_words is word-atomic in this implementation.
pd_conjoint_words((HeapWord*)from, (HeapWord*)to, count);
pd_conjoint_words((const HeapWord*)from, (HeapWord*)to, count);
#endif // AMD64
}
static void pd_conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count) {
static void pd_conjoint_jlongs_atomic(const jlong* from, jlong* to, size_t count) {
#ifdef AMD64
_Copy_conjoint_jlongs_atomic(from, to, count);
#else
@ -262,47 +262,47 @@ static void pd_conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count) {
#endif // AMD64
}
static void pd_conjoint_oops_atomic(oop* from, oop* to, size_t count) {
static void pd_conjoint_oops_atomic(const oop* from, oop* to, size_t count) {
#ifdef AMD64
assert(BytesPerLong == BytesPerOop, "jlongs and oops must be the same size");
_Copy_conjoint_jlongs_atomic((jlong*)from, (jlong*)to, count);
_Copy_conjoint_jlongs_atomic((const jlong*)from, (jlong*)to, count);
#else
assert(HeapWordSize == BytesPerOop, "heapwords and oops must be the same size");
// pd_conjoint_words is word-atomic in this implementation.
pd_conjoint_words((HeapWord*)from, (HeapWord*)to, count);
pd_conjoint_words((const HeapWord*)from, (HeapWord*)to, count);
#endif // AMD64
}
static void pd_arrayof_conjoint_bytes(HeapWord* from, HeapWord* to, size_t count) {
static void pd_arrayof_conjoint_bytes(const HeapWord* from, HeapWord* to, size_t count) {
_Copy_arrayof_conjoint_bytes(from, to, count);
}
static void pd_arrayof_conjoint_jshorts(HeapWord* from, HeapWord* to, size_t count) {
static void pd_arrayof_conjoint_jshorts(const HeapWord* from, HeapWord* to, size_t count) {
_Copy_arrayof_conjoint_jshorts(from, to, count);
}
static void pd_arrayof_conjoint_jints(HeapWord* from, HeapWord* to, size_t count) {
static void pd_arrayof_conjoint_jints(const HeapWord* from, HeapWord* to, size_t count) {
#ifdef AMD64
_Copy_arrayof_conjoint_jints(from, to, count);
#else
pd_conjoint_jints_atomic((jint*)from, (jint*)to, count);
pd_conjoint_jints_atomic((const jint*)from, (jint*)to, count);
#endif // AMD64
}
static void pd_arrayof_conjoint_jlongs(HeapWord* from, HeapWord* to, size_t count) {
static void pd_arrayof_conjoint_jlongs(const HeapWord* from, HeapWord* to, size_t count) {
#ifdef AMD64
_Copy_arrayof_conjoint_jlongs(from, to, count);
#else
pd_conjoint_jlongs_atomic((jlong*)from, (jlong*)to, count);
pd_conjoint_jlongs_atomic((const jlong*)from, (jlong*)to, count);
#endif // AMD64
}
static void pd_arrayof_conjoint_oops(HeapWord* from, HeapWord* to, size_t count) {
static void pd_arrayof_conjoint_oops(const HeapWord* from, HeapWord* to, size_t count) {
#ifdef AMD64
assert(BytesPerLong == BytesPerOop, "jlongs and oops must be the same size");
_Copy_arrayof_conjoint_jlongs(from, to, count);
#else
pd_conjoint_oops_atomic((oop*)from, (oop*)to, count);
pd_conjoint_oops_atomic((const oop*)from, (oop*)to, count);
#endif // AMD64
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,11 +25,11 @@
#ifndef OS_CPU_SOLARIS_X86_VM_COPY_SOLARIS_X86_INLINE_HPP
#define OS_CPU_SOLARIS_X86_VM_COPY_SOLARIS_X86_INLINE_HPP
static void pd_conjoint_words(HeapWord* from, HeapWord* to, size_t count) {
static void pd_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) {
(void)memmove(to, from, count * HeapWordSize);
}
static void pd_disjoint_words(HeapWord* from, HeapWord* to, size_t count) {
static void pd_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) {
#ifndef AMD64
(void)memcpy(to, from, count * HeapWordSize);
#else
@ -50,7 +50,7 @@ static void pd_disjoint_words(HeapWord* from, HeapWord* to, size_t count) {
#endif // AMD64
}
static void pd_disjoint_words_atomic(HeapWord* from, HeapWord* to, size_t count) {
static void pd_disjoint_words_atomic(const HeapWord* from, HeapWord* to, size_t count) {
switch (count) {
case 8: to[7] = from[7];
case 7: to[6] = from[6];
@ -68,15 +68,15 @@ static void pd_disjoint_words_atomic(HeapWord* from, HeapWord* to, size_t count)
}
}
static void pd_aligned_conjoint_words(HeapWord* from, HeapWord* to, size_t count) {
static void pd_aligned_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) {
(void)memmove(to, from, count * HeapWordSize);
}
static void pd_aligned_disjoint_words(HeapWord* from, HeapWord* to, size_t count) {
static void pd_aligned_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) {
pd_disjoint_words(from, to, count);
}
static void pd_conjoint_bytes(void* from, void* to, size_t count) {
static void pd_conjoint_bytes(const void* from, void* to, size_t count) {
#ifdef AMD64
(void)memmove(to, from, count);
#else
@ -84,53 +84,53 @@ static void pd_conjoint_bytes(void* from, void* to, size_t count) {
#endif // AMD64
}
static void pd_conjoint_bytes_atomic(void* from, void* to, size_t count) {
static void pd_conjoint_bytes_atomic(const void* from, void* to, size_t count) {
pd_conjoint_bytes(from, to, count);
}
static void pd_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) {
static void pd_conjoint_jshorts_atomic(const jshort* from, jshort* to, size_t count) {
_Copy_conjoint_jshorts_atomic(from, to, count);
}
static void pd_conjoint_jints_atomic(jint* from, jint* to, size_t count) {
static void pd_conjoint_jints_atomic(const jint* from, jint* to, size_t count) {
_Copy_conjoint_jints_atomic(from, to, count);
}
static void pd_conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count) {
static void pd_conjoint_jlongs_atomic(const jlong* from, jlong* to, size_t count) {
// Guarantee use of fild/fistp or xmm regs via some asm code, because compilers won't.
_Copy_conjoint_jlongs_atomic(from, to, count);
}
static void pd_conjoint_oops_atomic(oop* from, oop* to, size_t count) {
static void pd_conjoint_oops_atomic(const oop* from, oop* to, size_t count) {
#ifdef AMD64
assert(BytesPerLong == BytesPerOop, "jlongs and oops must be the same size");
_Copy_conjoint_jlongs_atomic((jlong*)from, (jlong*)to, count);
_Copy_conjoint_jlongs_atomic((const jlong*)from, (jlong*)to, count);
#else
_Copy_conjoint_jints_atomic((jint*)from, (jint*)to, count);
_Copy_conjoint_jints_atomic((const jint*)from, (jint*)to, count);
#endif // AMD64
}
static void pd_arrayof_conjoint_bytes(HeapWord* from, HeapWord* to, size_t count) {
static void pd_arrayof_conjoint_bytes(const HeapWord* from, HeapWord* to, size_t count) {
_Copy_arrayof_conjoint_bytes(from, to, count);
}
static void pd_arrayof_conjoint_jshorts(HeapWord* from, HeapWord* to, size_t count) {
static void pd_arrayof_conjoint_jshorts(const HeapWord* from, HeapWord* to, size_t count) {
_Copy_arrayof_conjoint_jshorts(from, to, count);
}
static void pd_arrayof_conjoint_jints(HeapWord* from, HeapWord* to, size_t count) {
static void pd_arrayof_conjoint_jints(const HeapWord* from, HeapWord* to, size_t count) {
_Copy_arrayof_conjoint_jints(from, to, count);
}
static void pd_arrayof_conjoint_jlongs(HeapWord* from, HeapWord* to, size_t count) {
static void pd_arrayof_conjoint_jlongs(const HeapWord* from, HeapWord* to, size_t count) {
#ifdef AMD64
_Copy_arrayof_conjoint_jlongs(from, to, count);
#else
pd_conjoint_jlongs_atomic((jlong*)from, (jlong*)to, count);
pd_conjoint_jlongs_atomic((const jlong*)from, (jlong*)to, count);
#endif // AMD64
}
static void pd_arrayof_conjoint_oops(HeapWord* from, HeapWord* to, size_t count) {
static void pd_arrayof_conjoint_oops(const HeapWord* from, HeapWord* to, size_t count) {
#ifdef AMD64
assert(BytesPerLong == BytesPerOop, "jlongs and oops must be the same size");
_Copy_arrayof_conjoint_jlongs(from, to, count);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,11 +25,11 @@
#ifndef OS_CPU_WINDOWS_X86_VM_COPY_WINDOWS_X86_INLINE_HPP
#define OS_CPU_WINDOWS_X86_VM_COPY_WINDOWS_X86_INLINE_HPP
static void pd_conjoint_words(HeapWord* from, HeapWord* to, size_t count) {
static void pd_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) {
(void)memmove(to, from, count * HeapWordSize);
}
static void pd_disjoint_words(HeapWord* from, HeapWord* to, size_t count) {
static void pd_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) {
#ifdef AMD64
switch (count) {
case 8: to[7] = from[7];
@ -50,7 +50,7 @@ static void pd_disjoint_words(HeapWord* from, HeapWord* to, size_t count) {
#endif // AMD64
}
static void pd_disjoint_words_atomic(HeapWord* from, HeapWord* to, size_t count) {
static void pd_disjoint_words_atomic(const HeapWord* from, HeapWord* to, size_t count) {
switch (count) {
case 8: to[7] = from[7];
case 7: to[6] = from[6];
@ -68,23 +68,23 @@ static void pd_disjoint_words_atomic(HeapWord* from, HeapWord* to, size_t count)
}
}
static void pd_aligned_conjoint_words(HeapWord* from, HeapWord* to, size_t count) {
static void pd_aligned_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) {
(void)memmove(to, from, count * HeapWordSize);
}
static void pd_aligned_disjoint_words(HeapWord* from, HeapWord* to, size_t count) {
static void pd_aligned_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) {
pd_disjoint_words(from, to, count);
}
static void pd_conjoint_bytes(void* from, void* to, size_t count) {
static void pd_conjoint_bytes(const void* from, void* to, size_t count) {
(void)memmove(to, from, count);
}
static void pd_conjoint_bytes_atomic(void* from, void* to, size_t count) {
static void pd_conjoint_bytes_atomic(const void* from, void* to, size_t count) {
pd_conjoint_bytes(from, to, count);
}
static void pd_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) {
static void pd_conjoint_jshorts_atomic(const jshort* from, jshort* to, size_t count) {
if (from > to) {
while (count-- > 0) {
// Copy forwards
@ -100,7 +100,7 @@ static void pd_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) {
}
}
static void pd_conjoint_jints_atomic(jint* from, jint* to, size_t count) {
static void pd_conjoint_jints_atomic(const jint* from, jint* to, size_t count) {
if (from > to) {
while (count-- > 0) {
// Copy forwards
@ -116,10 +116,10 @@ static void pd_conjoint_jints_atomic(jint* from, jint* to, size_t count) {
}
}
static void pd_conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count) {
static void pd_conjoint_jlongs_atomic(const jlong* from, jlong* to, size_t count) {
#ifdef AMD64
assert(BytesPerLong == BytesPerOop, "jlongs and oops must be the same size");
pd_conjoint_oops_atomic((oop*)from, (oop*)to, count);
pd_conjoint_oops_atomic((const oop*)from, (oop*)to, count);
#else
// Guarantee use of fild/fistp or xmm regs via some asm code, because compilers won't.
__asm {
@ -149,7 +149,7 @@ static void pd_conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count) {
#endif // AMD64
}
static void pd_conjoint_oops_atomic(oop* from, oop* to, size_t count) {
static void pd_conjoint_oops_atomic(const oop* from, oop* to, size_t count) {
// Do better than this: inline memmove body NEEDS CLEANUP
if (from > to) {
while (count-- > 0) {
@ -166,7 +166,7 @@ static void pd_conjoint_oops_atomic(oop* from, oop* to, size_t count) {
}
}
static void pd_arrayof_conjoint_bytes(HeapWord* from, HeapWord* to, size_t count) {
static void pd_arrayof_conjoint_bytes(const HeapWord* from, HeapWord* to, size_t count) {
#ifdef AMD64
pd_conjoint_bytes_atomic(from, to, count);
#else
@ -174,20 +174,20 @@ static void pd_arrayof_conjoint_bytes(HeapWord* from, HeapWord* to, size_t count
#endif // AMD64
}
static void pd_arrayof_conjoint_jshorts(HeapWord* from, HeapWord* to, size_t count) {
pd_conjoint_jshorts_atomic((jshort*)from, (jshort*)to, count);
static void pd_arrayof_conjoint_jshorts(const HeapWord* from, HeapWord* to, size_t count) {
pd_conjoint_jshorts_atomic((const jshort*)from, (jshort*)to, count);
}
static void pd_arrayof_conjoint_jints(HeapWord* from, HeapWord* to, size_t count) {
pd_conjoint_jints_atomic((jint*)from, (jint*)to, count);
static void pd_arrayof_conjoint_jints(const HeapWord* from, HeapWord* to, size_t count) {
pd_conjoint_jints_atomic((const jint*)from, (jint*)to, count);
}
static void pd_arrayof_conjoint_jlongs(HeapWord* from, HeapWord* to, size_t count) {
pd_conjoint_jlongs_atomic((jlong*)from, (jlong*)to, count);
static void pd_arrayof_conjoint_jlongs(const HeapWord* from, HeapWord* to, size_t count) {
pd_conjoint_jlongs_atomic((const jlong*)from, (jlong*)to, count);
}
static void pd_arrayof_conjoint_oops(HeapWord* from, HeapWord* to, size_t count) {
pd_conjoint_oops_atomic((oop*)from, (oop*)to, count);
static void pd_arrayof_conjoint_oops(const HeapWord* from, HeapWord* to, size_t count) {
pd_conjoint_oops_atomic((const oop*)from, (oop*)to, count);
}
#endif // OS_CPU_WINDOWS_X86_VM_COPY_WINDOWS_X86_INLINE_HPP
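A hedged, standalone illustration of the const-qualification pattern repeated throughout these platform copy headers (the helper name below is hypothetical and not part of the patch): only the source pointer becomes const, because a conjoint copy never writes through it, and any cast forwarded to a lower-level routine carries the qualifier along.

#include <string.h>

// Sketch only: mirrors the shape of the pd_conjoint_* signature changes above.
static void sketch_conjoint_bytes(const void* from, void* to, size_t count) {
  (void)memmove(to, from, count);   // memmove already accepts a const source
}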

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -146,27 +146,23 @@ void CompactHashtableWriter::dump(SimpleCompactHashtable *cht, const char* table
cht->init(base_address, _num_entries, _num_buckets,
_compact_buckets->data(), _compact_entries->data());
if (log_is_enabled(Info, cds, hashtables)) {
ResourceMark rm;
LogMessage(cds, hashtables) msg;
stringStream info_stream;
LogMessage(cds, hashtables) msg;
if (msg.is_info()) {
double avg_cost = 0.0;
if (_num_entries > 0) {
avg_cost = double(table_bytes)/double(_num_entries);
}
info_stream.print_cr("Shared %s table stats -------- base: " PTR_FORMAT,
msg.info("Shared %s table stats -------- base: " PTR_FORMAT,
table_name, (intptr_t)base_address);
info_stream.print_cr("Number of entries : %9d", _num_entries);
info_stream.print_cr("Total bytes used : %9d", table_bytes);
info_stream.print_cr("Average bytes per entry : %9.3f", avg_cost);
info_stream.print_cr("Average bucket size : %9.3f", summary.avg());
info_stream.print_cr("Variance of bucket size : %9.3f", summary.variance());
info_stream.print_cr("Std. dev. of bucket size: %9.3f", summary.sd());
info_stream.print_cr("Empty buckets : %9d", _num_empty_buckets);
info_stream.print_cr("Value_Only buckets : %9d", _num_value_only_buckets);
info_stream.print_cr("Other buckets : %9d", _num_other_buckets);
msg.info("%s", info_stream.as_string());
msg.info("Number of entries : %9d", _num_entries);
msg.info("Total bytes used : %9d", table_bytes);
msg.info("Average bytes per entry : %9.3f", avg_cost);
msg.info("Average bucket size : %9.3f", summary.avg());
msg.info("Variance of bucket size : %9.3f", summary.variance());
msg.info("Std. dev. of bucket size: %9.3f", summary.sd());
msg.info("Empty buckets : %9d", _num_empty_buckets);
msg.info("Value_Only buckets : %9d", _num_value_only_buckets);
msg.info("Other buckets : %9d", _num_other_buckets);
}
}
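For context on the hunk above, a minimal hedged sketch of the unified-logging pattern the code switches to (the helper and its arguments are hypothetical): a LogMessage buffers the individual info() lines and emits them as one non-interleaved block, which removes the need for the intermediate stringStream and its ResourceMark.

#include "logging/logMessage.hpp"

// Sketch only, not part of the patch.
static void log_table_stats_sketch(int num_entries, int table_bytes) {
  LogMessage(cds, hashtables) msg;
  if (msg.is_info()) {                 // skip all formatting when the level is disabled
    msg.info("Number of entries : %9d", num_entries);
    msg.info("Total bytes used  : %9d", table_bytes);
  }
}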

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -439,11 +439,11 @@ void CompiledMethod::increase_unloading_clock() {
}
void CompiledMethod::set_unloading_clock(unsigned char unloading_clock) {
OrderAccess::release_store((volatile jubyte*)&_unloading_clock, unloading_clock);
OrderAccess::release_store(&_unloading_clock, unloading_clock);
}
unsigned char CompiledMethod::unloading_clock() {
return (unsigned char)OrderAccess::load_acquire((volatile jubyte*)&_unloading_clock);
return OrderAccess::load_acquire(&_unloading_clock);
}
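The same change in isolation, as a hedged sketch with a hypothetical holder class: the templated OrderAccess overloads deduce the access width from the declared field type, so the explicit (volatile jubyte*) casts are no longer needed.

#include "runtime/orderAccess.inline.hpp"

// Sketch only: a field published with release and read with acquire semantics.
class UnloadingClockSketch {
  volatile unsigned char _clock;
 public:
  void set_clock(unsigned char c) { OrderAccess::release_store(&_clock, c); }
  unsigned char clock()           { return OrderAccess::load_acquire(&_clock); }
};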
// Processing of oop references should have been sufficient to keep

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -270,5 +270,5 @@ bool DependencyContext::find_stale_entries() {
#endif //PRODUCT
int nmethodBucket::decrement() {
return Atomic::add(-1, (volatile int *)&_count);
return Atomic::sub(1, &_count);
}
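A hedged sketch of the typed Atomic calls adopted here, using a hypothetical counter type: Atomic::sub (like Atomic::add) takes the destination at its declared type, so both the (volatile int *) cast and the add-of-negative-one idiom go away.

#include "runtime/atomic.hpp"

// Sketch only, mirrors nmethodBucket::decrement above.
class RefCountSketch {
  volatile int _count;
 public:
  RefCountSketch() : _count(0) { }
  int increment() { return Atomic::add(1, &_count); }
  int decrement() { return Atomic::sub(1, &_count); }
};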

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -75,10 +75,6 @@ public:
// supports. Caller does not hold the Heap_lock on entry.
void collect(GCCause::Cause cause);
bool card_mark_must_follow_store() const {
return true;
}
void stop();
void safepoint_synchronize_begin();
void safepoint_synchronize_end();

View File

@ -138,7 +138,7 @@ void CollectionSetChooser::sort_regions() {
G1PrintRegionLivenessInfoClosure cl("Post-Sorting");
for (uint i = 0; i < _end; ++i) {
HeapRegion* r = regions_at(i);
cl.doHeapRegion(r);
cl.do_heap_region(r);
}
}
verify();
@ -220,7 +220,7 @@ public:
_g1h(G1CollectedHeap::heap()),
_cset_updater(hrSorted, true /* parallel */, chunk_size) { }
bool doHeapRegion(HeapRegion* r) {
bool do_heap_region(HeapRegion* r) {
// Do we have any marking information for this region?
if (r->is_marked()) {
// We will skip any region that's currently used as an old GC

View File

@ -134,9 +134,6 @@ void G1DefaultAllocator::set_old_full(AllocationContext_t context) {
_old_is_full = true;
}
G1PLAB::G1PLAB(size_t gclab_word_size) :
PLAB(gclab_word_size), _retired(true) { }
size_t G1Allocator::unsafe_max_tlab_alloc(AllocationContext_t context) {
// Return the remaining space in the cur alloc region, but not less than
// the min TLAB size.
@ -253,7 +250,7 @@ HeapWord* G1PLABAllocator::allocate_direct_or_new_plab(InCSetState dest,
if ((required_in_plab <= plab_word_size) &&
may_throw_away_buffer(required_in_plab, plab_word_size)) {
G1PLAB* alloc_buf = alloc_buffer(dest, context);
PLAB* alloc_buf = alloc_buffer(dest, context);
alloc_buf->retire();
size_t actual_plab_size = 0;
@ -304,7 +301,7 @@ G1DefaultPLABAllocator::G1DefaultPLABAllocator(G1Allocator* allocator) :
void G1DefaultPLABAllocator::flush_and_retire_stats() {
for (uint state = 0; state < InCSetState::Num; state++) {
G1PLAB* const buf = _alloc_buffers[state];
PLAB* const buf = _alloc_buffers[state];
if (buf != NULL) {
G1EvacStats* stats = _g1h->alloc_buffer_stats(state);
buf->flush_and_retire_stats(stats);
@ -318,7 +315,7 @@ void G1DefaultPLABAllocator::waste(size_t& wasted, size_t& undo_wasted) {
wasted = 0;
undo_wasted = 0;
for (uint state = 0; state < InCSetState::Num; state++) {
G1PLAB * const buf = _alloc_buffers[state];
PLAB * const buf = _alloc_buffers[state];
if (buf != NULL) {
wasted += buf->waste();
undo_wasted += buf->undo_waste();

View File

@ -178,39 +178,6 @@ public:
}
};
class G1PLAB: public PLAB {
private:
bool _retired;
public:
G1PLAB(size_t gclab_word_size);
virtual ~G1PLAB() {
guarantee(_retired, "Allocation buffer has not been retired");
}
// The amount of space in words wasted within the PLAB including
// waste due to refills and alignment.
size_t wasted() const { return _wasted; }
virtual void set_buf(HeapWord* buf, size_t word_size) {
PLAB::set_buf(buf, word_size);
_retired = false;
}
virtual void retire() {
if (_retired) {
return;
}
PLAB::retire();
_retired = true;
}
virtual void flush_and_retire_stats(PLABStats* stats) {
PLAB::flush_and_retire_stats(stats);
_retired = true;
}
};
// Manages the PLABs used during garbage collection. Interface for allocation from PLABs.
// Needs to handle multiple contexts, extra alignment in any "survivor" area and some
// statistics.
@ -231,7 +198,7 @@ protected:
size_t _direct_allocated[InCSetState::Num];
virtual void flush_and_retire_stats() = 0;
virtual G1PLAB* alloc_buffer(InCSetState dest, AllocationContext_t context) = 0;
virtual PLAB* alloc_buffer(InCSetState dest, AllocationContext_t context) = 0;
// Calculate the survivor space object alignment in bytes. Returns that or 0 if
// there are no restrictions on survivor alignment.
@ -292,14 +259,14 @@ public:
// The default PLAB allocator for G1. Keeps the current (single) PLAB for survivor
// and old generation allocation.
class G1DefaultPLABAllocator : public G1PLABAllocator {
G1PLAB _surviving_alloc_buffer;
G1PLAB _tenured_alloc_buffer;
G1PLAB* _alloc_buffers[InCSetState::Num];
PLAB _surviving_alloc_buffer;
PLAB _tenured_alloc_buffer;
PLAB* _alloc_buffers[InCSetState::Num];
public:
G1DefaultPLABAllocator(G1Allocator* _allocator);
virtual G1PLAB* alloc_buffer(InCSetState dest, AllocationContext_t context) {
virtual PLAB* alloc_buffer(InCSetState dest, AllocationContext_t context) {
assert(dest.is_valid(),
"Allocation buffer index out-of-bounds: " CSETSTATE_FORMAT, dest.value());
assert(_alloc_buffers[dest.value()] != NULL,

View File

@ -47,7 +47,7 @@ HeapWord* G1Allocator::attempt_allocation_force(size_t word_size, AllocationCont
inline HeapWord* G1PLABAllocator::plab_allocate(InCSetState dest,
size_t word_sz,
AllocationContext_t context) {
G1PLAB* buffer = alloc_buffer(dest, context);
PLAB* buffer = alloc_buffer(dest, context);
if (_survivor_alignment_bytes == 0 || !dest.is_young()) {
return buffer->allocate(word_sz);
} else {

View File

@ -144,7 +144,7 @@ class G1CardCountsClearClosure : public HeapRegionClosure {
HeapRegionClosure(), _card_counts(card_counts) { }
virtual bool doHeapRegion(HeapRegion* r) {
virtual bool do_heap_region(HeapRegion* r) {
_card_counts->clear_region(r);
return false;
}

View File

@ -285,7 +285,7 @@ class G1CreateCardLiveDataTask: public AbstractGangTask {
_mark_bitmap(mark_bitmap),
_cm(cm) { }
bool doHeapRegion(HeapRegion* hr) {
bool do_heap_region(HeapRegion* hr) {
size_t marked_bytes = _helper.mark_marked_during_marking(_mark_bitmap, hr);
if (marked_bytes > 0) {
hr->add_to_marked_bytes(marked_bytes);
@ -352,7 +352,7 @@ class G1FinalizeCardLiveDataTask: public AbstractGangTask {
_helper(live_data, g1h->reserved_region().start()),
_gc_timestamp_at_create(live_data->gc_timestamp_at_create()) { }
bool doHeapRegion(HeapRegion* hr) {
bool do_heap_region(HeapRegion* hr) {
if (has_been_reclaimed(hr)) {
_helper.reset_live_data(hr);
}
@ -478,7 +478,7 @@ class G1VerifyCardLiveDataTask: public AbstractGangTask {
int failures() const { return _failures; }
bool doHeapRegion(HeapRegion* hr) {
bool do_heap_region(HeapRegion* hr) {
int failures = 0;
// Walk the marking bitmap for this region and set the corresponding bits

View File

@ -1010,7 +1010,7 @@ class PostCompactionPrinterClosure: public HeapRegionClosure {
private:
G1HRPrinter* _hr_printer;
public:
bool doHeapRegion(HeapRegion* hr) {
bool do_heap_region(HeapRegion* hr) {
assert(!hr->is_young(), "not expecting to find young regions");
_hr_printer->post_compaction(hr);
return false;
@ -1573,7 +1573,6 @@ jint G1CollectedHeap::initialize_young_gen_sampling_thread() {
}
jint G1CollectedHeap::initialize() {
CollectedHeap::pre_initialize();
os::enable_vtime();
// Necessary to satisfy locking discipline assertions.
@ -1917,7 +1916,7 @@ public:
CheckGCTimeStampsHRClosure(unsigned gc_time_stamp) :
_gc_time_stamp(gc_time_stamp), _failures(false) { }
virtual bool doHeapRegion(HeapRegion* hr) {
virtual bool do_heap_region(HeapRegion* hr) {
unsigned region_gc_time_stamp = hr->get_gc_time_stamp();
if (_gc_time_stamp != region_gc_time_stamp) {
log_error(gc, verify)("Region " HR_FORMAT " has GC time stamp = %d, expected %d", HR_FORMAT_PARAMS(hr),
@ -1969,7 +1968,7 @@ class SumUsedClosure: public HeapRegionClosure {
size_t _used;
public:
SumUsedClosure() : _used(0) {}
bool doHeapRegion(HeapRegion* r) {
bool do_heap_region(HeapRegion* r) {
_used += r->used();
return false;
}
@ -2188,7 +2187,7 @@ class IterateObjectClosureRegionClosure: public HeapRegionClosure {
ObjectClosure* _cl;
public:
IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
bool doHeapRegion(HeapRegion* r) {
bool do_heap_region(HeapRegion* r) {
if (!r->is_continues_humongous()) {
r->object_iterate(_cl);
}
@ -2303,7 +2302,7 @@ class PrintRegionClosure: public HeapRegionClosure {
outputStream* _st;
public:
PrintRegionClosure(outputStream* st) : _st(st) {}
bool doHeapRegion(HeapRegion* r) {
bool do_heap_region(HeapRegion* r) {
r->print_on(_st);
return false;
}
@ -2422,7 +2421,7 @@ private:
size_t _occupied_sum;
public:
bool doHeapRegion(HeapRegion* r) {
bool do_heap_region(HeapRegion* r) {
HeapRegionRemSet* hrrs = r->rem_set();
size_t occupied = hrrs->occupied();
_occupied_sum += occupied;
@ -2669,7 +2668,7 @@ class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
_dcq(&JavaThread::dirty_card_queue_set()) {
}
virtual bool doHeapRegion(HeapRegion* r) {
virtual bool do_heap_region(HeapRegion* r) {
if (!r->is_starts_humongous()) {
return false;
}
@ -2745,7 +2744,7 @@ void G1CollectedHeap::register_humongous_regions_with_cset() {
class VerifyRegionRemSetClosure : public HeapRegionClosure {
public:
bool doHeapRegion(HeapRegion* hr) {
bool do_heap_region(HeapRegion* hr) {
if (!hr->is_archive() && !hr->is_continues_humongous()) {
hr->verify_rem_set();
}
@ -2815,7 +2814,7 @@ private:
public:
G1PrintCollectionSetClosure(G1HRPrinter* hr_printer) : HeapRegionClosure(), _hr_printer(hr_printer) { }
virtual bool doHeapRegion(HeapRegion* r) {
virtual bool do_heap_region(HeapRegion* r) {
_hr_printer->cset(r);
return false;
}
@ -4505,7 +4504,7 @@ private:
_local_free_list("Local Region List for CSet Freeing") {
}
virtual bool doHeapRegion(HeapRegion* r) {
virtual bool do_heap_region(HeapRegion* r) {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
assert(r->in_collection_set(), "Region %u should be in collection set.", r->hrm_index());
@ -4628,7 +4627,7 @@ private:
public:
G1PrepareFreeCollectionSetClosure(WorkItem* work_items) : HeapRegionClosure(), _cur_idx(0), _work_items(work_items) { }
virtual bool doHeapRegion(HeapRegion* r) {
virtual bool do_heap_region(HeapRegion* r) {
_work_items[_cur_idx++] = WorkItem(r);
return false;
}
@ -4762,7 +4761,7 @@ class G1FreeHumongousRegionClosure : public HeapRegionClosure {
_free_region_list(free_region_list), _humongous_objects_reclaimed(0), _humongous_regions_reclaimed(0), _freed_bytes(0) {
}
virtual bool doHeapRegion(HeapRegion* r) {
virtual bool do_heap_region(HeapRegion* r) {
if (!r->is_starts_humongous()) {
return false;
}
@ -4897,7 +4896,7 @@ void G1CollectedHeap::eagerly_reclaim_humongous_regions() {
class G1AbandonCollectionSetClosure : public HeapRegionClosure {
public:
virtual bool doHeapRegion(HeapRegion* r) {
virtual bool do_heap_region(HeapRegion* r) {
assert(r->in_collection_set(), "Region %u must have been in collection set", r->hrm_index());
G1CollectedHeap::heap()->clear_in_cset(r);
r->set_young_index_in_cset(-1);
@ -4967,7 +4966,7 @@ private:
bool _success;
public:
NoYoungRegionsClosure() : _success(true) { }
bool doHeapRegion(HeapRegion* r) {
bool do_heap_region(HeapRegion* r) {
if (r->is_young()) {
log_error(gc, verify)("Region [" PTR_FORMAT ", " PTR_FORMAT ") tagged as young",
p2i(r->bottom()), p2i(r->end()));
@ -4997,7 +4996,7 @@ private:
public:
TearDownRegionSetsClosure(HeapRegionSet* old_set) : _old_set(old_set) { }
bool doHeapRegion(HeapRegion* r) {
bool do_heap_region(HeapRegion* r) {
if (r->is_old()) {
_old_set->remove(r);
} else if(r->is_young()) {
@ -5065,7 +5064,7 @@ public:
}
}
bool doHeapRegion(HeapRegion* r) {
bool do_heap_region(HeapRegion* r) {
if (r->is_empty()) {
// Add free regions to the free list
r->set_free();

View File

@ -1197,7 +1197,7 @@ public:
}
// Iterate over heap regions, in address order, terminating the
// iteration early if the "doHeapRegion" method returns "true".
// iteration early if the "do_heap_region" method returns "true".
void heap_region_iterate(HeapRegionClosure* blk) const;
// Return the region with the given index. It assumes the index is valid.
@ -1272,36 +1272,8 @@ public:
size_t max_tlab_size() const;
size_t unsafe_max_tlab_alloc(Thread* ignored) const;
// Can a compiler initialize a new object without store barriers?
// This permission only extends from the creation of a new object
// via a TLAB up to the first subsequent safepoint. If such permission
// is granted for this heap type, the compiler promises to call
// defer_store_barrier() below on any slow path allocation of
// a new object for which such initializing store barriers will
// have been elided. G1, like CMS, allows this, but should be
// ready to provide a compensating write barrier as necessary
// if that storage came out of a non-young region. The efficiency
// of this implementation depends crucially on being able to
// answer very efficiently in constant time whether a piece of
// storage in the heap comes from a young region or not.
// See ReduceInitialCardMarks.
virtual bool can_elide_tlab_store_barriers() const {
return true;
}
virtual bool card_mark_must_follow_store() const {
return true;
}
inline bool is_in_young(const oop obj);
// We don't need barriers for initializing stores to objects
// in the young gen: for the SATB pre-barrier, there is no
// pre-value that needs to be remembered; for the remembered-set
// update logging post-barrier, we don't maintain remembered set
// information for young gen objects.
virtual inline bool can_elide_initializing_store_barrier(oop new_obj);
// Returns "true" iff the given word_size is "very large".
static bool is_humongous(size_t word_size) {
// Note this has to be strictly greater-than as the TLABs

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -241,15 +241,6 @@ inline bool G1CollectedHeap::is_in_young(const oop obj) {
return heap_region_containing(obj)->is_young();
}
// We don't need barriers for initializing stores to objects
// in the young gen: for the SATB pre-barrier, there is no
// pre-value that needs to be remembered; for the remembered-set
// update logging post-barrier, we don't maintain remembered set
// information for young gen objects.
inline bool G1CollectedHeap::can_elide_initializing_store_barrier(oop new_obj) {
return is_in_young(new_obj);
}
inline bool G1CollectedHeap::is_obj_dead(const oop obj) const {
if (obj == NULL) {
return false;

View File

@ -186,9 +186,9 @@ void G1CollectionSet::iterate_from(HeapRegionClosure* cl, uint worker_id, uint t
do {
HeapRegion* r = G1CollectedHeap::heap()->region_at(_collection_set_regions[cur_pos]);
bool result = cl->doHeapRegion(r);
bool result = cl->do_heap_region(r);
if (result) {
cl->incomplete();
cl->set_incomplete();
return;
}
cur_pos++;
@ -292,7 +292,7 @@ public:
public:
G1VerifyYoungAgesClosure() : HeapRegionClosure(), _valid(true) { }
virtual bool doHeapRegion(HeapRegion* r) {
virtual bool do_heap_region(HeapRegion* r) {
guarantee(r->is_young(), "Region must be young but is %s", r->get_type_str());
SurvRateGroup* group = r->surv_rate_group();
@ -332,7 +332,7 @@ class G1PrintCollectionSetClosure : public HeapRegionClosure {
public:
G1PrintCollectionSetClosure(outputStream* st) : HeapRegionClosure(), _st(st) { }
virtual bool doHeapRegion(HeapRegion* r) {
virtual bool do_heap_region(HeapRegion* r) {
assert(r->in_collection_set(), "Region %u should be in collection set", r->hrm_index());
_st->print_cr(" " HR_FORMAT ", P: " PTR_FORMAT "N: " PTR_FORMAT ", age: %4d",
HR_FORMAT_PARAMS(r),
@ -524,7 +524,7 @@ public:
FREE_C_HEAP_ARRAY(int, _heap_region_indices);
}
virtual bool doHeapRegion(HeapRegion* r) {
virtual bool do_heap_region(HeapRegion* r) {
const int idx = r->young_index_in_cset();
assert(idx > -1, "Young index must be set for all regions in the incremental collection set but is not for region %u.", r->hrm_index());

View File

@ -591,7 +591,7 @@ private:
G1ClearBitmapHRClosure(G1CMBitMap* bitmap, G1ConcurrentMark* cm) : HeapRegionClosure(), _cm(cm), _bitmap(bitmap) {
}
virtual bool doHeapRegion(HeapRegion* r) {
virtual bool do_heap_region(HeapRegion* r) {
size_t const chunk_size_in_words = G1ClearBitMapTask::chunk_size() / HeapWordSize;
HeapWord* cur = r->bottom();
@ -638,7 +638,7 @@ public:
}
bool is_complete() {
return _cl.complete();
return _cl.is_complete();
}
};
@ -694,7 +694,7 @@ class CheckBitmapClearHRClosure : public HeapRegionClosure {
CheckBitmapClearHRClosure(G1CMBitMap* bitmap) : _bitmap(bitmap) {
}
virtual bool doHeapRegion(HeapRegion* r) {
virtual bool do_heap_region(HeapRegion* r) {
// This closure can be called concurrently to the mutator, so we must make sure
// that the result of the getNextMarkedWordAddress() call is compared to the
// value passed to it as limit to detect any found bits.
@ -707,12 +707,12 @@ class CheckBitmapClearHRClosure : public HeapRegionClosure {
bool G1ConcurrentMark::next_mark_bitmap_is_clear() {
CheckBitmapClearHRClosure cl(_next_mark_bitmap);
_g1h->heap_region_iterate(&cl);
return cl.complete();
return cl.is_complete();
}
class NoteStartOfMarkHRClosure: public HeapRegionClosure {
public:
bool doHeapRegion(HeapRegion* r) {
bool do_heap_region(HeapRegion* r) {
r->note_start_of_marking();
return false;
}
@ -1094,7 +1094,7 @@ public:
const uint old_regions_removed() { return _old_regions_removed; }
const uint humongous_regions_removed() { return _humongous_regions_removed; }
bool doHeapRegion(HeapRegion *hr) {
bool do_heap_region(HeapRegion *hr) {
_g1->reset_gc_time_stamps(hr);
hr->note_end_of_marking();
@ -1135,7 +1135,7 @@ public:
G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list,
&hrrs_cleanup_task);
_g1h->heap_region_par_iterate_from_worker_offset(&g1_note_end, &_hrclaimer, worker_id);
assert(g1_note_end.complete(), "Shouldn't have yielded!");
assert(g1_note_end.is_complete(), "Shouldn't have yielded!");
// Now update the lists
_g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed());
@ -2922,7 +2922,7 @@ G1PrintRegionLivenessInfoClosure::G1PrintRegionLivenessInfoClosure(const char* p
"(bytes)", "(bytes)");
}
bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
bool G1PrintRegionLivenessInfoClosure::do_heap_region(HeapRegion* r) {
const char* type = r->get_type_str();
HeapWord* bottom = r->bottom();
HeapWord* end = r->end();

View File

@ -848,7 +848,7 @@ public:
// The header and footer are printed in the constructor and
// destructor respectively.
G1PrintRegionLivenessInfoClosure(const char* phase_name);
virtual bool doHeapRegion(HeapRegion* r);
virtual bool do_heap_region(HeapRegion* r);
~G1PrintRegionLivenessInfoClosure();
};

View File

@ -220,7 +220,7 @@ public:
return rspc.marked_bytes();
}
bool doHeapRegion(HeapRegion *hr) {
bool do_heap_region(HeapRegion *hr) {
assert(!hr->is_pinned(), "Unexpected pinned region at index %u", hr->hrm_index());
assert(hr->in_collection_set(), "bad CS");

View File

@ -56,7 +56,7 @@ class G1AdjustRegionClosure : public HeapRegionClosure {
_bitmap(bitmap),
_worker_id(worker_id) { }
bool doHeapRegion(HeapRegion* r) {
bool do_heap_region(HeapRegion* r) {
G1AdjustAndRebuildClosure cl(_worker_id);
if (r->is_humongous()) {
oop obj = oop(r->humongous_start_region()->bottom());

View File

@ -40,7 +40,7 @@ public:
G1ResetHumongousClosure(G1CMBitMap* bitmap) :
_bitmap(bitmap) { }
bool doHeapRegion(HeapRegion* current) {
bool do_heap_region(HeapRegion* current) {
if (current->is_humongous()) {
if (current->is_starts_humongous()) {
oop obj = oop(current->bottom());

View File

@ -37,7 +37,7 @@
#include "logging/log.hpp"
#include "utilities/ticks.inline.hpp"
bool G1FullGCPrepareTask::G1CalculatePointersClosure::doHeapRegion(HeapRegion* hr) {
bool G1FullGCPrepareTask::G1CalculatePointersClosure::do_heap_region(HeapRegion* hr) {
if (hr->is_humongous()) {
oop obj = oop(hr->humongous_start_region()->bottom());
if (_bitmap->is_marked(obj)) {

View File

@ -67,7 +67,7 @@ protected:
G1FullGCCompactionPoint* cp);
void update_sets();
bool doHeapRegion(HeapRegion* hr);
bool do_heap_region(HeapRegion* hr);
bool freed_regions();
};

View File

@ -58,7 +58,7 @@ struct DetailedUsage : public StackObj {
class DetailedUsageClosure: public HeapRegionClosure {
public:
DetailedUsage _usage;
bool doHeapRegion(HeapRegion* r) {
bool do_heap_region(HeapRegion* r) {
if (r->is_old()) {
_usage._old_used += r->used();
_usage._old_region_count++;

View File

@ -273,7 +273,7 @@ private:
G1CollectedHeap* _g1h;
public:
VerifyArchivePointerRegionClosure(G1CollectedHeap* g1h) { }
virtual bool doHeapRegion(HeapRegion* r) {
virtual bool do_heap_region(HeapRegion* r) {
if (r->is_archive()) {
VerifyObjectInArchiveRegionClosure verify_oop_pointers(r, false);
r->object_iterate(&verify_oop_pointers);
@ -306,7 +306,7 @@ public:
return _failures;
}
bool doHeapRegion(HeapRegion* r) {
bool do_heap_region(HeapRegion* r) {
// For archive regions, verify there are no heap pointers to
// non-pinned regions. For all others, verify liveness info.
if (r->is_closed_archive()) {
@ -498,7 +498,7 @@ public:
_old_set(old_set), _humongous_set(humongous_set), _hrm(hrm),
_old_count(), _humongous_count(), _free_count(){ }
bool doHeapRegion(HeapRegion* hr) {
bool do_heap_region(HeapRegion* hr) {
if (hr->is_young()) {
// TODO
} else if (hr->is_humongous()) {
@ -608,7 +608,7 @@ class G1VerifyCardTableCleanup: public HeapRegionClosure {
public:
G1VerifyCardTableCleanup(G1HeapVerifier* verifier, G1SATBCardTableModRefBS* ct_bs)
: _verifier(verifier), _ct_bs(ct_bs) { }
virtual bool doHeapRegion(HeapRegion* r) {
virtual bool do_heap_region(HeapRegion* r) {
if (r->is_survivor()) {
_verifier->verify_dirty_region(r);
} else {
@ -654,7 +654,7 @@ private:
G1HeapVerifier* _verifier;
public:
G1VerifyDirtyYoungListClosure(G1HeapVerifier* verifier) : HeapRegionClosure(), _verifier(verifier) { }
virtual bool doHeapRegion(HeapRegion* r) {
virtual bool do_heap_region(HeapRegion* r) {
_verifier->verify_dirty_region(r);
return false;
}
@ -721,7 +721,7 @@ public:
bool failures() { return _failures; }
virtual bool doHeapRegion(HeapRegion* hr) {
virtual bool do_heap_region(HeapRegion* hr) {
bool result = _verifier->verify_bitmaps(_caller, hr);
if (!result) {
_failures = true;
@ -744,7 +744,7 @@ class G1CheckCSetFastTableClosure : public HeapRegionClosure {
public:
G1CheckCSetFastTableClosure() : HeapRegionClosure(), _failures(false) { }
virtual bool doHeapRegion(HeapRegion* hr) {
virtual bool do_heap_region(HeapRegion* hr) {
uint i = hr->hrm_index();
InCSetState cset_state = (InCSetState) G1CollectedHeap::heap()->_in_cset_fast_test.get_by_index(i);
if (hr->is_humongous()) {

View File

@ -206,7 +206,7 @@ void G1ParScanThreadState::report_promotion_event(InCSetState const dest_state,
oop const old, size_t word_sz, uint age,
HeapWord * const obj_ptr,
const AllocationContext_t context) const {
G1PLAB* alloc_buf = _plab_allocator->alloc_buffer(dest_state, context);
PLAB* alloc_buf = _plab_allocator->alloc_buffer(dest_state, context);
if (alloc_buf->contains(obj_ptr)) {
_g1h->_gc_tracer_stw->report_promotion_in_new_plab_event(old->klass(), word_sz, age,
dest_state.value() == InCSetState::Old,

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -127,7 +127,7 @@ private:
public:
G1ResetScanTopClosure(HeapWord** scan_top) : _scan_top(scan_top) { }
virtual bool doHeapRegion(HeapRegion* r) {
virtual bool do_heap_region(HeapRegion* r) {
uint hrm_index = r->hrm_index();
if (!r->in_collection_set() && r->is_old_or_humongous()) {
_scan_top[hrm_index] = r->top();
@ -204,7 +204,7 @@ public:
if (_iter_states[region] != Unclaimed) {
return false;
}
jint res = Atomic::cmpxchg(Claimed, (jint*)(&_iter_states[region]), Unclaimed);
G1RemsetIterState res = Atomic::cmpxchg(Claimed, &_iter_states[region], Unclaimed);
return (res == Unclaimed);
}
@ -214,7 +214,7 @@ public:
if (iter_is_complete(region)) {
return false;
}
jint res = Atomic::cmpxchg(Complete, (jint*)(&_iter_states[region]), Claimed);
G1RemsetIterState res = Atomic::cmpxchg(Complete, &_iter_states[region], Claimed);
return (res == Claimed);
}
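A hedged sketch of the claim protocol implemented by the two hunks above, written with plain int constants so it stays self-contained (the patched code uses the G1RemsetIterState enum directly now that no jint* cast is required): Atomic::cmpxchg returns the previous value, so a transition succeeded only when that value equals the expected old state.

#include "runtime/atomic.hpp"

// Sketch only: Unclaimed -> Claimed -> Complete, one worker wins each transition.
static const int SketchUnclaimed = 0;
static const int SketchClaimed   = 1;
static const int SketchComplete  = 2;
static volatile int _sketch_state = SketchUnclaimed;

static bool sketch_try_claim() {
  int res = Atomic::cmpxchg(SketchClaimed, &_sketch_state, SketchUnclaimed);
  return res == SketchUnclaimed;
}

static bool sketch_set_complete() {
  int res = Atomic::cmpxchg(SketchComplete, &_sketch_state, SketchClaimed);
  return res == SketchClaimed;
}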
@ -349,7 +349,7 @@ void G1ScanRSForRegionClosure::claim_card(size_t card_index, const uint region_i
_scan_state->add_dirty_region(region_idx_for_card);
}
bool G1ScanRSForRegionClosure::doHeapRegion(HeapRegion* r) {
bool G1ScanRSForRegionClosure::do_heap_region(HeapRegion* r) {
assert(r->in_collection_set(), "should only be called on elements of CS.");
uint region_idx = r->hrm_index();
@ -522,7 +522,7 @@ public:
_g1h(G1CollectedHeap::heap()),
_live_data(live_data) { }
bool doHeapRegion(HeapRegion* r) {
bool do_heap_region(HeapRegion* r) {
if (!r->is_continues_humongous()) {
r->rem_set()->scrub(_live_data);
}

View File

@ -176,7 +176,7 @@ public:
CodeBlobClosure* code_root_cl,
uint worker_i);
bool doHeapRegion(HeapRegion* r);
bool do_heap_region(HeapRegion* r);
double strong_code_root_scan_time_sec() {
return _strong_code_root_scan_time_sec;

View File

@ -252,7 +252,7 @@ public:
_max_rs_mem_sz(0), _max_code_root_mem_sz(0)
{}
bool doHeapRegion(HeapRegion* r) {
bool do_heap_region(HeapRegion* r) {
HeapRegionRemSet* hrrs = r->rem_set();
// HeapRegionRemSet::mem_size() includes the

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -131,6 +131,7 @@ G1SATBCardTableLoggingModRefBS(MemRegion whole_heap) :
}
void G1SATBCardTableLoggingModRefBS::initialize(G1RegionToSpaceMapper* mapper) {
initialize_deferred_card_mark_barriers();
mapper->set_mapping_changed_listener(&_listener);
_byte_map_size = mapper->reserved().byte_size();
@ -213,3 +214,14 @@ G1SATBCardTableLoggingModRefBS::invalidate(MemRegion mr) {
}
}
}
bool G1SATBCardTableModRefBS::is_in_young(oop obj) const {
volatile jbyte* p = byte_for((void*)obj);
return *p == g1_young_card_val();
}
void G1SATBCardTableLoggingModRefBS::flush_deferred_barriers(JavaThread* thread) {
CardTableModRefBS::flush_deferred_barriers(thread);
thread->satb_mark_queue().flush();
thread->dirty_card_queue().flush();
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -92,6 +92,8 @@ public:
jbyte val = _byte_map[card_index];
return (val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val();
}
virtual bool is_in_young(oop obj) const;
};
template<>
@ -145,13 +147,19 @@ class G1SATBCardTableLoggingModRefBS: public G1SATBCardTableModRefBS {
// above no longer applies.
void invalidate(MemRegion mr);
void write_region_work(MemRegion mr) { invalidate(mr); }
void write_region(MemRegion mr) { invalidate(mr); }
void write_ref_array_work(MemRegion mr) { invalidate(mr); }
template <DecoratorSet decorators, typename T>
void write_ref_field_post(T* field, oop new_val);
void write_ref_field_post_slow(volatile jbyte* byte);
virtual void flush_deferred_barriers(JavaThread* thread);
virtual bool card_mark_must_follow_store() const {
return true;
}
// Callbacks for runtime accesses.
template <DecoratorSet decorators, typename BarrierSetT = G1SATBCardTableLoggingModRefBS>
class AccessBarrier: public ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT> {

View File

@ -30,7 +30,7 @@
template <DecoratorSet decorators, typename T>
inline void G1SATBCardTableModRefBS::write_ref_field_pre(T* field) {
if (HasDecorator<decorators, ARRAYCOPY_DEST_NOT_INITIALIZED>::value ||
if (HasDecorator<decorators, AS_DEST_NOT_INITIALIZED>::value ||
HasDecorator<decorators, AS_NO_KEEPALIVE>::value) {
return;
}

View File

@ -79,7 +79,7 @@ public:
G1YoungRemSetSamplingClosure(SuspendibleThreadSetJoiner* sts) :
HeapRegionClosure(), _sts(sts), _regions_visited(0), _sampled_rs_lengths(0) { }
virtual bool doHeapRegion(HeapRegion* r) {
virtual bool do_heap_region(HeapRegion* r) {
size_t rs_length = r->rem_set()->occupied();
_sampled_rs_lengths += rs_length;
@ -114,7 +114,7 @@ void G1YoungRemSetSamplingThread::sample_young_list_rs_lengths() {
G1CollectionSet* g1cs = g1h->collection_set();
g1cs->iterate(&cl);
if (cl.complete()) {
if (cl.is_complete()) {
g1p->revise_young_list_target_length_if_necessary(cl.sampled_rs_lengths());
}
}

View File

@ -719,23 +719,23 @@ class HeapRegion: public G1ContiguousSpace {
};
// HeapRegionClosure is used for iterating over regions.
// Terminates the iteration when the "doHeapRegion" method returns "true".
// Terminates the iteration when the "do_heap_region" method returns "true".
class HeapRegionClosure : public StackObj {
friend class HeapRegionManager;
friend class G1CollectionSet;
bool _complete;
void incomplete() { _complete = false; }
bool _is_complete;
void set_incomplete() { _is_complete = false; }
public:
HeapRegionClosure(): _complete(true) {}
HeapRegionClosure(): _is_complete(true) {}
// Typically called on each region until it returns true.
virtual bool doHeapRegion(HeapRegion* r) = 0;
virtual bool do_heap_region(HeapRegion* r) = 0;
// True after iteration if the closure was applied to all heap regions
// and returned "false" in all cases.
bool complete() { return _complete; }
bool is_complete() { return _is_complete; }
};
#endif // SHARE_VM_GC_G1_HEAPREGION_HPP
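To make the renamed interface concrete, a hedged sketch of a closure under the new names, modeled on the SumUsedClosure seen earlier in this diff; after iteration the caller can ask is_complete() to find out whether any region terminated the walk early.

// Sketch only, not part of the patch.
class SumUsedSketchClosure : public HeapRegionClosure {
  size_t _used;
 public:
  SumUsedSketchClosure() : _used(0) { }
  virtual bool do_heap_region(HeapRegion* r) {
    _used += r->used();
    return false;                 // false: keep visiting the remaining regions
  }
  size_t used() const { return _used; }
};

// Usage sketch:
//   SumUsedSketchClosure cl;
//   G1CollectedHeap::heap()->heap_region_iterate(&cl);
//   assert(cl.is_complete(), "no region asked to terminate the iteration");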

View File

@ -242,9 +242,9 @@ void HeapRegionManager::iterate(HeapRegionClosure* blk) const {
continue;
}
guarantee(at(i) != NULL, "Tried to access region %u that has a NULL HeapRegion*", i);
bool res = blk->doHeapRegion(at(i));
bool res = blk->do_heap_region(at(i));
if (res) {
blk->incomplete();
blk->set_incomplete();
return;
}
}
@ -353,7 +353,7 @@ void HeapRegionManager::par_iterate(HeapRegionClosure* blk, HeapRegionClaimer* h
if (!hrclaimer->claim_region(index)) {
continue;
}
bool res = blk->doHeapRegion(r);
bool res = blk->do_heap_region(r);
if (res) {
return;
}

View File

@ -236,8 +236,8 @@ public:
// and not free, and return the number of regions newly committed in commit_count.
bool allocate_containing_regions(MemRegion range, size_t* commit_count, WorkGang* pretouch_workers);
// Apply blk->doHeapRegion() on all committed regions in address order,
// terminating the iteration early if doHeapRegion() returns true.
// Apply blk->do_heap_region() on all committed regions in address order,
// terminating the iteration early if do_heap_region() returns true.
void iterate(HeapRegionClosure* blk) const;
void par_iterate(HeapRegionClosure* blk, HeapRegionClaimer* hrclaimer, const uint start_index) const;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,7 +26,7 @@
#include "gc/parallel/cardTableExtension.hpp"
#include "gc/parallel/gcTaskManager.hpp"
#include "gc/parallel/objectStartArray.inline.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/parallelScavengeHeap.inline.hpp"
#include "gc/parallel/psPromotionManager.inline.hpp"
#include "gc/parallel/psScavenge.hpp"
#include "gc/parallel/psTasks.hpp"
@ -677,3 +677,7 @@ HeapWord* CardTableExtension::lowest_prev_committed_start(int ind) const {
}
return min_start;
}
bool CardTableExtension::is_in_young(oop obj) const {
return ParallelScavengeHeap::heap()->is_in_young(obj);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -108,6 +108,13 @@ class CardTableExtension : public CardTableModRefBS {
}
#endif // ASSERT
// ReduceInitialCardMarks support
virtual bool is_in_young(oop obj) const;
virtual bool card_mark_must_follow_store() const {
return false;
}
};
template<>

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -57,8 +57,6 @@ PSGCAdaptivePolicyCounters* ParallelScavengeHeap::_gc_policy_counters = NULL;
GCTaskManager* ParallelScavengeHeap::_gc_task_manager = NULL;
jint ParallelScavengeHeap::initialize() {
CollectedHeap::pre_initialize();
const size_t heap_size = _collector_policy->max_heap_byte_size();
ReservedSpace heap_rs = Universe::reserve_heap(heap_size, _collector_policy->heap_alignment());
@ -490,13 +488,6 @@ void ParallelScavengeHeap::resize_all_tlabs() {
CollectedHeap::resize_all_tlabs();
}
bool ParallelScavengeHeap::can_elide_initializing_store_barrier(oop new_obj) {
// We don't need barriers for stores to objects in the
// young gen and, a fortiori, for initializing stores to
// objects therein.
return is_in_young(new_obj);
}
// This method is used by System.gc() and JVMTI.
void ParallelScavengeHeap::collect(GCCause::Cause cause) {
assert(!Heap_lock->owned_by_self(),
@ -719,4 +710,3 @@ GrowableArray<MemoryPool*> ParallelScavengeHeap::memory_pools() {
memory_pools.append(_old_pool);
return memory_pools;
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -205,21 +205,6 @@ class ParallelScavengeHeap : public CollectedHeap {
size_t tlab_used(Thread* thr) const;
size_t unsafe_max_tlab_alloc(Thread* thr) const;
// Can a compiler initialize a new object without store barriers?
// This permission only extends from the creation of a new object
// via a TLAB up to the first subsequent safepoint.
virtual bool can_elide_tlab_store_barriers() const {
return true;
}
virtual bool card_mark_must_follow_store() const {
return false;
}
// Return true if we don't we need a store barrier for
// initializing stores to an object at this address.
virtual bool can_elide_initializing_store_barrier(oop new_obj);
void object_iterate(ObjectClosure* cl);
void safe_object_iterate(ObjectClosure* cl) { object_iterate(cl); }

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -538,7 +538,7 @@ inline void ParallelCompactData::RegionData::decrement_destination_count()
{
assert(_dc_and_los < dc_claimed, "already claimed");
assert(_dc_and_los >= dc_one, "count would go negative");
Atomic::add((int)dc_mask, (volatile int*)&_dc_and_los);
Atomic::add(dc_mask, &_dc_and_los);
}
inline HeapWord* ParallelCompactData::RegionData::data_location() const
@ -578,7 +578,7 @@ inline bool ParallelCompactData::RegionData::claim_unsafe()
inline void ParallelCompactData::RegionData::add_live_obj(size_t words)
{
assert(words <= (size_t)los_mask - live_obj_size(), "overflow");
Atomic::add((int) words, (volatile int*) &_dc_and_los);
Atomic::add(static_cast<region_sz_t>(words), &_dc_and_los);
}
inline void ParallelCompactData::RegionData::set_highest_ref(HeapWord* addr)

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -61,10 +61,6 @@ public:
virtual bool is_in_closed_subset(const void* p) const {
return is_in(p);
}
virtual bool card_mark_must_follow_store() const {
return false;
}
};
#endif // SHARE_VM_GC_CMS_CMSHEAP_HPP

View File

@ -32,6 +32,8 @@
#include "oops/oopsHierarchy.hpp"
#include "utilities/fakeRttiSupport.hpp"
class JavaThread;
// This class provides the interface between a barrier implementation and
// the rest of the system.
@ -107,18 +109,18 @@ public:
static void static_write_ref_array_pre(HeapWord* start, size_t count);
static void static_write_ref_array_post(HeapWord* start, size_t count);
// Support for optimizing compilers to call the barrier set on slow path allocations
// that did not enter a TLAB. Used for e.g. ReduceInitialCardMarks.
// The allocation is safe to use iff it returns true. If not, the slow-path allocation
// is redone until it succeeds. This can e.g. prevent allocations from the slow path
// to be in old.
virtual void on_slowpath_allocation_exit(JavaThread* thread, oop new_obj) {}
virtual void flush_deferred_barriers(JavaThread* thread) {}
virtual void make_parsable(JavaThread* thread) {}
protected:
virtual void write_ref_array_work(MemRegion mr) = 0;
public:
// (For efficiency reasons, this operation is specialized for certain
// barrier types. Semantically, it should be thought of as a call to the
// virtual "_work" function below, which must implement the barrier.)
void write_region(MemRegion mr);
protected:
virtual void write_region_work(MemRegion mr) = 0;
public:
// Inform the BarrierSet that the covered heap region that starts
// with "base" has been changed to have the given size (possibly from 0,

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -52,8 +52,4 @@ void BarrierSet::write_ref_array(HeapWord* start, size_t count) {
write_ref_array_work(MemRegion(aligned_start, aligned_end));
}
inline void BarrierSet::write_region(MemRegion mr) {
write_region_work(mr);
}
#endif // SHARE_VM_GC_SHARED_BARRIERSET_INLINE_HPP

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -30,6 +30,7 @@
#include "logging/log.hpp"
#include "memory/virtualspace.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/thread.hpp"
#include "services/memTracker.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"
@ -61,7 +62,8 @@ CardTableModRefBS::CardTableModRefBS(
_committed(NULL),
_cur_covered_regions(0),
_byte_map(NULL),
byte_map_base(NULL)
byte_map_base(NULL),
_defer_initial_card_mark(false)
{
assert((uintptr_t(_whole_heap.start()) & (card_size - 1)) == 0, "heap must start at card boundary");
assert((uintptr_t(_whole_heap.end()) & (card_size - 1)) == 0, "heap must end at card boundary");
@ -75,6 +77,7 @@ CardTableModRefBS::CardTableModRefBS(
}
void CardTableModRefBS::initialize() {
initialize_deferred_card_mark_barriers();
_guard_index = cards_required(_whole_heap.word_size()) - 1;
_last_valid_index = _guard_index - 1;
@ -521,3 +524,112 @@ void CardTableModRefBS::print_on(outputStream* st) const {
st->print_cr("Card table byte_map: [" INTPTR_FORMAT "," INTPTR_FORMAT "] byte_map_base: " INTPTR_FORMAT,
p2i(_byte_map), p2i(_byte_map + _byte_map_size), p2i(byte_map_base));
}
// Helper for ReduceInitialCardMarks. For performance,
// compiled code may elide card-marks for initializing stores
// to a newly allocated object along the fast-path. We
// compensate for such elided card-marks as follows:
// (a) Generational, non-concurrent collectors, such as
// GenCollectedHeap(ParNew,DefNew,Tenured) and
// ParallelScavengeHeap(ParallelGC, ParallelOldGC)
// need the card-mark if and only if the region is
// in the old gen, and do not care if the card-mark
// succeeds or precedes the initializing stores themselves,
// so long as the card-mark is completed before the next
// scavenge. For all these cases, we can do a card mark
// at the point at which we do a slow path allocation
// in the old gen, i.e. in this call.
// (b) GenCollectedHeap(ConcurrentMarkSweepGeneration) requires
// in addition that the card-mark for an old gen allocated
// object strictly follow any associated initializing stores.
// In these cases, the memRegion remembered below is
// used to card-mark the entire region either just before the next
// slow-path allocation by this thread or just before the next scavenge or
// CMS-associated safepoint, whichever of these events happens first.
// (The implicit assumption is that the object has been fully
// initialized by this point, a fact that we assert when doing the
// card-mark.)
// (c) G1CollectedHeap(G1) uses two kinds of write barriers. When a
// G1 concurrent marking is in progress an SATB (pre-write-)barrier
// is used to remember the pre-value of any store. Initializing
// stores will not need this barrier, so we need not worry about
// compensating for the missing pre-barrier here. Turning now
// to the post-barrier, we note that G1 needs a RS update barrier
// which simply enqueues a (sequence of) dirty cards which may
// optionally be refined by the concurrent update threads. Note
// that this barrier need only be applied to a non-young write,
// but, like in CMS, because of the presence of concurrent refinement
// (much like CMS' precleaning), must strictly follow the oop-store.
// Thus, using the same protocol for maintaining the intended
// invariants turns out, serendipitously, to be the same for both
// G1 and CMS.
//
// For any future collector, this code should be reexamined with
// that specific collector in mind, and the documentation above suitably
// extended and updated.
void CardTableModRefBS::on_slowpath_allocation_exit(JavaThread* thread, oop new_obj) {
if (!ReduceInitialCardMarks) {
return;
}
// If a previous card-mark was deferred, flush it now.
flush_deferred_card_mark_barrier(thread);
if (new_obj->is_typeArray() || is_in_young(new_obj)) {
// Arrays of non-references don't need a post-barrier.
// The deferred_card_mark region should be empty
// following the flush above.
assert(thread->deferred_card_mark().is_empty(), "Error");
} else {
MemRegion mr((HeapWord*)new_obj, new_obj->size());
assert(!mr.is_empty(), "Error");
if (_defer_initial_card_mark) {
// Defer the card mark
thread->set_deferred_card_mark(mr);
} else {
// Do the card mark
write_region(mr);
}
}
}
void CardTableModRefBS::initialize_deferred_card_mark_barriers() {
// Used for ReduceInitialCardMarks (when COMPILER2 or JVMCI is used);
// otherwise remains unused.
#if defined(COMPILER2) || INCLUDE_JVMCI
_defer_initial_card_mark = is_server_compilation_mode_vm() && ReduceInitialCardMarks && can_elide_tlab_store_barriers()
&& (DeferInitialCardMark || card_mark_must_follow_store());
#else
assert(_defer_initial_card_mark == false, "Who would set it?");
#endif
}
void CardTableModRefBS::flush_deferred_card_mark_barrier(JavaThread* thread) {
#if defined(COMPILER2) || INCLUDE_JVMCI
MemRegion deferred = thread->deferred_card_mark();
if (!deferred.is_empty()) {
assert(_defer_initial_card_mark, "Otherwise should be empty");
{
// Verify that the storage points to a parsable object in heap
DEBUG_ONLY(oop old_obj = oop(deferred.start());)
assert(!is_in_young(old_obj),
"Else should have been filtered in on_slowpath_allocation_exit()");
assert(oopDesc::is_oop(old_obj, true), "Not an oop");
assert(deferred.word_size() == (size_t)(old_obj->size()),
"Mismatch: multiple objects?");
}
write_region(deferred);
// "Clear" the deferred_card_mark field
thread->set_deferred_card_mark(MemRegion());
}
assert(thread->deferred_card_mark().is_empty(), "invariant");
#else
assert(!_defer_initial_card_mark, "Should be false");
assert(thread->deferred_card_mark().is_empty(), "Should be empty");
#endif
}
void CardTableModRefBS::flush_deferred_barriers(JavaThread* thread) {
// The deferred store barriers must all have been flushed to the
// card-table (or other remembered set structure) before GC starts
// processing the card-table (or other remembered set).
flush_deferred_card_mark_barrier(thread);
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -58,6 +58,10 @@ class CardTableModRefBS: public ModRefBarrierSet {
CT_MR_BS_last_reserved = 16
};
// Used in support of ReduceInitialCardMarks; only consulted if COMPILER2
// or INCLUDE_JVMCI is being used
bool _defer_initial_card_mark;
// a word's worth (row) of clean card values
static const intptr_t clean_card_row = (intptr_t)(-1);
@ -180,8 +184,8 @@ class CardTableModRefBS: public ModRefBarrierSet {
CardTableModRefBS(MemRegion whole_heap, const BarrierSet::FakeRtti& fake_rtti);
~CardTableModRefBS();
protected:
void write_region_work(MemRegion mr) {
public:
void write_region(MemRegion mr) {
dirty_MemRegion(mr);
}
@ -314,6 +318,49 @@ class CardTableModRefBS: public ModRefBarrierSet {
void verify_not_dirty_region(MemRegion mr) PRODUCT_RETURN;
void verify_dirty_region(MemRegion mr) PRODUCT_RETURN;
// ReduceInitialCardMarks
void initialize_deferred_card_mark_barriers();
// If the CollectedHeap was asked to defer a store barrier above,
// this informs it to flush such a deferred store barrier to the
// remembered set.
void flush_deferred_card_mark_barrier(JavaThread* thread);
// Can a compiler initialize a new object without store barriers?
// This permission only extends from the creation of a new object
// via a TLAB up to the first subsequent safepoint. If such permission
// is granted for this heap type, the compiler promises to call
// defer_store_barrier() below on any slow path allocation of
// a new object for which such initializing store barriers will
// have been elided. G1, like CMS, allows this, but should be
// ready to provide a compensating write barrier as necessary
// if that storage came out of a non-young region. The efficiency
// of this implementation depends crucially on being able to
// answer very efficiently in constant time whether a piece of
// storage in the heap comes from a young region or not.
// See ReduceInitialCardMarks.
virtual bool can_elide_tlab_store_barriers() const {
return true;
}
// If a compiler is eliding store barriers for TLAB-allocated objects,
// we will be informed of a slow-path allocation by a call
// to on_slowpath_allocation_exit() below. Such a call precedes the
// initialization of the object itself, and no post-store-barriers will
// be issued. Some heap types require that the barrier strictly follows
// the initializing stores. (This is currently implemented by deferring the
// barrier until the next slow-path allocation or gc-related safepoint.)
// This interface answers whether a particular barrier type needs the card
// mark to be thus strictly sequenced after the stores.
virtual bool card_mark_must_follow_store() const = 0;
virtual bool is_in_young(oop obj) const = 0;
virtual void on_slowpath_allocation_exit(JavaThread* thread, oop new_obj);
virtual void flush_deferred_barriers(JavaThread* thread);
virtual void make_parsable(JavaThread* thread) { flush_deferred_card_mark_barrier(thread); }
template <DecoratorSet decorators, typename BarrierSetT = CardTableModRefBS>
class AccessBarrier: public ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT> {};
};


@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -121,3 +121,6 @@ void CardTableModRefBSForCTRS::non_clean_card_iterate_possibly_parallel(
}
}
bool CardTableModRefBSForCTRS::is_in_young(oop obj) const {
return GenCollectedHeap::heap()->is_in_young(obj);
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -43,6 +43,12 @@ public:
void set_CTRS(CardTableRS* rs) { _rs = rs; }
virtual bool card_mark_must_follow_store() const {
return UseConcMarkSweepGC;
}
virtual bool is_in_young(oop obj) const;
private:
CardTableRS* _rs;


@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -177,8 +177,7 @@ CollectedHeap::CollectedHeap() :
_total_collections(0),
_total_full_collections(0),
_gc_cause(GCCause::_no_gc),
_gc_lastcause(GCCause::_no_gc),
_defer_initial_card_mark(false) // strengthened by subclass in pre_initialize() below.
_gc_lastcause(GCCause::_no_gc)
{
const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
const size_t elements_per_word = HeapWordSize / sizeof(jint);
@ -239,17 +238,6 @@ void CollectedHeap::set_barrier_set(BarrierSet* barrier_set) {
BarrierSet::set_bs(barrier_set);
}
void CollectedHeap::pre_initialize() {
// Used for ReduceInitialCardMarks (when COMPILER2 is used);
// otherwise remains unused.
#if COMPILER2_OR_JVMCI
_defer_initial_card_mark = is_server_compilation_mode_vm() && ReduceInitialCardMarks && can_elide_tlab_store_barriers()
&& (DeferInitialCardMark || card_mark_must_follow_store());
#else
assert(_defer_initial_card_mark == false, "Who would set it?");
#endif
}
#ifndef PRODUCT
void CollectedHeap::check_for_bad_heap_word_value(HeapWord* addr, size_t size) {
if (CheckMemoryInitialization && ZapUnusedHeapArea) {
@ -333,28 +321,6 @@ HeapWord* CollectedHeap::allocate_from_tlab_slow(Klass* klass, Thread* thread, s
return obj;
}
void CollectedHeap::flush_deferred_store_barrier(JavaThread* thread) {
MemRegion deferred = thread->deferred_card_mark();
if (!deferred.is_empty()) {
assert(_defer_initial_card_mark, "Otherwise should be empty");
{
// Verify that the storage points to a parsable object in heap
DEBUG_ONLY(oop old_obj = oop(deferred.start());)
assert(is_in(old_obj), "Not in allocated heap");
assert(!can_elide_initializing_store_barrier(old_obj),
"Else should have been filtered in new_store_pre_barrier()");
assert(oopDesc::is_oop(old_obj, true), "Not an oop");
assert(deferred.word_size() == (size_t)(old_obj->size()),
"Mismatch: multiple objects?");
}
BarrierSet* bs = barrier_set();
bs->write_region(deferred);
// "Clear" the deferred_card_mark field
thread->set_deferred_card_mark(MemRegion());
}
assert(thread->deferred_card_mark().is_empty(), "invariant");
}
size_t CollectedHeap::max_tlab_size() const {
// TLABs can't be bigger than we can fill with a int[Integer.MAX_VALUE].
// This restriction could be removed by enabling filling with multiple arrays.
@ -370,72 +336,6 @@ size_t CollectedHeap::max_tlab_size() const {
return align_down(max_int_size, MinObjAlignment);
}
// Helper for ReduceInitialCardMarks. For performance,
// compiled code may elide card-marks for initializing stores
// to a newly allocated object along the fast-path. We
// compensate for such elided card-marks as follows:
// (a) Generational, non-concurrent collectors, such as
// GenCollectedHeap(ParNew,DefNew,Tenured) and
// ParallelScavengeHeap(ParallelGC, ParallelOldGC)
// need the card-mark if and only if the region is
// in the old gen, and do not care if the card-mark
// succeeds or precedes the initializing stores themselves,
// so long as the card-mark is completed before the next
// scavenge. For all these cases, we can do a card mark
// at the point at which we do a slow path allocation
// in the old gen, i.e. in this call.
// (b) GenCollectedHeap(ConcurrentMarkSweepGeneration) requires
// in addition that the card-mark for an old gen allocated
// object strictly follow any associated initializing stores.
// In these cases, the memRegion remembered below is
// used to card-mark the entire region either just before the next
// slow-path allocation by this thread or just before the next scavenge or
// CMS-associated safepoint, whichever of these events happens first.
// (The implicit assumption is that the object has been fully
// initialized by this point, a fact that we assert when doing the
// card-mark.)
// (c) G1CollectedHeap(G1) uses two kinds of write barriers. When a
// G1 concurrent marking is in progress an SATB (pre-write-)barrier
// is used to remember the pre-value of any store. Initializing
// stores will not need this barrier, so we need not worry about
// compensating for the missing pre-barrier here. Turning now
// to the post-barrier, we note that G1 needs a RS update barrier
// which simply enqueues a (sequence of) dirty cards which may
// optionally be refined by the concurrent update threads. Note
// that this barrier need only be applied to a non-young write,
// but, like in CMS, because of the presence of concurrent refinement
// (much like CMS' precleaning), must strictly follow the oop-store.
// Thus, using the same protocol for maintaining the intended
// invariants turns out, serendepitously, to be the same for both
// G1 and CMS.
//
// For any future collector, this code should be reexamined with
// that specific collector in mind, and the documentation above suitably
// extended and updated.
oop CollectedHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
// If a previous card-mark was deferred, flush it now.
flush_deferred_store_barrier(thread);
if (can_elide_initializing_store_barrier(new_obj) ||
new_obj->is_typeArray()) {
// Arrays of non-references don't need a pre-barrier.
// The deferred_card_mark region should be empty
// following the flush above.
assert(thread->deferred_card_mark().is_empty(), "Error");
} else {
MemRegion mr((HeapWord*)new_obj, new_obj->size());
assert(!mr.is_empty(), "Error");
if (_defer_initial_card_mark) {
// Defer the card mark
thread->set_deferred_card_mark(mr);
} else {
// Do the card mark
BarrierSet* bs = barrier_set();
bs->write_region(mr);
}
}
return new_obj;
}
size_t CollectedHeap::filler_array_hdr_size() {
return align_object_offset(arrayOopDesc::header_size(T_INT)); // align to Long
}
@ -538,24 +438,16 @@ void CollectedHeap::ensure_parsability(bool retire_tlabs) {
" otherwise concurrent mutator activity may make heap "
" unparsable again");
const bool use_tlab = UseTLAB;
const bool deferred = _defer_initial_card_mark;
// The main thread starts allocating via a TLAB even before it
// has added itself to the threads list at vm boot-up.
JavaThreadIteratorWithHandle jtiwh;
assert(!use_tlab || jtiwh.length() > 0,
"Attempt to fill tlabs before main thread has been added"
" to threads list is doomed to failure!");
BarrierSet *bs = barrier_set();
for (; JavaThread *thread = jtiwh.next(); ) {
if (use_tlab) thread->tlab().make_parsable(retire_tlabs);
#if COMPILER2_OR_JVMCI
// The deferred store barriers must all have been flushed to the
// card-table (or other remembered set structure) before GC starts
// processing the card-table (or other remembered set).
if (deferred) flush_deferred_store_barrier(thread);
#else
assert(!deferred, "Should be false");
assert(thread->deferred_card_mark().is_empty(), "Should be empty");
#endif
bs->make_parsable(thread);
}
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -101,10 +101,6 @@ class CollectedHeap : public CHeapObj<mtInternal> {
GCHeapLog* _gc_heap_log;
// Used in support of ReduceInitialCardMarks; only consulted if COMPILER2
// or INCLUDE_JVMCI is being used
bool _defer_initial_card_mark;
MemRegion _reserved;
protected:
@ -129,13 +125,6 @@ class CollectedHeap : public CHeapObj<mtInternal> {
// Constructor
CollectedHeap();
// Do common initializations that must follow instance construction,
// for example, those needing virtual calls.
// This code could perhaps be moved into initialize() but would
// be slightly more awkward because we want the latter to be a
// pure virtual.
void pre_initialize();
// Create a new tlab. All TLAB allocations must go through this.
virtual HeapWord* allocate_new_tlab(size_t size);
@ -408,45 +397,6 @@ class CollectedHeap : public CHeapObj<mtInternal> {
return 0;
}
// Can a compiler initialize a new object without store barriers?
// This permission only extends from the creation of a new object
// via a TLAB up to the first subsequent safepoint. If such permission
// is granted for this heap type, the compiler promises to call
// defer_store_barrier() below on any slow path allocation of
// a new object for which such initializing store barriers will
// have been elided.
virtual bool can_elide_tlab_store_barriers() const = 0;
// If a compiler is eliding store barriers for TLAB-allocated objects,
// there is probably a corresponding slow path which can produce
// an object allocated anywhere. The compiler's runtime support
// promises to call this function on such a slow-path-allocated
// object before performing initializations that have elided
// store barriers. Returns new_obj, or maybe a safer copy thereof.
virtual oop new_store_pre_barrier(JavaThread* thread, oop new_obj);
// Answers whether an initializing store to a new object currently
// allocated at the given address doesn't need a store
// barrier. Returns "true" if it doesn't need an initializing
// store barrier; answers "false" if it does.
virtual bool can_elide_initializing_store_barrier(oop new_obj) = 0;
// If a compiler is eliding store barriers for TLAB-allocated objects,
// we will be informed of a slow-path allocation by a call
// to new_store_pre_barrier() above. Such a call precedes the
// initialization of the object itself, and no post-store-barriers will
// be issued. Some heap types require that the barrier strictly follows
// the initializing stores. (This is currently implemented by deferring the
// barrier until the next slow-path allocation or gc-related safepoint.)
// This interface answers whether a particular heap type needs the card
// mark to be thus strictly sequenced after the stores.
virtual bool card_mark_must_follow_store() const = 0;
// If the CollectedHeap was asked to defer a store barrier above,
// this informs it to flush such a deferred store barrier to the
// remembered set.
virtual void flush_deferred_store_barrier(JavaThread* thread);
// Perform a collection of the heap; intended for use in implementing
// "System.gc". This probably implies as full a collection as the
// "CollectedHeap" supports.


@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -71,8 +71,6 @@ GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) :
}
jint GenCollectedHeap::initialize() {
CollectedHeap::pre_initialize();
// While there are no constraints in the GC code that HeapWordSize
// be any particular value, there are multiple other areas in the
// system which believe this to be true (e.g. oop->object_size in some


@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -270,22 +270,6 @@ public:
virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
virtual HeapWord* allocate_new_tlab(size_t size);
// Can a compiler initialize a new object without store barriers?
// This permission only extends from the creation of a new object
// via a TLAB up to the first subsequent safepoint.
virtual bool can_elide_tlab_store_barriers() const {
return true;
}
// We don't need barriers for stores to objects in the
// young gen and, a fortiori, for initializing stores to
// objects therein. This applies to DefNew+Tenured and ParNew+CMS
// only and may need to be re-examined in case other
// kinds of collectors are implemented in the future.
virtual bool can_elide_initializing_store_barrier(oop new_obj) {
return is_in_young(new_obj);
}
// The "requestor" generation is performing some garbage collection
// action for which it would be useful to have scratch space. The
// requestor promises to allocate no more than "max_alloc_words" in any


@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -45,6 +45,7 @@ public:
// Causes all refs in "mr" to be assumed to be modified.
virtual void invalidate(MemRegion mr) = 0;
virtual void write_region(MemRegion mr) = 0;
// The caller guarantees that "mr" contains no references. (Perhaps its
// objects have been moved elsewhere.)


@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -73,7 +73,7 @@ oop_arraycopy_in_heap(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t
if (!HasDecorator<decorators, ARRAYCOPY_CHECKCAST>::value) {
// Optimized covariant case
bs->write_ref_array_pre(dst, (int)length,
HasDecorator<decorators, ARRAYCOPY_DEST_NOT_INITIALIZED>::value);
HasDecorator<decorators, AS_DEST_NOT_INITIALIZED>::value);
Raw::oop_arraycopy(src_obj, dst_obj, src, dst, length);
bs->write_ref_array((HeapWord*)dst, length);
} else {


@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -72,7 +72,6 @@ public:
// Initializes the buffer to be empty, but with the given "word_sz".
// Must get initialized with "set_buf" for an allocation to succeed.
PLAB(size_t word_sz);
virtual ~PLAB() {}
static size_t size_required_for_allocation(size_t word_size) { return word_size + AlignmentReserve; }
@ -120,7 +119,7 @@ public:
}
// Sets the space of the buffer to be [buf, space+word_sz()).
virtual void set_buf(HeapWord* buf, size_t new_word_sz) {
void set_buf(HeapWord* buf, size_t new_word_sz) {
assert(new_word_sz > AlignmentReserve, "Too small");
_word_sz = new_word_sz;
@ -136,11 +135,11 @@ public:
// Flush allocation statistics into the given PLABStats supporting ergonomic
// sizing of PLAB's and retire the current buffer. To be called at the end of
// GC.
virtual void flush_and_retire_stats(PLABStats* stats);
void flush_and_retire_stats(PLABStats* stats);
// Fills in the unallocated portion of the buffer with a garbage object and updates
// statistics. To be called during GC.
virtual void retire();
void retire();
};
// PLAB book-keeping.


@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -153,7 +153,7 @@ bool
ParallelTaskTerminator::offer_termination(TerminatorTerminator* terminator) {
assert(_n_threads > 0, "Initialization is incorrect");
assert(_offered_termination < _n_threads, "Invariant");
Atomic::inc((int *)&_offered_termination);
Atomic::inc(&_offered_termination);
uint yield_count = 0;
// Number of hard spin loops done since last yield
@ -228,7 +228,7 @@ ParallelTaskTerminator::offer_termination(TerminatorTerminator* terminator) {
#endif
if (peek_in_queue_set() ||
(terminator != NULL && terminator->should_exit_termination())) {
Atomic::dec((int *)&_offered_termination);
Atomic::dec(&_offered_termination);
assert(_offered_termination < _n_threads, "Invariant");
return false;
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -205,7 +205,7 @@ bool GenericTaskQueue<E, F, N>::pop_global(volatile E& t) {
#if !(defined SPARC || defined IA32 || defined AMD64)
OrderAccess::fence();
#endif
uint localBot = OrderAccess::load_acquire((volatile juint*)&_bottom);
uint localBot = OrderAccess::load_acquire(&_bottom);
uint n_elems = size(localBot, oldAge.top());
if (n_elems == 0) {
return false;


@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -157,7 +157,7 @@ public:
// Wait for the coordinator to dispatch a task.
_start_semaphore->wait();
uint num_started = (uint) Atomic::add(1, (volatile jint*)&_started);
uint num_started = Atomic::add(1u, &_started);
// Subtract one to get a zero-indexed worker id.
uint worker_id = num_started - 1;
@ -168,7 +168,7 @@ public:
void worker_done_with_task() {
// Mark that the worker is done with the task.
// The worker is not allowed to read the state variables after this line.
uint not_finished = (uint) Atomic::add(-1, (volatile jint*)&_not_finished);
uint not_finished = Atomic::sub(1u, &_not_finished);
// The last worker signals to the coordinator that all work is completed.
if (not_finished == 0) {
@ -439,7 +439,7 @@ bool SubTasksDone::is_task_claimed(uint t) {
#ifdef ASSERT
if (!res) {
assert(_claimed < _n_tasks, "Too many tasks claimed; missing clear?");
Atomic::inc((volatile jint*) &_claimed);
Atomic::inc(&_claimed);
}
#endif
return res;


@ -116,10 +116,7 @@ JRT_BLOCK_ENTRY(void, JVMCIRuntime::new_instance(JavaThread* thread, Klass* klas
oop obj = ik->allocate_instance(CHECK);
thread->set_vm_result(obj);
JRT_BLOCK_END;
if (ReduceInitialCardMarks) {
new_store_pre_barrier(thread);
}
SharedRuntime::on_slowpath_allocation_exit(thread);
JRT_END
JRT_BLOCK_ENTRY(void, JVMCIRuntime::new_array(JavaThread* thread, Klass* array_klass, jint length))
@ -151,29 +148,9 @@ JRT_BLOCK_ENTRY(void, JVMCIRuntime::new_array(JavaThread* thread, Klass* array_k
}
}
JRT_BLOCK_END;
if (ReduceInitialCardMarks) {
new_store_pre_barrier(thread);
}
SharedRuntime::on_slowpath_allocation_exit(thread);
JRT_END
void JVMCIRuntime::new_store_pre_barrier(JavaThread* thread) {
// After any safepoint, just before going back to compiled code,
// we inform the GC that we will be doing initializing writes to
// this object in the future without emitting card-marks, so
// GC may take any compensating steps.
// NOTE: Keep this code consistent with GraphKit::store_barrier.
oop new_obj = thread->vm_result();
if (new_obj == NULL) return;
assert(Universe::heap()->can_elide_tlab_store_barriers(),
"compiler must check this first");
// GC may decide to give back a safer copy of new_obj.
new_obj = Universe::heap()->new_store_pre_barrier(thread, new_obj);
thread->set_vm_result(new_obj);
}
JRT_ENTRY(void, JVMCIRuntime::new_multi_array(JavaThread* thread, Klass* klass, int rank, jint* dims))
assert(klass->is_klass(), "not a class");
assert(rank >= 1, "rank must be nonzero");


@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -154,7 +154,6 @@ class JVMCIRuntime: public AllStatic {
static void write_barrier_pre(JavaThread* thread, oopDesc* obj);
static void write_barrier_post(JavaThread* thread, void* card);
static jboolean validate_object(JavaThread* thread, oopDesc* parent, oopDesc* child);
static void new_store_pre_barrier(JavaThread* thread);
// used to throw exceptions from compiled JVMCI code
static void throw_and_post_jvmti_exception(JavaThread* thread, const char* exception, const char* message);


@ -38,9 +38,9 @@
inline void inc_stat_counter(volatile julong* dest, julong add_value) {
#if defined(SPARC) || defined(X86)
// Sparc and X86 have atomic jlong (8 bytes) instructions
julong value = Atomic::load((volatile jlong*)dest);
julong value = Atomic::load(dest);
value += add_value;
Atomic::store((jlong)value, (volatile jlong*)dest);
Atomic::store(value, dest);
#else
// possible word-tearing during load/store
*dest += add_value;


@ -410,14 +410,11 @@ bool FileMapInfo::open_for_read() {
// Write the FileMapInfo information to the file.
void FileMapInfo::open_for_write() {
_full_path = Arguments::GetSharedArchivePath();
if (log_is_enabled(Info, cds)) {
ResourceMark rm;
LogMessage(cds) msg;
stringStream info_stream;
info_stream.print_cr("Dumping shared data to file: ");
info_stream.print_cr(" %s", _full_path);
msg.info("%s", info_stream.as_string());
_full_path = Arguments::GetSharedArchivePath();
LogMessage(cds) msg;
if (msg.is_info()) {
msg.info("Dumping shared data to file: ");
msg.info(" %s", _full_path);
}
#ifdef _WINDOWS // On Windows, need WRITE permission to remove the file.


@ -883,13 +883,11 @@ void DumpAllocStats::print_stats(int ro_all, int rw_all, int mc_all, int md_all)
const char *sep = "--------------------+---------------------------+---------------------------+--------------------------";
const char *hdr = " ro_cnt ro_bytes % | rw_cnt rw_bytes % | all_cnt all_bytes %";
ResourceMark rm;
LogMessage(cds) msg;
stringStream info_stream;
info_stream.print_cr("Detailed metadata info (excluding od/st regions; rw stats include md/mc regions):");
info_stream.print_cr("%s", hdr);
info_stream.print_cr("%s", sep);
msg.info("Detailed metadata info (excluding od/st regions; rw stats include md/mc regions):");
msg.info("%s", hdr);
msg.info("%s", sep);
for (int type = 0; type < int(_number_of_types); type ++) {
const char *name = type_name((Type)type);
int ro_count = _counts[RO][type];
@ -903,7 +901,7 @@ void DumpAllocStats::print_stats(int ro_all, int rw_all, int mc_all, int md_all)
double rw_perc = percent_of(rw_bytes, rw_all);
double perc = percent_of(bytes, ro_all + rw_all);
info_stream.print_cr(fmt_stats, name,
msg.info(fmt_stats, name,
ro_count, ro_bytes, ro_perc,
rw_count, rw_bytes, rw_perc,
count, bytes, perc);
@ -921,8 +919,8 @@ void DumpAllocStats::print_stats(int ro_all, int rw_all, int mc_all, int md_all)
double all_rw_perc = percent_of(all_rw_bytes, rw_all);
double all_perc = percent_of(all_bytes, ro_all + rw_all);
info_stream.print_cr("%s", sep);
info_stream.print_cr(fmt_stats, "Total",
msg.info("%s", sep);
msg.info(fmt_stats, "Total",
all_ro_count, all_ro_bytes, all_ro_perc,
all_rw_count, all_rw_bytes, all_rw_perc,
all_count, all_bytes, all_perc);
@ -930,7 +928,6 @@ void DumpAllocStats::print_stats(int ro_all, int rw_all, int mc_all, int md_all)
assert(all_ro_bytes == ro_all, "everything should have been counted");
assert(all_rw_bytes == rw_all, "everything should have been counted");
msg.info("%s", info_stream.as_string());
#undef fmt_stats
}


@ -155,6 +155,8 @@ const DecoratorSet MO_DECORATOR_MASK = MO_UNORDERED | MO_VOLATILE | MO_RELAXED |
// - Accesses on narrowOop* translate to encoded/decoded memory accesses without runtime checks
// - Accesses on HeapWord* translate to a runtime check choosing one of the above
// - Accesses on other types translate to raw memory accesses without runtime checks
// * AS_DEST_NOT_INITIALIZED: This property can be important to e.g. SATB barriers by
// marking that the previous value is uninitialized nonsense rather than a real value.
// * AS_NO_KEEPALIVE: The barrier is used only on oop references and will not keep any involved objects
// alive, regardless of the type of reference being accessed. It will however perform the memory access
// in a consistent way w.r.t. e.g. concurrent compaction, so that the right field is being accessed,
@ -164,10 +166,12 @@ const DecoratorSet MO_DECORATOR_MASK = MO_UNORDERED | MO_VOLATILE | MO_RELAXED |
// responsibility of performing the access and what barriers to be performed to the GC. This is the default.
// Note that primitive accesses will only be resolved on the barrier set if the appropriate build-time
// decorator for enabling primitive barriers is enabled for the build.
const DecoratorSet AS_RAW = UCONST64(1) << 11;
const DecoratorSet AS_NO_KEEPALIVE = UCONST64(1) << 12;
const DecoratorSet AS_NORMAL = UCONST64(1) << 13;
const DecoratorSet AS_DECORATOR_MASK = AS_RAW | AS_NO_KEEPALIVE | AS_NORMAL;
const DecoratorSet AS_RAW = UCONST64(1) << 11;
const DecoratorSet AS_DEST_NOT_INITIALIZED = UCONST64(1) << 12;
const DecoratorSet AS_NO_KEEPALIVE = UCONST64(1) << 13;
const DecoratorSet AS_NORMAL = UCONST64(1) << 14;
const DecoratorSet AS_DECORATOR_MASK = AS_RAW | AS_DEST_NOT_INITIALIZED |
AS_NO_KEEPALIVE | AS_NORMAL;
// === Reference Strength Decorators ===
// These decorators only apply to accesses on oop-like types (oop/narrowOop).
@ -178,10 +182,10 @@ const DecoratorSet AS_DECORATOR_MASK = AS_RAW | AS_NO_KEEPALIVE | AS_NORMAL;
// * ON_UNKNOWN_OOP_REF: The memory access is performed on a reference of unknown strength.
// This could for example come from the unsafe API.
// * Default (no explicit reference strength specified): ON_STRONG_OOP_REF
const DecoratorSet ON_STRONG_OOP_REF = UCONST64(1) << 14;
const DecoratorSet ON_WEAK_OOP_REF = UCONST64(1) << 15;
const DecoratorSet ON_PHANTOM_OOP_REF = UCONST64(1) << 16;
const DecoratorSet ON_UNKNOWN_OOP_REF = UCONST64(1) << 17;
const DecoratorSet ON_STRONG_OOP_REF = UCONST64(1) << 15;
const DecoratorSet ON_WEAK_OOP_REF = UCONST64(1) << 16;
const DecoratorSet ON_PHANTOM_OOP_REF = UCONST64(1) << 17;
const DecoratorSet ON_UNKNOWN_OOP_REF = UCONST64(1) << 18;
const DecoratorSet ON_DECORATOR_MASK = ON_STRONG_OOP_REF | ON_WEAK_OOP_REF |
ON_PHANTOM_OOP_REF | ON_UNKNOWN_OOP_REF;
@ -196,23 +200,21 @@ const DecoratorSet ON_DECORATOR_MASK = ON_STRONG_OOP_REF | ON_WEAK_OOP_REF |
// * IN_CONCURRENT_ROOT: The access is performed in an off-heap data structure pointing into the Java heap,
// but is notably not scanned during safepoints. This is sometimes a special case for some GCs and
// implies that it is also an IN_ROOT.
const DecoratorSet IN_HEAP = UCONST64(1) << 18;
const DecoratorSet IN_HEAP_ARRAY = UCONST64(1) << 19;
const DecoratorSet IN_ROOT = UCONST64(1) << 20;
const DecoratorSet IN_CONCURRENT_ROOT = UCONST64(1) << 21;
const DecoratorSet IN_ARCHIVE_ROOT = UCONST64(1) << 22;
const DecoratorSet IN_HEAP = UCONST64(1) << 19;
const DecoratorSet IN_HEAP_ARRAY = UCONST64(1) << 20;
const DecoratorSet IN_ROOT = UCONST64(1) << 21;
const DecoratorSet IN_CONCURRENT_ROOT = UCONST64(1) << 22;
const DecoratorSet IN_ARCHIVE_ROOT = UCONST64(1) << 23;
const DecoratorSet IN_DECORATOR_MASK = IN_HEAP | IN_HEAP_ARRAY |
IN_ROOT | IN_CONCURRENT_ROOT |
IN_ARCHIVE_ROOT;
// == Value Decorators ==
// * OOP_NOT_NULL: This property can make certain barriers faster such as compressing oops.
const DecoratorSet OOP_NOT_NULL = UCONST64(1) << 23;
const DecoratorSet OOP_NOT_NULL = UCONST64(1) << 24;
const DecoratorSet OOP_DECORATOR_MASK = OOP_NOT_NULL;
// == Arraycopy Decorators ==
// * ARRAYCOPY_DEST_NOT_INITIALIZED: This property can be important to e.g. SATB barriers by
// marking that the previous value uninitialized nonsense rather than a real value.
// * ARRAYCOPY_CHECKCAST: This property means that the class of the objects in source
// are not guaranteed to be subclasses of the class of the destination array. This requires
// a check-cast barrier during the copying operation. If this is not set, it is assumed
@ -222,14 +224,12 @@ const DecoratorSet OOP_DECORATOR_MASK = OOP_NOT_NULL;
// * ARRAYCOPY_ARRAYOF: The copy is in the arrayof form.
// * ARRAYCOPY_ATOMIC: The accesses have to be atomic over the size of its elements.
// * ARRAYCOPY_ALIGNED: The accesses have to be aligned on a HeapWord.
const DecoratorSet ARRAYCOPY_DEST_NOT_INITIALIZED = UCONST64(1) << 24;
const DecoratorSet ARRAYCOPY_CHECKCAST = UCONST64(1) << 25;
const DecoratorSet ARRAYCOPY_DISJOINT = UCONST64(1) << 26;
const DecoratorSet ARRAYCOPY_ARRAYOF = UCONST64(1) << 27;
const DecoratorSet ARRAYCOPY_ATOMIC = UCONST64(1) << 28;
const DecoratorSet ARRAYCOPY_ALIGNED = UCONST64(1) << 29;
const DecoratorSet ARRAYCOPY_DECORATOR_MASK = ARRAYCOPY_DEST_NOT_INITIALIZED |
ARRAYCOPY_CHECKCAST | ARRAYCOPY_DISJOINT |
const DecoratorSet ARRAYCOPY_DECORATOR_MASK = ARRAYCOPY_CHECKCAST | ARRAYCOPY_DISJOINT |
ARRAYCOPY_DISJOINT | ARRAYCOPY_ARRAYOF |
ARRAYCOPY_ATOMIC | ARRAYCOPY_ALIGNED;
@ -343,8 +343,8 @@ class Access: public AllStatic {
template <DecoratorSet expected_mo_decorators>
static void verify_primitive_decorators() {
const DecoratorSet primitive_decorators = (AS_DECORATOR_MASK ^ AS_NO_KEEPALIVE) | IN_HEAP |
IN_HEAP_ARRAY;
const DecoratorSet primitive_decorators = (AS_DECORATOR_MASK ^ AS_NO_KEEPALIVE ^ AS_DEST_NOT_INITIALIZED) |
IN_HEAP | IN_HEAP_ARRAY;
verify_decorators<expected_mo_decorators | primitive_decorators>();
}
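For reference, a call site written with the renamed decorator mirrors the StubRoutines change further down in this commit. A minimal sketch, where src, dest and count stand in for the copy-stub arguments:

// Sketch: uninitialized-destination oop arraycopy; the destination's old
// contents are nonsense, so they need no SATB pre-barrier treatment.
HeapAccess<ARRAYCOPY_ARRAYOF | AS_DEST_NOT_INITIALIZED>::oop_arraycopy(
    NULL, NULL, src, dest, count);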


@ -1060,6 +1060,7 @@ void Access<decorators>::verify_decorators() {
const DecoratorSet barrier_strength_decorators = decorators & AS_DECORATOR_MASK;
STATIC_ASSERT(barrier_strength_decorators == 0 || ( // make sure barrier strength decorators are disjoint if set
(barrier_strength_decorators ^ AS_NO_KEEPALIVE) == 0 ||
(barrier_strength_decorators ^ AS_DEST_NOT_INITIALIZED) == 0 ||
(barrier_strength_decorators ^ AS_RAW) == 0 ||
(barrier_strength_decorators ^ AS_NORMAL) == 0
));


@ -3861,7 +3861,7 @@ void GraphKit::write_barrier_post(Node* oop_store,
if (use_ReduceInitialCardMarks()
&& obj == just_allocated_object(control())) {
// We can skip marks on a freshly-allocated object in Eden.
// Keep this code in sync with new_store_pre_barrier() in runtime.cpp.
// Keep this code in sync with new_deferred_store_barrier() in runtime.cpp.
// That routine informs GC to take appropriate compensating steps,
// upon a slow-path allocation, so as to make this card-mark
// elision safe.
@ -4159,7 +4159,7 @@ void GraphKit::g1_write_barrier_pre(bool do_load,
* as part of the allocation in the case the allocated object is not located
* in the nursery, this would happen for humongous objects. This is similar to
* how CMS is required to handle this case, see the comments for the method
* CollectedHeap::new_store_pre_barrier and OptoRuntime::new_store_pre_barrier.
* CardTableModRefBS::on_slowpath_allocation_exit and OptoRuntime::new_deferred_store_barrier.
* A deferred card mark is required for these objects and handled in the above
* mentioned methods.
*
@ -4249,7 +4249,7 @@ void GraphKit::g1_write_barrier_post(Node* oop_store,
if (use_ReduceInitialCardMarks() && obj == just_allocated_object(control())) {
// We can skip marks on a freshly-allocated object in Eden.
// Keep this code in sync with new_store_pre_barrier() in runtime.cpp.
// Keep this code in sync with new_deferred_store_barrier() in runtime.cpp.
// That routine informs GC to take appropriate compensating steps,
// upon a slow-path allocation, so as to make this card-mark
// elision safe.


@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -755,8 +755,10 @@ class GraphKit : public Phase {
Node* just_allocated_object(Node* current_control);
static bool use_ReduceInitialCardMarks() {
return (ReduceInitialCardMarks
&& Universe::heap()->can_elide_tlab_store_barriers());
BarrierSet *bs = Universe::heap()->barrier_set();
return bs->is_a(BarrierSet::CardTableModRef)
&& barrier_set_cast<CardTableModRefBS>(bs)->can_elide_tlab_store_barriers()
&& ReduceInitialCardMarks;
}
// Sync Ideal and Graph kits.


@ -2347,7 +2347,7 @@ void IdealLoopTree::dump_head( ) const {
tty->print(" ");
tty->print("Loop: N%d/N%d ",_head->_idx,_tail->_idx);
if (_irreducible) tty->print(" IRREDUCIBLE");
Node* entry = _head->as_Loop()->skip_strip_mined(-1)->in(LoopNode::EntryControl);
Node* entry = _head->is_Loop() ? _head->as_Loop()->skip_strip_mined(-1)->in(LoopNode::EntryControl) : _head->in(LoopNode::EntryControl);
Node* predicate = PhaseIdealLoop::find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check);
if (predicate != NULL ) {
tty->print(" limit_check");
@ -2398,7 +2398,7 @@ void IdealLoopTree::dump_head( ) const {
if (Verbose) {
tty->print(" body={"); _body.dump_simple(); tty->print(" }");
}
if (_head->as_Loop()->is_strip_mined()) {
if (_head->is_Loop() && _head->as_Loop()->is_strip_mined()) {
tty->print(" strip_mined");
}
tty->cr();


@ -693,7 +693,9 @@ static void enqueue_cfg_uses(Node* m, Unique_Node_List& wq) {
Node* PhaseIdealLoop::try_move_store_before_loop(Node* n, Node *n_ctrl) {
// Store has to be first in the loop body
IdealLoopTree *n_loop = get_loop(n_ctrl);
if (n->is_Store() && n_loop != _ltree_root && n_loop->is_loop() && n->in(0) != NULL) {
if (n->is_Store() && n_loop != _ltree_root &&
n_loop->is_loop() && n_loop->_head->is_Loop() &&
n->in(0) != NULL) {
Node* address = n->in(MemNode::Address);
Node* value = n->in(MemNode::ValueIn);
Node* mem = n->in(MemNode::Memory);


@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -194,23 +194,6 @@ const char* OptoRuntime::stub_name(address entry) {
// We failed the fast-path allocation. Now we need to do a scavenge or GC
// and try allocation again.
void OptoRuntime::new_store_pre_barrier(JavaThread* thread) {
// After any safepoint, just before going back to compiled code,
// we inform the GC that we will be doing initializing writes to
// this object in the future without emitting card-marks, so
// GC may take any compensating steps.
// NOTE: Keep this code consistent with GraphKit::store_barrier.
oop new_obj = thread->vm_result();
if (new_obj == NULL) return;
assert(Universe::heap()->can_elide_tlab_store_barriers(),
"compiler must check this first");
// GC may decide to give back a safer copy of new_obj.
new_obj = Universe::heap()->new_store_pre_barrier(thread, new_obj);
thread->set_vm_result(new_obj);
}
// object allocation
JRT_BLOCK_ENTRY(void, OptoRuntime::new_instance_C(Klass* klass, JavaThread* thread))
JRT_BLOCK;
@ -244,10 +227,8 @@ JRT_BLOCK_ENTRY(void, OptoRuntime::new_instance_C(Klass* klass, JavaThread* thre
deoptimize_caller_frame(thread, HAS_PENDING_EXCEPTION);
JRT_BLOCK_END;
if (GraphKit::use_ReduceInitialCardMarks()) {
// inform GC that we won't do card marks for initializing writes.
new_store_pre_barrier(thread);
}
// inform GC that we won't do card marks for initializing writes.
SharedRuntime::on_slowpath_allocation_exit(thread);
JRT_END
@ -284,10 +265,8 @@ JRT_BLOCK_ENTRY(void, OptoRuntime::new_array_C(Klass* array_type, int len, JavaT
thread->set_vm_result(result);
JRT_BLOCK_END;
if (GraphKit::use_ReduceInitialCardMarks()) {
// inform GC that we won't do card marks for initializing writes.
new_store_pre_barrier(thread);
}
// inform GC that we won't do card marks for initializing writes.
SharedRuntime::on_slowpath_allocation_exit(thread);
JRT_END
// array allocation without zeroing
@ -314,10 +293,9 @@ JRT_BLOCK_ENTRY(void, OptoRuntime::new_array_nozero_C(Klass* array_type, int len
thread->set_vm_result(result);
JRT_BLOCK_END;
if (GraphKit::use_ReduceInitialCardMarks()) {
// inform GC that we won't do card marks for initializing writes.
new_store_pre_barrier(thread);
}
// inform GC that we won't do card marks for initializing writes.
SharedRuntime::on_slowpath_allocation_exit(thread);
oop result = thread->vm_result();
if ((len > 0) && (result != NULL) &&


@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -163,10 +163,6 @@ class OptoRuntime : public AllStatic {
static void new_array_C(Klass* array_klass, int len, JavaThread *thread);
static void new_array_nozero_C(Klass* array_klass, int len, JavaThread *thread);
// Post-slow-path-allocation, pre-initializing-stores step for
// implementing ReduceInitialCardMarks
static void new_store_pre_barrier(JavaThread* thread);
// Allocate storage for a multi-dimensional arrays
// Note: needs to be fixed for arbitrary number of dimensions
static void multianewarray2_C(Klass* klass, int len1, int len2, JavaThread *thread);


@ -528,7 +528,7 @@ class OldRegionsLivenessClosure: public HeapRegionClosure {
size_t total_memory() { return _total_memory; }
size_t total_memory_to_free() { return _total_memory_to_free; }
bool doHeapRegion(HeapRegion* r) {
bool do_heap_region(HeapRegion* r) {
if (r->is_old()) {
size_t prev_live = r->marked_bytes();
size_t live = r->live_bytes();


@ -537,6 +537,7 @@ static SpecialFlag const special_jvm_flags[] = {
{ "SharedReadOnlySize", JDK_Version::undefined(), JDK_Version::jdk(10), JDK_Version::undefined() },
{ "SharedMiscDataSize", JDK_Version::undefined(), JDK_Version::jdk(10), JDK_Version::undefined() },
{ "SharedMiscCodeSize", JDK_Version::undefined(), JDK_Version::jdk(10), JDK_Version::undefined() },
{ "UseUTCFileTimestamp", JDK_Version::undefined(), JDK_Version::jdk(11), JDK_Version::jdk(12) },
#ifdef TEST_VERIFY_SPECIAL_JVM_FLAGS
{ "dep > obs", JDK_Version::jdk(9), JDK_Version::jdk(8), JDK_Version::undefined() },


@ -85,7 +85,7 @@ julong os::num_frees = 0; // # of calls to free
julong os::free_bytes = 0; // # of bytes freed
#endif
static juint cur_malloc_words = 0; // current size for MallocMaxTestWords
static size_t cur_malloc_words = 0; // current size for MallocMaxTestWords
void os_init_globals() {
// Called from init_globals().
@ -629,12 +629,12 @@ static void verify_memory(void* ptr) {
//
static bool has_reached_max_malloc_test_peak(size_t alloc_size) {
if (MallocMaxTestWords > 0) {
jint words = (jint)(alloc_size / BytesPerWord);
size_t words = (alloc_size / BytesPerWord);
if ((cur_malloc_words + words) > MallocMaxTestWords) {
return true;
}
Atomic::add(words, (volatile jint *)&cur_malloc_words);
Atomic::add(words, &cur_malloc_words);
}
return false;
}
@ -1826,8 +1826,7 @@ void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
os::SuspendResume::State os::SuspendResume::switch_state(os::SuspendResume::State from,
os::SuspendResume::State to)
{
os::SuspendResume::State result =
(os::SuspendResume::State) Atomic::cmpxchg((jint) to, (jint *) &_state, (jint) from);
os::SuspendResume::State result = Atomic::cmpxchg(to, &_state, from);
if (result == from) {
// success
return to;


@ -27,6 +27,8 @@
#include "jvm.h"
#include "jvmtifiles/jvmti.h"
#include "metaprogramming/isRegisteredEnum.hpp"
#include "metaprogramming/integralConstant.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/handles.hpp"
#include "utilities/macros.hpp"
@ -1006,6 +1008,10 @@ class os: AllStatic {
};
#ifndef _WINDOWS
template<> struct IsRegisteredEnum<os::SuspendResume::State> : public TrueType {};
#endif // !_WINDOWS
// Note that "PAUSE" is almost always used with synchronization
// so arguably we should provide Atomic::SpinPause() instead
// of the global SpinPause() with C linkage.
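The IsRegisteredEnum specialization above is what lets the typed Atomic::cmpxchg in os::SuspendResume::switch_state, earlier in this commit, accept the enum directly. A minimal sketch with a hypothetical enum and field:

// Sketch: register an enum so the templated Atomic operations accept it.
enum ExampleState { EX_IDLE, EX_RUNNING };                        // hypothetical enum
template<> struct IsRegisteredEnum<ExampleState> : public TrueType {};

static volatile ExampleState _example_state = EX_IDLE;            // hypothetical field

bool example_try_start() {
  // cmpxchg(new_value, dest, compare_value) returns the previous value; no jint casts.
  return Atomic::cmpxchg(EX_RUNNING, &_example_state, EX_IDLE) == EX_IDLE;
}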


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -3169,3 +3169,16 @@ frame SharedRuntime::look_for_reserved_stack_annotated_method(JavaThread* thread
}
return activation;
}
void SharedRuntime::on_slowpath_allocation_exit(JavaThread* thread) {
// After any safepoint, just before going back to compiled code,
// we inform the GC that we will be doing initializing writes to
// this object in the future without emitting card-marks, so
// GC may take any compensating steps.
oop new_obj = thread->vm_result();
if (new_obj == NULL) return;
BarrierSet *bs = Universe::heap()->barrier_set();
bs->on_slowpath_allocation_exit(thread, new_obj);
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -211,6 +211,10 @@ class SharedRuntime: AllStatic {
static address deoptimize_for_implicit_exception(JavaThread* thread, address pc, CompiledMethod* nm, int deopt_reason);
#endif
// Post-slow-path-allocation, pre-initializing-stores step for
// implementing e.g. ReduceInitialCardMarks
static void on_slowpath_allocation_exit(JavaThread* thread);
static void enable_stack_reserved_zone(JavaThread* thread);
static frame look_for_reserved_stack_annotated_method(JavaThread* thread, frame fr);


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -418,7 +418,7 @@ JRT_LEAF(void, StubRoutines::oop_copy_uninit(oop* src, oop* dest, size_t count))
SharedRuntime::_oop_array_copy_ctr++; // Slow-path oop array copy
#endif // !PRODUCT
assert(count != 0, "count should be non-zero");
HeapAccess<ARRAYCOPY_DEST_NOT_INITIALIZED>::oop_arraycopy(NULL, NULL, (HeapWord*)src, (HeapWord*)dest, count);
HeapAccess<AS_DEST_NOT_INITIALIZED>::oop_arraycopy(NULL, NULL, (HeapWord*)src, (HeapWord*)dest, count);
JRT_END
JRT_LEAF(void, StubRoutines::arrayof_jbyte_copy(HeapWord* src, HeapWord* dest, size_t count))
@ -462,7 +462,7 @@ JRT_LEAF(void, StubRoutines::arrayof_oop_copy_uninit(HeapWord* src, HeapWord* de
SharedRuntime::_oop_array_copy_ctr++; // Slow-path oop array copy
#endif // !PRODUCT
assert(count != 0, "count should be non-zero");
HeapAccess<ARRAYCOPY_ARRAYOF | ARRAYCOPY_DEST_NOT_INITIALIZED>::oop_arraycopy(NULL, NULL, src, dest, count);
HeapAccess<ARRAYCOPY_ARRAYOF | AS_DEST_NOT_INITIALIZED>::oop_arraycopy(NULL, NULL, src, dest, count);
JRT_END
address StubRoutines::select_fill_function(BasicType t, bool aligned, const char* &name) {


@ -1994,20 +1994,10 @@ void JavaThread::exit(bool destroy_vm, ExitType exit_type) {
JvmtiExport::cleanup_thread(this);
}
// We must flush any deferred card marks before removing a thread from
// the list of active threads.
Universe::heap()->flush_deferred_store_barrier(this);
assert(deferred_card_mark().is_empty(), "Should have been flushed");
#if INCLUDE_ALL_GCS
// We must flush the G1-related buffers before removing a thread
// from the list of active threads. We must do this after any deferred
// card marks have been flushed (above) so that any entries that are
// added to the thread's dirty card queue as a result are not lost.
if (UseG1GC) {
flush_barrier_queues();
}
#endif // INCLUDE_ALL_GCS
// We must flush any deferred card marks and various other GC barrier
// related buffers (e.g. G1 SATB buffer and G1 dirty card queue buffer)
// before removing a thread from the list of active threads.
BarrierSet::barrier_set()->flush_deferred_barriers(this);
log_info(os, thread)("JavaThread %s (tid: " UINTX_FORMAT ").",
exit_type == JavaThread::normal_exit ? "exiting" : "detaching",


@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -30,24 +30,18 @@
#include "runtime/thread.hpp"
inline void Thread::set_suspend_flag(SuspendFlags f) {
assert(sizeof(jint) == sizeof(_suspend_flags), "size mismatch");
uint32_t flags;
do {
flags = _suspend_flags;
}
while (Atomic::cmpxchg((jint)(flags | f),
(volatile jint*)&_suspend_flags,
(jint)flags) != (jint)flags);
while (Atomic::cmpxchg((flags | f), &_suspend_flags, flags) != flags);
}
inline void Thread::clear_suspend_flag(SuspendFlags f) {
assert(sizeof(jint) == sizeof(_suspend_flags), "size mismatch");
uint32_t flags;
do {
flags = _suspend_flags;
}
while (Atomic::cmpxchg((jint)(flags & ~f),
(volatile jint*)&_suspend_flags,
(jint)flags) != (jint)flags);
while (Atomic::cmpxchg((flags & ~f), &_suspend_flags, flags) != flags);
}
inline void Thread::set_has_async_exception() {


@ -466,6 +466,7 @@ typedef PaddedEnd<ObjectMonitor> PaddedObjectMonitor;
nonstatic_field(CardGeneration, _capacity_at_prologue, size_t) \
nonstatic_field(CardGeneration, _used_at_prologue, size_t) \
\
nonstatic_field(CardTableModRefBS, _defer_initial_card_mark, bool) \
nonstatic_field(CardTableModRefBS, _whole_heap, const MemRegion) \
nonstatic_field(CardTableModRefBS, _guard_index, const size_t) \
nonstatic_field(CardTableModRefBS, _last_valid_index, const size_t) \
@ -482,7 +483,6 @@ typedef PaddedEnd<ObjectMonitor> PaddedObjectMonitor;
\
nonstatic_field(CollectedHeap, _reserved, MemRegion) \
nonstatic_field(CollectedHeap, _barrier_set, BarrierSet*) \
nonstatic_field(CollectedHeap, _defer_initial_card_mark, bool) \
nonstatic_field(CollectedHeap, _is_gc_active, bool) \
nonstatic_field(CollectedHeap, _total_collections, unsigned int) \
\

Some files were not shown because too many files have changed in this diff Show More