8195142: Refactor out card table from CardTableModRefBS to flatten the BarrierSet hierarchy

Reviewed-by: stefank, coleenp, kvn, ehelin
Erik Österlund 2018-02-26 09:34:12 +01:00
parent b9bc9f0be8
commit 95d34d138d
104 changed files with 2012 additions and 1797 deletions
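The pattern repeated throughout the hunks below: card-table state (byte_map_base, card_shift, dirty_card_val, g1_young_card_val) moves out of CardTableModRefBS into a separate CardTable object reached through card_table(), and the CardTableForRS/CardTableExtension barrier-set kinds collapse into the single CardTableModRef kind. What follows is a minimal, self-contained C++ sketch of that shape only, not HotSpot source; the simplified classes and the card_write_barrier_post helper are illustrative stand-ins, the dirty value of 0 matches the asserts visible in the diff, and the card_shift value of 9 (512-byte cards) is a HotSpot constant not shown in these hunks.

// Illustrative model only -- not HotSpot code. It mirrors the new split:
// CardTable owns the byte map; CardTableModRefBS only delegates to it.
#include <cstdint>
#include <cstdio>

typedef int8_t jbyte;

class CardTable {                      // new home of the card-table state
 public:
  static const int card_shift = 9;     // 512-byte cards (HotSpot constant)
  static int dirty_card_val() { return 0; }   // "must be 0" per the asserts in the diff
  explicit CardTable(jbyte* base) : _byte_map_base(base) {}
  jbyte* byte_map_base() const { return _byte_map_base; }  // accessor replaces the old bare field
 private:
  jbyte* _byte_map_base;
};

class BarrierSet {
 public:
  // CardTableForRS and CardTableExtension collapse into CardTableModRef.
  enum Name { ModRef, CardTableModRef, G1SATBCTLogging };
  virtual Name kind() const = 0;
  virtual ~BarrierSet() {}
};

class CardTableModRefBS : public BarrierSet {
 public:
  explicit CardTableModRefBS(CardTable* ct) : _card_table(ct) {}
  Name kind() const { return CardTableModRef; }
  CardTable* card_table() const { return _card_table; }    // callers now go through this
 private:
  CardTable* _card_table;
};

// What the rewritten barrier code does in each port: shift the store address
// by CardTable::card_shift and dirty the matching byte under byte_map_base().
static void card_write_barrier_post(CardTableModRefBS* bs, uintptr_t store_addr) {
  CardTable* ct = bs->card_table();
  jbyte* card = ct->byte_map_base() + (store_addr >> CardTable::card_shift);
  *card = (jbyte)CardTable::dirty_card_val();
}

int main() {
  static jbyte cards[1024];
  for (int i = 0; i < 1024; i++) cards[i] = -1;   // start all cards "clean"
  CardTable ct(cards);
  CardTableModRefBS bs(&ct);
  card_write_barrier_post(&bs, 0x200);            // 0x200 >> 9 == card 1
  printf("card[1] = %d\n", (int)cards[1]);        // prints 0, the dirty value
  return 0;
}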

View File

@ -1,5 +1,5 @@
//
// Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2014, Red Hat Inc. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
@ -995,6 +995,7 @@ definitions %{
source_hpp %{
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "opto/addnode.hpp"
@ -4438,8 +4439,8 @@ encode %{
__ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
Assembler::byte, /*acquire*/ false, /*release*/ true,
/*weak*/ false, noreg);
%}
%}
// The only difference between aarch64_enc_cmpxchg and
// aarch64_enc_cmpxchg_acq is that we use load-acquire in the
@ -5845,7 +5846,7 @@ operand immByteMapBase()
%{
// Get base of card map
predicate(Universe::heap()->barrier_set()->is_a(BarrierSet::CardTableModRef) &&
(jbyte*)n->get_ptr() == ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->byte_map_base);
(jbyte*)n->get_ptr() == ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->card_table()->byte_map_base());
match(ConP);
op_cost(0);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -30,6 +30,8 @@
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/compiledICHolder.hpp"
@ -42,6 +44,7 @@
#include "runtime/vframeArray.hpp"
#include "vmreg_aarch64.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#endif
@ -1162,10 +1165,6 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
// arg0: store_address
Address store_addr(rfp, 2*BytesPerWord);
BarrierSet* bs = Universe::heap()->barrier_set();
CardTableModRefBS* ct = (CardTableModRefBS*)bs;
assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
Label done;
Label runtime;
@ -1186,13 +1185,13 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
assert_different_registers(card_offset, byte_map_base, rscratch1);
f.load_argument(0, card_offset);
__ lsr(card_offset, card_offset, CardTableModRefBS::card_shift);
__ lsr(card_offset, card_offset, CardTable::card_shift);
__ load_byte_map_base(byte_map_base);
__ ldrb(rscratch1, Address(byte_map_base, card_offset));
__ cmpw(rscratch1, (int)G1SATBCardTableModRefBS::g1_young_card_val());
__ cmpw(rscratch1, (int)G1CardTable::g1_young_card_val());
__ br(Assembler::EQ, done);
assert((int)CardTableModRefBS::dirty_card_val() == 0, "must be 0");
assert((int)CardTable::dirty_card_val() == 0, "must be 0");
__ membar(Assembler::StoreLoad);
__ ldrb(rscratch1, Address(byte_map_base, card_offset));

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -29,8 +29,9 @@
#include "jvm.h"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "interpreter/interpreter.hpp"
#include "compiler/disassembler.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_aarch64.hpp"
@ -46,6 +47,7 @@
#include "runtime/thread.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/heapRegion.hpp"
@ -3615,16 +3617,16 @@ void MacroAssembler::store_check(Register obj) {
// register obj is destroyed afterwards.
BarrierSet* bs = Universe::heap()->barrier_set();
assert(bs->kind() == BarrierSet::CardTableForRS ||
bs->kind() == BarrierSet::CardTableExtension,
assert(bs->kind() == BarrierSet::CardTableModRef,
"Wrong barrier set kind");
CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
CardTable* ct = ctbs->card_table();
assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
lsr(obj, obj, CardTableModRefBS::card_shift);
lsr(obj, obj, CardTable::card_shift);
assert(CardTableModRefBS::dirty_card_val() == 0, "must be");
assert(CardTable::dirty_card_val() == 0, "must be");
load_byte_map_base(rscratch1);
@ -4126,8 +4128,9 @@ void MacroAssembler::g1_write_barrier_post(Register store_addr,
DirtyCardQueue::byte_offset_of_buf()));
BarrierSet* bs = Universe::heap()->barrier_set();
CardTableModRefBS* ct = (CardTableModRefBS*)bs;
assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
CardTable* ct = ctbs->card_table();
assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
Label done;
Label runtime;
@ -4144,20 +4147,20 @@ void MacroAssembler::g1_write_barrier_post(Register store_addr,
// storing region crossing non-NULL, is card already dirty?
ExternalAddress cardtable((address) ct->byte_map_base);
assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
ExternalAddress cardtable((address) ct->byte_map_base());
assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
const Register card_addr = tmp;
lsr(card_addr, store_addr, CardTableModRefBS::card_shift);
lsr(card_addr, store_addr, CardTable::card_shift);
// get the address of the card
load_byte_map_base(tmp2);
add(card_addr, card_addr, tmp2);
ldrb(tmp2, Address(card_addr));
cmpw(tmp2, (int)G1SATBCardTableModRefBS::g1_young_card_val());
cmpw(tmp2, (int)G1CardTable::g1_young_card_val());
br(Assembler::EQ, done);
assert((int)CardTableModRefBS::dirty_card_val() == 0, "must be 0");
assert((int)CardTable::dirty_card_val() == 0, "must be 0");
membar(Assembler::StoreLoad);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -26,6 +26,8 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/instanceOop.hpp"
@ -652,9 +654,7 @@ class StubGenerator: public StubCodeGenerator {
__ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), 2);
__ pop(saved_regs, sp);
break;
case BarrierSet::CardTableForRS:
case BarrierSet::CardTableExtension:
case BarrierSet::ModRef:
case BarrierSet::CardTableModRef:
break;
default:
ShouldNotReachHere();
@ -695,16 +695,16 @@ class StubGenerator: public StubCodeGenerator {
__ pop(saved_regs, sp);
}
break;
case BarrierSet::CardTableForRS:
case BarrierSet::CardTableExtension:
case BarrierSet::CardTableModRef:
{
CardTableModRefBS* ct = (CardTableModRefBS*)bs;
assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
CardTable* ct = ctbs->card_table();
assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
Label L_loop;
__ lsr(start, start, CardTableModRefBS::card_shift);
__ lsr(end, end, CardTableModRefBS::card_shift);
__ lsr(start, start, CardTable::card_shift);
__ lsr(end, end, CardTable::card_shift);
__ sub(end, end, start); // number of bytes to copy
const Register count = end; // 'end' register contains bytes count now

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -184,8 +184,7 @@ static void do_oop_store(InterpreterMacroAssembler* _masm,
}
break;
#endif // INCLUDE_ALL_GCS
case BarrierSet::CardTableForRS:
case BarrierSet::CardTableExtension:
case BarrierSet::CardTableModRef:
{
if (val == noreg) {
__ store_heap_oop_null(obj);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -33,6 +33,8 @@
#include "ci/ciArray.hpp"
#include "ci/ciObjArrayKlass.hpp"
#include "ci/ciTypeArrayKlass.hpp"
#include "ci/ciUtilities.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
@ -475,22 +477,21 @@ void LIRGenerator::store_stack_parameter(LIR_Opr item, ByteSize offset_from_sp)
}
void LIRGenerator::set_card(LIR_Opr value, LIR_Address* card_addr) {
assert(CardTableModRefBS::dirty_card_val() == 0,
assert(CardTable::dirty_card_val() == 0,
"Cannot use ZR register (aarch64) or the register containing the card table base address directly (aarch32) otherwise");
#ifdef AARCH64
// AARCH64 has a register that is constant zero. We can use that one to set the
// value in the card table to dirty.
__ move(FrameMap::ZR_opr, card_addr);
#else // AARCH64
CardTableModRefBS* ct = (CardTableModRefBS*)_bs;
if(((intx)ct->byte_map_base & 0xff) == 0) {
if((ci_card_table_address_as<intx>() & 0xff) == 0) {
// If the card table base address is aligned to 256 bytes, we can use the register
// that contains the card_table_base_address.
__ move(value, card_addr);
} else {
// Otherwise we need to create a register containing that value.
LIR_Opr tmp_zero = new_register(T_INT);
__ move(LIR_OprFact::intConst(CardTableModRefBS::dirty_card_val()), tmp_zero);
__ move(LIR_OprFact::intConst(CardTable::dirty_card_val()), tmp_zero);
__ move(tmp_zero, card_addr);
}
#endif // AARCH64
@ -510,14 +511,14 @@ void LIRGenerator::CardTableModRef_post_barrier_helper(LIR_OprDesc* addr, LIR_Co
}
#ifdef AARCH64
LIR_Address* shifted_reg_operand = new LIR_Address(tmp, addr, (LIR_Address::Scale) -CardTableModRefBS::card_shift, 0, T_BYTE);
LIR_Address* shifted_reg_operand = new LIR_Address(tmp, addr, (LIR_Address::Scale) -CardTable::card_shift, 0, T_BYTE);
LIR_Opr tmp2 = tmp;
__ add(tmp, LIR_OprFact::address(shifted_reg_operand), tmp2); // tmp2 = tmp + (addr >> CardTableModRefBS::card_shift)
__ add(tmp, LIR_OprFact::address(shifted_reg_operand), tmp2); // tmp2 = tmp + (addr >> CardTable::card_shift)
LIR_Address* card_addr = new LIR_Address(tmp2, T_BYTE);
#else
// Use unsigned type T_BOOLEAN here rather than (signed) T_BYTE since signed load
// byte instruction does not support the addressing mode we need.
LIR_Address* card_addr = new LIR_Address(tmp, addr, (LIR_Address::Scale) -CardTableModRefBS::card_shift, 0, T_BOOLEAN);
LIR_Address* card_addr = new LIR_Address(tmp, addr, (LIR_Address::Scale) -CardTable::card_shift, 0, T_BOOLEAN);
#endif
if (UseCondCardMark) {
if (UseConcMarkSweepGC) {
@ -527,7 +528,7 @@ void LIRGenerator::CardTableModRef_post_barrier_helper(LIR_OprDesc* addr, LIR_Co
__ move(card_addr, cur_value);
LabelObj* L_already_dirty = new LabelObj();
__ cmp(lir_cond_equal, cur_value, LIR_OprFact::intConst(CardTableModRefBS::dirty_card_val()));
__ cmp(lir_cond_equal, cur_value, LIR_OprFact::intConst(CardTable::dirty_card_val()));
__ branch(lir_cond_equal, T_BYTE, L_already_dirty->label());
set_card(tmp, card_addr);
__ branch_destination(L_already_dirty->label());

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,6 +28,9 @@
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "ci/ciUtilities.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_arm.hpp"
#include "oops/compiledICHolder.hpp"
@ -40,6 +43,7 @@
#include "utilities/align.hpp"
#include "vmreg_arm.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#endif
@ -608,8 +612,6 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ set_info("g1_post_barrier_slow_id", dont_gc_arguments);
BarrierSet* bs = Universe::heap()->barrier_set();
CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
Label done;
Label recheck;
Label runtime;
@ -619,8 +621,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
Address buffer(Rthread, in_bytes(JavaThread::dirty_card_queue_offset() +
DirtyCardQueue::byte_offset_of_buf()));
AddressLiteral cardtable((address)ct->byte_map_base, relocInfo::none);
assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
AddressLiteral cardtable(ci_card_table_address_as<address>(), relocInfo::none);
// save at least the registers that need saving if the runtime is called
#ifdef AARCH64
@ -649,12 +650,12 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
// explicitly specify that 'cardtable' has a relocInfo::none
// type.
__ lea(r_card_base_1, cardtable);
__ add(r_card_addr_0, r_card_base_1, AsmOperand(r_obj_0, lsr, CardTableModRefBS::card_shift));
__ add(r_card_addr_0, r_card_base_1, AsmOperand(r_obj_0, lsr, CardTable::card_shift));
// first quick check without barrier
__ ldrb(r_tmp2, Address(r_card_addr_0));
__ cmp(r_tmp2, (int)G1SATBCardTableModRefBS::g1_young_card_val());
__ cmp(r_tmp2, (int)G1CardTable::g1_young_card_val());
__ b(recheck, ne);
__ bind(done);
@ -675,14 +676,14 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
// reload card state after the barrier that ensures the stored oop was visible
__ ldrb(r_tmp2, Address(r_card_addr_0));
assert(CardTableModRefBS::dirty_card_val() == 0, "adjust this code");
assert(CardTable::dirty_card_val() == 0, "adjust this code");
__ cbz(r_tmp2, done);
// storing region crossing non-NULL, card is clean.
// dirty card and log.
assert(0 == (int)CardTableModRefBS::dirty_card_val(), "adjust this code");
if (((intptr_t)ct->byte_map_base & 0xff) == 0) {
assert(0 == (int)CardTable::dirty_card_val(), "adjust this code");
if ((ci_card_table_address_as<intptr_t>() & 0xff) == 0) {
// Card table is aligned so the lowest byte of the table address base is zero.
__ strb(r_card_base_1, Address(r_card_addr_0));
} else {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "jvm.h"
#include "gc/shared/barrierSet.inline.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableModRefBS.inline.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interp_masm_arm.hpp"
@ -410,12 +411,12 @@ void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
void InterpreterMacroAssembler::store_check_part1(Register card_table_base) {
// Check barrier set type (should be card table) and element size
BarrierSet* bs = Universe::heap()->barrier_set();
assert(bs->kind() == BarrierSet::CardTableForRS ||
bs->kind() == BarrierSet::CardTableExtension,
assert(bs->kind() == BarrierSet::CardTableModRef,
"Wrong barrier set kind");
CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "Adjust store check code");
CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
CardTable* ct = ctbs->card_table();
assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "Adjust store check code");
// Load card table base address.
@ -433,19 +434,19 @@ void InterpreterMacroAssembler::store_check_part1(Register card_table_base) {
rarely accessed area of thread descriptor).
*/
// TODO-AARCH64 Investigate if mov_slow is faster than ldr from Rthread on AArch64
mov_address(card_table_base, (address)ct->byte_map_base, symbolic_Relocation::card_table_reference);
mov_address(card_table_base, (address)ct->byte_map_base(), symbolic_Relocation::card_table_reference);
}
// The 2nd part of the store check.
void InterpreterMacroAssembler::store_check_part2(Register obj, Register card_table_base, Register tmp) {
assert_different_registers(obj, card_table_base, tmp);
assert(CardTableModRefBS::dirty_card_val() == 0, "Dirty card value must be 0 due to optimizations.");
assert(CardTable::dirty_card_val() == 0, "Dirty card value must be 0 due to optimizations.");
#ifdef AARCH64
add(card_table_base, card_table_base, AsmOperand(obj, lsr, CardTableModRefBS::card_shift));
add(card_table_base, card_table_base, AsmOperand(obj, lsr, CardTable::card_shift));
Address card_table_addr(card_table_base);
#else
Address card_table_addr(card_table_base, obj, lsr, CardTableModRefBS::card_shift);
Address card_table_addr(card_table_base, obj, lsr, CardTable::card_shift);
#endif
if (UseCondCardMark) {
@ -472,8 +473,9 @@ void InterpreterMacroAssembler::set_card(Register card_table_base, Address card_
#ifdef AARCH64
strb(ZR, card_table_addr);
#else
CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
if ((((uintptr_t)ct->byte_map_base & 0xff) == 0)) {
CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
CardTable* ct = ctbs->card_table();
if ((((uintptr_t)ct->byte_map_base() & 0xff) == 0)) {
// Card table is aligned so the lowest byte of the table address base is zero.
// This works only if the code is not saved for later use, possibly
// in a context where the base would no longer be aligned.

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,6 +29,7 @@
#include "ci/ciEnv.hpp"
#include "code/nativeInst.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
@ -43,6 +44,7 @@
#include "runtime/stubRoutines.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/heapRegion.hpp"
@ -2265,7 +2267,8 @@ void MacroAssembler::g1_write_barrier_post(Register store_addr,
DirtyCardQueue::byte_offset_of_buf()));
BarrierSet* bs = Universe::heap()->barrier_set();
CardTableModRefBS* ct = (CardTableModRefBS*)bs;
CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
CardTable* ct = ctbs->card_table();
Label done;
Label runtime;
@ -2286,18 +2289,18 @@ void MacroAssembler::g1_write_barrier_post(Register store_addr,
// storing region crossing non-NULL, is card already dirty?
const Register card_addr = tmp1;
assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
mov_address(tmp2, (address)ct->byte_map_base, symbolic_Relocation::card_table_reference);
add(card_addr, tmp2, AsmOperand(store_addr, lsr, CardTableModRefBS::card_shift));
mov_address(tmp2, (address)ct->byte_map_base(), symbolic_Relocation::card_table_reference);
add(card_addr, tmp2, AsmOperand(store_addr, lsr, CardTable::card_shift));
ldrb(tmp2, Address(card_addr));
cmp(tmp2, (int)G1SATBCardTableModRefBS::g1_young_card_val());
cmp(tmp2, (int)G1CardTable::g1_young_card_val());
b(done, eq);
membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad), tmp2);
assert(CardTableModRefBS::dirty_card_val() == 0, "adjust this code");
assert(CardTable::dirty_card_val() == 0, "adjust this code");
ldrb(tmp2, Address(card_addr));
cbz(tmp2, done);
@ -3023,7 +3026,6 @@ void MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
}
#endif // COMPILER2
// Must preserve condition codes, or C2 encodeKlass_not_null rule
// must be changed.
void MacroAssembler::encode_klass_not_null(Register r) {
@ -3261,4 +3263,3 @@ void MacroAssembler::fast_unlock(Register Roop, Register Rbox, Register Rscratch
}
#endif // COMPILER2

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,6 +25,8 @@
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "assembler_arm.inline.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_arm.hpp"
#include "oops/instanceOop.hpp"
@ -2907,8 +2909,7 @@ class StubGenerator: public StubCodeGenerator {
__ pop(saved_regs | R9ifScratched);
#endif // AARCH64
}
case BarrierSet::CardTableForRS:
case BarrierSet::CardTableExtension:
case BarrierSet::CardTableModRef:
break;
default:
ShouldNotReachHere();
@ -2961,12 +2962,12 @@ class StubGenerator: public StubCodeGenerator {
#endif // !AARCH64
}
break;
case BarrierSet::CardTableForRS:
case BarrierSet::CardTableExtension:
case BarrierSet::CardTableModRef:
{
BLOCK_COMMENT("CardTablePostBarrier");
CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
CardTable* ct = ctbs->card_table();
assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
Label L_cardtable_loop, L_done;
@ -2975,12 +2976,12 @@ class StubGenerator: public StubCodeGenerator {
__ add_ptr_scaled_int32(count, addr, count, LogBytesPerHeapOop);
__ sub(count, count, BytesPerHeapOop); // last addr
__ logical_shift_right(addr, addr, CardTableModRefBS::card_shift);
__ logical_shift_right(count, count, CardTableModRefBS::card_shift);
__ logical_shift_right(addr, addr, CardTable::card_shift);
__ logical_shift_right(count, count, CardTable::card_shift);
__ sub(count, count, addr); // nb of cards
// warning: Rthread has not been preserved
__ mov_address(tmp, (address) ct->byte_map_base, symbolic_Relocation::card_table_reference);
__ mov_address(tmp, (address) ct->byte_map_base(), symbolic_Relocation::card_table_reference);
__ add(addr,tmp, addr);
Register zero = __ zero_register(tmp);
@ -2992,8 +2993,6 @@ class StubGenerator: public StubCodeGenerator {
__ BIND(L_done);
}
break;
case BarrierSet::ModRef:
break;
default:
ShouldNotReachHere();
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -228,8 +228,7 @@ static void do_oop_store(InterpreterMacroAssembler* _masm,
}
break;
#endif // INCLUDE_ALL_GCS
case BarrierSet::CardTableForRS:
case BarrierSet::CardTableExtension:
case BarrierSet::CardTableModRef:
{
if (is_null) {
__ store_heap_oop_null(new_val, obj);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2015 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -27,6 +27,9 @@
#include "c1/c1_Defs.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "ci/ciUtilities.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_ppc.hpp"
#include "oops/compiledICHolder.hpp"
@ -40,6 +43,7 @@
#include "utilities/macros.hpp"
#include "vmreg_ppc.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#endif
@ -795,7 +799,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
Register tmp = R0;
Register addr = R14;
Register tmp2 = R15;
jbyte* byte_map_base = ((CardTableModRefBS*)bs)->byte_map_base;
jbyte* byte_map_base = ci_card_table_address();
Label restart, refill, ret;
@ -803,26 +807,26 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ std(addr, -8, R1_SP);
__ std(tmp2, -16, R1_SP);
__ srdi(addr, R0, CardTableModRefBS::card_shift); // Addr is passed in R0.
__ srdi(addr, R0, CardTable::card_shift); // Addr is passed in R0.
__ load_const_optimized(/*cardtable*/ tmp2, byte_map_base, tmp);
__ add(addr, tmp2, addr);
__ lbz(tmp, 0, addr); // tmp := [addr + cardtable]
// Return if young card.
__ cmpwi(CCR0, tmp, G1SATBCardTableModRefBS::g1_young_card_val());
__ cmpwi(CCR0, tmp, G1CardTable::g1_young_card_val());
__ beq(CCR0, ret);
// Return if sequential consistent value is already dirty.
__ membar(Assembler::StoreLoad);
__ lbz(tmp, 0, addr); // tmp := [addr + cardtable]
__ cmpwi(CCR0, tmp, G1SATBCardTableModRefBS::dirty_card_val());
__ cmpwi(CCR0, tmp, G1CardTable::dirty_card_val());
__ beq(CCR0, ret);
// Not dirty.
// First, dirty it.
__ li(tmp, G1SATBCardTableModRefBS::dirty_card_val());
__ li(tmp, G1CardTable::dirty_card_val());
__ stb(tmp, 0, addr);
int dirty_card_q_index_byte_offset =

View File

@ -26,6 +26,7 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
@ -43,6 +44,7 @@
#include "runtime/stubRoutines.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/heapRegion.hpp"
@ -3036,20 +3038,20 @@ void MacroAssembler::safepoint_poll(Label& slow_path, Register temp_reg) {
void MacroAssembler::card_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp) {
CardTableModRefBS* bs =
barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
assert(bs->kind() == BarrierSet::CardTableForRS ||
bs->kind() == BarrierSet::CardTableExtension, "wrong barrier");
assert(bs->kind() == BarrierSet::CardTableModRef, "wrong barrier");
CardTable* ct = bs->card_table();
#ifdef ASSERT
cmpdi(CCR0, Rnew_val, 0);
asm_assert_ne("null oop not allowed", 0x321);
#endif
card_table_write(bs->byte_map_base, Rtmp, Rstore_addr);
card_table_write(ct->byte_map_base(), Rtmp, Rstore_addr);
}
// Write the card table byte.
void MacroAssembler::card_table_write(jbyte* byte_map_base, Register Rtmp, Register Robj) {
assert_different_registers(Robj, Rtmp, R0);
load_const_optimized(Rtmp, (address)byte_map_base, R0);
srdi(Robj, Robj, CardTableModRefBS::card_shift);
srdi(Robj, Robj, CardTable::card_shift);
li(R0, 0); // dirty
if (UseConcMarkSweepGC) membar(Assembler::StoreStore);
stbx(R0, Rtmp, Robj);
@ -3171,6 +3173,7 @@ void MacroAssembler::g1_write_barrier_post(Register Rstore_addr, Register Rnew_v
G1SATBCardTableLoggingModRefBS* bs =
barrier_set_cast<G1SATBCardTableLoggingModRefBS>(Universe::heap()->barrier_set());
CardTable* ct = bs->card_table();
// Does store cross heap regions?
if (G1RSBarrierRegionFilter) {
@ -3187,26 +3190,26 @@ void MacroAssembler::g1_write_barrier_post(Register Rstore_addr, Register Rnew_v
#endif
// Storing region crossing non-NULL, is card already dirty?
assert(sizeof(*bs->byte_map_base) == sizeof(jbyte), "adjust this code");
assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
const Register Rcard_addr = Rtmp1;
Register Rbase = Rtmp2;
load_const_optimized(Rbase, (address)bs->byte_map_base, /*temp*/ Rtmp3);
load_const_optimized(Rbase, (address)ct->byte_map_base(), /*temp*/ Rtmp3);
srdi(Rcard_addr, Rstore_addr, CardTableModRefBS::card_shift);
srdi(Rcard_addr, Rstore_addr, CardTable::card_shift);
// Get the address of the card.
lbzx(/*card value*/ Rtmp3, Rbase, Rcard_addr);
cmpwi(CCR0, Rtmp3, (int)G1SATBCardTableModRefBS::g1_young_card_val());
cmpwi(CCR0, Rtmp3, (int)G1CardTable::g1_young_card_val());
beq(CCR0, filtered);
membar(Assembler::StoreLoad);
lbzx(/*card value*/ Rtmp3, Rbase, Rcard_addr); // Reload after membar.
cmpwi(CCR0, Rtmp3 /* card value */, CardTableModRefBS::dirty_card_val());
cmpwi(CCR0, Rtmp3 /* card value */, CardTable::dirty_card_val());
beq(CCR0, filtered);
// Storing a region crossing, non-NULL oop, card is clean.
// Dirty card and log.
li(Rtmp3, CardTableModRefBS::dirty_card_val());
li(Rtmp3, CardTable::dirty_card_val());
//release(); // G1: oops are allowed to get visible after dirty marking.
stbx(Rtmp3, Rbase, Rcard_addr);

View File

@ -25,6 +25,8 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_ppc.hpp"
#include "oops/instanceOop.hpp"
@ -667,9 +669,7 @@ class StubGenerator: public StubCodeGenerator {
__ bind(filtered);
}
break;
case BarrierSet::CardTableForRS:
case BarrierSet::CardTableExtension:
case BarrierSet::ModRef:
case BarrierSet::CardTableModRef:
break;
default:
ShouldNotReachHere();
@ -703,8 +703,7 @@ class StubGenerator: public StubCodeGenerator {
__ restore_LR_CR(R0);
}
break;
case BarrierSet::CardTableForRS:
case BarrierSet::CardTableExtension:
case BarrierSet::CardTableModRef:
{
Label Lskip_loop, Lstore_loop;
if (UseConcMarkSweepGC) {
@ -712,19 +711,20 @@ class StubGenerator: public StubCodeGenerator {
__ release();
}
CardTableModRefBS* const ct = barrier_set_cast<CardTableModRefBS>(bs);
assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
CardTableModRefBS* const ctbs = barrier_set_cast<CardTableModRefBS>(bs);
CardTable* const ct = ctbs->card_table();
assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
assert_different_registers(addr, count, tmp);
__ sldi(count, count, LogBytesPerHeapOop);
__ addi(count, count, -BytesPerHeapOop);
__ add(count, addr, count);
// Use two shifts to clear out those low order two bits! (Cannot opt. into 1.)
__ srdi(addr, addr, CardTableModRefBS::card_shift);
__ srdi(count, count, CardTableModRefBS::card_shift);
__ srdi(addr, addr, CardTable::card_shift);
__ srdi(count, count, CardTable::card_shift);
__ subf(count, addr, count);
assert_different_registers(R0, addr, count, tmp);
__ load_const(tmp, (address)ct->byte_map_base);
__ load_const(tmp, (address)ct->byte_map_base());
__ addic_(count, count, 1);
__ beq(CCR0, Lskip_loop);
__ li(R0, 0);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2017 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -103,8 +103,7 @@ static void do_oop_store(InterpreterMacroAssembler* _masm,
}
break;
#endif // INCLUDE_ALL_GCS
case BarrierSet::CardTableForRS:
case BarrierSet::CardTableExtension:
case BarrierSet::CardTableModRef:
{
Label Lnull, Ldone;
if (Rval != noreg) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -27,6 +27,9 @@
#include "c1/c1_Defs.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "ci/ciUtilities.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_s390.hpp"
#include "oops/compiledICHolder.hpp"
@ -40,6 +43,7 @@
#include "vmreg_s390.inline.hpp"
#include "registerSaver_s390.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#endif
@ -845,7 +849,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
Register r1 = Z_R6; // Must be saved/restored.
Register r2 = Z_R7; // Must be saved/restored.
Register cardtable = r1; // Must be non-volatile, because it is used to save addr_card.
jbyte* byte_map_base = ((CardTableModRefBS*)bs)->byte_map_base;
jbyte* byte_map_base = ci_card_table_address();
// Save registers used below (see assertion in G1PreBarrierStub::emit_code()).
__ z_stg(r1, 0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
@ -854,17 +858,17 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
// Calculate address of card corresponding to the updated oop slot.
AddressLiteral rs(byte_map_base);
__ z_srlg(addr_card, addr_oop, CardTableModRefBS::card_shift);
__ z_srlg(addr_card, addr_oop, CardTable::card_shift);
addr_oop = noreg; // dead now
__ load_const_optimized(cardtable, rs); // cardtable := <card table base>
__ z_agr(addr_card, cardtable); // addr_card := addr_oop>>card_shift + cardtable
__ z_cli(0, addr_card, (int)G1SATBCardTableModRefBS::g1_young_card_val());
__ z_cli(0, addr_card, (int)G1CardTable::g1_young_card_val());
__ z_bre(young_card);
__ z_sync(); // Required to support concurrent cleaning.
__ z_cli(0, addr_card, (int)CardTableModRefBS::dirty_card_val());
__ z_cli(0, addr_card, (int)CardTable::dirty_card_val());
__ z_brne(not_already_dirty);
__ bind(young_card);
@ -877,7 +881,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ bind(not_already_dirty);
// First, dirty it: [addr_card] := 0
__ z_mvi(0, addr_card, CardTableModRefBS::dirty_card_val());
__ z_mvi(0, addr_card, CardTable::dirty_card_val());
Register idx = cardtable; // Must be non-volatile, because it is used to save addr_card.
Register buf = r2;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -27,6 +27,7 @@
#include "asm/codeBuffer.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
@ -50,6 +51,7 @@
#include "utilities/events.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/heapRegion.hpp"
@ -3502,12 +3504,13 @@ void MacroAssembler::compiler_fast_unlock_object(Register oop, Register box, Reg
// Write to card table for modification at store_addr - register is destroyed afterwards.
void MacroAssembler::card_write_barrier_post(Register store_addr, Register tmp) {
CardTableModRefBS* bs = (CardTableModRefBS*) Universe::heap()->barrier_set();
assert(bs->kind() == BarrierSet::CardTableForRS ||
bs->kind() == BarrierSet::CardTableExtension, "wrong barrier");
BarrierSet* bs = Universe::heap()->barrier_set();
CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
CardTable* ct = ctbs->card_table();
assert(bs->kind() == BarrierSet::CardTableModRef, "wrong barrier");
assert_different_registers(store_addr, tmp);
z_srlg(store_addr, store_addr, CardTableModRefBS::card_shift);
load_absolute_address(tmp, (address)bs->byte_map_base);
z_srlg(store_addr, store_addr, CardTable::card_shift);
load_absolute_address(tmp, (address)ct->byte_map_base());
z_agr(store_addr, tmp);
z_mvi(0, store_addr, 0); // Store byte 0.
}
@ -3707,6 +3710,7 @@ void MacroAssembler::g1_write_barrier_post(Register Rstore_addr,
assert_different_registers(Rstore_addr, Rnew_val, Rtmp1, Rtmp2); // Most probably, Rnew_val == Rtmp3.
G1SATBCardTableModRefBS* bs = (G1SATBCardTableModRefBS*) Universe::heap()->barrier_set();
G1CardTable* ct = bs->card_table();
assert(bs->kind() == BarrierSet::G1SATBCTLogging, "wrong barrier");
BLOCK_COMMENT("g1_write_barrier_post {");
@ -3733,15 +3737,15 @@ void MacroAssembler::g1_write_barrier_post(Register Rstore_addr,
Rnew_val = noreg; // end of lifetime
// Storing region crossing non-NULL, is card already dirty?
assert(sizeof(*bs->byte_map_base) == sizeof(jbyte), "adjust this code");
assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
assert_different_registers(Rtmp1, Rtmp2, Rtmp3);
// Make sure not to use Z_R0 for any of these registers.
Register Rcard_addr = (Rtmp1 != Z_R0_scratch) ? Rtmp1 : Rtmp3;
Register Rbase = (Rtmp2 != Z_R0_scratch) ? Rtmp2 : Rtmp3;
// calculate address of card
load_const_optimized(Rbase, (address)bs->byte_map_base); // Card table base.
z_srlg(Rcard_addr, Rstore_addr, CardTableModRefBS::card_shift); // Index into card table.
load_const_optimized(Rbase, (address)ct->byte_map_base()); // Card table base.
z_srlg(Rcard_addr, Rstore_addr, CardTable::card_shift); // Index into card table.
z_algr(Rcard_addr, Rbase); // Explicit calculation needed for cli.
Rbase = noreg; // end of lifetime
@ -3753,13 +3757,13 @@ void MacroAssembler::g1_write_barrier_post(Register Rstore_addr,
// Check the card value. If dirty, we're done.
// This also avoids false sharing of the (already dirty) card.
z_sync(); // Required to support concurrent cleaning.
assert((unsigned int)CardTableModRefBS::dirty_card_val() <= 255, "otherwise check this code");
z_cli(0, Rcard_addr, CardTableModRefBS::dirty_card_val()); // Reload after membar.
assert((unsigned int)CardTable::dirty_card_val() <= 255, "otherwise check this code");
z_cli(0, Rcard_addr, CardTable::dirty_card_val()); // Reload after membar.
z_bre(filtered);
// Storing a region crossing, non-NULL oop, card is clean.
// Dirty card and log.
z_mvi(0, Rcard_addr, CardTableModRefBS::dirty_card_val());
z_mvi(0, Rcard_addr, CardTable::dirty_card_val());
Register Rcard_addr_x = Rcard_addr;
Register Rqueue_index = (Rtmp2 != Z_R0_scratch) ? Rtmp2 : Rtmp1;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -26,6 +26,8 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "registerSaver_s390.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interp_masm.hpp"
#include "nativeInst_s390.hpp"
@ -722,8 +724,7 @@ class StubGenerator: public StubCodeGenerator {
__ bind(filtered);
}
break;
case BarrierSet::CardTableForRS:
case BarrierSet::CardTableExtension:
case BarrierSet::CardTableModRef:
case BarrierSet::ModRef:
break;
default:
@ -761,14 +762,14 @@ class StubGenerator: public StubCodeGenerator {
}
}
break;
case BarrierSet::CardTableForRS:
case BarrierSet::CardTableExtension:
case BarrierSet::CardTableModRef:
// These cases formerly known as
// void array_store_check(Register addr, Register count, bool branchToEnd).
{
NearLabel doXC, done;
CardTableModRefBS* ct = (CardTableModRefBS*)bs;
assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
CardTable* ct = ctbs->card_table();
assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
assert_different_registers(Z_R0, Z_R1, addr, count);
// Nothing to do if count <= 0.
@ -787,11 +788,11 @@ class StubGenerator: public StubCodeGenerator {
__ add2reg_with_index(count, -BytesPerHeapOop, count, addr);
// Get base address of card table.
__ load_const_optimized(Z_R1, (address)ct->byte_map_base);
__ load_const_optimized(Z_R1, (address)ct->byte_map_base());
// count = (count>>shift) - (addr>>shift)
__ z_srlg(addr, addr, CardTableModRefBS::card_shift);
__ z_srlg(count, count, CardTableModRefBS::card_shift);
__ z_srlg(addr, addr, CardTable::card_shift);
__ z_srlg(count, count, CardTable::card_shift);
// Prefetch first elements of card table for update.
if (VM_Version::has_Prefetch()) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2017 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -260,8 +260,7 @@ static void do_oop_store(InterpreterMacroAssembler* _masm,
}
break;
#endif // INCLUDE_ALL_GCS
case BarrierSet::CardTableForRS:
case BarrierSet::CardTableExtension:
case BarrierSet::CardTableModRef:
{
if (val_is_null) {
__ store_heap_oop_null(val, offset, base);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,6 +26,9 @@
#include "c1/c1_Defs.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "ci/ciUtilities.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_sparc.hpp"
#include "oops/compiledICHolder.hpp"
@ -38,6 +41,7 @@
#include "utilities/align.hpp"
#include "vmreg_sparc.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#endif
@ -843,22 +847,22 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
Register cardtable = G5;
Register tmp = G1_scratch;
Register tmp2 = G3_scratch;
jbyte* byte_map_base = barrier_set_cast<CardTableModRefBS>(bs)->byte_map_base;
jbyte* byte_map_base = ci_card_table_address();
Label not_already_dirty, restart, refill, young_card;
__ srlx(addr, CardTableModRefBS::card_shift, addr);
__ srlx(addr, CardTable::card_shift, addr);
AddressLiteral rs(byte_map_base);
__ set(rs, cardtable); // cardtable := <card table base>
__ ldub(addr, cardtable, tmp); // tmp := [addr + cardtable]
__ cmp_and_br_short(tmp, G1SATBCardTableModRefBS::g1_young_card_val(), Assembler::equal, Assembler::pt, young_card);
__ cmp_and_br_short(tmp, G1CardTable::g1_young_card_val(), Assembler::equal, Assembler::pt, young_card);
__ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
__ ldub(addr, cardtable, tmp); // tmp := [addr + cardtable]
assert(CardTableModRefBS::dirty_card_val() == 0, "otherwise check this code");
assert(CardTable::dirty_card_val() == 0, "otherwise check this code");
__ cmp_and_br_short(tmp, G0, Assembler::notEqual, Assembler::pt, not_already_dirty);
__ bind(young_card);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,6 +26,7 @@
#include "jvm.h"
#include "asm/macroAssembler.inline.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
@ -44,6 +45,7 @@
#include "utilities/align.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/heapRegion.hpp"
@ -658,7 +660,7 @@ void MacroAssembler::ic_call(address entry, bool emit_delay, jint method_index)
void MacroAssembler::card_table_write(jbyte* byte_map_base,
Register tmp, Register obj) {
srlx(obj, CardTableModRefBS::card_shift, obj);
srlx(obj, CardTable::card_shift, obj);
assert(tmp != obj, "need separate temp reg");
set((address) byte_map_base, tmp);
stb(G0, tmp, obj);
@ -3574,17 +3576,17 @@ static void generate_dirty_card_log_enqueue(jbyte* byte_map_base) {
Label not_already_dirty, restart, refill, young_card;
__ srlx(O0, CardTableModRefBS::card_shift, O0);
__ srlx(O0, CardTable::card_shift, O0);
AddressLiteral addrlit(byte_map_base);
__ set(addrlit, O1); // O1 := <card table base>
__ ldub(O0, O1, O2); // O2 := [O0 + O1]
__ cmp_and_br_short(O2, G1SATBCardTableModRefBS::g1_young_card_val(), Assembler::equal, Assembler::pt, young_card);
__ cmp_and_br_short(O2, G1CardTable::g1_young_card_val(), Assembler::equal, Assembler::pt, young_card);
__ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
__ ldub(O0, O1, O2); // O2 := [O0 + O1]
assert(CardTableModRefBS::dirty_card_val() == 0, "otherwise check this code");
assert(CardTable::dirty_card_val() == 0, "otherwise check this code");
__ cmp_and_br_short(O2, G0, Assembler::notEqual, Assembler::pt, not_already_dirty);
__ bind(young_card);
@ -3664,6 +3666,7 @@ void MacroAssembler::g1_write_barrier_post(Register store_addr, Register new_val
G1SATBCardTableLoggingModRefBS* bs =
barrier_set_cast<G1SATBCardTableLoggingModRefBS>(Universe::heap()->barrier_set());
CardTable* ct = bs->card_table();
if (G1RSBarrierRegionFilter) {
xor3(store_addr, new_val, tmp);
@ -3704,7 +3707,8 @@ void g1_barrier_stubs_init() {
if (dirty_card_log_enqueue == 0) {
G1SATBCardTableLoggingModRefBS* bs =
barrier_set_cast<G1SATBCardTableLoggingModRefBS>(heap->barrier_set());
generate_dirty_card_log_enqueue(bs->byte_map_base);
CardTable *ct = bs->card_table();
generate_dirty_card_log_enqueue(ct->byte_map_base());
assert(dirty_card_log_enqueue != 0, "postcondition.");
}
if (satb_log_enqueue_with_frame == 0) {
@ -3726,9 +3730,10 @@ void MacroAssembler::card_write_barrier_post(Register store_addr, Register new_v
if (new_val == G0) return;
CardTableModRefBS* bs =
barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
assert(bs->kind() == BarrierSet::CardTableForRS ||
bs->kind() == BarrierSet::CardTableExtension, "wrong barrier");
card_table_write(bs->byte_map_base, tmp, store_addr);
CardTable* ct = bs->card_table();
assert(bs->kind() == BarrierSet::CardTableModRef, "wrong barrier");
card_table_write(ct->byte_map_base(), tmp, store_addr);
}
// ((OopHandle)result).resolve();

View File

@ -24,6 +24,8 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_sparc.hpp"
#include "oops/instanceOop.hpp"
@ -875,9 +877,7 @@ class StubGenerator: public StubCodeGenerator {
DEBUG_ONLY(__ set(0xDEADC0DE, tmp);) // we have killed tmp
}
break;
case BarrierSet::CardTableForRS:
case BarrierSet::CardTableExtension:
case BarrierSet::ModRef:
case BarrierSet::CardTableModRef:
break;
default:
ShouldNotReachHere();
@ -908,11 +908,11 @@ class StubGenerator: public StubCodeGenerator {
__ restore();
}
break;
case BarrierSet::CardTableForRS:
case BarrierSet::CardTableExtension:
case BarrierSet::CardTableModRef:
{
CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
CardTable* ct = ctbs->card_table();
assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
assert_different_registers(addr, count, tmp);
Label L_loop, L_done;
@ -923,10 +923,10 @@ class StubGenerator: public StubCodeGenerator {
__ sub(count, BytesPerHeapOop, count);
__ add(count, addr, count);
// Use two shifts to clear out those low order two bits! (Cannot opt. into 1.)
__ srl_ptr(addr, CardTableModRefBS::card_shift, addr);
__ srl_ptr(count, CardTableModRefBS::card_shift, count);
__ srl_ptr(addr, CardTable::card_shift, addr);
__ srl_ptr(count, CardTable::card_shift, count);
__ sub(count, addr, count);
AddressLiteral rs(ct->byte_map_base);
AddressLiteral rs(ct->byte_map_base());
__ set(rs, tmp);
__ BIND(L_loop);
__ stb(G0, tmp, addr);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -90,8 +90,7 @@ static void do_oop_store(InterpreterMacroAssembler* _masm,
}
break;
#endif // INCLUDE_ALL_GCS
case BarrierSet::CardTableForRS:
case BarrierSet::CardTableExtension:
case BarrierSet::CardTableModRef:
{
if (index == noreg ) {
assert(Assembler::is_simm13(offset), "fix this code");

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,6 +27,9 @@
#include "c1/c1_Defs.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "ci/ciUtilities.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_x86.hpp"
#include "oops/compiledICHolder.hpp"
@ -39,6 +42,7 @@
#include "utilities/macros.hpp"
#include "vmreg_x86.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#endif
@ -1632,10 +1636,6 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
// arg0: store_address
Address store_addr(rbp, 2*BytesPerWord);
CardTableModRefBS* ct =
barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
Label done;
Label enqueued;
Label runtime;
@ -1657,25 +1657,25 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
const Register card_addr = rcx;
f.load_argument(0, card_addr);
__ shrptr(card_addr, CardTableModRefBS::card_shift);
__ shrptr(card_addr, CardTable::card_shift);
// Do not use ExternalAddress to load 'byte_map_base', since 'byte_map_base' is NOT
// a valid address and therefore is not properly handled by the relocation code.
__ movptr(cardtable, (intptr_t)ct->byte_map_base);
__ movptr(cardtable, ci_card_table_address_as<intptr_t>());
__ addptr(card_addr, cardtable);
NOT_LP64(__ get_thread(thread);)
__ cmpb(Address(card_addr, 0), (int)G1SATBCardTableModRefBS::g1_young_card_val());
__ cmpb(Address(card_addr, 0), (int)G1CardTable::g1_young_card_val());
__ jcc(Assembler::equal, done);
__ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
__ cmpb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val());
__ cmpb(Address(card_addr, 0), (int)CardTable::dirty_card_val());
__ jcc(Assembler::equal, done);
// storing region crossing non-NULL, card is clean.
// dirty card and log.
__ movb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val());
__ movb(Address(card_addr, 0), (int)CardTable::dirty_card_val());
const Register tmp = rdx;
__ push(rdx);

View File

@ -27,6 +27,7 @@
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
@ -45,6 +46,7 @@
#include "runtime/thread.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/heapRegion.hpp"
@ -5407,9 +5409,10 @@ void MacroAssembler::g1_write_barrier_post(Register store_addr,
Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
DirtyCardQueue::byte_offset_of_buf()));
CardTableModRefBS* ct =
CardTableModRefBS* ctbs =
barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
CardTable* ct = ctbs->card_table();
assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
Label done;
Label runtime;
@ -5432,24 +5435,24 @@ void MacroAssembler::g1_write_barrier_post(Register store_addr,
const Register cardtable = tmp2;
movptr(card_addr, store_addr);
shrptr(card_addr, CardTableModRefBS::card_shift);
shrptr(card_addr, CardTable::card_shift);
// Do not use ExternalAddress to load 'byte_map_base', since 'byte_map_base' is NOT
// a valid address and therefore is not properly handled by the relocation code.
movptr(cardtable, (intptr_t)ct->byte_map_base);
movptr(cardtable, (intptr_t)ct->byte_map_base());
addptr(card_addr, cardtable);
cmpb(Address(card_addr, 0), (int)G1SATBCardTableModRefBS::g1_young_card_val());
cmpb(Address(card_addr, 0), (int)G1CardTable::g1_young_card_val());
jcc(Assembler::equal, done);
membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
cmpb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val());
cmpb(Address(card_addr, 0), (int)CardTable::dirty_card_val());
jcc(Assembler::equal, done);
// storing a region crossing, non-NULL oop, card is clean.
// dirty card and log.
movb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val());
movb(Address(card_addr, 0), (int)CardTable::dirty_card_val());
cmpl(queue_index, 0);
jcc(Assembler::equal, runtime);
@ -5494,14 +5497,14 @@ void MacroAssembler::store_check(Register obj) {
// Does a store check for the oop in register obj. The content of
// register obj is destroyed afterwards.
BarrierSet* bs = Universe::heap()->barrier_set();
assert(bs->kind() == BarrierSet::CardTableForRS ||
bs->kind() == BarrierSet::CardTableExtension,
assert(bs->kind() == BarrierSet::CardTableModRef,
"Wrong barrier set kind");
CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
CardTable* ct = ctbs->card_table();
assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
shrptr(obj, CardTableModRefBS::card_shift);
shrptr(obj, CardTable::card_shift);
Address card_addr;
@ -5510,7 +5513,7 @@ void MacroAssembler::store_check(Register obj) {
// So this essentially converts an address to a displacement and it will
// never need to be relocated. On 64bit however the value may be too
// large for a 32bit displacement.
intptr_t disp = (intptr_t) ct->byte_map_base;
intptr_t disp = (intptr_t) ct->byte_map_base();
if (is_simm32(disp)) {
card_addr = Address(noreg, obj, Address::times_1, disp);
} else {
@ -5518,12 +5521,12 @@ void MacroAssembler::store_check(Register obj) {
// displacement and done in a single instruction given favorable mapping and a
// smarter version of as_Address. However, 'ExternalAddress' generates a relocation
// entry and that entry is not properly handled by the relocation code.
AddressLiteral cardtable((address)ct->byte_map_base, relocInfo::none);
AddressLiteral cardtable((address)ct->byte_map_base(), relocInfo::none);
Address index(noreg, obj, Address::times_1);
card_addr = as_Address(ArrayAddress(cardtable, index));
}
int dirty = CardTableModRefBS::dirty_card_val();
int dirty = CardTable::dirty_card_val();
if (UseCondCardMark) {
Label L_already_dirty;
if (UseConcMarkSweepGC) {

View File

@ -25,6 +25,8 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_x86.hpp"
#include "oops/instanceOop.hpp"
@ -704,10 +706,7 @@ class StubGenerator: public StubCodeGenerator {
__ bind(filtered);
}
break;
#endif // INCLUDE_ALL_GCS
case BarrierSet::CardTableForRS:
case BarrierSet::CardTableExtension:
case BarrierSet::ModRef:
case BarrierSet::CardTableModRef:
break;
default :
ShouldNotReachHere();
@ -739,22 +738,22 @@ class StubGenerator: public StubCodeGenerator {
break;
#endif // INCLUDE_ALL_GCS
case BarrierSet::CardTableForRS:
case BarrierSet::CardTableExtension:
case BarrierSet::CardTableModRef:
{
CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
CardTable* ct = ctbs->card_table();
assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
Label L_loop;
const Register end = count; // elements count; end == start+count-1
assert_different_registers(start, end);
__ lea(end, Address(start, count, Address::times_ptr, -wordSize));
__ shrptr(start, CardTableModRefBS::card_shift);
__ shrptr(end, CardTableModRefBS::card_shift);
__ shrptr(start, CardTable::card_shift);
__ shrptr(end, CardTable::card_shift);
__ subptr(end, start); // end --> count
__ BIND(L_loop);
intptr_t disp = (intptr_t) ct->byte_map_base;
intptr_t disp = (intptr_t) ct->byte_map_base();
Address cardtable(start, count, Address::times_1, disp);
__ movb(cardtable, 0);
__ decrement(count);

View File

@ -25,6 +25,9 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "ci/ciUtilities.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_x86.hpp"
#include "oops/instanceOop.hpp"
@ -1232,9 +1235,7 @@ class StubGenerator: public StubCodeGenerator {
__ bind(filtered);
}
break;
case BarrierSet::CardTableForRS:
case BarrierSet::CardTableExtension:
case BarrierSet::ModRef:
case BarrierSet::CardTableModRef:
break;
default:
ShouldNotReachHere();
@ -1272,12 +1273,8 @@ class StubGenerator: public StubCodeGenerator {
__ popa();
}
break;
case BarrierSet::CardTableForRS:
case BarrierSet::CardTableExtension:
case BarrierSet::CardTableModRef:
{
CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
Label L_loop, L_done;
const Register end = count;
@ -1286,11 +1283,11 @@ class StubGenerator: public StubCodeGenerator {
__ leaq(end, Address(start, count, TIMES_OOP, 0)); // end == start+count*oop_size
__ subptr(end, BytesPerHeapOop); // end - 1 to make inclusive
__ shrptr(start, CardTableModRefBS::card_shift);
__ shrptr(end, CardTableModRefBS::card_shift);
__ shrptr(start, CardTable::card_shift);
__ shrptr(end, CardTable::card_shift);
__ subptr(end, start); // end --> cards count
int64_t disp = (int64_t) ct->byte_map_base;
int64_t disp = ci_card_table_address_as<int64_t>();
__ mov64(scratch, disp);
__ addptr(start, scratch);
__ BIND(L_loop);
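For reference, the card-dirtying loop emitted above computes the following; this is a sketch under assumed names, with card_shift standing in for CardTable::card_shift and 0 for dirty_card_val().

#include <cstdint>
#include <cstring>

// Dirty every card byte spanned by the oop range [start, start + count oops).
static void dirty_card_range_sketch(uintptr_t start, size_t count, size_t oop_size,
                                    int8_t* byte_map_base, int card_shift = 9) {
  uintptr_t last = start + count * oop_size - oop_size;   // inclusive address of the last element
  size_t first_card = start >> card_shift;
  size_t last_card  = last  >> card_shift;
  // Same effect as the backwards movb loop in the generated stub.
  std::memset(byte_map_base + first_card, 0, last_card - first_card + 1);
}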

View File

@ -198,8 +198,7 @@ static void do_oop_store(InterpreterMacroAssembler* _masm,
}
break;
#endif // INCLUDE_ALL_GCS
case BarrierSet::CardTableForRS:
case BarrierSet::CardTableExtension:
case BarrierSet::CardTableModRef:
{
if (val == noreg) {
__ store_heap_oop_null(obj);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "gc/shared/barrierSet.inline.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableModRefBS.inline.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "memory/metaspaceShared.hpp"
@ -42,7 +43,7 @@ void JavaThread::cache_global_variables() {
}
if (bs->is_a(BarrierSet::CardTableModRef)) {
_card_table_base = (address) (barrier_set_cast<CardTableModRefBS>(bs)->byte_map_base);
_card_table_base = (address) (barrier_set_cast<CardTableModRefBS>(bs)->card_table()->byte_map_base());
} else {
_card_table_base = NULL;
}

View File

@ -25,7 +25,10 @@
#include "aot/aotCodeHeap.hpp"
#include "aot/aotLoader.hpp"
#include "ci/ciUtilities.hpp"
#include "classfile/javaAssertions.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/shared/gcLocker.hpp"
#include "interpreter/abstractInterpreter.hpp"
@ -539,8 +542,7 @@ void AOTCodeHeap::link_global_lib_symbols() {
_lib_symbols_initialized = true;
CollectedHeap* heap = Universe::heap();
CardTableModRefBS* ct = (CardTableModRefBS*)(heap->barrier_set());
SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_card_table_address", address, ct->byte_map_base);
SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_card_table_address", address, ci_card_table_address());
SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_heap_top_address", address, (heap->supports_inline_contig_alloc() ? heap->top_addr() : NULL));
SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_heap_end_address", address, (heap->supports_inline_contig_alloc() ? heap->end_addr() : NULL));
SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_polling_page", address, os::get_polling_page());

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -33,6 +33,8 @@
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "ci/ciObjArray.hpp"
#include "ci/ciUtilities.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "runtime/arguments.hpp"
#include "runtime/sharedRuntime.hpp"
@ -1461,11 +1463,7 @@ void LIRGenerator::pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
G1SATBCardTableModRef_pre_barrier(addr_opr, pre_val, do_load, patch, info);
break;
#endif // INCLUDE_ALL_GCS
case BarrierSet::CardTableForRS:
case BarrierSet::CardTableExtension:
// No pre barriers
break;
case BarrierSet::ModRef:
case BarrierSet::CardTableModRef:
// No pre barriers
break;
default :
@ -1481,13 +1479,9 @@ void LIRGenerator::post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
G1SATBCardTableModRef_post_barrier(addr, new_val);
break;
#endif // INCLUDE_ALL_GCS
case BarrierSet::CardTableForRS:
case BarrierSet::CardTableExtension:
case BarrierSet::CardTableModRef:
CardTableModRef_post_barrier(addr, new_val);
break;
case BarrierSet::ModRef:
// No post barriers
break;
default :
ShouldNotReachHere();
}
@ -1616,9 +1610,7 @@ void LIRGenerator::G1SATBCardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_Opr
////////////////////////////////////////////////////////////////////////
void LIRGenerator::CardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(_bs);
assert(sizeof(*(ct->byte_map_base)) == sizeof(jbyte), "adjust this code");
LIR_Const* card_table_base = new LIR_Const(ct->byte_map_base);
LIR_Const* card_table_base = new LIR_Const(ci_card_table_address());
if (addr->is_address()) {
LIR_Address* address = addr->as_address_ptr();
// ptr cannot be an object because we use this barrier for array card marks
@ -1640,9 +1632,9 @@ void LIRGenerator::CardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc*
LIR_Opr tmp = new_pointer_register();
if (TwoOperandLIRForm) {
__ move(addr, tmp);
__ unsigned_shift_right(tmp, CardTableModRefBS::card_shift, tmp);
__ unsigned_shift_right(tmp, CardTable::card_shift, tmp);
} else {
__ unsigned_shift_right(addr, CardTableModRefBS::card_shift, tmp);
__ unsigned_shift_right(addr, CardTable::card_shift, tmp);
}
LIR_Address* card_addr;
@ -1652,7 +1644,7 @@ void LIRGenerator::CardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc*
card_addr = new LIR_Address(tmp, load_constant(card_table_base), T_BYTE);
}
LIR_Opr dirty = LIR_OprFact::intConst(CardTableModRefBS::dirty_card_val());
LIR_Opr dirty = LIR_OprFact::intConst(CardTable::dirty_card_val());
if (UseCondCardMark) {
LIR_Opr cur_value = new_register(T_INT);
if (UseConcMarkSweepGC) {

View File

@ -24,6 +24,9 @@
#include "precompiled.hpp"
#include "ci/ciUtilities.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "gc/shared/cardTable.hpp"
#include "memory/universe.hpp"
// ciUtilities
//
@ -43,3 +46,13 @@ const char basictype_to_char(BasicType t) {
char c = type2char(t);
return c ? c : 'X';
}
// ------------------------------------------------------------------
// ci_card_table_address
jbyte *ci_card_table_address() {
BarrierSet* bs = Universe::heap()->barrier_set();
CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
CardTable* ct = ctbs->card_table();
assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust users of this code");
return ct->byte_map_base();
}

View File

@ -27,6 +27,7 @@
#include "ci/ciEnv.hpp"
#include "runtime/interfaceSupport.hpp"
#include "utilities/globalDefinitions.hpp"
// The following routines and definitions are used internally in the
// compiler interface.
@ -114,4 +115,9 @@ inline const char* bool_to_str(bool b) {
const char* basictype_to_str(BasicType t);
const char basictype_to_char(BasicType t);
jbyte *ci_card_table_address();
template <typename T> T ci_card_table_address_as() {
return reinterpret_cast<T>(ci_card_table_address());
}
#endif // SHARE_VM_CI_CIUTILITIES_HPP
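For reference, the typed wrapper is what the rest of this change consumes, e.g.:

// as used by the x86_64 stub generator and the disassembler elsewhere in this patch
int64_t disp = ci_card_table_address_as<int64_t>();
address base = ci_card_table_address_as<address>();

The template only reinterpret_casts the jbyte* returned by ci_card_table_address(), so any pointer-sized integral or pointer type works.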

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,6 +26,7 @@
#include "code/codeCache.hpp"
#include "code/relocInfo.hpp"
#include "code/relocInfo_ext.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "memory/universe.hpp"
@ -59,8 +60,9 @@ address symbolic_Relocation::symbolic_value(symbolic_Relocation::symbolic_refere
}
case symbolic_Relocation::card_table_reference: {
BarrierSet* bs = Universe::heap()->barrier_set();
CardTableModRefBS* ct = (CardTableModRefBS*)bs;
return (address)ct->byte_map_base;
CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
CardTable* ct = ctbs->card_table();
return (address)ct->byte_map_base();
}
case symbolic_Relocation::mark_bits_reference: {
return (address)Universe::verify_mark_bits();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -23,9 +23,11 @@
*/
#include "precompiled.hpp"
#include "ci/ciUtilities.hpp"
#include "classfile/javaClasses.hpp"
#include "code/codeCache.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "memory/resourceArea.hpp"
@ -318,7 +320,7 @@ void decode_env::print_address(address adr) {
BarrierSet* bs = Universe::heap()->barrier_set();
if (bs->is_a(BarrierSet::CardTableModRef) &&
adr == (address)(barrier_set_cast<CardTableModRefBS>(bs)->byte_map_base)) {
adr == ci_card_table_address_as<address>()) {
st->print("word_map_base");
if (WizardMode) st->print(" " INTPTR_FORMAT, p2i(adr));
return;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -88,9 +88,9 @@ CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs, M
_parDictionaryAllocLock(Mutex::leaf - 1, // == rank(ExpandHeap_lock) - 1
"CompactibleFreeListSpace._dict_par_lock", true,
Monitor::_safepoint_check_never),
_rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
_rescan_task_size(CardTable::card_size_in_words * BitsPerWord *
CMSRescanMultiple),
_marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
_marking_task_size(CardTable::card_size_in_words * BitsPerWord *
CMSConcMarkMultiple),
_collector(NULL),
_preconsumptionDirtyCardClosure(NULL)
@ -609,7 +609,7 @@ public:
FreeListSpaceDCTOC(CompactibleFreeListSpace* sp,
CMSCollector* collector,
ExtendedOopClosure* cl,
CardTableModRefBS::PrecisionStyle precision,
CardTable::PrecisionStyle precision,
HeapWord* boundary,
bool parallel) :
FilteringDCTOC(sp, cl, precision, boundary),
@ -693,7 +693,7 @@ FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
DirtyCardToOopClosure*
CompactibleFreeListSpace::new_dcto_cl(ExtendedOopClosure* cl,
CardTableModRefBS::PrecisionStyle precision,
CardTable::PrecisionStyle precision,
HeapWord* boundary,
bool parallel) {
return new FreeListSpaceDCTOC(this, _collector, cl, precision, boundary, parallel);
@ -2828,7 +2828,7 @@ void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n
}
const size_t CompactibleFreeListSpace::max_flag_size_for_task_size() const {
const size_t ergo_max = _old_gen->reserved().word_size() / (CardTableModRefBS::card_size_in_words * BitsPerWord);
const size_t ergo_max = _old_gen->reserved().word_size() / (CardTable::card_size_in_words * BitsPerWord);
return ergo_max;
}
@ -2865,15 +2865,15 @@ initialize_sequential_subtasks_for_marking(int n_threads,
// The "size" of each task is fixed according to rescan_task_size.
assert(n_threads > 0, "Unexpected n_threads argument");
const size_t task_size = marking_task_size();
assert(task_size > CardTableModRefBS::card_size_in_words &&
(task_size % CardTableModRefBS::card_size_in_words == 0),
assert(task_size > CardTable::card_size_in_words &&
(task_size % CardTable::card_size_in_words == 0),
"Otherwise arithmetic below would be incorrect");
MemRegion span = _old_gen->reserved();
if (low != NULL) {
if (span.contains(low)) {
// Align low down to a card boundary so that
// we can use block_offset_careful() on span boundaries.
HeapWord* aligned_low = align_down(low, CardTableModRefBS::card_size);
HeapWord* aligned_low = align_down(low, CardTable::card_size);
// Clip span prefix at aligned_low
span = span.intersection(MemRegion(aligned_low, span.end()));
} else if (low > span.end()) {
@ -2881,7 +2881,7 @@ initialize_sequential_subtasks_for_marking(int n_threads,
} // else use entire span
}
assert(span.is_empty() ||
((uintptr_t)span.start() % CardTableModRefBS::card_size == 0),
((uintptr_t)span.start() % CardTable::card_size == 0),
"span should start at a card boundary");
size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
assert((n_tasks == 0) == span.is_empty(), "Inconsistency");

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,6 +28,7 @@
#include "gc/cms/adaptiveFreeList.hpp"
#include "gc/cms/promotionInfo.hpp"
#include "gc/shared/blockOffsetTable.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/space.hpp"
#include "logging/log.hpp"
#include "memory/binaryTreeDictionary.hpp"
@ -432,7 +433,7 @@ class CompactibleFreeListSpace: public CompactibleSpace {
// Override: provides a DCTO_CL specific to this kind of space.
DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
CardTableModRefBS::PrecisionStyle precision,
CardTable::PrecisionStyle precision,
HeapWord* boundary,
bool parallel);

View File

@ -448,7 +448,7 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
_start_sampling(false),
_between_prologue_and_epilogue(false),
_markBitMap(0, Mutex::leaf + 1, "CMS_markBitMap_lock"),
_modUnionTable((CardTableModRefBS::card_shift - LogHeapWordSize),
_modUnionTable((CardTable::card_shift - LogHeapWordSize),
-1 /* lock-free */, "No_lock" /* dummy */),
_modUnionClosurePar(&_modUnionTable),
// Adjust my span to cover old (cms) gen
@ -900,7 +900,7 @@ void CMSCollector::promoted(bool par, HeapWord* start,
// card size.
MemRegion mr(start,
align_up(start + obj_size,
CardTableModRefBS::card_size /* bytes */));
CardTable::card_size /* bytes */));
if (par) {
_modUnionTable.par_mark_range(mr);
} else {
@ -3223,7 +3223,7 @@ void CMSConcMarkingTask::do_scan_and_mark(int i, CompactibleFreeListSpace* sp) {
if (sp->used_region().contains(_restart_addr)) {
// Align down to a card boundary for the start of 0th task
// for this space.
aligned_start = align_down(_restart_addr, CardTableModRefBS::card_size);
aligned_start = align_down(_restart_addr, CardTable::card_size);
}
size_t chunk_size = sp->marking_task_size();
@ -4026,17 +4026,16 @@ size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* old_gen,
startTimer();
sample_eden();
// Get and clear dirty region from card table
dirtyRegion = _ct->ct_bs()->dirty_card_range_after_reset(
MemRegion(nextAddr, endAddr),
true,
CardTableModRefBS::precleaned_card_val());
dirtyRegion = _ct->dirty_card_range_after_reset(MemRegion(nextAddr, endAddr),
true,
CardTable::precleaned_card_val());
assert(dirtyRegion.start() >= nextAddr,
"returned region inconsistent?");
}
lastAddr = dirtyRegion.end();
numDirtyCards =
dirtyRegion.word_size()/CardTableModRefBS::card_size_in_words;
dirtyRegion.word_size()/CardTable::card_size_in_words;
if (!dirtyRegion.is_empty()) {
stopTimer();
@ -4050,7 +4049,7 @@ size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* old_gen,
if (stop_point != NULL) {
assert((_collectorState == AbortablePreclean && should_abort_preclean()),
"Should only be AbortablePreclean.");
_ct->ct_bs()->invalidate(MemRegion(stop_point, dirtyRegion.end()));
_ct->invalidate(MemRegion(stop_point, dirtyRegion.end()));
if (should_abort_preclean()) {
break; // out of preclean loop
} else {
@ -4577,7 +4576,7 @@ CMSParRemarkTask::do_dirty_card_rescan_tasks(
SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
assert(pst->valid(), "Uninitialized use?");
uint nth_task = 0;
const int alignment = CardTableModRefBS::card_size * BitsPerWord;
const int alignment = CardTable::card_size * BitsPerWord;
MemRegion span = sp->used_region();
HeapWord* start_addr = span.start();
HeapWord* end_addr = align_up(span.end(), alignment);
@ -4603,7 +4602,7 @@ CMSParRemarkTask::do_dirty_card_rescan_tasks(
// precleaned, and setting the corresponding bits in the mod union
// table. Since we have been careful to partition at Card and MUT-word
// boundaries no synchronization is needed between parallel threads.
_collector->_ct->ct_bs()->dirty_card_iterate(this_span,
_collector->_ct->dirty_card_iterate(this_span,
&modUnionClosure);
// Having transferred these marks into the modUnionTable,
@ -4914,16 +4913,14 @@ void CMSCollector::do_remark_non_parallel() {
// mod union table.
{
ModUnionClosure modUnionClosure(&_modUnionTable);
_ct->ct_bs()->dirty_card_iterate(
_cmsGen->used_region(),
&modUnionClosure);
_ct->dirty_card_iterate(_cmsGen->used_region(),
&modUnionClosure);
}
// Having transferred these marks into the modUnionTable, we just need
// to rescan the marked objects on the dirty cards in the modUnionTable.
// The initial marking may have been done during an asynchronous
// collection so there may be dirty bits in the mod-union table.
const int alignment =
CardTableModRefBS::card_size * BitsPerWord;
const int alignment = CardTable::card_size * BitsPerWord;
{
// ... First handle dirty cards in CMS gen
markFromDirtyCardsClosure.set_space(_cmsGen->cmsSpace());
@ -5633,9 +5630,9 @@ HeapWord* CMSCollector::next_card_start_after_block(HeapWord* addr) const {
}
assert(sz > 0, "size must be nonzero");
HeapWord* next_block = addr + sz;
HeapWord* next_card = align_up(next_block, CardTableModRefBS::card_size);
assert(align_down((uintptr_t)addr, CardTableModRefBS::card_size) <
align_down((uintptr_t)next_card, CardTableModRefBS::card_size),
HeapWord* next_card = align_up(next_block, CardTable::card_size);
assert(align_down((uintptr_t)addr, CardTable::card_size) <
align_down((uintptr_t)next_card, CardTable::card_size),
"must be different cards");
return next_card;
}
@ -6294,7 +6291,7 @@ void MarkFromRootsClosure::reset(HeapWord* addr) {
assert(_markStack->isEmpty(), "would cause duplicates on stack");
assert(_span.contains(addr), "Out of bounds _finger?");
_finger = addr;
_threshold = align_up(_finger, CardTableModRefBS::card_size);
_threshold = align_up(_finger, CardTable::card_size);
}
// Should revisit to see if this should be restructured for
@ -6321,7 +6318,7 @@ bool MarkFromRootsClosure::do_bit(size_t offset) {
// during the preclean or remark phase. (CMSCleanOnEnter)
if (CMSCleanOnEnter) {
size_t sz = _collector->block_size_using_printezis_bits(addr);
HeapWord* end_card_addr = align_up(addr + sz, CardTableModRefBS::card_size);
HeapWord* end_card_addr = align_up(addr + sz, CardTable::card_size);
MemRegion redirty_range = MemRegion(addr, end_card_addr);
assert(!redirty_range.is_empty(), "Arithmetical tautology");
// Bump _threshold to end_card_addr; note that
@ -6408,9 +6405,9 @@ void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
// _threshold is always kept card-aligned but _finger isn't
// always card-aligned.
HeapWord* old_threshold = _threshold;
assert(is_aligned(old_threshold, CardTableModRefBS::card_size),
assert(is_aligned(old_threshold, CardTable::card_size),
"_threshold should always be card-aligned");
_threshold = align_up(_finger, CardTableModRefBS::card_size);
_threshold = align_up(_finger, CardTable::card_size);
MemRegion mr(old_threshold, _threshold);
assert(!mr.is_empty(), "Control point invariant");
assert(_span.contains(mr), "Should clear within span");
@ -6520,9 +6517,9 @@ void ParMarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
// _threshold is always kept card-aligned but _finger isn't
// always card-aligned.
HeapWord* old_threshold = _threshold;
assert(is_aligned(old_threshold, CardTableModRefBS::card_size),
assert(is_aligned(old_threshold, CardTable::card_size),
"_threshold should always be card-aligned");
_threshold = align_up(_finger, CardTableModRefBS::card_size);
_threshold = align_up(_finger, CardTable::card_size);
MemRegion mr(old_threshold, _threshold);
assert(!mr.is_empty(), "Control point invariant");
assert(_span.contains(mr), "Should clear within span"); // _whole_span ??
@ -6890,7 +6887,7 @@ void PushAndMarkClosure::do_oop(oop obj) {
// are required.
if (obj->is_objArray()) {
size_t sz = obj->size();
HeapWord* end_card_addr = align_up(addr + sz, CardTableModRefBS::card_size);
HeapWord* end_card_addr = align_up(addr + sz, CardTable::card_size);
MemRegion redirty_range = MemRegion(addr, end_card_addr);
assert(!redirty_range.is_empty(), "Arithmetical tautology");
_mod_union_table->mark_range(redirty_range);
@ -7003,15 +7000,15 @@ bool CMSPrecleanRefsYieldClosure::should_return() {
}
void MarkFromDirtyCardsClosure::do_MemRegion(MemRegion mr) {
assert(((size_t)mr.start())%CardTableModRefBS::card_size_in_words == 0,
assert(((size_t)mr.start())%CardTable::card_size_in_words == 0,
"mr should be aligned to start at a card boundary");
// We'd like to assert:
// assert(mr.word_size()%CardTableModRefBS::card_size_in_words == 0,
// assert(mr.word_size()%CardTable::card_size_in_words == 0,
// "mr should be a range of cards");
// However, that would be too strong in one case -- the last
// partition ends at _unallocated_block which, in general, can be
// an arbitrary boundary, not necessarily card aligned.
_num_dirty_cards += mr.word_size()/CardTableModRefBS::card_size_in_words;
_num_dirty_cards += mr.word_size()/CardTable::card_size_in_words;
_space->object_iterate_mem(mr, &_scan_cl);
}
@ -7620,7 +7617,7 @@ void CMSKeepAliveClosure::do_oop(oop obj) {
// table.
if (obj->is_objArray()) {
size_t sz = obj->size();
HeapWord* end_card_addr = align_up(addr + sz, CardTableModRefBS::card_size);
HeapWord* end_card_addr = align_up(addr + sz, CardTable::card_size);
MemRegion redirty_range = MemRegion(addr, end_card_addr);
assert(!redirty_range.is_empty(), "Arithmetical tautology");
_collector->_modUnionTable.mark_range(redirty_range);

View File

@ -77,7 +77,7 @@ class SerialOldTracer;
// methods are used). This is essentially a wrapper around the BitMap class,
// with one bit per (1<<_shifter) HeapWords. (i.e. for the marking bit map,
// we have _shifter == 0. and for the mod union table we have
// shifter == CardTableModRefBS::card_shift - LogHeapWordSize.)
// shifter == CardTable::card_shift - LogHeapWordSize.)
// XXX 64-bit issues in BitMap?
class CMSBitMap VALUE_OBJ_CLASS_SPEC {
friend class VMStructs;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -448,7 +448,7 @@ inline void ModUnionClosure::do_MemRegion(MemRegion mr) {
// This is superfluous except at the end of the space;
// we should do better than this XXX
MemRegion mr2(mr.start(), align_up(mr.end(),
CardTableModRefBS::card_size /* bytes */));
CardTable::card_size /* bytes */));
_t->mark_range(mr2);
}
@ -457,7 +457,7 @@ inline void ModUnionClosurePar::do_MemRegion(MemRegion mr) {
// This is superfluous except at the end of the space;
// we should do better than this XXX
MemRegion mr2(mr.start(), align_up(mr.end(),
CardTableModRefBS::card_size /* bytes */));
CardTable::card_size /* bytes */));
_t->par_mark_range(mr2);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2007, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2007, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -36,7 +36,7 @@
#include "runtime/orderAccess.inline.hpp"
#include "runtime/vmThread.hpp"
void CardTableModRefBSForCTRS::
void CardTableRS::
non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
OopsInGenClosure* cl,
CardTableRS* ct,
@ -82,7 +82,7 @@ non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
}
void
CardTableModRefBSForCTRS::
CardTableRS::
process_stride(Space* sp,
MemRegion used,
jint stride, int n_strides,
@ -162,7 +162,7 @@ process_stride(Space* sp,
}
void
CardTableModRefBSForCTRS::
CardTableRS::
process_chunk_boundaries(Space* sp,
DirtyCardToOopClosure* dcto_cl,
MemRegion chunk_mr,
@ -371,7 +371,7 @@ process_chunk_boundaries(Space* sp,
}
void
CardTableModRefBSForCTRS::
CardTableRS::
get_LNC_array_for_space(Space* sp,
jbyte**& lowest_non_clean,
uintptr_t& lowest_non_clean_base_chunk_index,

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -40,12 +40,12 @@ void G1CardCountsMappingChangedListener::on_commit(uint start_idx, size_t num_re
size_t G1CardCounts::compute_size(size_t mem_region_size_in_words) {
// We keep card counts for every card, so the size of the card counts table must
// be the same as the card table.
return G1SATBCardTableLoggingModRefBS::compute_size(mem_region_size_in_words);
return G1CardTable::compute_size(mem_region_size_in_words);
}
size_t G1CardCounts::heap_map_factor() {
// See G1CardCounts::compute_size() why we reuse the card table value.
return G1SATBCardTableLoggingModRefBS::heap_map_factor();
return G1CardTable::heap_map_factor();
}
void G1CardCounts::clear_range(size_t from_card_num, size_t to_card_num) {
@ -72,8 +72,8 @@ void G1CardCounts::initialize(G1RegionToSpaceMapper* mapper) {
// threshold limit is no more than this.
guarantee(G1ConcRSHotCardLimit <= max_jubyte, "sanity");
_ct_bs = _g1h->g1_barrier_set();
_ct_bot = _ct_bs->byte_for_const(_g1h->reserved_region().start());
_ct = _g1h->card_table();
_ct_bot = _ct->byte_for_const(_g1h->reserved_region().start());
_card_counts = (jubyte*) mapper->reserved().start();
_reserved_max_card_num = mapper->reserved().byte_size();
@ -116,17 +116,17 @@ void G1CardCounts::clear_region(HeapRegion* hr) {
void G1CardCounts::clear_range(MemRegion mr) {
if (has_count_table()) {
const jbyte* from_card_ptr = _ct_bs->byte_for_const(mr.start());
const jbyte* from_card_ptr = _ct->byte_for_const(mr.start());
// We use the last address in the range because the range could represent the
// last region in the heap, in which case looking up the card for the exclusive
// end address would be an out-of-bounds access to the card table.
const jbyte* last_card_ptr = _ct_bs->byte_for_const(mr.last());
const jbyte* last_card_ptr = _ct->byte_for_const(mr.last());
#ifdef ASSERT
HeapWord* start_addr = _ct_bs->addr_for(from_card_ptr);
HeapWord* start_addr = _ct->addr_for(from_card_ptr);
assert(start_addr == mr.start(), "MemRegion start must be aligned to a card.");
HeapWord* last_addr = _ct_bs->addr_for(last_card_ptr);
assert((last_addr + CardTableModRefBS::card_size_in_words) == mr.end(), "MemRegion end must be aligned to a card.");
HeapWord* last_addr = _ct->addr_for(last_card_ptr);
assert((last_addr + G1CardTable::card_size_in_words) == mr.end(), "MemRegion end must be aligned to a card.");
#endif // ASSERT
// Clear the counts for the (exclusive) card range.

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,6 +25,7 @@
#ifndef SHARE_VM_GC_G1_G1CARDCOUNTS_HPP
#define SHARE_VM_GC_G1_G1CARDCOUNTS_HPP
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1RegionToSpaceMapper.hpp"
#include "memory/allocation.hpp"
#include "memory/virtualspace.hpp"
@ -56,6 +57,7 @@ class G1CardCounts: public CHeapObj<mtGC> {
G1CardCountsMappingChangedListener _listener;
G1CollectedHeap* _g1h;
G1CardTable* _ct;
// The table of counts
jubyte* _card_counts;
@ -66,9 +68,6 @@ class G1CardCounts: public CHeapObj<mtGC> {
// CardTable bottom.
const jbyte* _ct_bot;
// Barrier set
CardTableModRefBS* _ct_bs;
// Returns true if the card counts table has been reserved.
bool has_reserved_count_table() { return _card_counts != NULL; }

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -68,10 +68,10 @@ void G1CardLiveData::initialize(size_t max_capacity, uint num_max_regions) {
assert(max_capacity % num_max_regions == 0,
"Given capacity must be evenly divisible by region size.");
size_t region_size = max_capacity / num_max_regions;
assert(region_size % (G1SATBCardTableModRefBS::card_size * BitsPerWord) == 0,
assert(region_size % (G1CardTable::card_size * BitsPerWord) == 0,
"Region size must be evenly divisible by area covered by a single word.");
_max_capacity = max_capacity;
_cards_per_region = region_size / G1SATBCardTableModRefBS::card_size;
_cards_per_region = region_size / G1CardTable::card_size;
_live_regions_size_in_bits = live_region_bitmap_size_in_bits();
_live_regions = allocate_large_bitmap(_live_regions_size_in_bits);
@ -85,11 +85,11 @@ void G1CardLiveData::pretouch() {
}
size_t G1CardLiveData::live_region_bitmap_size_in_bits() const {
return _max_capacity / (_cards_per_region << G1SATBCardTableModRefBS::card_shift);
return _max_capacity / (_cards_per_region << G1CardTable::card_shift);
}
size_t G1CardLiveData::live_card_bitmap_size_in_bits() const {
return _max_capacity >> G1SATBCardTableModRefBS::card_shift;
return _max_capacity >> G1CardTable::card_shift;
}
// Helper class that provides functionality to generate the Live Data Count
@ -132,7 +132,7 @@ private:
void clear_card_bitmap_range(HeapWord* start, HeapWord* end) {
BitMap::idx_t start_idx = card_live_bitmap_index_for(start);
BitMap::idx_t end_idx = card_live_bitmap_index_for(align_up(end, CardTableModRefBS::card_size));
BitMap::idx_t end_idx = card_live_bitmap_index_for(align_up(end, CardTable::card_size));
_card_bm.clear_range(start_idx, end_idx);
}
@ -140,7 +140,7 @@ private:
// Mark the card liveness bitmap for the object spanning from start to end.
void mark_card_bitmap_range(HeapWord* start, HeapWord* end) {
BitMap::idx_t start_idx = card_live_bitmap_index_for(start);
BitMap::idx_t end_idx = card_live_bitmap_index_for(align_up(end, CardTableModRefBS::card_size));
BitMap::idx_t end_idx = card_live_bitmap_index_for(align_up(end, CardTable::card_size));
assert((end_idx - start_idx) > 0, "Trying to mark zero sized range.");
@ -168,7 +168,7 @@ public:
// by the card shift -- address 0 corresponds to card number 0. One
// must subtract the card num of the bottom of the heap to obtain a
// card table index.
BitMap::idx_t card_num = uintptr_t(addr) >> CardTableModRefBS::card_shift;
BitMap::idx_t card_num = uintptr_t(addr) >> G1CardTable::card_shift;
return card_num - _heap_card_bias;
}
@ -262,7 +262,7 @@ public:
// Calculate the card number for the bottom of the heap. Used
// in biasing indexes into the accounting card bitmaps.
_heap_card_bias =
uintptr_t(base_address) >> CardTableModRefBS::card_shift;
uintptr_t(base_address) >> G1CardTable::card_shift;
}
};
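A standalone sketch of the index computation that card_live_bitmap_index_for() and the bias above perform; the card-shift value is an assumption, the real one comes from G1CardTable::card_shift.

#include <cstddef>
#include <cstdint>

// Bitmap index for an address: its global card number minus the card number
// of the heap bottom, so the first card of the heap maps to bit 0.
static size_t card_bitmap_index_sketch(uintptr_t addr, uintptr_t heap_bottom, int card_shift = 9) {
  size_t heap_card_bias = heap_bottom >> card_shift;
  return (addr >> card_shift) - heap_card_bias;
}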

View File

@ -0,0 +1,102 @@
/*
* Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/shared/memset_with_concurrent_readers.hpp"
#include "logging/log.hpp"
#include "runtime/atomic.hpp"
#include "runtime/orderAccess.inline.hpp"
bool G1CardTable::mark_card_deferred(size_t card_index) {
jbyte val = _byte_map[card_index];
// It's already processed
if ((val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val()) {
return false;
}
// Cached bit can be installed either on a clean card or on a claimed card.
jbyte new_val = val;
if (val == clean_card_val()) {
new_val = (jbyte)deferred_card_val();
} else {
if (val & claimed_card_val()) {
new_val = val | (jbyte)deferred_card_val();
}
}
if (new_val != val) {
Atomic::cmpxchg(new_val, &_byte_map[card_index], val);
}
return true;
}
void G1CardTable::g1_mark_as_young(const MemRegion& mr) {
jbyte *const first = byte_for(mr.start());
jbyte *const last = byte_after(mr.last());
memset_with_concurrent_readers(first, g1_young_gen, last - first);
}
#ifndef PRODUCT
void G1CardTable::verify_g1_young_region(MemRegion mr) {
verify_region(mr, g1_young_gen, true);
}
#endif
void G1CardTableChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
// Default value for a clean card on the card table is -1. So we cannot take advantage of the zero_filled parameter.
MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_idx), num_regions * HeapRegion::GrainWords);
_card_table->clear(mr);
}
void G1CardTable::initialize(G1RegionToSpaceMapper* mapper) {
mapper->set_mapping_changed_listener(&_listener);
_byte_map_size = mapper->reserved().byte_size();
_guard_index = cards_required(_whole_heap.word_size()) - 1;
_last_valid_index = _guard_index - 1;
HeapWord* low_bound = _whole_heap.start();
HeapWord* high_bound = _whole_heap.end();
_cur_covered_regions = 1;
_covered[0] = _whole_heap;
_byte_map = (jbyte*) mapper->reserved().start();
_byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");
log_trace(gc, barrier)("G1CardTable::G1CardTable: ");
log_trace(gc, barrier)(" &_byte_map[0]: " INTPTR_FORMAT " &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
p2i(&_byte_map[0]), p2i(&_byte_map[_last_valid_index]));
log_trace(gc, barrier)(" _byte_map_base: " INTPTR_FORMAT, p2i(_byte_map_base));
}
bool G1CardTable::is_in_young(oop obj) const {
volatile jbyte* p = byte_for(obj);
return *p == G1CardTable::g1_young_card_val();
}
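The byte_map_base arithmetic set up in initialize() above is the usual biased-base trick: subtracting the card number of the heap bottom once lets byte_for(addr) be a single shift-and-add. A minimal sketch, with the shift value assumed:

#include <cstdint>

// byte_map_base may point outside the actual allocation; only
// byte_map_base + (addr >> card_shift) for addresses inside the covered heap
// is ever dereferenced.
static int8_t* byte_for_sketch(uintptr_t addr, int8_t* byte_map,
                               uintptr_t low_bound, int card_shift = 9) {
  int8_t* byte_map_base = byte_map - (low_bound >> card_shift);
  return byte_map_base + (addr >> card_shift);   // == &byte_map[(addr - low_bound) >> card_shift]
}

This is also why the compiler and stub code in this change must not treat byte_map_base as a relocatable external address.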

View File

@ -0,0 +1,113 @@
/*
* Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_G1_G1CARDTABLE_HPP
#define SHARE_VM_GC_G1_G1CARDTABLE_HPP
#include "gc/g1/g1RegionToSpaceMapper.hpp"
#include "gc/shared/cardTable.hpp"
#include "oops/oopsHierarchy.hpp"
#include "utilities/macros.hpp"
class G1CardTable;
class G1RegionToSpaceMapper;
class G1CardTableChangedListener : public G1MappingChangedListener {
private:
G1CardTable* _card_table;
public:
G1CardTableChangedListener() : _card_table(NULL) { }
void set_card_table(G1CardTable* card_table) { _card_table = card_table; }
virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled);
};
class G1CardTable: public CardTable {
friend class VMStructs;
friend class G1CardTableChangedListener;
G1CardTableChangedListener _listener;
enum G1CardValues {
g1_young_gen = CT_MR_BS_last_reserved << 1
};
public:
G1CardTable(MemRegion whole_heap): CardTable(whole_heap, /* scanned concurrently */ true), _listener() {
_listener.set_card_table(this);
}
bool is_card_dirty(size_t card_index) {
return _byte_map[card_index] == dirty_card_val();
}
static jbyte g1_young_card_val() { return g1_young_gen; }
/*
Claimed and deferred bits are used together in G1 during the evacuation
pause. These bits can have the following state transitions:
1. The claimed bit can be set over any other card state, except that
the "dirty -> dirty and claimed" transition is checked for in
G1 code and is not used.
2. The deferred bit can be set only if the previous state of the card
was either clean or claimed. mark_card_deferred() is wait-free.
We do not care whether the operation succeeds; if it does not, the
only consequence is a duplicate entry in the update buffer caused by
the "cache miss", so it is not worth spinning.
*/
bool is_card_claimed(size_t card_index) {
jbyte val = _byte_map[card_index];
return (val & (clean_card_mask_val() | claimed_card_val())) == claimed_card_val();
}
inline void set_card_claimed(size_t card_index);
void verify_g1_young_region(MemRegion mr) PRODUCT_RETURN;
void g1_mark_as_young(const MemRegion& mr);
bool mark_card_deferred(size_t card_index);
bool is_card_deferred(size_t card_index) {
jbyte val = _byte_map[card_index];
return (val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val();
}
static size_t compute_size(size_t mem_region_size_in_words) {
size_t number_of_slots = (mem_region_size_in_words / card_size_in_words);
return ReservedSpace::allocation_align_size_up(number_of_slots);
}
// Returns how many bytes of the heap a single byte of the Card Table corresponds to.
static size_t heap_map_factor() { return card_size; }
void initialize() {}
void initialize(G1RegionToSpaceMapper* mapper);
virtual void resize_covered_region(MemRegion new_region) { ShouldNotReachHere(); }
virtual bool is_in_young(oop obj) const;
};
#endif // SHARE_VM_GC_G1_G1CARDTABLE_HPP
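To put compute_size() and heap_map_factor() above in concrete terms, a worked example under the usual assumptions of 512-byte cards and 8-byte HeapWords (both values are assumptions here, not taken from this patch):

#include <cstddef>

constexpr size_t kCardSize        = 512;                               // heap bytes per card byte -> heap_map_factor()
constexpr size_t kHeapWordSize    = 8;
constexpr size_t kCardSizeInWords = kCardSize / kHeapWordSize;         // 64
constexpr size_t kHeapWords       = (size_t(1) << 30) / kHeapWordSize; // a 1 GB heap
constexpr size_t kCardTableSlots  = kHeapWords / kCardSizeInWords;     // number_of_slots before alignment
static_assert(kCardTableSlots == (size_t(1) << 21), "a 1 GB heap needs about 2 MB of card table");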

View File

@ -0,0 +1,40 @@
/*
* Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_G1_G1CARDTABLE_INLINE_HPP
#define SHARE_VM_GC_G1_G1CARDTABLE_INLINE_HPP
#include "gc/g1/g1CardTable.hpp"
void G1CardTable::set_card_claimed(size_t card_index) {
jbyte val = _byte_map[card_index];
if (val == clean_card_val()) {
val = (jbyte)claimed_card_val();
} else {
val |= (jbyte)claimed_card_val();
}
_byte_map[card_index] = val;
}
#endif // SHARE_VM_GC_G1_G1CARDTABLE_INLINE_HPP

View File

@ -52,6 +52,7 @@
#include "gc/g1/g1RemSet.hpp"
#include "gc/g1/g1RootClosures.hpp"
#include "gc/g1/g1RootProcessor.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/g1/g1YCTypes.hpp"
#include "gc/g1/g1YoungRemSetSamplingThread.hpp"
@ -103,10 +104,10 @@ class RedirtyLoggedCardTableEntryClosure : public CardTableEntryClosure {
private:
size_t _num_dirtied;
G1CollectedHeap* _g1h;
G1SATBCardTableLoggingModRefBS* _g1_bs;
G1CardTable* _g1_ct;
HeapRegion* region_for_card(jbyte* card_ptr) const {
return _g1h->heap_region_containing(_g1_bs->addr_for(card_ptr));
return _g1h->heap_region_containing(_g1_ct->addr_for(card_ptr));
}
bool will_become_free(HeapRegion* hr) const {
@ -117,14 +118,14 @@ class RedirtyLoggedCardTableEntryClosure : public CardTableEntryClosure {
public:
RedirtyLoggedCardTableEntryClosure(G1CollectedHeap* g1h) : CardTableEntryClosure(),
_num_dirtied(0), _g1h(g1h), _g1_bs(g1h->g1_barrier_set()) { }
_num_dirtied(0), _g1h(g1h), _g1_ct(g1h->card_table()) { }
bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
HeapRegion* hr = region_for_card(card_ptr);
// Should only dirty cards in regions that won't be freed.
if (!will_become_free(hr)) {
*card_ptr = CardTableModRefBS::dirty_card_val();
*card_ptr = G1CardTable::dirty_card_val();
_num_dirtied++;
}
@ -1465,6 +1466,7 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* collector_policy) :
_young_gen_sampling_thread(NULL),
_collector_policy(collector_policy),
_soft_ref_policy(),
_card_table(NULL),
_memory_manager("G1 Young Generation", "end of minor GC"),
_full_gc_memory_manager("G1 Old Generation", "end of major GC"),
_eden_pool(NULL),
@ -1616,11 +1618,13 @@ jint G1CollectedHeap::initialize() {
initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
// Create the barrier set for the entire reserved region.
G1SATBCardTableLoggingModRefBS* bs
= new G1SATBCardTableLoggingModRefBS(reserved_region());
G1CardTable* ct = new G1CardTable(reserved_region());
ct->initialize();
G1SATBCardTableLoggingModRefBS* bs = new G1SATBCardTableLoggingModRefBS(ct);
bs->initialize();
assert(bs->is_a(BarrierSet::G1SATBCTLogging), "sanity");
set_barrier_set(bs);
_card_table = ct;
// Create the hot card cache.
_hot_card_cache = new G1HotCardCache(this);
@ -1651,8 +1655,8 @@ jint G1CollectedHeap::initialize() {
G1RegionToSpaceMapper* cardtable_storage =
create_aux_memory_mapper("Card Table",
G1SATBCardTableLoggingModRefBS::compute_size(g1_rs.size() / HeapWordSize),
G1SATBCardTableLoggingModRefBS::heap_map_factor());
G1CardTable::compute_size(g1_rs.size() / HeapWordSize),
G1CardTable::heap_map_factor());
G1RegionToSpaceMapper* card_counts_storage =
create_aux_memory_mapper("Card Counts Table",
@ -1666,7 +1670,7 @@ jint G1CollectedHeap::initialize() {
create_aux_memory_mapper("Next Bitmap", bitmap_size, G1CMBitMap::heap_map_factor());
_hrm.initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
g1_barrier_set()->initialize(cardtable_storage);
_card_table->initialize(cardtable_storage);
// Do later initialization work for concurrent refinement.
_hot_card_cache->initialize(card_counts_storage);
@ -1676,7 +1680,7 @@ jint G1CollectedHeap::initialize() {
guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
// Also create a G1 rem set.
_g1_rem_set = new G1RemSet(this, g1_barrier_set(), _hot_card_cache);
_g1_rem_set = new G1RemSet(this, _card_table, _hot_card_cache);
_g1_rem_set->initialize(max_capacity(), max_regions());
size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
@ -2691,17 +2695,17 @@ class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
if (!r->rem_set()->is_empty()) {
guarantee(r->rem_set()->occupancy_less_or_equal_than(G1RSetSparseRegionEntries),
"Found a not-small remembered set here. This is inconsistent with previous assumptions.");
G1SATBCardTableLoggingModRefBS* bs = g1h->g1_barrier_set();
G1CardTable* ct = g1h->card_table();
HeapRegionRemSetIterator hrrs(r->rem_set());
size_t card_index;
while (hrrs.has_next(card_index)) {
jbyte* card_ptr = (jbyte*)bs->byte_for_index(card_index);
jbyte* card_ptr = (jbyte*)ct->byte_for_index(card_index);
// The remembered set might contain references to already freed
// regions. Filter out such entries to avoid failing card table
// verification.
if (g1h->is_in_closed_subset(bs->addr_for(card_ptr))) {
if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
*card_ptr = CardTableModRefBS::dirty_card_val();
if (g1h->is_in_closed_subset(ct->addr_for(card_ptr))) {
if (*card_ptr != G1CardTable::dirty_card_val()) {
*card_ptr = G1CardTable::dirty_card_val();
_dcq.enqueue(card_ptr);
}
}

View File

@ -28,6 +28,7 @@
#include "gc/g1/evacuationInfo.hpp"
#include "gc/g1/g1AllocationContext.hpp"
#include "gc/g1/g1BiasedArray.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1CollectionSet.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ConcurrentMark.hpp"
@ -150,6 +151,7 @@ private:
WorkGang* _workers;
G1CollectorPolicy* _collector_policy;
G1CardTable* _card_table;
SoftRefPolicy _soft_ref_policy;
@ -1178,6 +1180,10 @@ public:
G1HotCardCache* g1_hot_card_cache() const { return _hot_card_cache; }
G1CardTable* card_table() const {
return _card_table;
}
// Iteration functions.
// Iterate over all objects, calling "cl.do_object" on each.

View File

@ -123,7 +123,7 @@ G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
assert(containing_hr->is_in(end - 1), "it should also contain end - 1");
MemRegion mr(start, end);
g1_barrier_set()->g1_mark_as_young(mr);
card_table()->g1_mark_as_young(mr);
}
inline RefToScanQueue* G1CollectedHeap::task_queue(uint i) const {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -38,12 +38,12 @@
class UpdateRSetDeferred : public ExtendedOopClosure {
private:
G1CollectedHeap* _g1;
DirtyCardQueue *_dcq;
G1SATBCardTableModRefBS* _ct_bs;
DirtyCardQueue* _dcq;
G1CardTable* _ct;
public:
UpdateRSetDeferred(DirtyCardQueue* dcq) :
_g1(G1CollectedHeap::heap()), _ct_bs(_g1->g1_barrier_set()), _dcq(dcq) {}
_g1(G1CollectedHeap::heap()), _ct(_g1->card_table()), _dcq(dcq) {}
virtual void do_oop(narrowOop* p) { do_oop_work(p); }
virtual void do_oop( oop* p) { do_oop_work(p); }
@ -59,9 +59,9 @@ public:
if (HeapRegion::is_in_same_region(p, oopDesc::decode_heap_oop(o))) {
return;
}
size_t card_index = _ct_bs->index_for(p);
if (_ct_bs->mark_card_deferred(card_index)) {
_dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index));
size_t card_index = _ct->index_for(p);
if (_ct->mark_card_deferred(card_index)) {
_dcq->enqueue((jbyte*)_ct->byte_for_index(card_index));
}
}
};

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -112,7 +112,7 @@ void G1FullGCPrepareTask::G1CalculatePointersClosure::reset_region_metadata(Heap
hr->reset_gc_time_stamp();
hr->rem_set()->clear();
_g1h->g1_barrier_set()->clear(MemRegion(hr->bottom(), hr->end()));
_g1h->card_table()->clear(MemRegion(hr->bottom(), hr->end()));
if (_g1h->g1_hot_card_cache()->use_cache()) {
_g1h->g1_hot_card_cache()->reset_card_counts(hr);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -604,10 +604,9 @@ void G1HeapVerifier::verify_after_gc(G1VerifyType type) {
#ifndef PRODUCT
class G1VerifyCardTableCleanup: public HeapRegionClosure {
G1HeapVerifier* _verifier;
G1SATBCardTableModRefBS* _ct_bs;
public:
G1VerifyCardTableCleanup(G1HeapVerifier* verifier, G1SATBCardTableModRefBS* ct_bs)
: _verifier(verifier), _ct_bs(ct_bs) { }
G1VerifyCardTableCleanup(G1HeapVerifier* verifier)
: _verifier(verifier) { }
virtual bool do_heap_region(HeapRegion* r) {
if (r->is_survivor()) {
_verifier->verify_dirty_region(r);
@ -620,16 +619,16 @@ public:
void G1HeapVerifier::verify_card_table_cleanup() {
if (G1VerifyCTCleanup || VerifyAfterGC) {
G1VerifyCardTableCleanup cleanup_verifier(this, _g1h->g1_barrier_set());
G1VerifyCardTableCleanup cleanup_verifier(this);
_g1h->heap_region_iterate(&cleanup_verifier);
}
}
void G1HeapVerifier::verify_not_dirty_region(HeapRegion* hr) {
// All of the region should be clean.
G1SATBCardTableModRefBS* ct_bs = _g1h->g1_barrier_set();
G1CardTable* ct = _g1h->card_table();
MemRegion mr(hr->bottom(), hr->end());
ct_bs->verify_not_dirty_region(mr);
ct->verify_not_dirty_region(mr);
}
void G1HeapVerifier::verify_dirty_region(HeapRegion* hr) {
@ -640,12 +639,12 @@ void G1HeapVerifier::verify_dirty_region(HeapRegion* hr) {
// not dirty that area (one less thing to have to do while holding
// a lock). So we can only verify that [bottom(),pre_dummy_top()]
// is dirty.
G1SATBCardTableModRefBS* ct_bs = _g1h->g1_barrier_set();
G1CardTable* ct = _g1h->card_table();
MemRegion mr(hr->bottom(), hr->pre_dummy_top());
if (hr->is_young()) {
ct_bs->verify_g1_young_region(mr);
ct->verify_g1_young_region(mr);
} else {
ct_bs->verify_dirty_region(mr);
ct->verify_dirty_region(mr);
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -40,7 +40,7 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint worker_id,
: _g1h(g1h),
_refs(g1h->task_queue(worker_id)),
_dcq(&g1h->dirty_card_queue_set()),
_ct_bs(g1h->g1_barrier_set()),
_ct(g1h->card_table()),
_closures(NULL),
_hash_seed(17),
_worker_id(worker_id),
@ -390,7 +390,6 @@ oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markOop m) {
return forward_ptr;
}
}
G1ParScanThreadStateSet::G1ParScanThreadStateSet(G1CollectedHeap* g1h, uint n_workers, size_t young_cset_length) :
_g1h(g1h),
_states(NEW_C_HEAP_ARRAY(G1ParScanThreadState*, n_workers, mtGC)),

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -45,7 +45,7 @@ class G1ParScanThreadState : public CHeapObj<mtGC> {
G1CollectedHeap* _g1h;
RefToScanQueue* _refs;
DirtyCardQueue _dcq;
G1SATBCardTableModRefBS* _ct_bs;
G1CardTable* _ct;
G1EvacuationRootClosures* _closures;
G1PLABAllocator* _plab_allocator;
@ -72,7 +72,7 @@ class G1ParScanThreadState : public CHeapObj<mtGC> {
#define PADDING_ELEM_NUM (DEFAULT_CACHE_LINE_SIZE / sizeof(size_t))
DirtyCardQueue& dirty_card_queue() { return _dcq; }
G1SATBCardTableModRefBS* ctbs() { return _ct_bs; }
G1CardTable* ct() { return _ct; }
InCSetState dest(InCSetState original) const {
assert(original.is_valid(),
@ -104,10 +104,10 @@ class G1ParScanThreadState : public CHeapObj<mtGC> {
// If the field originates from the to-space, we don't need to include it
// in the remembered set updates.
if (!from->is_young()) {
size_t card_index = ctbs()->index_for(p);
size_t card_index = ct()->index_for(p);
// If the card hasn't been added to the buffer, do it.
if (ctbs()->mark_card_deferred(card_index)) {
dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
if (ct()->mark_card_deferred(card_index)) {
dirty_card_queue().enqueue((jbyte*)ct()->byte_for_index(card_index));
}
}
}

View File

@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "gc/g1/dirtyCardQueue.hpp"
#include "gc/g1/g1BlockOffsetTable.inline.hpp"
#include "gc/g1/g1CardTable.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1ConcurrentRefine.hpp"
#include "gc/g1/g1FromCardCache.hpp"
@ -74,7 +75,7 @@ private:
static size_t chunk_size() { return M; }
void work(uint worker_id) {
G1SATBCardTableModRefBS* ct_bs = _g1h->g1_barrier_set();
G1CardTable* ct = _g1h->card_table();
while (_cur_dirty_regions < _num_dirty_regions) {
size_t next = Atomic::add(_chunk_length, &_cur_dirty_regions) - _chunk_length;
@ -83,7 +84,7 @@ private:
for (size_t i = next; i < max; i++) {
HeapRegion* r = _g1h->region_at(_dirty_region_list[i]);
if (!r->is_survivor()) {
ct_bs->clear(MemRegion(r->bottom(), r->end()));
ct->clear(MemRegion(r->bottom(), r->end()));
}
}
}
@ -280,12 +281,12 @@ public:
};
G1RemSet::G1RemSet(G1CollectedHeap* g1,
CardTableModRefBS* ct_bs,
G1CardTable* ct,
G1HotCardCache* hot_card_cache) :
_g1(g1),
_scan_state(new G1RemSetScanState()),
_num_conc_refined_cards(0),
_ct_bs(ct_bs),
_ct(ct),
_g1p(_g1->g1_policy()),
_hot_card_cache(hot_card_cache),
_prev_period_summary() {
@ -328,7 +329,7 @@ G1ScanRSForRegionClosure::G1ScanRSForRegionClosure(G1RemSetScanState* scan_state
_worker_i(worker_i) {
_g1h = G1CollectedHeap::heap();
_bot = _g1h->bot();
_ct_bs = _g1h->g1_barrier_set();
_ct = _g1h->card_table();
}
void G1ScanRSForRegionClosure::scan_card(MemRegion mr, uint region_idx_for_card) {
@ -345,7 +346,7 @@ void G1ScanRSForRegionClosure::scan_strong_code_roots(HeapRegion* r) {
}
void G1ScanRSForRegionClosure::claim_card(size_t card_index, const uint region_idx_for_card){
_ct_bs->set_card_claimed(card_index);
_ct->set_card_claimed(card_index);
_scan_state->add_dirty_region(region_idx_for_card);
}
@ -381,7 +382,7 @@ bool G1ScanRSForRegionClosure::do_heap_region(HeapRegion* r) {
_cards_claimed++;
// If the card is dirty, then G1 will scan it during Update RS.
if (_ct_bs->is_card_claimed(card_index) || _ct_bs->is_card_dirty(card_index)) {
if (_ct->is_card_claimed(card_index) || _ct->is_card_dirty(card_index)) {
continue;
}
@ -535,15 +536,15 @@ void G1RemSet::scrub(uint worker_num, HeapRegionClaimer *hrclaimer) {
_g1->heap_region_par_iterate_from_worker_offset(&scrub_cl, hrclaimer, worker_num);
}
inline void check_card_ptr(jbyte* card_ptr, CardTableModRefBS* ct_bs) {
inline void check_card_ptr(jbyte* card_ptr, G1CardTable* ct) {
#ifdef ASSERT
G1CollectedHeap* g1 = G1CollectedHeap::heap();
assert(g1->is_in_exact(ct_bs->addr_for(card_ptr)),
assert(g1->is_in_exact(ct->addr_for(card_ptr)),
"Card at " PTR_FORMAT " index " SIZE_FORMAT " representing heap at " PTR_FORMAT " (%u) must be in committed heap",
p2i(card_ptr),
ct_bs->index_for(ct_bs->addr_for(card_ptr)),
p2i(ct_bs->addr_for(card_ptr)),
g1->addr_to_region(ct_bs->addr_for(card_ptr)));
ct->index_for(ct->addr_for(card_ptr)),
p2i(ct->addr_for(card_ptr)),
g1->addr_to_region(ct->addr_for(card_ptr)));
#endif
}
@ -551,15 +552,15 @@ void G1RemSet::refine_card_concurrently(jbyte* card_ptr,
uint worker_i) {
assert(!_g1->is_gc_active(), "Only call concurrently");
check_card_ptr(card_ptr, _ct_bs);
check_card_ptr(card_ptr, _ct);
// If the card is no longer dirty, nothing to do.
if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
if (*card_ptr != G1CardTable::dirty_card_val()) {
return;
}
// Construct the region representing the card.
HeapWord* start = _ct_bs->addr_for(card_ptr);
HeapWord* start = _ct->addr_for(card_ptr);
// And find the region containing it.
HeapRegion* r = _g1->heap_region_containing(start);
@ -605,7 +606,7 @@ void G1RemSet::refine_card_concurrently(jbyte* card_ptr,
return;
} else if (card_ptr != orig_card_ptr) {
// Original card was inserted and an old card was evicted.
start = _ct_bs->addr_for(card_ptr);
start = _ct->addr_for(card_ptr);
r = _g1->heap_region_containing(start);
// Check whether the region formerly in the cache should be
@ -639,7 +640,7 @@ void G1RemSet::refine_card_concurrently(jbyte* card_ptr,
// Okay to clean and process the card now. There are still some
// stale card cases that may be detected by iteration and dealt with
// as iteration failure.
*const_cast<volatile jbyte*>(card_ptr) = CardTableModRefBS::clean_card_val();
*const_cast<volatile jbyte*>(card_ptr) = G1CardTable::clean_card_val();
// This fence serves two purposes. First, the card must be cleaned
// before processing the contents. Second, we can't proceed with
@ -651,7 +652,7 @@ void G1RemSet::refine_card_concurrently(jbyte* card_ptr,
// Don't use addr_for(card_ptr + 1) which can ask for
// a card beyond the heap.
HeapWord* end = start + CardTableModRefBS::card_size_in_words;
HeapWord* end = start + G1CardTable::card_size_in_words;
MemRegion dirty_region(start, MIN2(scan_limit, end));
assert(!dirty_region.is_empty(), "sanity");
@ -668,8 +669,8 @@ void G1RemSet::refine_card_concurrently(jbyte* card_ptr,
if (!card_processed) {
// The card might have gotten re-dirtied and re-enqueued while we
// worked. (In fact, it's pretty likely.)
if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
*card_ptr = CardTableModRefBS::dirty_card_val();
if (*card_ptr != G1CardTable::dirty_card_val()) {
*card_ptr = G1CardTable::dirty_card_val();
MutexLockerEx x(Shared_DirtyCardQ_lock,
Mutex::_no_safepoint_check_flag);
DirtyCardQueue* sdcq =
@ -685,20 +686,20 @@ bool G1RemSet::refine_card_during_gc(jbyte* card_ptr,
G1ScanObjsDuringUpdateRSClosure* update_rs_cl) {
assert(_g1->is_gc_active(), "Only call during GC");
check_card_ptr(card_ptr, _ct_bs);
check_card_ptr(card_ptr, _ct);
// If the card is no longer dirty, nothing to do. This covers cards that were already
// scanned as parts of the remembered sets.
if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
if (*card_ptr != G1CardTable::dirty_card_val()) {
return false;
}
// We claim lazily (so races are possible but they're benign), which reduces the
// number of potential duplicate scans (multiple threads may enqueue the same card twice).
*card_ptr = CardTableModRefBS::clean_card_val() | CardTableModRefBS::claimed_card_val();
*card_ptr = G1CardTable::clean_card_val() | G1CardTable::claimed_card_val();
// Construct the region representing the card.
HeapWord* card_start = _ct_bs->addr_for(card_ptr);
HeapWord* card_start = _ct->addr_for(card_ptr);
// And find the region containing it.
uint const card_region_idx = _g1->addr_to_region(card_start);
@ -711,7 +712,7 @@ bool G1RemSet::refine_card_during_gc(jbyte* card_ptr,
// Don't use addr_for(card_ptr + 1) which can ask for
// a card beyond the heap.
HeapWord* card_end = card_start + CardTableModRefBS::card_size_in_words;
HeapWord* card_end = card_start + G1CardTable::card_size_in_words;
MemRegion dirty_region(card_start, MIN2(scan_limit, card_end));
assert(!dirty_region.is_empty(), "sanity");
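
A note on the card-end computation above (the same idiom appears in refine_card_concurrently): for the last card, addr_for(card_ptr + 1) could name an address beyond the heap, so the end is derived from the card geometry instead. The values below are the usual defaults and are shown only for illustration.

// Assumed geometry: card_shift = 9, so card_size = 512 bytes and
// card_size_in_words = 512 / HeapWordSize = 64 on a 64-bit VM.
HeapWord* card_start = _ct->addr_for(card_ptr);
HeapWord* card_end   = card_start + G1CardTable::card_size_in_words; // stays inside this card
MemRegion dirty_region(card_start, MIN2(scan_limit, card_end));      // clip to the allocated portion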

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,6 +27,7 @@
#include "gc/g1/dirtyCardQueue.hpp"
#include "gc/g1/g1CardLiveData.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1RemSetSummary.hpp"
#include "gc/g1/heapRegion.hpp"
#include "memory/allocation.hpp"
@ -72,7 +73,7 @@ private:
G1CollectedHeap* _g1;
size_t _num_conc_refined_cards; // Number of cards refined concurrently to the mutator.
CardTableModRefBS* _ct_bs;
G1CardTable* _ct;
G1Policy* _g1p;
G1HotCardCache* _hot_card_cache;
@ -93,7 +94,7 @@ public:
void cleanupHRRS();
G1RemSet(G1CollectedHeap* g1,
CardTableModRefBS* ct_bs,
G1CardTable* ct,
G1HotCardCache* hot_card_cache);
~G1RemSet();
@ -162,7 +163,7 @@ class G1ScanRSForRegionClosure : public HeapRegionClosure {
CodeBlobClosure* _code_root_cl;
G1BlockOffsetTable* _bot;
G1SATBCardTableModRefBS *_ct_bs;
G1CardTable *_ct;
double _strong_code_root_scan_time_sec;
uint _worker_i;

View File

@ -23,22 +23,20 @@
*/
#include "precompiled.hpp"
#include "gc/g1/g1CardTable.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.inline.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/g1/satbMarkQueue.hpp"
#include "gc/shared/memset_with_concurrent_readers.hpp"
#include "logging/log.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/thread.inline.hpp"
G1SATBCardTableModRefBS::G1SATBCardTableModRefBS(
MemRegion whole_heap,
G1CardTable* card_table,
const BarrierSet::FakeRtti& fake_rtti) :
CardTableModRefBS(whole_heap, fake_rtti.add_tag(BarrierSet::G1SATBCT))
CardTableModRefBS(card_table, fake_rtti.add_tag(BarrierSet::G1SATBCT))
{ }
void G1SATBCardTableModRefBS::enqueue(oop pre_val) {
@ -80,88 +78,17 @@ void G1SATBCardTableModRefBS::write_ref_array_pre(narrowOop* dst, int count, boo
}
}
bool G1SATBCardTableModRefBS::mark_card_deferred(size_t card_index) {
jbyte val = _byte_map[card_index];
// It's already processed
if ((val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val()) {
return false;
}
// Cached bit can be installed either on a clean card or on a claimed card.
jbyte new_val = val;
if (val == clean_card_val()) {
new_val = (jbyte)deferred_card_val();
} else {
if (val & claimed_card_val()) {
new_val = val | (jbyte)deferred_card_val();
}
}
if (new_val != val) {
Atomic::cmpxchg(new_val, &_byte_map[card_index], val);
}
return true;
}
void G1SATBCardTableModRefBS::g1_mark_as_young(const MemRegion& mr) {
jbyte *const first = byte_for(mr.start());
jbyte *const last = byte_after(mr.last());
memset_with_concurrent_readers(first, g1_young_gen, last - first);
}
#ifndef PRODUCT
void G1SATBCardTableModRefBS::verify_g1_young_region(MemRegion mr) {
verify_region(mr, g1_young_gen, true);
}
#endif
void G1SATBCardTableLoggingModRefBSChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
// Default value for a clean card on the card table is -1. So we cannot take advantage of the zero_filled parameter.
MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_idx), num_regions * HeapRegion::GrainWords);
_card_table->clear(mr);
}
G1SATBCardTableLoggingModRefBS::
G1SATBCardTableLoggingModRefBS(MemRegion whole_heap) :
G1SATBCardTableModRefBS(whole_heap, BarrierSet::FakeRtti(G1SATBCTLogging)),
_dcqs(JavaThread::dirty_card_queue_set()),
_listener()
{
_listener.set_card_table(this);
}
void G1SATBCardTableLoggingModRefBS::initialize(G1RegionToSpaceMapper* mapper) {
initialize_deferred_card_mark_barriers();
mapper->set_mapping_changed_listener(&_listener);
_byte_map_size = mapper->reserved().byte_size();
_guard_index = cards_required(_whole_heap.word_size()) - 1;
_last_valid_index = _guard_index - 1;
HeapWord* low_bound = _whole_heap.start();
HeapWord* high_bound = _whole_heap.end();
_cur_covered_regions = 1;
_covered[0] = _whole_heap;
_byte_map = (jbyte*) mapper->reserved().start();
byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");
log_trace(gc, barrier)("G1SATBCardTableModRefBS::G1SATBCardTableModRefBS: ");
log_trace(gc, barrier)(" &_byte_map[0]: " INTPTR_FORMAT " &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
p2i(&_byte_map[0]), p2i(&_byte_map[_last_valid_index]));
log_trace(gc, barrier)(" byte_map_base: " INTPTR_FORMAT, p2i(byte_map_base));
}
G1SATBCardTableLoggingModRefBS(G1CardTable* card_table) :
G1SATBCardTableModRefBS(card_table, BarrierSet::FakeRtti(G1SATBCTLogging)),
_dcqs(JavaThread::dirty_card_queue_set()) {}
void G1SATBCardTableLoggingModRefBS::write_ref_field_post_slow(volatile jbyte* byte) {
// In the slow path, we know a card is not young
assert(*byte != g1_young_gen, "slow path invoked without filtering");
assert(*byte != G1CardTable::g1_young_card_val(), "slow path invoked without filtering");
OrderAccess::storeload();
if (*byte != dirty_card) {
*byte = dirty_card;
if (*byte != G1CardTable::dirty_card_val()) {
*byte = G1CardTable::dirty_card_val();
Thread* thr = Thread::current();
if (thr->is_Java_thread()) {
JavaThread* jt = (JavaThread*)thr;
@ -174,16 +101,15 @@ void G1SATBCardTableLoggingModRefBS::write_ref_field_post_slow(volatile jbyte* b
}
}
void
G1SATBCardTableLoggingModRefBS::invalidate(MemRegion mr) {
void G1SATBCardTableLoggingModRefBS::invalidate(MemRegion mr) {
if (mr.is_empty()) {
return;
}
volatile jbyte* byte = byte_for(mr.start());
jbyte* last_byte = byte_for(mr.last());
volatile jbyte* byte = _card_table->byte_for(mr.start());
jbyte* last_byte = _card_table->byte_for(mr.last());
Thread* thr = Thread::current();
// skip all consecutive young cards
for (; byte <= last_byte && *byte == g1_young_gen; byte++);
for (; byte <= last_byte && *byte == G1CardTable::g1_young_card_val(); byte++);
if (byte <= last_byte) {
OrderAccess::storeload();
@ -191,11 +117,11 @@ G1SATBCardTableLoggingModRefBS::invalidate(MemRegion mr) {
if (thr->is_Java_thread()) {
JavaThread* jt = (JavaThread*)thr;
for (; byte <= last_byte; byte++) {
if (*byte == g1_young_gen) {
if (*byte == G1CardTable::g1_young_card_val()) {
continue;
}
if (*byte != dirty_card) {
*byte = dirty_card;
if (*byte != G1CardTable::dirty_card_val()) {
*byte = G1CardTable::dirty_card_val();
jt->dirty_card_queue().enqueue(byte);
}
}
@ -203,11 +129,11 @@ G1SATBCardTableLoggingModRefBS::invalidate(MemRegion mr) {
MutexLockerEx x(Shared_DirtyCardQ_lock,
Mutex::_no_safepoint_check_flag);
for (; byte <= last_byte; byte++) {
if (*byte == g1_young_gen) {
if (*byte == G1CardTable::g1_young_card_val()) {
continue;
}
if (*byte != dirty_card) {
*byte = dirty_card;
if (*byte != G1CardTable::dirty_card_val()) {
*byte = G1CardTable::dirty_card_val();
_dcqs.shared_dirty_card_queue()->enqueue(byte);
}
}
@ -215,11 +141,6 @@ G1SATBCardTableLoggingModRefBS::invalidate(MemRegion mr) {
}
}
bool G1SATBCardTableModRefBS::is_in_young(oop obj) const {
volatile jbyte* p = byte_for((void*)obj);
return *p == g1_young_card_val();
}
void G1SATBCardTableLoggingModRefBS::on_thread_attach(JavaThread* thread) {
// This method initializes the SATB and dirty card queues before a
// JavaThread is added to the Java thread list. Right now, we don't

View File

@ -33,6 +33,8 @@
class DirtyCardQueueSet;
class G1SATBCardTableLoggingModRefBS;
class CardTable;
class G1CardTable;
// This barrier is specialized to use a logging barrier to support
// snapshot-at-the-beginning marking.
@ -40,16 +42,10 @@ class G1SATBCardTableLoggingModRefBS;
class G1SATBCardTableModRefBS: public CardTableModRefBS {
friend class VMStructs;
protected:
enum G1CardValues {
g1_young_gen = CT_MR_BS_last_reserved << 1
};
G1SATBCardTableModRefBS(MemRegion whole_heap, const BarrierSet::FakeRtti& fake_rtti);
G1SATBCardTableModRefBS(G1CardTable* table, const BarrierSet::FakeRtti& fake_rtti);
~G1SATBCardTableModRefBS() { }
public:
static int g1_young_card_val() { return g1_young_gen; }
// Add "pre_val" to a set of objects that may have been disconnected from the
// pre-marking object graph.
static void enqueue(oop pre_val);
@ -62,38 +58,6 @@ public:
template <DecoratorSet decorators, typename T>
void write_ref_field_pre(T* field);
/*
Claimed and deferred bits are used together in G1 during the evacuation
pause. These bits can have the following state transitions:
1. The claimed bit can be put over any other card state. Except that
the "dirty -> dirty and claimed" transition is checked for in
G1 code and is not used.
2. Deferred bit can be set only if the previous state of the card
was either clean or claimed. mark_card_deferred() is wait-free.
We do not care whether the operation is successful because if
it is not, it will only result in a duplicate entry in the update
buffer because of the "cache-miss". So it's not worth spinning.
*/
bool is_card_claimed(size_t card_index) {
jbyte val = _byte_map[card_index];
return (val & (clean_card_mask_val() | claimed_card_val())) == claimed_card_val();
}
inline void set_card_claimed(size_t card_index);
void verify_g1_young_region(MemRegion mr) PRODUCT_RETURN;
void g1_mark_as_young(const MemRegion& mr);
bool mark_card_deferred(size_t card_index);
bool is_card_deferred(size_t card_index) {
jbyte val = _byte_map[card_index];
return (val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val();
}
virtual bool is_in_young(oop obj) const;
};
template<>
@ -106,42 +70,14 @@ struct BarrierSet::GetType<BarrierSet::G1SATBCT> {
typedef G1SATBCardTableModRefBS type;
};
class G1SATBCardTableLoggingModRefBSChangedListener : public G1MappingChangedListener {
private:
G1SATBCardTableLoggingModRefBS* _card_table;
public:
G1SATBCardTableLoggingModRefBSChangedListener() : _card_table(NULL) { }
void set_card_table(G1SATBCardTableLoggingModRefBS* card_table) { _card_table = card_table; }
virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled);
};
// Adds card-table logging to the post-barrier.
// Usual invariant: all dirty cards are logged in the DirtyCardQueueSet.
class G1SATBCardTableLoggingModRefBS: public G1SATBCardTableModRefBS {
friend class G1SATBCardTableLoggingModRefBSChangedListener;
private:
G1SATBCardTableLoggingModRefBSChangedListener _listener;
DirtyCardQueueSet& _dcqs;
public:
static size_t compute_size(size_t mem_region_size_in_words) {
size_t number_of_slots = (mem_region_size_in_words / card_size_in_words);
return ReservedSpace::allocation_align_size_up(number_of_slots);
}
// Returns how many bytes of the heap a single byte of the Card Table corresponds to.
static size_t heap_map_factor() {
return CardTableModRefBS::card_size;
}
G1SATBCardTableLoggingModRefBS(MemRegion whole_heap);
virtual void initialize() { }
virtual void initialize(G1RegionToSpaceMapper* mapper);
virtual void resize_covered_region(MemRegion new_region) { ShouldNotReachHere(); }
G1SATBCardTableLoggingModRefBS(G1CardTable* card_table);
// NB: if you do a whole-heap invalidation, the "usual invariant" defined
// above no longer applies.
@ -157,10 +93,6 @@ class G1SATBCardTableLoggingModRefBS: public G1SATBCardTableModRefBS {
virtual void on_thread_attach(JavaThread* thread);
virtual void on_thread_detach(JavaThread* thread);
virtual bool card_mark_must_follow_store() const {
return true;
}
// Callbacks for runtime accesses.
template <DecoratorSet decorators, typename BarrierSetT = G1SATBCardTableLoggingModRefBS>
class AccessBarrier: public ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT> {

View File

@ -25,8 +25,9 @@
#ifndef SHARE_VM_GC_G1_G1SATBCARDTABLEMODREFBS_INLINE_HPP
#define SHARE_VM_GC_G1_G1SATBCARDTABLEMODREFBS_INLINE_HPP
#include "gc/shared/accessBarrierSupport.inline.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/shared/accessBarrierSupport.inline.hpp"
template <DecoratorSet decorators, typename T>
inline void G1SATBCardTableModRefBS::write_ref_field_pre(T* field) {
@ -43,23 +44,13 @@ inline void G1SATBCardTableModRefBS::write_ref_field_pre(T* field) {
template <DecoratorSet decorators, typename T>
inline void G1SATBCardTableLoggingModRefBS::write_ref_field_post(T* field, oop new_val) {
volatile jbyte* byte = byte_for(field);
if (*byte != g1_young_gen) {
volatile jbyte* byte = _card_table->byte_for(field);
if (*byte != G1CardTable::g1_young_card_val()) {
// Take a slow path for cards in old
write_ref_field_post_slow(byte);
}
}
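
For orientation, the shape of the G1 post-barrier that these hunks preserve while re-routing card accesses through _card_table (a paraphrase of write_ref_field_post above and write_ref_field_post_slow earlier in this change; the enqueue helper name below is hypothetical):

volatile jbyte* byte = _card_table->byte_for(field);
if (*byte != G1CardTable::g1_young_card_val()) {   // fast path: young cards need no logging
  OrderAccess::storeload();                        // order the reference store before the re-check
  if (*byte != G1CardTable::dirty_card_val()) {
    *byte = G1CardTable::dirty_card_val();
    enqueue_on_dirty_card_queue(byte);             // hypothetical stand-in for the thread/shared queue logic
  }
}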
void G1SATBCardTableModRefBS::set_card_claimed(size_t card_index) {
jbyte val = _byte_map[card_index];
if (val == clean_card_val()) {
val = (jbyte)claimed_card_val();
} else {
val |= (jbyte)claimed_card_val();
}
_byte_map[card_index] = val;
}
inline void G1SATBCardTableModRefBS::enqueue_if_weak_or_archive(DecoratorSet decorators, oop value) {
assert((decorators & ON_UNKNOWN_OOP_REF) == 0, "Reference strength must be known");
// Archive roots need to be enqueued since they add subgraphs to the

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -100,7 +100,7 @@ void HeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_hea
guarantee((size_t) 1 << LogOfHRGrainWords == GrainWords, "sanity");
guarantee(CardsPerRegion == 0, "we should only set it once");
CardsPerRegion = GrainBytes >> CardTableModRefBS::card_shift;
CardsPerRegion = GrainBytes >> G1CardTable::card_shift;
if (G1HeapRegionSize != GrainBytes) {
FLAG_SET_ERGO(size_t, G1HeapRegionSize, GrainBytes);
@ -139,9 +139,8 @@ void HeapRegion::par_clear() {
assert(capacity() == HeapRegion::GrainBytes, "should be back to normal");
HeapRegionRemSet* hrrs = rem_set();
hrrs->clear();
CardTableModRefBS* ct_bs =
barrier_set_cast<CardTableModRefBS>(G1CollectedHeap::heap()->barrier_set());
ct_bs->clear(MemRegion(bottom(), end()));
G1CardTable* ct = G1CollectedHeap::heap()->card_table();
ct->clear(MemRegion(bottom(), end()));
}
void HeapRegion::calc_gc_efficiency() {
@ -463,7 +462,7 @@ void HeapRegion::print_on(outputStream* st) const {
class G1VerificationClosure : public OopClosure {
protected:
G1CollectedHeap* _g1h;
CardTableModRefBS* _bs;
G1CardTable *_ct;
oop _containing_obj;
bool _failures;
int _n_failures;
@ -473,7 +472,7 @@ public:
// _vo == UseNextMarking -> use "next" marking information,
// _vo == UseFullMarking -> use "next" marking bitmap but no TAMS.
G1VerificationClosure(G1CollectedHeap* g1h, VerifyOption vo) :
_g1h(g1h), _bs(barrier_set_cast<CardTableModRefBS>(g1h->barrier_set())),
_g1h(g1h), _ct(g1h->card_table()),
_containing_obj(NULL), _failures(false), _n_failures(0), _vo(vo) {
}
@ -576,9 +575,9 @@ public:
if (from != NULL && to != NULL &&
from != to &&
!to->is_pinned()) {
jbyte cv_obj = *_bs->byte_for_const(_containing_obj);
jbyte cv_field = *_bs->byte_for_const(p);
const jbyte dirty = CardTableModRefBS::dirty_card_val();
jbyte cv_obj = *_ct->byte_for_const(_containing_obj);
jbyte cv_field = *_ct->byte_for_const(p);
const jbyte dirty = G1CardTable::dirty_card_val();
bool is_bad = !(from->is_young()
|| to->rem_set()->contains_reference(p)
@ -834,7 +833,6 @@ void G1ContiguousSpace::clear(bool mangle_space) {
CompactibleSpace::clear(mangle_space);
reset_bot();
}
#ifndef PRODUCT
void G1ContiguousSpace::mangle_unused_area() {
mangle_unused_area_complete();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -32,6 +32,7 @@
#include "gc/g1/heapRegionType.hpp"
#include "gc/g1/survRateGroup.hpp"
#include "gc/shared/ageTable.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "utilities/macros.hpp"

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -103,7 +103,7 @@ protected:
if (loc_hr->is_in_reserved(from)) {
size_t hw_offset = pointer_delta((HeapWord*)from, loc_hr->bottom());
CardIdx_t from_card = (CardIdx_t)
hw_offset >> (CardTableModRefBS::card_shift - LogHeapWordSize);
hw_offset >> (G1CardTable::card_shift - LogHeapWordSize);
assert((size_t)from_card < HeapRegion::CardsPerRegion,
"Must be in range.");
@ -170,7 +170,7 @@ public:
bool contains_reference(OopOrNarrowOopStar from) const {
assert(hr()->is_in_reserved(from), "Precondition.");
size_t card_ind = pointer_delta(from, hr()->bottom(),
CardTableModRefBS::card_size);
G1CardTable::card_size);
return _bm.at(card_ind);
}
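
A worked example of the card-index arithmetic used above, with assumed values (card_shift = 9, i.e. 512-byte cards, and LogHeapWordSize = 3 on a 64-bit VM), so one card covers 2^(9-3) = 64 heap words:

// Offset of the referencing field, in words, from the bottom of its region.
size_t hw_offset = pointer_delta((HeapWord*)from, loc_hr->bottom());
// One card per 64 heap words under the assumed geometry.
CardIdx_t from_card = (CardIdx_t)(hw_offset >> (G1CardTable::card_shift - LogHeapWordSize));
// e.g. a field 10,000 words into the region maps to card 10,000 >> 6 = 156,
// which is the same as 10,000 * 8 bytes / 512 bytes per card.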
@ -354,7 +354,7 @@ void OtherRegionsTable::unlink_from_all(PerRegionTable* prt) {
void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, uint tid) {
uint cur_hrm_ind = _hr->hrm_index();
int from_card = (int)(uintptr_t(from) >> CardTableModRefBS::card_shift);
int from_card = (int)(uintptr_t(from) >> G1CardTable::card_shift);
if (G1FromCardCache::contains_or_replace(tid, cur_hrm_ind, from_card)) {
assert(contains_reference(from), "We just found " PTR_FORMAT " in the FromCardCache", p2i(from));
@ -382,7 +382,7 @@ void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, uint tid) {
uintptr_t from_hr_bot_card_index =
uintptr_t(from_hr->bottom())
>> CardTableModRefBS::card_shift;
>> G1CardTable::card_shift;
CardIdx_t card_index = from_card - from_hr_bot_card_index;
assert((size_t)card_index < HeapRegion::CardsPerRegion,
"Must be in range.");
@ -671,9 +671,9 @@ bool OtherRegionsTable::contains_reference_locked(OopOrNarrowOopStar from) const
} else {
uintptr_t from_card =
(uintptr_t(from) >> CardTableModRefBS::card_shift);
(uintptr_t(from) >> G1CardTable::card_shift);
uintptr_t hr_bot_card_index =
uintptr_t(hr->bottom()) >> CardTableModRefBS::card_shift;
uintptr_t(hr->bottom()) >> G1CardTable::card_shift;
assert(from_card >= hr_bot_card_index, "Inv");
CardIdx_t card_index = from_card - hr_bot_card_index;
assert((size_t)card_index < HeapRegion::CardsPerRegion,

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -41,7 +41,7 @@ void SparsePRTEntry::init(RegionIdx_t region_ind) {
// Check that the card array element type can represent all cards in the region.
// Choose a large SparsePRTEntry::card_elem_t (e.g. CardIdx_t) if required.
assert(((size_t)1 << (sizeof(SparsePRTEntry::card_elem_t) * BitsPerByte)) *
G1SATBCardTableModRefBS::card_size >= HeapRegionBounds::max_size(), "precondition");
G1CardTable::card_size >= HeapRegionBounds::max_size(), "precondition");
assert(G1RSetSparseRegionEntries > 0, "precondition");
_region_ind = region_ind;
_next_index = RSHashTable::NullEntry;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -509,7 +509,7 @@ void ASPSYoungGen::reset_after_change() {
}
MemRegion cmr((HeapWord*)virtual_space()->low(),
(HeapWord*)virtual_space()->high());
ParallelScavengeHeap::heap()->barrier_set()->resize_covered_region(cmr);
ParallelScavengeHeap::heap()->barrier_set()->card_table()->resize_covered_region(cmr);
space_invariants();
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -34,7 +34,7 @@
void ObjectStartArray::initialize(MemRegion reserved_region) {
// We're based on the assumption that we use the same
// size blocks as the card table.
assert((int)block_size == (int)CardTableModRefBS::card_size, "Sanity");
assert((int)block_size == (int)CardTable::card_size, "Sanity");
assert((int)block_size <= 512, "block_size must be less than or equal to 512");
// Calculate how much space must be reserved

View File

@ -26,7 +26,6 @@
#include "code/codeCache.hpp"
#include "gc/parallel/adjoiningGenerations.hpp"
#include "gc/parallel/adjoiningVirtualSpaces.hpp"
#include "gc/parallel/cardTableExtension.hpp"
#include "gc/parallel/gcTaskManager.hpp"
#include "gc/parallel/generationSizer.hpp"
#include "gc/parallel/objectStartArray.inline.hpp"
@ -70,7 +69,9 @@ jint ParallelScavengeHeap::initialize() {
initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
CardTableExtension* const barrier_set = new CardTableExtension(reserved_region());
PSCardTable* card_table = new PSCardTable(reserved_region());
card_table->initialize();
CardTableModRefBS* const barrier_set = new CardTableModRefBS(card_table);
barrier_set->initialize();
set_barrier_set(barrier_set);
@ -625,6 +626,14 @@ ParallelScavengeHeap* ParallelScavengeHeap::heap() {
return (ParallelScavengeHeap*)heap;
}
CardTableModRefBS* ParallelScavengeHeap::barrier_set() {
return barrier_set_cast<CardTableModRefBS>(CollectedHeap::barrier_set());
}
PSCardTable* ParallelScavengeHeap::card_table() {
return static_cast<PSCardTable*>(barrier_set()->card_table());
}
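
A small usage sketch of the new accessor pair, assuming the initialization shown earlier where the heap installs a CardTableModRefBS wrapping a PSCardTable (the region variable below is hypothetical):

ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
PSCardTable* ct = heap->card_table();     // static_cast is safe: the heap only ever installs a PSCardTable
ct->resize_covered_region(young_region);  // geometry operations now go to the table, not the barrier set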
// Before delegating the resize to the young generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.

View File

@ -30,6 +30,7 @@
#include "gc/parallel/psGCAdaptivePolicyCounters.hpp"
#include "gc/parallel/psOldGen.hpp"
#include "gc/parallel/psYoungGen.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectorPolicy.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
@ -46,6 +47,7 @@ class GCTaskManager;
class MemoryManager;
class MemoryPool;
class PSAdaptiveSizePolicy;
class PSCardTable;
class PSHeapSummary;
class ParallelScavengeHeap : public CollectedHeap {
@ -125,6 +127,9 @@ class ParallelScavengeHeap : public CollectedHeap {
static GCTaskManager* const gc_task_manager() { return _gc_task_manager; }
CardTableModRefBS* barrier_set();
PSCardTable* card_table();
AdjoiningGenerations* gens() { return _gens; }
// Returns JNI_OK on success

View File

@ -23,10 +23,10 @@
*/
#include "precompiled.hpp"
#include "gc/parallel/cardTableExtension.hpp"
#include "gc/parallel/gcTaskManager.hpp"
#include "gc/parallel/objectStartArray.inline.hpp"
#include "gc/parallel/parallelScavengeHeap.inline.hpp"
#include "gc/parallel/psCardTable.hpp"
#include "gc/parallel/psPromotionManager.inline.hpp"
#include "gc/parallel/psScavenge.hpp"
#include "gc/parallel/psTasks.hpp"
@ -39,9 +39,9 @@
// may be either dirty or newgen.
class CheckForUnmarkedOops : public OopClosure {
private:
PSYoungGen* _young_gen;
CardTableExtension* _card_table;
HeapWord* _unmarked_addr;
PSYoungGen* _young_gen;
PSCardTable* _card_table;
HeapWord* _unmarked_addr;
protected:
template <class T> void do_oop_work(T* p) {
@ -56,7 +56,7 @@ class CheckForUnmarkedOops : public OopClosure {
}
public:
CheckForUnmarkedOops(PSYoungGen* young_gen, CardTableExtension* card_table) :
CheckForUnmarkedOops(PSYoungGen* young_gen, PSCardTable* card_table) :
_young_gen(young_gen), _card_table(card_table), _unmarked_addr(NULL) { }
virtual void do_oop(oop* p) { CheckForUnmarkedOops::do_oop_work(p); }
@ -71,16 +71,14 @@ class CheckForUnmarkedOops : public OopClosure {
// precise or imprecise, dirty or newgen.
class CheckForUnmarkedObjects : public ObjectClosure {
private:
PSYoungGen* _young_gen;
CardTableExtension* _card_table;
PSYoungGen* _young_gen;
PSCardTable* _card_table;
public:
CheckForUnmarkedObjects() {
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
_young_gen = heap->young_gen();
_card_table = barrier_set_cast<CardTableExtension>(heap->barrier_set());
// No point in asserting barrier set type here. Need to make CardTableExtension
// a unique barrier set type.
_card_table = heap->card_table();
}
// Card marks are not precise. The current system can leave us with
@ -99,8 +97,8 @@ class CheckForUnmarkedObjects : public ObjectClosure {
// Checks for precise marking of oops as newgen.
class CheckForPreciseMarks : public OopClosure {
private:
PSYoungGen* _young_gen;
CardTableExtension* _card_table;
PSYoungGen* _young_gen;
PSCardTable* _card_table;
protected:
template <class T> void do_oop_work(T* p) {
@ -112,7 +110,7 @@ class CheckForPreciseMarks : public OopClosure {
}
public:
CheckForPreciseMarks( PSYoungGen* young_gen, CardTableExtension* card_table ) :
CheckForPreciseMarks(PSYoungGen* young_gen, PSCardTable* card_table) :
_young_gen(young_gen), _card_table(card_table) { }
virtual void do_oop(oop* p) { CheckForPreciseMarks::do_oop_work(p); }
@ -128,12 +126,12 @@ class CheckForPreciseMarks : public OopClosure {
// when the space is empty, fix the calculation of
// end_card to allow sp_top == sp->bottom().
void CardTableExtension::scavenge_contents_parallel(ObjectStartArray* start_array,
MutableSpace* sp,
HeapWord* space_top,
PSPromotionManager* pm,
uint stripe_number,
uint stripe_total) {
void PSCardTable::scavenge_contents_parallel(ObjectStartArray* start_array,
MutableSpace* sp,
HeapWord* space_top,
PSPromotionManager* pm,
uint stripe_number,
uint stripe_total) {
int ssize = 128; // Naked constant! Work unit = 64k.
int dirty_card_count = 0;
@ -320,7 +318,7 @@ void CardTableExtension::scavenge_contents_parallel(ObjectStartArray* start_arra
}
// This should be called before a scavenge.
void CardTableExtension::verify_all_young_refs_imprecise() {
void PSCardTable::verify_all_young_refs_imprecise() {
CheckForUnmarkedObjects check;
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
@ -330,26 +328,21 @@ void CardTableExtension::verify_all_young_refs_imprecise() {
}
// This should be called immediately after a scavenge, before mutators resume.
void CardTableExtension::verify_all_young_refs_precise() {
void PSCardTable::verify_all_young_refs_precise() {
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
PSOldGen* old_gen = heap->old_gen();
CheckForPreciseMarks check(
heap->young_gen(),
barrier_set_cast<CardTableExtension>(heap->barrier_set()));
CheckForPreciseMarks check(heap->young_gen(), this);
old_gen->oop_iterate_no_header(&check);
verify_all_young_refs_precise_helper(old_gen->object_space()->used_region());
}
void CardTableExtension::verify_all_young_refs_precise_helper(MemRegion mr) {
CardTableExtension* card_table =
barrier_set_cast<CardTableExtension>(ParallelScavengeHeap::heap()->barrier_set());
jbyte* bot = card_table->byte_for(mr.start());
jbyte* top = card_table->byte_for(mr.end());
while(bot <= top) {
void PSCardTable::verify_all_young_refs_precise_helper(MemRegion mr) {
jbyte* bot = byte_for(mr.start());
jbyte* top = byte_for(mr.end());
while (bot <= top) {
assert(*bot == clean_card || *bot == verify_card, "Found unwanted or unknown card mark");
if (*bot == verify_card)
*bot = youngergen_card;
@ -357,7 +350,7 @@ void CardTableExtension::verify_all_young_refs_precise_helper(MemRegion mr) {
}
}
bool CardTableExtension::addr_is_marked_imprecise(void *addr) {
bool PSCardTable::addr_is_marked_imprecise(void *addr) {
jbyte* p = byte_for(addr);
jbyte val = *p;
@ -376,7 +369,7 @@ bool CardTableExtension::addr_is_marked_imprecise(void *addr) {
}
// Also includes verify_card
bool CardTableExtension::addr_is_marked_precise(void *addr) {
bool PSCardTable::addr_is_marked_precise(void *addr) {
jbyte* p = byte_for(addr);
jbyte val = *p;
@ -404,8 +397,7 @@ bool CardTableExtension::addr_is_marked_precise(void *addr) {
// The method resize_covered_region_by_end() is analogous to
// CardTableModRefBS::resize_covered_region() but
// for regions that grow or shrink at the low end.
void CardTableExtension::resize_covered_region(MemRegion new_region) {
void PSCardTable::resize_covered_region(MemRegion new_region) {
for (int i = 0; i < _cur_covered_regions; i++) {
if (_covered[i].start() == new_region.start()) {
// Found a covered region with the same start as the
@ -439,13 +431,13 @@ void CardTableExtension::resize_covered_region(MemRegion new_region) {
resize_covered_region_by_start(new_region);
}
void CardTableExtension::resize_covered_region_by_start(MemRegion new_region) {
CardTableModRefBS::resize_covered_region(new_region);
void PSCardTable::resize_covered_region_by_start(MemRegion new_region) {
CardTable::resize_covered_region(new_region);
debug_only(verify_guard();)
}
void CardTableExtension::resize_covered_region_by_end(int changed_region,
MemRegion new_region) {
void PSCardTable::resize_covered_region_by_end(int changed_region,
MemRegion new_region) {
assert(SafepointSynchronize::is_at_safepoint(),
"Only expect an expansion at the low end at a GC");
debug_only(verify_guard();)
@ -484,8 +476,8 @@ void CardTableExtension::resize_covered_region_by_end(int changed_region,
debug_only(verify_guard();)
}
bool CardTableExtension::resize_commit_uncommit(int changed_region,
MemRegion new_region) {
bool PSCardTable::resize_commit_uncommit(int changed_region,
MemRegion new_region) {
bool result = false;
// Commit new or uncommit old pages, if necessary.
MemRegion cur_committed = _committed[changed_region];
@ -506,13 +498,12 @@ bool CardTableExtension::resize_commit_uncommit(int changed_region,
#ifdef ASSERT
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
assert(cur_committed.start() == align_up(cur_committed.start(), os::vm_page_size()),
"Starts should have proper alignment");
"Starts should have proper alignment");
#endif
jbyte* new_start = byte_for(new_region.start());
// Round down because this is for the start address
HeapWord* new_start_aligned =
(HeapWord*)align_down((uintptr_t)new_start, os::vm_page_size());
HeapWord* new_start_aligned = align_down((HeapWord*)new_start, os::vm_page_size());
// The guard page is always committed and should not be committed over.
// This method is used in cases where the generation is growing toward
// lower addresses but the guard region is still at the end of the
@ -579,21 +570,20 @@ bool CardTableExtension::resize_commit_uncommit(int changed_region,
return result;
}
void CardTableExtension::resize_update_committed_table(int changed_region,
MemRegion new_region) {
void PSCardTable::resize_update_committed_table(int changed_region,
MemRegion new_region) {
jbyte* new_start = byte_for(new_region.start());
// Set the new start of the committed region
HeapWord* new_start_aligned =
(HeapWord*)align_down(new_start, os::vm_page_size());
HeapWord* new_start_aligned = align_down((HeapWord*)new_start, os::vm_page_size());
MemRegion new_committed = MemRegion(new_start_aligned,
_committed[changed_region].end());
_committed[changed_region].end());
_committed[changed_region] = new_committed;
_committed[changed_region].set_start(new_start_aligned);
}
void CardTableExtension::resize_update_card_table_entries(int changed_region,
MemRegion new_region) {
void PSCardTable::resize_update_card_table_entries(int changed_region,
MemRegion new_region) {
debug_only(verify_guard();)
MemRegion original_covered = _covered[changed_region];
// Initialize the card entries. Only consider the
@ -610,8 +600,8 @@ void CardTableExtension::resize_update_card_table_entries(int changed_region,
while (entry < end) { *entry++ = clean_card; }
}
void CardTableExtension::resize_update_covered_table(int changed_region,
MemRegion new_region) {
void PSCardTable::resize_update_covered_table(int changed_region,
MemRegion new_region) {
// Update the covered region
_covered[changed_region].set_start(new_region.start());
_covered[changed_region].set_word_size(new_region.word_size());
@ -665,7 +655,7 @@ void CardTableExtension::resize_update_covered_table(int changed_region,
// -------------
// ^ returns this
HeapWord* CardTableExtension::lowest_prev_committed_start(int ind) const {
HeapWord* PSCardTable::lowest_prev_committed_start(int ind) const {
assert(_cur_covered_regions >= 0, "Expecting at least one region");
HeapWord* min_start = _committed[ind].start();
for (int j = 0; j < ind; j++) {
@ -678,6 +668,6 @@ HeapWord* CardTableExtension::lowest_prev_committed_start(int ind) const {
return min_start;
}
bool CardTableExtension::is_in_young(oop obj) const {
bool PSCardTable::is_in_young(oop obj) const {
return ParallelScavengeHeap::heap()->is_in_young(obj);
}

View File

@ -22,17 +22,18 @@
*
*/
#ifndef SHARE_VM_GC_PARALLEL_CARDTABLEEXTENSION_HPP
#define SHARE_VM_GC_PARALLEL_CARDTABLEEXTENSION_HPP
#ifndef SHARE_VM_GC_PARALLEL_PSCARDTABLE_HPP
#define SHARE_VM_GC_PARALLEL_PSCARDTABLE_HPP
#include "gc/shared/cardTableModRefBS.hpp"
#include "gc/shared/cardTable.hpp"
#include "oops/oop.hpp"
class MutableSpace;
class ObjectStartArray;
class PSPromotionManager;
class GCTaskQueue;
class CardTableExtension : public CardTableModRefBS {
class PSCardTable: public CardTable {
private:
// Support methods for resizing the card table.
// resize_commit_uncommit() returns true if the pages were committed or
@ -43,21 +44,18 @@ class CardTableExtension : public CardTableModRefBS {
void resize_update_committed_table(int changed_region, MemRegion new_region);
void resize_update_covered_table(int changed_region, MemRegion new_region);
protected:
void verify_all_young_refs_precise_helper(MemRegion mr);
static void verify_all_young_refs_precise_helper(MemRegion mr);
public:
enum ExtendedCardValue {
youngergen_card = CardTableModRefBS::CT_MR_BS_last_reserved + 1,
verify_card = CardTableModRefBS::CT_MR_BS_last_reserved + 5
youngergen_card = CT_MR_BS_last_reserved + 1,
verify_card = CT_MR_BS_last_reserved + 5
};
CardTableExtension(MemRegion whole_heap) :
CardTableModRefBS(
whole_heap,
BarrierSet::FakeRtti(BarrierSet::CardTableExtension))
{ }
public:
PSCardTable(MemRegion whole_heap) : CardTable(whole_heap, /* scanned_concurrently */ false) {}
static jbyte youngergen_card_val() { return youngergen_card; }
static jbyte verify_card_val() { return verify_card; }
// Scavenge support
void scavenge_contents_parallel(ObjectStartArray* start_array,
@ -67,10 +65,6 @@ class CardTableExtension : public CardTableModRefBS {
uint stripe_number,
uint stripe_total);
// Verification
static void verify_all_young_refs_imprecise();
static void verify_all_young_refs_precise();
bool addr_is_marked_imprecise(void *addr);
bool addr_is_marked_precise(void *addr);
@ -88,6 +82,9 @@ class CardTableExtension : public CardTableModRefBS {
*byte = youngergen_card;
}
// ReduceInitialCardMarks support
bool is_in_young(oop obj) const;
// Adaptive size policy support
// Allows adjustment of the base and size of the covered regions
void resize_covered_region(MemRegion new_region);
@ -102,29 +99,14 @@ class CardTableExtension : public CardTableModRefBS {
HeapWord* lowest_prev_committed_start(int ind) const;
#ifdef ASSERT
bool is_valid_card_address(jbyte* addr) {
return (addr >= _byte_map) && (addr < _byte_map + _byte_map_size);
}
#endif // ASSERT
// ReduceInitialCardMarks support
virtual bool is_in_young(oop obj) const;
virtual bool card_mark_must_follow_store() const {
return false;
}
// Verification
void verify_all_young_refs_imprecise();
void verify_all_young_refs_precise();
};
template<>
struct BarrierSet::GetName<CardTableExtension> {
static const BarrierSet::Name value = BarrierSet::CardTableExtension;
};
template<>
struct BarrierSet::GetType<BarrierSet::CardTableExtension> {
typedef ::CardTableExtension type;
};
#endif // SHARE_VM_GC_PARALLEL_CARDTABLEEXTENSION_HPP
#endif // SHARE_VM_GC_PARALLEL_PSCARDTABLE_HPP
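
Taken together with the G1 and shared changes in this commit, the resulting shape is roughly the following (a summary sketch, not copied from the sources; CardTableRS's exact parentage is assumed):

//   CardTable                         - plain card-marking data structure (byte map, covered/committed regions)
//     +- PSCardTable                  - Parallel GC (replaces CardTableExtension)
//     +- G1CardTable                  - G1 card values (young, dirty, clean, claimed, deferred)
//     +- CardTableRS (assumed)        - generational remembered-set card table
//
//   BarrierSet
//     +- CardTableModRefBS            - the write barrier; holds a CardTable* (see card_table())
//          +- G1SATBCardTableModRefBS / G1SATBCardTableLoggingModRefBS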

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -236,12 +236,12 @@ bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
young_gen->to_space()->is_empty();
young_gen_empty = eden_empty && survivors_empty;
ModRefBarrierSet* modBS = barrier_set_cast<ModRefBarrierSet>(heap->barrier_set());
PSCardTable* card_table = heap->card_table();
MemRegion old_mr = heap->old_gen()->reserved();
if (young_gen_empty) {
modBS->clear(MemRegion(old_mr.start(), old_mr.end()));
card_table->clear(MemRegion(old_mr.start(), old_mr.end()));
} else {
modBS->invalidate(MemRegion(old_mr.start(), old_mr.end()));
card_table->invalidate(MemRegion(old_mr.start(), old_mr.end()));
}
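
The clear/invalidate distinction the branch above relies on (standard card-table semantics, stated here as an assumption rather than taken from this change):

// Young gen empty: no old->young references can remain, so reset the old-gen cards to clean.
card_table->clear(old_mr);
// Young gen not empty: conservatively dirty every old-gen card so the next young collection rescans them.
card_table->invalidate(old_mr);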
// Delete metaspaces for unloaded class loaders and clean up loader_data graph

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,6 +26,7 @@
#include "gc/parallel/objectStartArray.inline.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psCardTable.hpp"
#include "gc/parallel/psMarkSweepDecorator.hpp"
#include "gc/parallel/psOldGen.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
@ -111,11 +112,8 @@ void PSOldGen::initialize_work(const char* perf_data_name, int level) {
}
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
BarrierSet* bs = heap->barrier_set();
bs->resize_covered_region(cmr);
CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
PSCardTable* ct = heap->card_table();
ct->resize_covered_region(cmr);
// Verify that the start and end of this generation is the start of a card.
// If this wasn't true, a single card could span more than one generation,
@ -386,7 +384,7 @@ void PSOldGen::post_resize() {
size_t new_word_size = new_memregion.word_size();
start_array()->set_covered_region(new_memregion);
ParallelScavengeHeap::heap()->barrier_set()->resize_covered_region(new_memregion);
ParallelScavengeHeap::heap()->card_table()->resize_covered_region(new_memregion);
// ALWAYS do this last!!
object_space()->initialize(new_memregion,

View File

@ -1017,12 +1017,12 @@ void PSParallelCompact::post_compact()
bool young_gen_empty = eden_empty && from_space->is_empty() &&
to_space->is_empty();
ModRefBarrierSet* modBS = barrier_set_cast<ModRefBarrierSet>(heap->barrier_set());
PSCardTable* ct = heap->card_table();
MemRegion old_mr = heap->old_gen()->reserved();
if (young_gen_empty) {
modBS->clear(MemRegion(old_mr.start(), old_mr.end()));
ct->clear(MemRegion(old_mr.start(), old_mr.end()));
} else {
modBS->invalidate(MemRegion(old_mr.start(), old_mr.end()));
ct->invalidate(MemRegion(old_mr.start(), old_mr.end()));
}
// Delete metaspaces for unloaded class loaders and clean up loader_data graph

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,7 +25,6 @@
#include "precompiled.hpp"
#include "classfile/stringTable.hpp"
#include "code/codeCache.hpp"
#include "gc/parallel/cardTableExtension.hpp"
#include "gc/parallel/gcTaskManager.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
@ -60,7 +59,7 @@
HeapWord* PSScavenge::_to_space_top_before_gc = NULL;
int PSScavenge::_consecutive_skipped_scavenges = 0;
ReferenceProcessor* PSScavenge::_ref_processor = NULL;
CardTableExtension* PSScavenge::_card_table = NULL;
PSCardTable* PSScavenge::_card_table = NULL;
bool PSScavenge::_survivor_overflow = false;
uint PSScavenge::_tenuring_threshold = 0;
HeapWord* PSScavenge::_young_generation_boundary = NULL;
@ -322,7 +321,7 @@ bool PSScavenge::invoke_no_policy() {
// Verify no unmarked old->young roots
if (VerifyRememberedSets) {
CardTableExtension::verify_all_young_refs_imprecise();
heap->card_table()->verify_all_young_refs_imprecise();
}
assert(young_gen->to_space()->is_empty(),
@ -617,8 +616,8 @@ bool PSScavenge::invoke_no_policy() {
if (VerifyRememberedSets) {
// Precise verification will give false positives. Until this is fixed,
// use imprecise verification.
// CardTableExtension::verify_all_young_refs_precise();
CardTableExtension::verify_all_young_refs_imprecise();
// heap->card_table()->verify_all_young_refs_precise();
heap->card_table()->verify_all_young_refs_imprecise();
}
if (log_is_enabled(Debug, gc, heap, exit)) {
@ -778,7 +777,7 @@ void PSScavenge::initialize() {
NULL); // header provides liveness info
// Cache the cardtable
_card_table = barrier_set_cast<CardTableExtension>(heap->barrier_set());
_card_table = heap->card_table();
_counters = new CollectorCounters("PSScavenge", 0);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,7 +25,7 @@
#ifndef SHARE_VM_GC_PARALLEL_PSSCAVENGE_HPP
#define SHARE_VM_GC_PARALLEL_PSSCAVENGE_HPP
#include "gc/parallel/cardTableExtension.hpp"
#include "gc/parallel/psCardTable.hpp"
#include "gc/parallel/psVirtualspace.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/gcTrace.hpp"
@ -67,7 +67,7 @@ class PSScavenge: AllStatic {
// Flags/counters
static ReferenceProcessor* _ref_processor; // Reference processor for scavenging.
static PSIsAliveClosure _is_alive_closure; // Closure used for reference processing
static CardTableExtension* _card_table; // We cache the card table for fast access.
static PSCardTable* _card_table; // We cache the card table for fast access.
static bool _survivor_overflow; // Overflow this collection
static uint _tenuring_threshold; // tenuring threshold for next scavenge
static elapsedTimer _accumulated_time; // total time spent on scavenge
@ -89,7 +89,7 @@ class PSScavenge: AllStatic {
static inline void save_to_space_top_before_gc();
// Private accessors
static CardTableExtension* const card_table() { assert(_card_table != NULL, "Sanity"); return _card_table; }
static PSCardTable* const card_table() { assert(_card_table != NULL, "Sanity"); return _card_table; }
static const ParallelScavengeTracer* gc_tracer() { return &_gc_tracer; }
public:

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,7 +25,6 @@
#ifndef SHARE_VM_GC_PARALLEL_PSSCAVENGE_INLINE_HPP
#define SHARE_VM_GC_PARALLEL_PSSCAVENGE_INLINE_HPP
#include "gc/parallel/cardTableExtension.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psPromotionManager.inline.hpp"
#include "gc/parallel/psScavenge.hpp"

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,9 +26,9 @@
#include "aot/aotLoader.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc/parallel/cardTableExtension.hpp"
#include "gc/parallel/gcTaskManager.hpp"
#include "gc/parallel/psMarkSweep.hpp"
#include "gc/parallel/psCardTable.hpp"
#include "gc/parallel/psPromotionManager.hpp"
#include "gc/parallel/psPromotionManager.inline.hpp"
#include "gc/parallel/psScavenge.inline.hpp"
@ -176,8 +176,7 @@ void OldToYoungRootsTask::do_it(GCTaskManager* manager, uint which) {
{
PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(which);
CardTableExtension* card_table =
barrier_set_cast<CardTableExtension>(ParallelScavengeHeap::heap()->barrier_set());
PSCardTable* card_table = ParallelScavengeHeap::heap()->card_table();
card_table->scavenge_contents_parallel(_old_gen->start_array(),
_old_gen->object_space(),

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -148,7 +148,7 @@ class StealTask : public GCTask {
// will be covered. In this example if 4 tasks have been created to cover
// all the stripes and there are only 3 threads, one of the threads will
// get the tasks with the 4th stripe. However, there is a dependence in
// CardTableExtension::scavenge_contents_parallel() on the number
// PSCardTable::scavenge_contents_parallel() on the number
// of tasks created. In scavenge_contents_parallel the distance
// to the next stripe is calculated based on the number of tasks.
// If the stripe width is ssize, a task's next stripe is at

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -64,7 +64,7 @@ void PSYoungGen::initialize_work() {
MemRegion cmr((HeapWord*)virtual_space()->low(),
(HeapWord*)virtual_space()->high());
ParallelScavengeHeap::heap()->barrier_set()->resize_covered_region(cmr);
ParallelScavengeHeap::heap()->card_table()->resize_covered_region(cmr);
if (ZapUnusedHeapArea) {
// Mangle newly committed space immediately because it
@ -870,7 +870,7 @@ void PSYoungGen::post_resize() {
MemRegion cmr((HeapWord*)virtual_space()->low(),
(HeapWord*)virtual_space()->high());
ParallelScavengeHeap::heap()->barrier_set()->resize_covered_region(cmr);
ParallelScavengeHeap::heap()->card_table()->resize_covered_region(cmr);
space_invariants();
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -189,7 +189,7 @@ DefNewGeneration::DefNewGeneration(ReservedSpace rs,
(HeapWord*)_virtual_space.high());
GenCollectedHeap* gch = GenCollectedHeap::heap();
gch->barrier_set()->resize_covered_region(cmr);
gch->rem_set()->resize_covered_region(cmr);
_eden_space = new ContiguousSpace();
_from_space = new ContiguousSpace();
@ -454,7 +454,7 @@ void DefNewGeneration::compute_new_size() {
SpaceDecorator::DontMangle);
MemRegion cmr((HeapWord*)_virtual_space.low(),
(HeapWord*)_virtual_space.high());
gch->barrier_set()->resize_covered_region(cmr);
gch->rem_set()->resize_covered_region(cmr);
log_debug(gc, ergo, heap)(
"New generation size " SIZE_FORMAT "K->" SIZE_FORMAT "K [eden=" SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
@ -634,7 +634,7 @@ void DefNewGeneration::collect(bool full,
{
// DefNew needs to run with n_threads == 0, to make sure the serial
// version of the card table scanning code is used.
// See: CardTableModRefBSForCTRS::non_clean_card_iterate_possibly_parallel.
// See: CardTableRS::non_clean_card_iterate_possibly_parallel.
StrongRootsScope srs(0);
gch->young_process_roots(&srs,

View File

@ -123,15 +123,6 @@ protected:
virtual void write_ref_array_work(MemRegion mr) = 0;
public:
// Inform the BarrierSet that the covered heap region that starts
// with "base" has been changed to have the given size (possibly from 0,
// for initialization.)
virtual void resize_covered_region(MemRegion new_region) = 0;
// If the barrier set imposes any alignment restrictions on boundaries
// within the heap, this function tells whether they are met.
virtual bool is_aligned(HeapWord* addr) = 0;
// Print a description of the memory for the barrier set
virtual void print_on(outputStream* st) const = 0;

View File

@ -29,25 +29,31 @@
#if INCLUDE_ALL_GCS
#define FOR_EACH_CONCRETE_INCLUDE_ALL_GC_BARRIER_SET_DO(f) \
f(CardTableExtension) \
f(G1SATBCTLogging)
#else
#define FOR_EACH_CONCRETE_INCLUDE_ALL_GC_BARRIER_SET_DO(f)
#endif
#if INCLUDE_ALL_GCS
#define FOR_EACH_ABSTRACT_INCLUDE_ALL_GC_BARRIER_SET_DO(f) \
f(G1SATBCT)
#else
#define FOR_EACH_ABSTRACT_INCLUDE_ALL_GC_BARRIER_SET_DO(f)
#endif
// Do something for each concrete barrier set part of the build.
#define FOR_EACH_CONCRETE_BARRIER_SET_DO(f) \
f(CardTableForRS) \
f(CardTableModRef) \
FOR_EACH_CONCRETE_INCLUDE_ALL_GC_BARRIER_SET_DO(f)
#define FOR_EACH_ABSTRACT_BARRIER_SET_DO(f) \
f(ModRef) \
FOR_EACH_ABSTRACT_INCLUDE_ALL_GC_BARRIER_SET_DO(f)
// Do something for each known barrier set.
#define FOR_EACH_BARRIER_SET_DO(f) \
f(ModRef) \
f(CardTableModRef) \
f(CardTableForRS) \
f(CardTableExtension) \
f(G1SATBCT) \
f(G1SATBCTLogging)
FOR_EACH_ABSTRACT_BARRIER_SET_DO(f) \
FOR_EACH_CONCRETE_BARRIER_SET_DO(f)
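
A hypothetical consumer of these iteration macros, for illustration only (the DECLARE_* name below is made up; the expansion shape follows from the definitions above):

#define DECLARE_BARRIER_SET_HOOK(name) void init_##name##_barrier_set();
FOR_EACH_CONCRETE_BARRIER_SET_DO(DECLARE_BARRIER_SET_HOOK)
#undef DECLARE_BARRIER_SET_HOOK
// Expands to one declaration per concrete barrier set in the build,
// e.g. CardTableForRS and CardTableModRef, plus the INCLUDE_ALL_GCS ones.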
// To enable runtime-resolution of GC barriers on primitives, please
// define SUPPORT_BARRIER_ON_PRIMITIVES.

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,10 +29,8 @@
#include "gc/shared/modRefBarrierSet.inline.hpp"
#include "gc/shared/cardTableModRefBS.inline.hpp"
#include "gc/shared/cardTableModRefBSForCTRS.hpp"
#if INCLUDE_ALL_GCS
#include "gc/parallel/cardTableExtension.hpp" // Parallel support
#include "gc/g1/g1SATBCardTableModRefBS.inline.hpp" // G1 support
#endif

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -79,7 +79,7 @@ bool CardGeneration::grow_by(size_t bytes) {
heap_word_size(_virtual_space.committed_size());
MemRegion mr(space()->bottom(), new_word_size);
// Expand card table
GenCollectedHeap::heap()->barrier_set()->resize_covered_region(mr);
GenCollectedHeap::heap()->rem_set()->resize_covered_region(mr);
// Expand shared block offset array
_bts->resize(new_word_size);
@ -166,7 +166,7 @@ void CardGeneration::shrink(size_t bytes) {
_bts->resize(new_word_size);
MemRegion mr(space()->bottom(), new_word_size);
// Shrink the card table
GenCollectedHeap::heap()->barrier_set()->resize_covered_region(mr);
GenCollectedHeap::heap()->rem_set()->resize_covered_region(mr);
size_t new_mem_size = _virtual_space.committed_size();
size_t old_mem_size = new_mem_size + size;

View File

@ -0,0 +1,512 @@
/*
* Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/space.inline.hpp"
#include "logging/log.hpp"
#include "memory/virtualspace.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "services/memTracker.hpp"
#include "utilities/align.hpp"
size_t CardTable::compute_byte_map_size() {
assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
"uninitialized, check declaration order");
assert(_page_size != 0, "uninitialized, check declaration order");
const size_t granularity = os::vm_allocation_granularity();
return align_up(_guard_index + 1, MAX2(_page_size, granularity));
}
CardTable::CardTable(MemRegion whole_heap, bool conc_scan) :
_scanned_concurrently(conc_scan),
_whole_heap(whole_heap),
_guard_index(0),
_guard_region(),
_last_valid_index(0),
_page_size(os::vm_page_size()),
_byte_map_size(0),
_covered(NULL),
_committed(NULL),
_cur_covered_regions(0),
_byte_map(NULL),
_byte_map_base(NULL)
{
assert((uintptr_t(_whole_heap.start()) & (card_size - 1)) == 0, "heap must start at card boundary");
assert((uintptr_t(_whole_heap.end()) & (card_size - 1)) == 0, "heap must end at card boundary");
assert(card_size <= 512, "card_size must not exceed 512"); // why?
_covered = new MemRegion[_max_covered_regions];
if (_covered == NULL) {
vm_exit_during_initialization("Could not allocate card table covered region set.");
}
}
CardTable::~CardTable() {
if (_covered) {
delete[] _covered;
_covered = NULL;
}
if (_committed) {
delete[] _committed;
_committed = NULL;
}
}
void CardTable::initialize() {
_guard_index = cards_required(_whole_heap.word_size()) - 1;
_last_valid_index = _guard_index - 1;
_byte_map_size = compute_byte_map_size();
HeapWord* low_bound = _whole_heap.start();
HeapWord* high_bound = _whole_heap.end();
_cur_covered_regions = 0;
_committed = new MemRegion[_max_covered_regions];
if (_committed == NULL) {
vm_exit_during_initialization("Could not allocate card table committed region set.");
}
const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
MAX2(_page_size, (size_t) os::vm_allocation_granularity());
ReservedSpace heap_rs(_byte_map_size, rs_align, false);
MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtGC);
os::trace_page_sizes("Card Table", _guard_index + 1, _guard_index + 1,
_page_size, heap_rs.base(), heap_rs.size());
if (!heap_rs.is_reserved()) {
vm_exit_during_initialization("Could not reserve enough space for the "
"card marking array");
}
// The assembler store_check code will do an unsigned shift of the oop,
// then add it to _byte_map_base, i.e.
//
// _byte_map = _byte_map_base + (uintptr_t(low_bound) >> card_shift)
_byte_map = (jbyte*) heap_rs.base();
_byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");
jbyte* guard_card = &_byte_map[_guard_index];
HeapWord* guard_page = align_down((HeapWord*)guard_card, _page_size);
_guard_region = MemRegion(guard_page, _page_size);
os::commit_memory_or_exit((char*)guard_page, _page_size, _page_size,
!ExecMem, "card table last card");
*guard_card = last_card;
log_trace(gc, barrier)("CardTable::CardTable: ");
log_trace(gc, barrier)(" &_byte_map[0]: " INTPTR_FORMAT " &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
p2i(&_byte_map[0]), p2i(&_byte_map[_last_valid_index]));
log_trace(gc, barrier)(" _byte_map_base: " INTPTR_FORMAT, p2i(_byte_map_base));
}
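// Illustration only, not part of this change: with the biased _byte_map_base set
// up above, mapping a heap address to its card byte is a single shift and index.
// A hypothetical standalone sketch of what byte_for() does, assuming card_shift == 9:
static jbyte* example_card_byte_for(jbyte* byte_map_base, const void* p) {
  // byte_map_base already has (heap_start >> card_shift) subtracted from it,
  // so indexing with the raw shifted address lands on the right card byte.
  return &byte_map_base[uintptr_t(p) >> 9];
}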
int CardTable::find_covering_region_by_base(HeapWord* base) {
int i;
for (i = 0; i < _cur_covered_regions; i++) {
if (_covered[i].start() == base) return i;
if (_covered[i].start() > base) break;
}
// If we didn't find it, create a new one.
assert(_cur_covered_regions < _max_covered_regions,
"too many covered regions");
// Move the ones above up, to maintain sorted order.
for (int j = _cur_covered_regions; j > i; j--) {
_covered[j] = _covered[j-1];
_committed[j] = _committed[j-1];
}
int res = i;
_cur_covered_regions++;
_covered[res].set_start(base);
_covered[res].set_word_size(0);
jbyte* ct_start = byte_for(base);
HeapWord* ct_start_aligned = align_down((HeapWord*)ct_start, _page_size);
_committed[res].set_start(ct_start_aligned);
_committed[res].set_word_size(0);
return res;
}
int CardTable::find_covering_region_containing(HeapWord* addr) {
for (int i = 0; i < _cur_covered_regions; i++) {
if (_covered[i].contains(addr)) {
return i;
}
}
assert(0, "address outside of heap?");
return -1;
}
HeapWord* CardTable::largest_prev_committed_end(int ind) const {
HeapWord* max_end = NULL;
for (int j = 0; j < ind; j++) {
HeapWord* this_end = _committed[j].end();
if (this_end > max_end) max_end = this_end;
}
return max_end;
}
MemRegion CardTable::committed_unique_to_self(int self, MemRegion mr) const {
MemRegion result = mr;
for (int r = 0; r < _cur_covered_regions; r += 1) {
if (r != self) {
result = result.minus(_committed[r]);
}
}
// Never include the guard page.
result = result.minus(_guard_region);
return result;
}
void CardTable::resize_covered_region(MemRegion new_region) {
// We don't change the start of a region, only the end.
assert(_whole_heap.contains(new_region),
"attempt to cover area not in reserved area");
debug_only(verify_guard();)
// collided is true if the expansion would push into another committed region
debug_only(bool collided = false;)
int const ind = find_covering_region_by_base(new_region.start());
MemRegion const old_region = _covered[ind];
assert(old_region.start() == new_region.start(), "just checking");
if (new_region.word_size() != old_region.word_size()) {
// Commit new or uncommit old pages, if necessary.
MemRegion cur_committed = _committed[ind];
// Extend the end of this _committed region
// to cover the end of any lower _committed regions.
// This forms overlapping regions, but never interior regions.
HeapWord* const max_prev_end = largest_prev_committed_end(ind);
if (max_prev_end > cur_committed.end()) {
cur_committed.set_end(max_prev_end);
}
// Align the end up to a page size (starts are already aligned).
HeapWord* new_end = (HeapWord*) byte_after(new_region.last());
HeapWord* new_end_aligned = align_up(new_end, _page_size);
assert(new_end_aligned >= new_end, "align up, but less");
// Check the other regions (excludes "ind") to ensure that
// the new_end_aligned does not intrude onto the committed
// space of another region.
int ri = 0;
for (ri = ind + 1; ri < _cur_covered_regions; ri++) {
if (new_end_aligned > _committed[ri].start()) {
assert(new_end_aligned <= _committed[ri].end(),
"An earlier committed region can't cover a later committed region");
// Any region containing the new end
// should start at or beyond the region found (ind)
// for the new end (committed regions are not expected to
// be proper subsets of other committed regions).
assert(_committed[ri].start() >= _committed[ind].start(),
"New end of committed region is inconsistent");
new_end_aligned = _committed[ri].start();
// new_end_aligned can be equal to the start of its
// committed region (i.e., of "ind") if a second
// region following "ind" also starts at the same location
// as "ind".
assert(new_end_aligned >= _committed[ind].start(),
"New end of committed region is before start");
debug_only(collided = true;)
// Should only collide with 1 region
break;
}
}
#ifdef ASSERT
for (++ri; ri < _cur_covered_regions; ri++) {
assert(!_committed[ri].contains(new_end_aligned),
"New end of committed region is in a second committed region");
}
#endif
// The guard page is always committed and should not be committed over.
// "guarded" is used for assertion checking below and recalls the fact
// that the would-be end of the new committed region would have
// penetrated the guard page.
HeapWord* new_end_for_commit = new_end_aligned;
DEBUG_ONLY(bool guarded = false;)
if (new_end_for_commit > _guard_region.start()) {
new_end_for_commit = _guard_region.start();
DEBUG_ONLY(guarded = true;)
}
if (new_end_for_commit > cur_committed.end()) {
// Must commit new pages.
MemRegion const new_committed =
MemRegion(cur_committed.end(), new_end_for_commit);
assert(!new_committed.is_empty(), "Region should not be empty here");
os::commit_memory_or_exit((char*)new_committed.start(),
new_committed.byte_size(), _page_size,
!ExecMem, "card table expansion");
// Use new_end_aligned (as opposed to new_end_for_commit) because
// the cur_committed region may include the guard region.
} else if (new_end_aligned < cur_committed.end()) {
// Must uncommit pages.
MemRegion const uncommit_region =
committed_unique_to_self(ind, MemRegion(new_end_aligned,
cur_committed.end()));
if (!uncommit_region.is_empty()) {
// It is not safe to uncommit cards if the boundary between
// the generations is moving. A shrink can uncommit cards
// owned by generation A but being used by generation B.
if (!UseAdaptiveGCBoundary) {
if (!os::uncommit_memory((char*)uncommit_region.start(),
uncommit_region.byte_size())) {
assert(false, "Card table contraction failed");
// The call failed so don't change the end of the
// committed region. This is better than taking the
// VM down.
new_end_aligned = _committed[ind].end();
}
} else {
new_end_aligned = _committed[ind].end();
}
}
}
// In any case, we can reset the end of the current committed entry.
_committed[ind].set_end(new_end_aligned);
#ifdef ASSERT
// Check that the last card in the new region is committed according
// to the tables.
bool covered = false;
for (int cr = 0; cr < _cur_covered_regions; cr++) {
if (_committed[cr].contains(new_end - 1)) {
covered = true;
break;
}
}
assert(covered, "Card for end of new region not committed");
#endif
// The default of 0 is not necessarily clean cards.
jbyte* entry;
if (old_region.last() < _whole_heap.start()) {
entry = byte_for(_whole_heap.start());
} else {
entry = byte_after(old_region.last());
}
assert(index_for(new_region.last()) < _guard_index,
"The guard card will be overwritten");
// The commented-out line below would clean only the newly expanded region,
// not the aligned-up expanded region.
// jbyte* const end = byte_after(new_region.last());
jbyte* const end = (jbyte*) new_end_for_commit;
assert((end >= byte_after(new_region.last())) || collided || guarded,
"Expect to be beyond new region unless impacting another region");
// do nothing if we resized downward.
#ifdef ASSERT
for (int ri = 0; ri < _cur_covered_regions; ri++) {
if (ri != ind) {
// The end of the new committed region should not
// be in any existing region unless it matches
// the start of the next region.
assert(!_committed[ri].contains(end) ||
(_committed[ri].start() == (HeapWord*) end),
"Overlapping committed regions");
}
}
#endif
if (entry < end) {
memset(entry, clean_card, pointer_delta(end, entry, sizeof(jbyte)));
}
}
// In any case, the covered size changes.
_covered[ind].set_word_size(new_region.word_size());
log_trace(gc, barrier)("CardTable::resize_covered_region: ");
log_trace(gc, barrier)(" _covered[%d].start(): " INTPTR_FORMAT " _covered[%d].last(): " INTPTR_FORMAT,
ind, p2i(_covered[ind].start()), ind, p2i(_covered[ind].last()));
log_trace(gc, barrier)(" _committed[%d].start(): " INTPTR_FORMAT " _committed[%d].last(): " INTPTR_FORMAT,
ind, p2i(_committed[ind].start()), ind, p2i(_committed[ind].last()));
log_trace(gc, barrier)(" byte_for(start): " INTPTR_FORMAT " byte_for(last): " INTPTR_FORMAT,
p2i(byte_for(_covered[ind].start())), p2i(byte_for(_covered[ind].last())));
log_trace(gc, barrier)(" addr_for(start): " INTPTR_FORMAT " addr_for(last): " INTPTR_FORMAT,
p2i(addr_for((jbyte*) _committed[ind].start())), p2i(addr_for((jbyte*) _committed[ind].last())));
// Touch the last card of the covered region to show that it
// is committed (or SEGV).
debug_only((void) (*byte_for(_covered[ind].last()));)
debug_only(verify_guard();)
}
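// Usage sketch (illustrative): after a generation grows or shrinks, its new
// extent is handed to the remembered set's card table, as the CardGeneration
// change in this commit does:
//   MemRegion mr(space()->bottom(), new_word_size);
//   GenCollectedHeap::heap()->rem_set()->resize_covered_region(mr);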
// Note that these versions are precise! The scanning code has to handle the
// fact that the write barrier may be either precise or imprecise.
void CardTable::dirty_MemRegion(MemRegion mr) {
assert(align_down(mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
assert(align_up (mr.end(), HeapWordSize) == mr.end(), "Unaligned end" );
jbyte* cur = byte_for(mr.start());
jbyte* last = byte_after(mr.last());
while (cur < last) {
*cur = dirty_card;
cur++;
}
}
void CardTable::clear_MemRegion(MemRegion mr) {
// Be conservative: only clean cards entirely contained within the
// region.
jbyte* cur;
if (mr.start() == _whole_heap.start()) {
cur = byte_for(mr.start());
} else {
assert(mr.start() > _whole_heap.start(), "mr is not covered.");
cur = byte_after(mr.start() - 1);
}
jbyte* last = byte_after(mr.last());
memset(cur, clean_card, pointer_delta(last, cur, sizeof(jbyte)));
}
void CardTable::clear(MemRegion mr) {
for (int i = 0; i < _cur_covered_regions; i++) {
MemRegion mri = mr.intersection(_covered[i]);
if (!mri.is_empty()) clear_MemRegion(mri);
}
}
void CardTable::dirty(MemRegion mr) {
jbyte* first = byte_for(mr.start());
jbyte* last = byte_after(mr.last());
memset(first, dirty_card, last-first);
}
// Unlike several other card table methods, dirty_card_iterate()
// iterates over dirty card ranges in increasing address order.
void CardTable::dirty_card_iterate(MemRegion mr, MemRegionClosure* cl) {
for (int i = 0; i < _cur_covered_regions; i++) {
MemRegion mri = mr.intersection(_covered[i]);
if (!mri.is_empty()) {
jbyte *cur_entry, *next_entry, *limit;
for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
cur_entry <= limit;
cur_entry = next_entry) {
next_entry = cur_entry + 1;
if (*cur_entry == dirty_card) {
size_t dirty_cards;
// Accumulate maximal dirty card range, starting at cur_entry
for (dirty_cards = 1;
next_entry <= limit && *next_entry == dirty_card;
dirty_cards++, next_entry++);
MemRegion cur_cards(addr_for(cur_entry),
dirty_cards*card_size_in_words);
cl->do_MemRegion(cur_cards);
}
}
}
}
}
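// Usage sketch (illustration only, not part of this change): summing the words
// covered by dirty cards in a region with a simple MemRegionClosure.
class ExampleCountDirtyWordsClosure : public MemRegionClosure {
  size_t _words;
 public:
  ExampleCountDirtyWordsClosure() : _words(0) {}
  virtual void do_MemRegion(MemRegion mr) { _words += mr.word_size(); }
  size_t words() const { return _words; }
};
// A caller would then do something like:
//   ExampleCountDirtyWordsClosure cl;
//   card_table->dirty_card_iterate(mr, &cl);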
MemRegion CardTable::dirty_card_range_after_reset(MemRegion mr,
bool reset,
int reset_val) {
for (int i = 0; i < _cur_covered_regions; i++) {
MemRegion mri = mr.intersection(_covered[i]);
if (!mri.is_empty()) {
jbyte* cur_entry, *next_entry, *limit;
for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
cur_entry <= limit;
cur_entry = next_entry) {
next_entry = cur_entry + 1;
if (*cur_entry == dirty_card) {
size_t dirty_cards;
// Accumulate maximal dirty card range, starting at cur_entry
for (dirty_cards = 1;
next_entry <= limit && *next_entry == dirty_card;
dirty_cards++, next_entry++);
MemRegion cur_cards(addr_for(cur_entry),
dirty_cards*card_size_in_words);
if (reset) {
for (size_t i = 0; i < dirty_cards; i++) {
cur_entry[i] = reset_val;
}
}
return cur_cards;
}
}
}
}
return MemRegion(mr.end(), mr.end());
}
uintx CardTable::ct_max_alignment_constraint() {
return card_size * os::vm_page_size();
}
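// Worked numbers (illustration, assuming a 4K vm page size): the constraint is
// card_size * vm_page_size = 512 * 4096 = 2M, the amount of heap covered by one
// committed page of card bytes.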
void CardTable::verify_guard() {
// For product build verification
guarantee(_byte_map[_guard_index] == last_card,
"card table guard has been modified");
}
void CardTable::invalidate(MemRegion mr) {
assert(align_down(mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
assert(align_up (mr.end(), HeapWordSize) == mr.end(), "Unaligned end" );
for (int i = 0; i < _cur_covered_regions; i++) {
MemRegion mri = mr.intersection(_covered[i]);
if (!mri.is_empty()) dirty_MemRegion(mri);
}
}
void CardTable::verify() {
verify_guard();
}
#ifndef PRODUCT
void CardTable::verify_region(MemRegion mr,
jbyte val, bool val_equals) {
jbyte* start = byte_for(mr.start());
jbyte* end = byte_for(mr.last());
bool failures = false;
for (jbyte* curr = start; curr <= end; ++curr) {
jbyte curr_val = *curr;
bool failed = (val_equals) ? (curr_val != val) : (curr_val == val);
if (failed) {
if (!failures) {
log_error(gc, verify)("== CT verification failed: [" INTPTR_FORMAT "," INTPTR_FORMAT "]", p2i(start), p2i(end));
log_error(gc, verify)("== %sexpecting value: %d", (val_equals) ? "" : "not ", val);
failures = true;
}
log_error(gc, verify)("== card " PTR_FORMAT " [" PTR_FORMAT "," PTR_FORMAT "], val: %d",
p2i(curr), p2i(addr_for(curr)),
p2i((HeapWord*) (((size_t) addr_for(curr)) + card_size)),
(int) curr_val);
}
}
guarantee(!failures, "there should not have been any failures");
}
void CardTable::verify_not_dirty_region(MemRegion mr) {
verify_region(mr, dirty_card, false /* val_equals */);
}
void CardTable::verify_dirty_region(MemRegion mr) {
verify_region(mr, dirty_card, true /* val_equals */);
}
#endif
void CardTable::print_on(outputStream* st) const {
st->print_cr("Card table byte_map: [" INTPTR_FORMAT "," INTPTR_FORMAT "] _byte_map_base: " INTPTR_FORMAT,
p2i(_byte_map), p2i(_byte_map + _byte_map_size), p2i(_byte_map_base));
}

View File

@ -0,0 +1,266 @@
/*
* Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_SHARED_CARDTABLE_HPP
#define SHARE_VM_GC_SHARED_CARDTABLE_HPP
#include "memory/allocation.hpp"
#include "memory/memRegion.hpp"
#include "oops/oopsHierarchy.hpp"
#include "utilities/align.hpp"
class CardTable: public CHeapObj<mtGC> {
friend class VMStructs;
protected:
// The declaration order of these const fields is important; see the
// constructor before changing.
const bool _scanned_concurrently;
const MemRegion _whole_heap; // the region covered by the card table
size_t _guard_index; // index of very last element in the card
// table; it is set to a guard value
// (last_card) and should never be modified
size_t _last_valid_index; // index of the last valid element
const size_t _page_size; // page size used when mapping _byte_map
size_t _byte_map_size; // in bytes
jbyte* _byte_map; // the card marking array
jbyte* _byte_map_base;
int _cur_covered_regions;
// The covered regions should be in address order.
MemRegion* _covered;
// The committed regions correspond one-to-one to the covered regions.
// They represent the card-table memory that has been committed to service
// the corresponding covered region. It may be that the committed region for
// one covered region corresponds to a larger region because of page-size
// roundings. Thus, a committed region for one covered region may
// actually extend onto the card-table space for the next covered region.
MemRegion* _committed;
// The last card is a guard card, and we commit the page for it so
// we can use the card for verification purposes. We make sure we never
// uncommit the MemRegion for that page.
MemRegion _guard_region;
inline size_t compute_byte_map_size();
// Finds and returns the index of the region, if any, to which the given
// region would be contiguous. If none exists, assigns a new region and
// returns its index. Requires that no more than the maximum number of
// covered regions defined in the constructor are ever in use.
int find_covering_region_by_base(HeapWord* base);
// Same as above, but finds the region containing the given address
// instead of starting at a given base address.
int find_covering_region_containing(HeapWord* addr);
// Returns the largest (rightmost) end of any committed region corresponding
// to a covered region before covered region "ind", or else "NULL" if "ind"
// is the first covered region.
HeapWord* largest_prev_committed_end(int ind) const;
// Returns the part of the region mr that doesn't intersect with
// any committed region other than self. Used to prevent uncommitting
// regions that are also committed by other regions. Also protects
// against uncommitting the guard region.
MemRegion committed_unique_to_self(int self, MemRegion mr) const;
// Some barrier sets create tables whose elements correspond to parts of
// the heap; the CardTableModRefBS is an example. Such barrier sets will
// normally reserve space for such tables, and commit parts of the table
// "covering" parts of the heap that are committed. At most one covered
// region per generation is needed.
static const int _max_covered_regions = 2;
enum CardValues {
clean_card = -1,
// The mask contains zeros in places for all other values.
clean_card_mask = clean_card - 31,
dirty_card = 0,
precleaned_card = 1,
claimed_card = 2,
deferred_card = 4,
last_card = 8,
CT_MR_BS_last_reserved = 16
};
// a word's worth (row) of clean card values
static const intptr_t clean_card_row = (intptr_t)(-1);
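// Worked bit patterns (illustration only): clean_card == -1 == 0xff, so
// clean_card_mask == -1 - 31 == -32 == 0xe0 and its low five bits are zero.
// A value v with (v & clean_card_mask) == clean_card_mask must have its top
// three bits set, which among the values above is only true of clean_card.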
public:
CardTable(MemRegion whole_heap, bool conc_scan);
virtual ~CardTable();
virtual void initialize();
// The kinds of precision a card table may offer.
enum PrecisionStyle {
Precise,
ObjHeadPreciseArray
};
// Tells what style of precision this card table offers.
PrecisionStyle precision() {
return ObjHeadPreciseArray; // Only one supported for now.
}
// *** Barrier set functions.
// Initialization utilities; covered_words is the size of the covered region
// in, um, words.
inline size_t cards_required(size_t covered_words) {
// Add one for a guard card, used to detect errors.
const size_t words = align_up(covered_words, card_size_in_words);
return words / card_size_in_words + 1;
}
// Dirty the bytes corresponding to "mr" (not all of which must be
// covered.)
void dirty_MemRegion(MemRegion mr);
// Clear (to clean_card) the bytes entirely contained within "mr" (not
// all of which must be covered.)
void clear_MemRegion(MemRegion mr);
// Return true if "p" is at the start of a card.
bool is_card_aligned(HeapWord* p) {
jbyte* pcard = byte_for(p);
return (addr_for(pcard) == p);
}
// Mapping from address to card marking array entry
jbyte* byte_for(const void* p) const {
assert(_whole_heap.contains(p),
"Attempt to access p = " PTR_FORMAT " out of bounds of "
" card marking array's _whole_heap = [" PTR_FORMAT "," PTR_FORMAT ")",
p2i(p), p2i(_whole_heap.start()), p2i(_whole_heap.end()));
jbyte* result = &_byte_map_base[uintptr_t(p) >> card_shift];
assert(result >= _byte_map && result < _byte_map + _byte_map_size,
"out of bounds accessor for card marking array");
return result;
}
// The card table byte one after the card marking array
// entry for argument address. Typically used for higher bounds
// for loops iterating through the card table.
jbyte* byte_after(const void* p) const {
return byte_for(p) + 1;
}
virtual void invalidate(MemRegion mr);
void clear(MemRegion mr);
void dirty(MemRegion mr);
// Provide read-only access to the card table array.
const jbyte* byte_for_const(const void* p) const {
return byte_for(p);
}
const jbyte* byte_after_const(const void* p) const {
return byte_after(p);
}
// Mapping from card marking array entry to address of first word
HeapWord* addr_for(const jbyte* p) const {
assert(p >= _byte_map && p < _byte_map + _byte_map_size,
"out of bounds access to card marking array. p: " PTR_FORMAT
" _byte_map: " PTR_FORMAT " _byte_map + _byte_map_size: " PTR_FORMAT,
p2i(p), p2i(_byte_map), p2i(_byte_map + _byte_map_size));
size_t delta = pointer_delta(p, _byte_map_base, sizeof(jbyte));
HeapWord* result = (HeapWord*) (delta << card_shift);
assert(_whole_heap.contains(result),
"Returning result = " PTR_FORMAT " out of bounds of "
" card marking array's _whole_heap = [" PTR_FORMAT "," PTR_FORMAT ")",
p2i(result), p2i(_whole_heap.start()), p2i(_whole_heap.end()));
return result;
}
// Mapping from address to card marking array index.
size_t index_for(void* p) {
assert(_whole_heap.contains(p),
"Attempt to access p = " PTR_FORMAT " out of bounds of "
" card marking array's _whole_heap = [" PTR_FORMAT "," PTR_FORMAT ")",
p2i(p), p2i(_whole_heap.start()), p2i(_whole_heap.end()));
return byte_for(p) - _byte_map;
}
const jbyte* byte_for_index(const size_t card_index) const {
return _byte_map + card_index;
}
// Resize one of the regions covered by the remembered set.
virtual void resize_covered_region(MemRegion new_region);
// *** Card-table-RemSet-specific things.
static uintx ct_max_alignment_constraint();
// Apply closure "cl" to the dirty cards containing some part of
// MemRegion "mr".
void dirty_card_iterate(MemRegion mr, MemRegionClosure* cl);
// Return the MemRegion corresponding to the first maximal run
// of dirty cards lying completely within MemRegion mr.
// If reset is "true", then sets those card table entries to the given
// value.
MemRegion dirty_card_range_after_reset(MemRegion mr, bool reset,
int reset_val);
// Constants
enum SomePublicConstants {
card_shift = 9,
card_size = 1 << card_shift,
card_size_in_words = card_size / sizeof(HeapWord)
};
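// Worked numbers (illustration, assuming 8-byte HeapWords): card_size is
// 1 << 9 = 512 bytes, card_size_in_words is 512 / 8 = 64, and
// cards_required(1M words) is align_up(1M, 64) / 64 + 1 = 16384 + 1 = 16385
// card bytes, the +1 being the guard card.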
static jbyte clean_card_val() { return clean_card; }
static jbyte clean_card_mask_val() { return clean_card_mask; }
static jbyte dirty_card_val() { return dirty_card; }
static jbyte claimed_card_val() { return claimed_card; }
static jbyte precleaned_card_val() { return precleaned_card; }
static jbyte deferred_card_val() { return deferred_card; }
static intptr_t clean_card_row_val() { return clean_card_row; }
// Card marking array base (adjusted for heap low boundary)
// This would be the 0th element of _byte_map, if the heap started at 0x0.
// But since the heap starts at some higher address, this points to somewhere
// before the beginning of the actual _byte_map.
jbyte* byte_map_base() const { return _byte_map_base; }
bool scanned_concurrently() const { return _scanned_concurrently; }
virtual bool is_in_young(oop obj) const = 0;
// Print a description of the memory for the card table
virtual void print_on(outputStream* st) const;
void verify();
void verify_guard();
// val_equals -> it will check that all cards covered by mr equal val
// !val_equals -> it will check that all cards covered by mr do not equal val
void verify_region(MemRegion mr, jbyte val, bool val_equals) PRODUCT_RETURN;
void verify_not_dirty_region(MemRegion mr) PRODUCT_RETURN;
void verify_dirty_region(MemRegion mr) PRODUCT_RETURN;
};
#endif // SHARE_VM_GC_SHARED_CARDTABLE_HPP
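// Illustration only, not part of this change: the address <-> card mappings
// above are inverses at card granularity. A hypothetical round-trip, assuming
// ct points at the heap's CardTable and p is a covered heap address:
//   jbyte*    card  = ct->byte_for(p);        // address   -> card byte
//   size_t    index = ct->index_for(p);       // address   -> card index
//   HeapWord* base  = ct->addr_for(card);     // card byte -> first covered word
//   assert(ct->byte_for_index(index) == card, "same card either way");
//   assert(base <= (HeapWord*)p && (HeapWord*)p < base + CardTable::card_size_in_words,
//          "p lies within its card");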

View File

@ -39,490 +39,38 @@
// enumerate ref fields that have been modified (since the last
// enumeration.)
size_t CardTableModRefBS::compute_byte_map_size()
{
assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
"uninitialized, check declaration order");
assert(_page_size != 0, "uninitialized, check declaration order");
const size_t granularity = os::vm_allocation_granularity();
return align_up(_guard_index + 1, MAX2(_page_size, granularity));
}
CardTableModRefBS::CardTableModRefBS(
MemRegion whole_heap,
CardTable* card_table,
const BarrierSet::FakeRtti& fake_rtti) :
ModRefBarrierSet(fake_rtti.add_tag(BarrierSet::CardTableModRef)),
_whole_heap(whole_heap),
_guard_index(0),
_guard_region(),
_last_valid_index(0),
_page_size(os::vm_page_size()),
_byte_map_size(0),
_covered(NULL),
_committed(NULL),
_cur_covered_regions(0),
_byte_map(NULL),
byte_map_base(NULL),
_defer_initial_card_mark(false)
{
assert((uintptr_t(_whole_heap.start()) & (card_size - 1)) == 0, "heap must start at card boundary");
assert((uintptr_t(_whole_heap.end()) & (card_size - 1)) == 0, "heap must end at card boundary");
_defer_initial_card_mark(false),
_card_table(card_table)
{}
assert(card_size <= 512, "card_size must be less than 512"); // why?
_covered = new MemRegion[_max_covered_regions];
if (_covered == NULL) {
vm_exit_during_initialization("Could not allocate card table covered region set.");
}
}
CardTableModRefBS::CardTableModRefBS(CardTable* card_table) :
ModRefBarrierSet(BarrierSet::FakeRtti(BarrierSet::CardTableModRef)),
_defer_initial_card_mark(false),
_card_table(card_table)
{}
void CardTableModRefBS::initialize() {
initialize_deferred_card_mark_barriers();
_guard_index = cards_required(_whole_heap.word_size()) - 1;
_last_valid_index = _guard_index - 1;
_byte_map_size = compute_byte_map_size();
HeapWord* low_bound = _whole_heap.start();
HeapWord* high_bound = _whole_heap.end();
_cur_covered_regions = 0;
_committed = new MemRegion[_max_covered_regions];
if (_committed == NULL) {
vm_exit_during_initialization("Could not allocate card table committed region set.");
}
const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
MAX2(_page_size, (size_t) os::vm_allocation_granularity());
ReservedSpace heap_rs(_byte_map_size, rs_align, false);
MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtGC);
os::trace_page_sizes("Card Table", _guard_index + 1, _guard_index + 1,
_page_size, heap_rs.base(), heap_rs.size());
if (!heap_rs.is_reserved()) {
vm_exit_during_initialization("Could not reserve enough space for the "
"card marking array");
}
// The assembler store_check code will do an unsigned shift of the oop,
// then add it to byte_map_base, i.e.
//
// _byte_map = byte_map_base + (uintptr_t(low_bound) >> card_shift)
_byte_map = (jbyte*) heap_rs.base();
byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");
jbyte* guard_card = &_byte_map[_guard_index];
uintptr_t guard_page = align_down((uintptr_t)guard_card, _page_size);
_guard_region = MemRegion((HeapWord*)guard_page, _page_size);
os::commit_memory_or_exit((char*)guard_page, _page_size, _page_size,
!ExecMem, "card table last card");
*guard_card = last_card;
log_trace(gc, barrier)("CardTableModRefBS::CardTableModRefBS: ");
log_trace(gc, barrier)(" &_byte_map[0]: " INTPTR_FORMAT " &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
p2i(&_byte_map[0]), p2i(&_byte_map[_last_valid_index]));
log_trace(gc, barrier)(" byte_map_base: " INTPTR_FORMAT, p2i(byte_map_base));
}
CardTableModRefBS::~CardTableModRefBS() {
if (_covered) {
delete[] _covered;
_covered = NULL;
}
if (_committed) {
delete[] _committed;
_committed = NULL;
}
delete _card_table;
}
int CardTableModRefBS::find_covering_region_by_base(HeapWord* base) {
int i;
for (i = 0; i < _cur_covered_regions; i++) {
if (_covered[i].start() == base) return i;
if (_covered[i].start() > base) break;
}
// If we didn't find it, create a new one.
assert(_cur_covered_regions < _max_covered_regions,
"too many covered regions");
// Move the ones above up, to maintain sorted order.
for (int j = _cur_covered_regions; j > i; j--) {
_covered[j] = _covered[j-1];
_committed[j] = _committed[j-1];
}
int res = i;
_cur_covered_regions++;
_covered[res].set_start(base);
_covered[res].set_word_size(0);
jbyte* ct_start = byte_for(base);
uintptr_t ct_start_aligned = align_down((uintptr_t)ct_start, _page_size);
_committed[res].set_start((HeapWord*)ct_start_aligned);
_committed[res].set_word_size(0);
return res;
}
int CardTableModRefBS::find_covering_region_containing(HeapWord* addr) {
for (int i = 0; i < _cur_covered_regions; i++) {
if (_covered[i].contains(addr)) {
return i;
}
}
assert(0, "address outside of heap?");
return -1;
}
HeapWord* CardTableModRefBS::largest_prev_committed_end(int ind) const {
HeapWord* max_end = NULL;
for (int j = 0; j < ind; j++) {
HeapWord* this_end = _committed[j].end();
if (this_end > max_end) max_end = this_end;
}
return max_end;
}
MemRegion CardTableModRefBS::committed_unique_to_self(int self,
MemRegion mr) const {
MemRegion result = mr;
for (int r = 0; r < _cur_covered_regions; r += 1) {
if (r != self) {
result = result.minus(_committed[r]);
}
}
// Never include the guard page.
result = result.minus(_guard_region);
return result;
}
void CardTableModRefBS::resize_covered_region(MemRegion new_region) {
// We don't change the start of a region, only the end.
assert(_whole_heap.contains(new_region),
"attempt to cover area not in reserved area");
debug_only(verify_guard();)
// collided is true if the expansion would push into another committed region
debug_only(bool collided = false;)
int const ind = find_covering_region_by_base(new_region.start());
MemRegion const old_region = _covered[ind];
assert(old_region.start() == new_region.start(), "just checking");
if (new_region.word_size() != old_region.word_size()) {
// Commit new or uncommit old pages, if necessary.
MemRegion cur_committed = _committed[ind];
// Extend the end of this _committed region
// to cover the end of any lower _committed regions.
// This forms overlapping regions, but never interior regions.
HeapWord* const max_prev_end = largest_prev_committed_end(ind);
if (max_prev_end > cur_committed.end()) {
cur_committed.set_end(max_prev_end);
}
// Align the end up to a page size (starts are already aligned).
jbyte* const new_end = byte_after(new_region.last());
HeapWord* new_end_aligned = (HeapWord*) align_up(new_end, _page_size);
assert((void*)new_end_aligned >= (void*) new_end, "align up, but less");
// Check the other regions (excludes "ind") to ensure that
// the new_end_aligned does not intrude onto the committed
// space of another region.
int ri = 0;
for (ri = ind + 1; ri < _cur_covered_regions; ri++) {
if (new_end_aligned > _committed[ri].start()) {
assert(new_end_aligned <= _committed[ri].end(),
"An earlier committed region can't cover a later committed region");
// Any region containing the new end
// should start at or beyond the region found (ind)
// for the new end (committed regions are not expected to
// be proper subsets of other committed regions).
assert(_committed[ri].start() >= _committed[ind].start(),
"New end of committed region is inconsistent");
new_end_aligned = _committed[ri].start();
// new_end_aligned can be equal to the start of its
// committed region (i.e., of "ind") if a second
// region following "ind" also start at the same location
// as "ind".
assert(new_end_aligned >= _committed[ind].start(),
"New end of committed region is before start");
debug_only(collided = true;)
// Should only collide with 1 region
break;
}
}
#ifdef ASSERT
for (++ri; ri < _cur_covered_regions; ri++) {
assert(!_committed[ri].contains(new_end_aligned),
"New end of committed region is in a second committed region");
}
#endif
// The guard page is always committed and should not be committed over.
// "guarded" is used for assertion checking below and recalls the fact
// that the would-be end of the new committed region would have
// penetrated the guard page.
HeapWord* new_end_for_commit = new_end_aligned;
DEBUG_ONLY(bool guarded = false;)
if (new_end_for_commit > _guard_region.start()) {
new_end_for_commit = _guard_region.start();
DEBUG_ONLY(guarded = true;)
}
if (new_end_for_commit > cur_committed.end()) {
// Must commit new pages.
MemRegion const new_committed =
MemRegion(cur_committed.end(), new_end_for_commit);
assert(!new_committed.is_empty(), "Region should not be empty here");
os::commit_memory_or_exit((char*)new_committed.start(),
new_committed.byte_size(), _page_size,
!ExecMem, "card table expansion");
// Use new_end_aligned (as opposed to new_end_for_commit) because
// the cur_committed region may include the guard region.
} else if (new_end_aligned < cur_committed.end()) {
// Must uncommit pages.
MemRegion const uncommit_region =
committed_unique_to_self(ind, MemRegion(new_end_aligned,
cur_committed.end()));
if (!uncommit_region.is_empty()) {
// It is not safe to uncommit cards if the boundary between
// the generations is moving. A shrink can uncommit cards
// owned by generation A but being used by generation B.
if (!UseAdaptiveGCBoundary) {
if (!os::uncommit_memory((char*)uncommit_region.start(),
uncommit_region.byte_size())) {
assert(false, "Card table contraction failed");
// The call failed so don't change the end of the
// committed region. This is better than taking the
// VM down.
new_end_aligned = _committed[ind].end();
}
} else {
new_end_aligned = _committed[ind].end();
}
}
}
// In any case, we can reset the end of the current committed entry.
_committed[ind].set_end(new_end_aligned);
#ifdef ASSERT
// Check that the last card in the new region is committed according
// to the tables.
bool covered = false;
for (int cr = 0; cr < _cur_covered_regions; cr++) {
if (_committed[cr].contains(new_end - 1)) {
covered = true;
break;
}
}
assert(covered, "Card for end of new region not committed");
#endif
// The default of 0 is not necessarily clean cards.
jbyte* entry;
if (old_region.last() < _whole_heap.start()) {
entry = byte_for(_whole_heap.start());
} else {
entry = byte_after(old_region.last());
}
assert(index_for(new_region.last()) < _guard_index,
"The guard card will be overwritten");
// This line commented out cleans the newly expanded region and
// not the aligned up expanded region.
// jbyte* const end = byte_after(new_region.last());
jbyte* const end = (jbyte*) new_end_for_commit;
assert((end >= byte_after(new_region.last())) || collided || guarded,
"Expect to be beyond new region unless impacting another region");
// do nothing if we resized downward.
#ifdef ASSERT
for (int ri = 0; ri < _cur_covered_regions; ri++) {
if (ri != ind) {
// The end of the new committed region should not
// be in any existing region unless it matches
// the start of the next region.
assert(!_committed[ri].contains(end) ||
(_committed[ri].start() == (HeapWord*) end),
"Overlapping committed regions");
}
}
#endif
if (entry < end) {
memset(entry, clean_card, pointer_delta(end, entry, sizeof(jbyte)));
}
}
// In any case, the covered size changes.
_covered[ind].set_word_size(new_region.word_size());
log_trace(gc, barrier)("CardTableModRefBS::resize_covered_region: ");
log_trace(gc, barrier)(" _covered[%d].start(): " INTPTR_FORMAT " _covered[%d].last(): " INTPTR_FORMAT,
ind, p2i(_covered[ind].start()), ind, p2i(_covered[ind].last()));
log_trace(gc, barrier)(" _committed[%d].start(): " INTPTR_FORMAT " _committed[%d].last(): " INTPTR_FORMAT,
ind, p2i(_committed[ind].start()), ind, p2i(_committed[ind].last()));
log_trace(gc, barrier)(" byte_for(start): " INTPTR_FORMAT " byte_for(last): " INTPTR_FORMAT,
p2i(byte_for(_covered[ind].start())), p2i(byte_for(_covered[ind].last())));
log_trace(gc, barrier)(" addr_for(start): " INTPTR_FORMAT " addr_for(last): " INTPTR_FORMAT,
p2i(addr_for((jbyte*) _committed[ind].start())), p2i(addr_for((jbyte*) _committed[ind].last())));
// Touch the last card of the covered region to show that it
// is committed (or SEGV).
debug_only((void) (*byte_for(_covered[ind].last()));)
debug_only(verify_guard();)
}
// Note that these versions are precise! The scanning code has to handle the
// fact that the write barrier may be either precise or imprecise.
void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
assert(align_down(mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
assert(align_up (mr.end(), HeapWordSize) == mr.end(), "Unaligned end" );
jbyte* cur = byte_for(mr.start());
jbyte* last = byte_after(mr.last());
while (cur < last) {
*cur = dirty_card;
cur++;
}
void CardTableModRefBS::write_ref_array_work(MemRegion mr) {
_card_table->dirty_MemRegion(mr);
}
void CardTableModRefBS::invalidate(MemRegion mr) {
assert(align_down(mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
assert(align_up (mr.end(), HeapWordSize) == mr.end(), "Unaligned end" );
for (int i = 0; i < _cur_covered_regions; i++) {
MemRegion mri = mr.intersection(_covered[i]);
if (!mri.is_empty()) dirty_MemRegion(mri);
}
_card_table->invalidate(mr);
}
void CardTableModRefBS::clear_MemRegion(MemRegion mr) {
// Be conservative: only clean cards entirely contained within the
// region.
jbyte* cur;
if (mr.start() == _whole_heap.start()) {
cur = byte_for(mr.start());
} else {
assert(mr.start() > _whole_heap.start(), "mr is not covered.");
cur = byte_after(mr.start() - 1);
}
jbyte* last = byte_after(mr.last());
memset(cur, clean_card, pointer_delta(last, cur, sizeof(jbyte)));
}
void CardTableModRefBS::clear(MemRegion mr) {
for (int i = 0; i < _cur_covered_regions; i++) {
MemRegion mri = mr.intersection(_covered[i]);
if (!mri.is_empty()) clear_MemRegion(mri);
}
}
void CardTableModRefBS::dirty(MemRegion mr) {
jbyte* first = byte_for(mr.start());
jbyte* last = byte_after(mr.last());
memset(first, dirty_card, last-first);
}
// Unlike several other card table methods, dirty_card_iterate()
// iterates over dirty cards ranges in increasing address order.
void CardTableModRefBS::dirty_card_iterate(MemRegion mr,
MemRegionClosure* cl) {
for (int i = 0; i < _cur_covered_regions; i++) {
MemRegion mri = mr.intersection(_covered[i]);
if (!mri.is_empty()) {
jbyte *cur_entry, *next_entry, *limit;
for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
cur_entry <= limit;
cur_entry = next_entry) {
next_entry = cur_entry + 1;
if (*cur_entry == dirty_card) {
size_t dirty_cards;
// Accumulate maximal dirty card range, starting at cur_entry
for (dirty_cards = 1;
next_entry <= limit && *next_entry == dirty_card;
dirty_cards++, next_entry++);
MemRegion cur_cards(addr_for(cur_entry),
dirty_cards*card_size_in_words);
cl->do_MemRegion(cur_cards);
}
}
}
}
}
MemRegion CardTableModRefBS::dirty_card_range_after_reset(MemRegion mr,
bool reset,
int reset_val) {
for (int i = 0; i < _cur_covered_regions; i++) {
MemRegion mri = mr.intersection(_covered[i]);
if (!mri.is_empty()) {
jbyte* cur_entry, *next_entry, *limit;
for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
cur_entry <= limit;
cur_entry = next_entry) {
next_entry = cur_entry + 1;
if (*cur_entry == dirty_card) {
size_t dirty_cards;
// Accumulate maximal dirty card range, starting at cur_entry
for (dirty_cards = 1;
next_entry <= limit && *next_entry == dirty_card;
dirty_cards++, next_entry++);
MemRegion cur_cards(addr_for(cur_entry),
dirty_cards*card_size_in_words);
if (reset) {
for (size_t i = 0; i < dirty_cards; i++) {
cur_entry[i] = reset_val;
}
}
return cur_cards;
}
}
}
}
return MemRegion(mr.end(), mr.end());
}
uintx CardTableModRefBS::ct_max_alignment_constraint() {
return card_size * os::vm_page_size();
}
void CardTableModRefBS::verify_guard() {
// For product build verification
guarantee(_byte_map[_guard_index] == last_card,
"card table guard has been modified");
}
void CardTableModRefBS::verify() {
verify_guard();
}
#ifndef PRODUCT
void CardTableModRefBS::verify_region(MemRegion mr,
jbyte val, bool val_equals) {
jbyte* start = byte_for(mr.start());
jbyte* end = byte_for(mr.last());
bool failures = false;
for (jbyte* curr = start; curr <= end; ++curr) {
jbyte curr_val = *curr;
bool failed = (val_equals) ? (curr_val != val) : (curr_val == val);
if (failed) {
if (!failures) {
log_error(gc, verify)("== CT verification failed: [" INTPTR_FORMAT "," INTPTR_FORMAT "]", p2i(start), p2i(end));
log_error(gc, verify)("== %sexpecting value: %d", (val_equals) ? "" : "not ", val);
failures = true;
}
log_error(gc, verify)("== card " PTR_FORMAT " [" PTR_FORMAT "," PTR_FORMAT "], val: %d",
p2i(curr), p2i(addr_for(curr)),
p2i((HeapWord*) (((size_t) addr_for(curr)) + card_size)),
(int) curr_val);
}
}
guarantee(!failures, "there should not have been any failures");
}
void CardTableModRefBS::verify_not_dirty_region(MemRegion mr) {
verify_region(mr, dirty_card, false /* val_equals */);
}
void CardTableModRefBS::verify_dirty_region(MemRegion mr) {
verify_region(mr, dirty_card, true /* val_equals */);
}
#endif
void CardTableModRefBS::print_on(outputStream* st) const {
st->print_cr("Card table byte_map: [" INTPTR_FORMAT "," INTPTR_FORMAT "] byte_map_base: " INTPTR_FORMAT,
p2i(_byte_map), p2i(_byte_map + _byte_map_size), p2i(byte_map_base));
_card_table->print_on(st);
}
// Helper for ReduceInitialCardMarks. For performance,
@ -573,7 +121,7 @@ void CardTableModRefBS::on_slowpath_allocation_exit(JavaThread* thread, oop new_
}
// If a previous card-mark was deferred, flush it now.
flush_deferred_card_mark_barrier(thread);
if (new_obj->is_typeArray() || is_in_young(new_obj)) {
if (new_obj->is_typeArray() || _card_table->is_in_young(new_obj)) {
// Arrays of non-references don't need a post-barrier.
// The deferred_card_mark region should be empty
// following the flush above.
@ -586,7 +134,7 @@ void CardTableModRefBS::on_slowpath_allocation_exit(JavaThread* thread, oop new_
thread->set_deferred_card_mark(mr);
} else {
// Do the card mark
write_region(mr);
invalidate(mr);
}
}
}
@ -610,7 +158,7 @@ void CardTableModRefBS::flush_deferred_card_mark_barrier(JavaThread* thread) {
{
// Verify that the storage points to a parsable object in heap
DEBUG_ONLY(oop old_obj = oop(deferred.start());)
assert(!is_in_young(old_obj),
assert(!_card_table->is_in_young(old_obj),
"Else should have been filtered in on_slowpath_allocation_exit()");
assert(oopDesc::is_oop(old_obj, true), "Not an oop");
assert(deferred.word_size() == (size_t)(old_obj->size()),
@ -633,3 +181,7 @@ void CardTableModRefBS::on_thread_detach(JavaThread* thread) {
// processing the card-table (or other remembered set).
flush_deferred_card_mark_barrier(thread);
}
bool CardTableModRefBS::card_mark_must_follow_store() const {
return _card_table->scanned_concurrently();
}

View File

@ -28,6 +28,8 @@
#include "gc/shared/modRefBarrierSet.hpp"
#include "utilities/align.hpp"
class CardTable;
// This kind of "BarrierSet" allows a "CollectedHeap" to detect and
// enumerate ref fields that have been modified (since the last
// enumeration.)
@ -45,162 +47,29 @@ class CardTableModRefBS: public ModRefBarrierSet {
friend class VMStructs;
protected:
enum CardValues {
clean_card = -1,
// The mask contains zeros in places for all other values.
clean_card_mask = clean_card - 31,
dirty_card = 0,
precleaned_card = 1,
claimed_card = 2,
deferred_card = 4,
last_card = 8,
CT_MR_BS_last_reserved = 16
};
// Used in support of ReduceInitialCardMarks; only consulted if COMPILER2
// or INCLUDE_JVMCI is being used
bool _defer_initial_card_mark;
bool _defer_initial_card_mark;
CardTable* _card_table;
// a word's worth (row) of clean card values
static const intptr_t clean_card_row = (intptr_t)(-1);
// The declaration order of these const fields is important; see the
// constructor before changing.
const MemRegion _whole_heap; // the region covered by the card table
size_t _guard_index; // index of very last element in the card
// table; it is set to a guard value
// (last_card) and should never be modified
size_t _last_valid_index; // index of the last valid element
const size_t _page_size; // page size used when mapping _byte_map
size_t _byte_map_size; // in bytes
jbyte* _byte_map; // the card marking array
// Some barrier sets create tables whose elements correspond to parts of
// the heap; the CardTableModRefBS is an example. Such barrier sets will
// normally reserve space for such tables, and commit parts of the table
// "covering" parts of the heap that are committed. At most one covered
// region per generation is needed.
static const int _max_covered_regions = 2;
int _cur_covered_regions;
// The covered regions should be in address order.
MemRegion* _covered;
// The committed regions correspond one-to-one to the covered regions.
// They represent the card-table memory that has been committed to service
// the corresponding covered region. It may be that committed region for
// one covered region corresponds to a larger region because of page-size
// roundings. Thus, a committed region for one covered region may
// actually extend onto the card-table space for the next covered region.
MemRegion* _committed;
// The last card is a guard card, and we commit the page for it so
// we can use the card for verification purposes. We make sure we never
// uncommit the MemRegion for that page.
MemRegion _guard_region;
inline size_t compute_byte_map_size();
// Finds and return the index of the region, if any, to which the given
// region would be contiguous. If none exists, assign a new region and
// returns its index. Requires that no more than the maximum number of
// covered regions defined in the constructor are ever in use.
int find_covering_region_by_base(HeapWord* base);
// Same as above, but finds the region containing the given address
// instead of starting at a given base address.
int find_covering_region_containing(HeapWord* addr);
// Resize one of the regions covered by the remembered set.
virtual void resize_covered_region(MemRegion new_region);
// Returns the leftmost end of a committed region corresponding to a
// covered region before covered region "ind", or else "NULL" if "ind" is
// the first covered region.
HeapWord* largest_prev_committed_end(int ind) const;
// Returns the part of the region mr that doesn't intersect with
// any committed region other than self. Used to prevent uncommitting
// regions that are also committed by other regions. Also protects
// against uncommitting the guard region.
MemRegion committed_unique_to_self(int self, MemRegion mr) const;
// Mapping from address to card marking array entry
jbyte* byte_for(const void* p) const {
assert(_whole_heap.contains(p),
"Attempt to access p = " PTR_FORMAT " out of bounds of "
" card marking array's _whole_heap = [" PTR_FORMAT "," PTR_FORMAT ")",
p2i(p), p2i(_whole_heap.start()), p2i(_whole_heap.end()));
jbyte* result = &byte_map_base[uintptr_t(p) >> card_shift];
assert(result >= _byte_map && result < _byte_map + _byte_map_size,
"out of bounds accessor for card marking array");
return result;
}
// The card table byte one after the card marking array
// entry for argument address. Typically used for higher bounds
// for loops iterating through the card table.
jbyte* byte_after(const void* p) const {
return byte_for(p) + 1;
}
// Dirty the bytes corresponding to "mr" (not all of which must be
// covered.)
void dirty_MemRegion(MemRegion mr);
// Clear (to clean_card) the bytes entirely contained within "mr" (not
// all of which must be covered.)
void clear_MemRegion(MemRegion mr);
CardTableModRefBS(CardTable* card_table, const BarrierSet::FakeRtti& fake_rtti);
public:
// Constants
enum SomePublicConstants {
card_shift = 9,
card_size = 1 << card_shift,
card_size_in_words = card_size / sizeof(HeapWord)
};
CardTableModRefBS(CardTable* card_table);
~CardTableModRefBS();
static int clean_card_val() { return clean_card; }
static int clean_card_mask_val() { return clean_card_mask; }
static int dirty_card_val() { return dirty_card; }
static int claimed_card_val() { return claimed_card; }
static int precleaned_card_val() { return precleaned_card; }
static int deferred_card_val() { return deferred_card; }
CardTable* card_table() const { return _card_table; }
virtual void initialize();
// *** Barrier set functions.
// Initialization utilities; covered_words is the size of the covered region
// in, um, words.
inline size_t cards_required(size_t covered_words) {
// Add one for a guard card, used to detect errors.
const size_t words = align_up(covered_words, card_size_in_words);
return words / card_size_in_words + 1;
}
protected:
CardTableModRefBS(MemRegion whole_heap, const BarrierSet::FakeRtti& fake_rtti);
~CardTableModRefBS();
public:
void write_region(MemRegion mr) {
dirty_MemRegion(mr);
invalidate(mr);
}
protected:
void write_ref_array_work(MemRegion mr) {
dirty_MemRegion(mr);
}
void write_ref_array_work(MemRegion mr);
public:
bool is_aligned(HeapWord* addr) {
return is_card_aligned(addr);
}
// *** Card-table-barrier-specific things.
// Record a reference update. Note that these versions are precise!
// The scanning code has to handle the fact that the write barrier may be
// either precise or imprecise. We make non-virtual inline variants of
@ -208,115 +77,7 @@ class CardTableModRefBS: public ModRefBarrierSet {
template <DecoratorSet decorators, typename T>
void write_ref_field_post(T* field, oop newVal);
// These are used by G1, when it uses the card table as a temporary data
// structure for card claiming.
bool is_card_dirty(size_t card_index) {
return _byte_map[card_index] == dirty_card_val();
}
void mark_card_dirty(size_t card_index) {
_byte_map[card_index] = dirty_card_val();
}
bool is_card_clean(size_t card_index) {
return _byte_map[card_index] == clean_card_val();
}
// Card marking array base (adjusted for heap low boundary)
// This would be the 0th element of _byte_map, if the heap started at 0x0.
// But since the heap starts at some higher address, this points to somewhere
// before the beginning of the actual _byte_map.
jbyte* byte_map_base;
// Return true if "p" is at the start of a card.
bool is_card_aligned(HeapWord* p) {
jbyte* pcard = byte_for(p);
return (addr_for(pcard) == p);
}
HeapWord* align_to_card_boundary(HeapWord* p) {
jbyte* pcard = byte_for(p + card_size_in_words - 1);
return addr_for(pcard);
}
// The kinds of precision a CardTableModRefBS may offer.
enum PrecisionStyle {
Precise,
ObjHeadPreciseArray
};
// Tells what style of precision this card table offers.
PrecisionStyle precision() {
return ObjHeadPreciseArray; // Only one supported for now.
}
// ModRefBS functions.
virtual void invalidate(MemRegion mr);
void clear(MemRegion mr);
void dirty(MemRegion mr);
// *** Card-table-RemSet-specific things.
static uintx ct_max_alignment_constraint();
// Apply closure "cl" to the dirty cards containing some part of
// MemRegion "mr".
void dirty_card_iterate(MemRegion mr, MemRegionClosure* cl);
// Return the MemRegion corresponding to the first maximal run
// of dirty cards lying completely within MemRegion mr.
// If reset is "true", then sets those card table entries to the given
// value.
MemRegion dirty_card_range_after_reset(MemRegion mr, bool reset,
int reset_val);
// Provide read-only access to the card table array.
const jbyte* byte_for_const(const void* p) const {
return byte_for(p);
}
const jbyte* byte_after_const(const void* p) const {
return byte_after(p);
}
// Mapping from card marking array entry to address of first word
HeapWord* addr_for(const jbyte* p) const {
assert(p >= _byte_map && p < _byte_map + _byte_map_size,
"out of bounds access to card marking array. p: " PTR_FORMAT
" _byte_map: " PTR_FORMAT " _byte_map + _byte_map_size: " PTR_FORMAT,
p2i(p), p2i(_byte_map), p2i(_byte_map + _byte_map_size));
size_t delta = pointer_delta(p, byte_map_base, sizeof(jbyte));
HeapWord* result = (HeapWord*) (delta << card_shift);
assert(_whole_heap.contains(result),
"Returning result = " PTR_FORMAT " out of bounds of "
" card marking array's _whole_heap = [" PTR_FORMAT "," PTR_FORMAT ")",
p2i(result), p2i(_whole_heap.start()), p2i(_whole_heap.end()));
return result;
}
// Mapping from address to card marking array index.
size_t index_for(void* p) {
assert(_whole_heap.contains(p),
"Attempt to access p = " PTR_FORMAT " out of bounds of "
" card marking array's _whole_heap = [" PTR_FORMAT "," PTR_FORMAT ")",
p2i(p), p2i(_whole_heap.start()), p2i(_whole_heap.end()));
return byte_for(p) - _byte_map;
}
const jbyte* byte_for_index(const size_t card_index) const {
return _byte_map + card_index;
}
// Print a description of the memory for the barrier set
virtual void print_on(outputStream* st) const;
void verify();
void verify_guard();
// val_equals -> it will check that all cards covered by mr equal val
// !val_equals -> it will check that all cards covered by mr do not equal val
void verify_region(MemRegion mr, jbyte val, bool val_equals) PRODUCT_RETURN;
void verify_not_dirty_region(MemRegion mr) PRODUCT_RETURN;
void verify_dirty_region(MemRegion mr) PRODUCT_RETURN;
// ReduceInitialCardMarks
void initialize_deferred_card_mark_barriers();
@ -352,15 +113,15 @@ class CardTableModRefBS: public ModRefBarrierSet {
// barrier until the next slow-path allocation or gc-related safepoint.)
// This interface answers whether a particular barrier type needs the card
// mark to be thus strictly sequenced after the stores.
virtual bool card_mark_must_follow_store() const = 0;
virtual bool is_in_young(oop obj) const = 0;
virtual bool card_mark_must_follow_store() const;
virtual void on_slowpath_allocation_exit(JavaThread* thread, oop new_obj);
virtual void on_thread_detach(JavaThread* thread);
virtual void make_parsable(JavaThread* thread) { flush_deferred_card_mark_barrier(thread); }
virtual void print_on(outputStream* st) const;
template <DecoratorSet decorators, typename BarrierSetT = CardTableModRefBS>
class AccessBarrier: public ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT> {};
};

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,17 +26,18 @@
#define SHARE_VM_GC_SHARED_CARDTABLEMODREFBS_INLINE_HPP
#include "gc/shared/cardTableModRefBS.hpp"
#include "gc/shared/cardTable.hpp"
#include "runtime/orderAccess.inline.hpp"
template <DecoratorSet decorators, typename T>
inline void CardTableModRefBS::write_ref_field_post(T* field, oop newVal) {
volatile jbyte* byte = byte_for(field);
volatile jbyte* byte = _card_table->byte_for(field);
if (UseConcMarkSweepGC) {
// Perform a releasing store if using CMS so that it may
// scan and clear the cards concurrently during pre-cleaning.
OrderAccess::release_store(byte, jbyte(dirty_card));
OrderAccess::release_store(byte, CardTable::dirty_card_val());
} else {
*byte = dirty_card;
*byte = CardTable::dirty_card_val();
}
}
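// [Editorial sketch, not part of this change] A standalone model of why the CMS
// branch above uses a releasing store.  The mutator stores the reference and then
// dirties the card; the concurrent precleaner reads the card and, if it sees it
// dirty, scans the words it covers.  Release/acquire ordering guarantees that a
// precleaner which observes the dirty card also observes the preceding reference
// store.  The names and types below are invented for the illustration.
#include <atomic>
#include <cstdint>

namespace barrier_order_sketch {

uintptr_t           field;       // stands in for the oop field being updated
std::atomic<int8_t> card(-1);    // -1 == clean, 0 == dirty (CardTable convention)

// Mutator side: reference store, then the card-dirtying post barrier.
void write_ref_field_post_model(uintptr_t new_val) {
  field = new_val;                               // the reference store
  card.store(0, std::memory_order_release);      // dirty the card, releasing
}

// Precleaner side: only scan fields whose card was seen dirty.
uintptr_t preclean_model() {
  if (card.load(std::memory_order_acquire) == 0) {
    return field;                                // guaranteed to see new_val
  }
  return 0;
}

}  // namespace barrier_order_sketch

int main() {
  barrier_order_sketch::write_ref_field_post_model(0x2a);
  return barrier_order_sketch::preclean_model() == 0x2a ? 0 : 1;
}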

View File

@ -1,126 +0,0 @@
/*
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/shared/cardTableModRefBS.inline.hpp"
#include "gc/shared/cardTableRS.hpp"
#include "memory/allocation.inline.hpp"
#include "gc/shared/space.inline.hpp"
CardTableModRefBSForCTRS::CardTableModRefBSForCTRS(MemRegion whole_heap) :
CardTableModRefBS(
whole_heap,
BarrierSet::FakeRtti(BarrierSet::CardTableForRS)),
// LNC functionality
_lowest_non_clean(NULL),
_lowest_non_clean_chunk_size(NULL),
_lowest_non_clean_base_chunk_index(NULL),
_last_LNC_resizing_collection(NULL)
{ }
void CardTableModRefBSForCTRS::initialize() {
CardTableModRefBS::initialize();
_lowest_non_clean =
NEW_C_HEAP_ARRAY(CardArr, _max_covered_regions, mtGC);
_lowest_non_clean_chunk_size =
NEW_C_HEAP_ARRAY(size_t, _max_covered_regions, mtGC);
_lowest_non_clean_base_chunk_index =
NEW_C_HEAP_ARRAY(uintptr_t, _max_covered_regions, mtGC);
_last_LNC_resizing_collection =
NEW_C_HEAP_ARRAY(int, _max_covered_regions, mtGC);
if (_lowest_non_clean == NULL
|| _lowest_non_clean_chunk_size == NULL
|| _lowest_non_clean_base_chunk_index == NULL
|| _last_LNC_resizing_collection == NULL)
vm_exit_during_initialization("couldn't allocate an LNC array.");
for (int i = 0; i < _max_covered_regions; i++) {
_lowest_non_clean[i] = NULL;
_lowest_non_clean_chunk_size[i] = 0;
_last_LNC_resizing_collection[i] = -1;
}
}
CardTableModRefBSForCTRS::~CardTableModRefBSForCTRS() {
if (_lowest_non_clean) {
FREE_C_HEAP_ARRAY(CardArr, _lowest_non_clean);
_lowest_non_clean = NULL;
}
if (_lowest_non_clean_chunk_size) {
FREE_C_HEAP_ARRAY(size_t, _lowest_non_clean_chunk_size);
_lowest_non_clean_chunk_size = NULL;
}
if (_lowest_non_clean_base_chunk_index) {
FREE_C_HEAP_ARRAY(uintptr_t, _lowest_non_clean_base_chunk_index);
_lowest_non_clean_base_chunk_index = NULL;
}
if (_last_LNC_resizing_collection) {
FREE_C_HEAP_ARRAY(int, _last_LNC_resizing_collection);
_last_LNC_resizing_collection = NULL;
}
}
bool CardTableModRefBSForCTRS::card_will_be_scanned(jbyte cv) {
return
card_is_dirty_wrt_gen_iter(cv) ||
_rs->is_prev_nonclean_card_val(cv);
}
bool CardTableModRefBSForCTRS::card_may_have_been_dirty(jbyte cv) {
return
cv != clean_card &&
(card_is_dirty_wrt_gen_iter(cv) ||
CardTableRS::youngergen_may_have_been_dirty(cv));
}
void CardTableModRefBSForCTRS::non_clean_card_iterate_possibly_parallel(
Space* sp,
MemRegion mr,
OopsInGenClosure* cl,
CardTableRS* ct,
uint n_threads)
{
if (!mr.is_empty()) {
if (n_threads > 0) {
#if INCLUDE_ALL_GCS
non_clean_card_iterate_parallel_work(sp, mr, cl, ct, n_threads);
#else // INCLUDE_ALL_GCS
fatal("Parallel gc not supported here.");
#endif // INCLUDE_ALL_GCS
} else {
// clear_cl finds contiguous dirty ranges of cards to process and clear.
// This is the single-threaded version used by DefNew.
const bool parallel = false;
DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(), cl->gen_boundary(), parallel);
ClearNoncleanCardWrapper clear_cl(dcto_cl, ct, parallel);
clear_cl.do_MemRegion(mr);
}
}
}
bool CardTableModRefBSForCTRS::is_in_young(oop obj) const {
return GenCollectedHeap::heap()->is_in_young(obj);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -75,41 +75,6 @@ void CLDRemSet::clear_mod_union() {
}
CardTableRS::CardTableRS(MemRegion whole_heap) :
_bs(NULL),
_cur_youngergen_card_val(youngergenP1_card)
{
_ct_bs = new CardTableModRefBSForCTRS(whole_heap);
_ct_bs->initialize();
set_bs(_ct_bs);
// max_gens is really GenCollectedHeap::heap()->gen_policy()->number_of_generations()
// (which is always 2, young & old), but GenCollectedHeap has not been initialized yet.
uint max_gens = 2;
_last_cur_val_in_gen = NEW_C_HEAP_ARRAY3(jbyte, max_gens + 1,
mtGC, CURRENT_PC, AllocFailStrategy::RETURN_NULL);
if (_last_cur_val_in_gen == NULL) {
vm_exit_during_initialization("Could not create last_cur_val_in_gen array.");
}
for (uint i = 0; i < max_gens + 1; i++) {
_last_cur_val_in_gen[i] = clean_card_val();
}
_ct_bs->set_CTRS(this);
}
CardTableRS::~CardTableRS() {
if (_ct_bs) {
delete _ct_bs;
_ct_bs = NULL;
}
if (_last_cur_val_in_gen) {
FREE_C_HEAP_ARRAY(jbyte, _last_cur_val_in_gen);
}
}
void CardTableRS::resize_covered_region(MemRegion new_region) {
_ct_bs->resize_covered_region(new_region);
}
jbyte CardTableRS::find_unused_youngergenP_card_value() {
for (jbyte v = youngergenP1_card;
v < cur_youngergen_and_prev_nonclean_card;
@ -247,7 +212,7 @@ void ClearNoncleanCardWrapper::do_MemRegion(MemRegion mr) {
// fast forward through potential continuous whole-word range of clean cards beginning at a word-boundary
if (is_word_aligned(cur_entry)) {
jbyte* cur_row = cur_entry - BytesPerWord;
while (cur_row >= limit && *((intptr_t*)cur_row) == CardTableRS::clean_card_row()) {
while (cur_row >= limit && *((intptr_t*)cur_row) == CardTableRS::clean_card_row_val()) {
cur_row -= BytesPerWord;
}
cur_entry = cur_row + BytesPerWord;
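// [Editorial sketch, not part of this change] The fast-forward above compares a
// whole machine word of card bytes against clean_card_row_val(), skipping
// BytesPerWord clean cards per iteration.  Because clean_card_val() is -1 (all
// bits set), a word made up solely of clean cards is simply (intptr_t)-1.  The
// standalone check below illustrates that trick; it is not the HotSpot code.
#include <cstdint>
#include <cstring>

namespace clean_row_sketch {

const signed char clean_card     = -1;
const intptr_t    clean_card_row = (intptr_t)-1;  // a word's worth of clean cards

// True if the sizeof(intptr_t) cards starting at 'row' are all clean.
bool row_is_clean(const signed char* row) {
  intptr_t word;
  std::memcpy(&word, row, sizeof(word));          // avoid strict-aliasing issues
  return word == clean_card_row;
}

}  // namespace clean_row_sketch

int main() {
  signed char row[sizeof(intptr_t)];
  for (size_t i = 0; i < sizeof(row); i++) row[i] = clean_row_sketch::clean_card;
  return clean_row_sketch::row_is_clean(row) ? 0 : 1;
}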
@ -283,7 +248,7 @@ void ClearNoncleanCardWrapper::do_MemRegion(MemRegion mr) {
// cur-younger-gen ==> cur_younger_gen
// cur_youngergen_and_prev_nonclean_card ==> no change.
void CardTableRS::write_ref_field_gc_par(void* field, oop new_val) {
volatile jbyte* entry = _ct_bs->byte_for(field);
volatile jbyte* entry = byte_for(field);
do {
jbyte entry_val = *entry;
// We put this first because it's probably the most common case.
@ -341,7 +306,7 @@ void CardTableRS::younger_refs_in_space_iterate(Space* sp,
ShouldNotReachHere();
}
#endif
_ct_bs->non_clean_card_iterate_possibly_parallel(sp, urasm, cl, this, n_threads);
non_clean_card_iterate_possibly_parallel(sp, urasm, cl, this, n_threads);
}
void CardTableRS::clear_into_younger(Generation* old_gen) {
@ -642,5 +607,115 @@ void CardTableRS::verify() {
// generational heaps.
VerifyCTGenClosure blk(this);
GenCollectedHeap::heap()->generation_iterate(&blk, false);
_ct_bs->verify();
CardTable::verify();
}
CardTableRS::CardTableRS(MemRegion whole_heap) :
CardTable(whole_heap, /* scanned concurrently */ UseConcMarkSweepGC && CMSPrecleaningEnabled),
_cur_youngergen_card_val(youngergenP1_card),
// LNC functionality
_lowest_non_clean(NULL),
_lowest_non_clean_chunk_size(NULL),
_lowest_non_clean_base_chunk_index(NULL),
_last_LNC_resizing_collection(NULL)
{
// max_gens is really GenCollectedHeap::heap()->gen_policy()->number_of_generations()
// (which is always 2, young & old), but GenCollectedHeap has not been initialized yet.
uint max_gens = 2;
_last_cur_val_in_gen = NEW_C_HEAP_ARRAY3(jbyte, max_gens + 1,
mtGC, CURRENT_PC, AllocFailStrategy::RETURN_NULL);
if (_last_cur_val_in_gen == NULL) {
vm_exit_during_initialization("Could not create last_cur_val_in_gen array.");
}
for (uint i = 0; i < max_gens + 1; i++) {
_last_cur_val_in_gen[i] = clean_card_val();
}
}
CardTableRS::~CardTableRS() {
if (_last_cur_val_in_gen) {
FREE_C_HEAP_ARRAY(jbyte, _last_cur_val_in_gen);
_last_cur_val_in_gen = NULL;
}
if (_lowest_non_clean) {
FREE_C_HEAP_ARRAY(CardArr, _lowest_non_clean);
_lowest_non_clean = NULL;
}
if (_lowest_non_clean_chunk_size) {
FREE_C_HEAP_ARRAY(size_t, _lowest_non_clean_chunk_size);
_lowest_non_clean_chunk_size = NULL;
}
if (_lowest_non_clean_base_chunk_index) {
FREE_C_HEAP_ARRAY(uintptr_t, _lowest_non_clean_base_chunk_index);
_lowest_non_clean_base_chunk_index = NULL;
}
if (_last_LNC_resizing_collection) {
FREE_C_HEAP_ARRAY(int, _last_LNC_resizing_collection);
_last_LNC_resizing_collection = NULL;
}
}
void CardTableRS::initialize() {
CardTable::initialize();
_lowest_non_clean =
NEW_C_HEAP_ARRAY(CardArr, _max_covered_regions, mtGC);
_lowest_non_clean_chunk_size =
NEW_C_HEAP_ARRAY(size_t, _max_covered_regions, mtGC);
_lowest_non_clean_base_chunk_index =
NEW_C_HEAP_ARRAY(uintptr_t, _max_covered_regions, mtGC);
_last_LNC_resizing_collection =
NEW_C_HEAP_ARRAY(int, _max_covered_regions, mtGC);
if (_lowest_non_clean == NULL
|| _lowest_non_clean_chunk_size == NULL
|| _lowest_non_clean_base_chunk_index == NULL
|| _last_LNC_resizing_collection == NULL)
vm_exit_during_initialization("couldn't allocate an LNC array.");
for (int i = 0; i < _max_covered_regions; i++) {
_lowest_non_clean[i] = NULL;
_lowest_non_clean_chunk_size[i] = 0;
_last_LNC_resizing_collection[i] = -1;
}
}
bool CardTableRS::card_will_be_scanned(jbyte cv) {
return card_is_dirty_wrt_gen_iter(cv) || is_prev_nonclean_card_val(cv);
}
bool CardTableRS::card_may_have_been_dirty(jbyte cv) {
return
cv != clean_card &&
(card_is_dirty_wrt_gen_iter(cv) ||
CardTableRS::youngergen_may_have_been_dirty(cv));
}
void CardTableRS::non_clean_card_iterate_possibly_parallel(
Space* sp,
MemRegion mr,
OopsInGenClosure* cl,
CardTableRS* ct,
uint n_threads)
{
if (!mr.is_empty()) {
if (n_threads > 0) {
#if INCLUDE_ALL_GCS
non_clean_card_iterate_parallel_work(sp, mr, cl, ct, n_threads);
#else // INCLUDE_ALL_GCS
fatal("Parallel gc not supported here.");
#endif // INCLUDE_ALL_GCS
} else {
// clear_cl finds contiguous dirty ranges of cards to process and clear.
// This is the single-threaded version used by DefNew.
const bool parallel = false;
DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(), cl->gen_boundary(), parallel);
ClearNoncleanCardWrapper clear_cl(dcto_cl, ct, parallel);
clear_cl.do_MemRegion(mr);
}
}
}
bool CardTableRS::is_in_young(oop obj) const {
return GenCollectedHeap::heap()->is_in_young(obj);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,9 +25,11 @@
#ifndef SHARE_VM_GC_SHARED_CARDTABLERS_HPP
#define SHARE_VM_GC_SHARED_CARDTABLERS_HPP
#include "gc/shared/cardTableModRefBSForCTRS.hpp"
#include "gc/shared/cardTable.hpp"
#include "memory/memRegion.hpp"
#include "oops/oop.hpp"
class DirtyCardToOopClosure;
class Generation;
class Space;
class OopsInGenClosure;
@ -46,44 +48,28 @@ class CLDRemSet {
// This RemSet uses a card table both as shared data structure
// for a mod ref barrier set and for the rem set information.
class CardTableRS: public CHeapObj<mtGC> {
class CardTableRS: public CardTable {
friend class VMStructs;
// Below are private classes used in impl.
friend class VerifyCTSpaceClosure;
friend class ClearNoncleanCardWrapper;
static jbyte clean_card_val() {
return CardTableModRefBSForCTRS::clean_card;
}
static intptr_t clean_card_row() {
return CardTableModRefBSForCTRS::clean_card_row;
}
static bool
card_is_dirty_wrt_gen_iter(jbyte cv) {
return CardTableModRefBSForCTRS::card_is_dirty_wrt_gen_iter(cv);
}
CLDRemSet _cld_rem_set;
BarrierSet* _bs;
CardTableModRefBSForCTRS* _ct_bs;
void verify_space(Space* s, HeapWord* gen_start);
enum ExtendedCardValue {
youngergen_card = CardTableModRefBSForCTRS::CT_MR_BS_last_reserved + 1,
youngergen_card = CT_MR_BS_last_reserved + 1,
// These are for parallel collection.
// There are three P (parallel) youngergen card values. In general, this
// needs to be more than the number of generations (including the perm
// gen) that might have younger_refs_do invoked on them separately. So
// if we add more gens, we have to add more values.
youngergenP1_card = CardTableModRefBSForCTRS::CT_MR_BS_last_reserved + 2,
youngergenP2_card = CardTableModRefBSForCTRS::CT_MR_BS_last_reserved + 3,
youngergenP3_card = CardTableModRefBSForCTRS::CT_MR_BS_last_reserved + 4,
youngergenP1_card = CT_MR_BS_last_reserved + 2,
youngergenP2_card = CT_MR_BS_last_reserved + 3,
youngergenP3_card = CT_MR_BS_last_reserved + 4,
cur_youngergen_and_prev_nonclean_card =
CardTableModRefBSForCTRS::CT_MR_BS_last_reserved + 5
CT_MR_BS_last_reserved + 5
};
// An array that contains, for each generation, the card table value last
@ -116,16 +102,8 @@ public:
CardTableRS(MemRegion whole_heap);
~CardTableRS();
// Return the barrier set associated with "this."
BarrierSet* bs() { return _bs; }
// Set the barrier set.
void set_bs(BarrierSet* bs) { _bs = bs; }
CLDRemSet* cld_rem_set() { return &_cld_rem_set; }
CardTableModRefBSForCTRS* ct_bs() { return _ct_bs; }
void younger_refs_in_space_iterate(Space* sp, OopsInGenClosure* cl, uint n_threads);
// Override.
@ -137,7 +115,7 @@ public:
void younger_refs_iterate(Generation* g, OopsInGenClosure* blk, uint n_threads);
void inline_write_ref_field_gc(void* field, oop new_val) {
jbyte* byte = _ct_bs->byte_for(field);
jbyte* byte = byte_for(field);
*byte = youngergen_card;
}
void write_ref_field_gc_work(void* field, oop new_val) {
@ -149,30 +127,17 @@ public:
// a younger card in the current collection.
virtual void write_ref_field_gc_par(void* field, oop new_val);
void resize_covered_region(MemRegion new_region);
bool is_aligned(HeapWord* addr) {
return _ct_bs->is_card_aligned(addr);
return is_card_aligned(addr);
}
void verify();
void initialize();
void clear(MemRegion mr) { _ct_bs->clear(mr); }
void clear_into_younger(Generation* old_gen);
void invalidate(MemRegion mr) {
_ct_bs->invalidate(mr);
}
void invalidate_or_clear(Generation* old_gen);
static uintx ct_max_alignment_constraint() {
return CardTableModRefBSForCTRS::ct_max_alignment_constraint();
}
jbyte* byte_for(void* p) { return _ct_bs->byte_for(p); }
jbyte* byte_after(void* p) { return _ct_bs->byte_after(p); }
HeapWord* addr_for(jbyte* p) { return _ct_bs->addr_for(p); }
bool is_prev_nonclean_card_val(jbyte v) {
return
youngergen_card <= v &&
@ -184,6 +149,94 @@ public:
return cv == CardTableRS::cur_youngergen_and_prev_nonclean_card;
}
// *** Support for parallel card scanning.
// dirty and precleaned are equivalent wrt younger_refs_iter.
static bool card_is_dirty_wrt_gen_iter(jbyte cv) {
return cv == dirty_card || cv == precleaned_card;
}
// Returns "true" iff the value "cv" will cause the card containing it
// to be scanned in the current traversal. May be overridden by
// subtypes.
bool card_will_be_scanned(jbyte cv);
// Returns "true" iff the value "cv" may have represented a dirty card at
// some point.
bool card_may_have_been_dirty(jbyte cv);
// Iterate over the portion of the card-table which covers the given
// region mr in the given space and apply cl to any dirty sub-regions
// of mr. Clears the dirty cards as they are processed.
void non_clean_card_iterate_possibly_parallel(Space* sp, MemRegion mr,
OopsInGenClosure* cl, CardTableRS* ct,
uint n_threads);
// Work method used to implement non_clean_card_iterate_possibly_parallel()
// above in the parallel case.
void non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
OopsInGenClosure* cl, CardTableRS* ct,
uint n_threads);
// This is an array, one element per covered region of the card table.
// Each entry is itself an array, with one element per chunk in the
// covered region. Each entry of these arrays is the lowest non-clean
// card of the corresponding chunk containing part of an object from the
// previous chunk, or else NULL.
typedef jbyte* CardPtr;
typedef CardPtr* CardArr;
CardArr* _lowest_non_clean;
size_t* _lowest_non_clean_chunk_size;
uintptr_t* _lowest_non_clean_base_chunk_index;
volatile int* _last_LNC_resizing_collection;
// Initializes "lowest_non_clean" to point to the array for the region
// covering "sp", and "lowest_non_clean_base_chunk_index" to the chunk
// index corresponding to the first element of that array.
// Ensures that these arrays are of sufficient size, allocating if necessary.
// May be called by several threads concurrently.
void get_LNC_array_for_space(Space* sp,
jbyte**& lowest_non_clean,
uintptr_t& lowest_non_clean_base_chunk_index,
size_t& lowest_non_clean_chunk_size);
// Returns the number of chunks necessary to cover "mr".
size_t chunks_to_cover(MemRegion mr) {
return (size_t)(addr_to_chunk_index(mr.last()) -
addr_to_chunk_index(mr.start()) + 1);
}
// Returns the index of the chunk in a stride which
// covers the given address.
uintptr_t addr_to_chunk_index(const void* addr) {
uintptr_t card = (uintptr_t) byte_for(addr);
return card / ParGCCardsPerStrideChunk;
}
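// [Editorial worked example, not part of this change] Assuming 512-byte cards and
// the default ParGCCardsPerStrideChunk of 256, one chunk spans 256 cards, i.e.
// 128 KiB of heap.  Note that addr_to_chunk_index() divides the card-table
// *address* (byte_for(addr)), so chunk boundaries are fixed by the byte map's
// position rather than by the heap layout.  The numbers below are invented.
//
//   mr spans 384 KiB of heap  ==>  its cards span 768 card-table bytes
//   first_chunk = (uintptr_t)byte_for(mr.start()) / 256
//   last_chunk  = (uintptr_t)byte_for(mr.last())  / 256
//   chunks_to_cover(mr) = last_chunk - first_chunk + 1   // 3 or 4, depending on
//                                                        // how the run falls
//                                                        // against chunk bounds
//
// Parallel workers then claim whole chunks within their stride, and
// _lowest_non_clean records, per chunk, the lowest non-clean card of that chunk
// when it contains part of an object from the previous chunk (else NULL), so
// objects straddling a chunk boundary are processed exactly once.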
// Apply cl, which must either itself apply dcto_cl or be dcto_cl,
// to the cards in the stride (of n_strides) within the given space.
void process_stride(Space* sp,
MemRegion used,
jint stride, int n_strides,
OopsInGenClosure* cl,
CardTableRS* ct,
jbyte** lowest_non_clean,
uintptr_t lowest_non_clean_base_chunk_index,
size_t lowest_non_clean_chunk_size);
// Makes sure that chunk boundaries are handled appropriately, by
// adjusting the min_done of dcto_cl, and by using a special card-table
// value to indicate how min_done should be set.
void process_chunk_boundaries(Space* sp,
DirtyCardToOopClosure* dcto_cl,
MemRegion chunk_mr,
MemRegion used,
jbyte** lowest_non_clean,
uintptr_t lowest_non_clean_base_chunk_index,
size_t lowest_non_clean_chunk_size);
virtual bool is_in_young(oop obj) const;
};
class ClearNoncleanCardWrapper: public MemRegionClosure {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

View File

@ -31,6 +31,7 @@
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "gc/shared/cardTableRS.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/collectorCounters.hpp"
@ -110,7 +111,10 @@ jint GenCollectedHeap::initialize() {
initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
_rem_set = new CardTableRS(reserved_region());
set_barrier_set(rem_set()->bs());
_rem_set->initialize();
CardTableModRefBS *bs = new CardTableModRefBS(_rem_set);
bs->initialize();
set_barrier_set(bs);
ReservedSpace young_rs = heap_rs.first_part(_young_gen_spec->max_size(), false, false);
_young_gen = _young_gen_spec->init(young_rs, rem_set());

View File

@ -47,10 +47,6 @@ public:
virtual void invalidate(MemRegion mr) = 0;
virtual void write_region(MemRegion mr) = 0;
// The caller guarantees that "mr" contains no references. (Perhaps its
// objects have been moved elsewhere.)
virtual void clear(MemRegion mr) = 0;
// The ModRef abstraction introduces pre and post barriers
template <DecoratorSet decorators, typename BarrierSetT>
class AccessBarrier: public BarrierSet::AccessBarrier<decorators, BarrierSetT> {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -49,7 +49,7 @@ HeapWord* DirtyCardToOopClosure::get_actual_top(HeapWord* top,
HeapWord* top_obj) {
if (top_obj != NULL) {
if (_sp->block_is_obj(top_obj)) {
if (_precision == CardTableModRefBS::ObjHeadPreciseArray) {
if (_precision == CardTable::ObjHeadPreciseArray) {
if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
// An arrayOop is starting on the dirty card - since we do exact
// store checks for objArrays we are done.
@ -125,11 +125,11 @@ void DirtyCardToOopClosure::do_MemRegion(MemRegion mr) {
HeapWord* bottom_obj;
HeapWord* top_obj;
assert(_precision == CardTableModRefBS::ObjHeadPreciseArray ||
_precision == CardTableModRefBS::Precise,
assert(_precision == CardTable::ObjHeadPreciseArray ||
_precision == CardTable::Precise,
"Only ones we deal with for now.");
assert(_precision != CardTableModRefBS::ObjHeadPreciseArray ||
assert(_precision != CardTable::ObjHeadPreciseArray ||
_cl->idempotent() || _last_bottom == NULL ||
top <= _last_bottom,
"Not decreasing");
@ -147,7 +147,7 @@ void DirtyCardToOopClosure::do_MemRegion(MemRegion mr) {
top = get_actual_top(top, top_obj);
// If the previous call did some part of this region, don't redo.
if (_precision == CardTableModRefBS::ObjHeadPreciseArray &&
if (_precision == CardTable::ObjHeadPreciseArray &&
_min_done != NULL &&
_min_done < top) {
top = _min_done;
@ -159,7 +159,7 @@ void DirtyCardToOopClosure::do_MemRegion(MemRegion mr) {
bottom = MIN2(bottom, top);
MemRegion extended_mr = MemRegion(bottom, top);
assert(bottom <= top &&
(_precision != CardTableModRefBS::ObjHeadPreciseArray ||
(_precision != CardTable::ObjHeadPreciseArray ||
_min_done == NULL ||
top <= _min_done),
"overlap!");
@ -180,7 +180,7 @@ void DirtyCardToOopClosure::do_MemRegion(MemRegion mr) {
}
DirtyCardToOopClosure* Space::new_dcto_cl(ExtendedOopClosure* cl,
CardTableModRefBS::PrecisionStyle precision,
CardTable::PrecisionStyle precision,
HeapWord* boundary,
bool parallel) {
return new DirtyCardToOopClosure(this, cl, precision, boundary);
@ -189,7 +189,7 @@ DirtyCardToOopClosure* Space::new_dcto_cl(ExtendedOopClosure* cl,
HeapWord* ContiguousSpaceDCTOC::get_actual_top(HeapWord* top,
HeapWord* top_obj) {
if (top_obj != NULL && top_obj < (_sp->toContiguousSpace())->top()) {
if (_precision == CardTableModRefBS::ObjHeadPreciseArray) {
if (_precision == CardTable::ObjHeadPreciseArray) {
if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
// An arrayOop is starting on the dirty card - since we do exact
// store checks for objArrays we are done.
@ -260,7 +260,7 @@ ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
DirtyCardToOopClosure*
ContiguousSpace::new_dcto_cl(ExtendedOopClosure* cl,
CardTableModRefBS::PrecisionStyle precision,
CardTable::PrecisionStyle precision,
HeapWord* boundary,
bool parallel) {
return new ContiguousSpaceDCTOC(this, cl, precision, boundary);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,7 +26,7 @@
#define SHARE_VM_GC_SHARED_SPACE_HPP
#include "gc/shared/blockOffsetTable.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/workgroup.hpp"
#include "memory/allocation.hpp"
#include "memory/iterator.hpp"
@ -181,7 +181,7 @@ class Space: public CHeapObj<mtGC> {
// depending on the type of space in which the closure will
// operate. ResourceArea allocated.
virtual DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
CardTableModRefBS::PrecisionStyle precision,
CardTable::PrecisionStyle precision,
HeapWord* boundary,
bool parallel);
@ -253,7 +253,7 @@ class DirtyCardToOopClosure: public MemRegionClosureRO {
protected:
ExtendedOopClosure* _cl;
Space* _sp;
CardTableModRefBS::PrecisionStyle _precision;
CardTable::PrecisionStyle _precision;
HeapWord* _boundary; // If non-NULL, process only non-NULL oops
// pointing below boundary.
HeapWord* _min_done; // ObjHeadPreciseArray precision requires
@ -282,7 +282,7 @@ protected:
public:
DirtyCardToOopClosure(Space* sp, ExtendedOopClosure* cl,
CardTableModRefBS::PrecisionStyle precision,
CardTable::PrecisionStyle precision,
HeapWord* boundary) :
_sp(sp), _cl(cl), _precision(precision), _boundary(boundary),
_min_done(NULL) {
@ -619,7 +619,7 @@ class ContiguousSpace: public CompactibleSpace {
// Override.
DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
CardTableModRefBS::PrecisionStyle precision,
CardTable::PrecisionStyle precision,
HeapWord* boundary,
bool parallel);
@ -694,7 +694,7 @@ protected:
public:
FilteringDCTOC(Space* sp, ExtendedOopClosure* cl,
CardTableModRefBS::PrecisionStyle precision,
CardTable::PrecisionStyle precision,
HeapWord* boundary) :
DirtyCardToOopClosure(sp, cl, precision, boundary) {}
};
@ -723,7 +723,7 @@ protected:
public:
ContiguousSpaceDCTOC(ContiguousSpace* sp, ExtendedOopClosure* cl,
CardTableModRefBS::PrecisionStyle precision,
CardTable::PrecisionStyle precision,
HeapWord* boundary) :
FilteringDCTOC(sp, cl, precision, boundary)
{}

View File

@ -22,6 +22,7 @@
*/
#include "precompiled.hpp"
#include "ci/ciUtilities.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "code/codeCache.hpp"
#include "code/scopeDesc.hpp"
@ -48,6 +49,7 @@
#include "jvmci/jvmciCodeInstaller.hpp"
#include "jvmci/vmStructs_jvmci.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/shared/cardTable.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/timerTrace.hpp"
@ -205,10 +207,10 @@ void CompilerToVM::Data::initialize(TRAPS) {
BarrierSet* bs = Universe::heap()->barrier_set();
if (bs->is_a(BarrierSet::CardTableModRef)) {
jbyte* base = barrier_set_cast<CardTableModRefBS>(bs)->byte_map_base;
assert(base != 0, "unexpected byte_map_base");
jbyte* base = ci_card_table_address();
assert(base != NULL, "unexpected byte_map_base");
cardtable_start_address = base;
cardtable_shift = CardTableModRefBS::card_shift;
cardtable_shift = CardTable::card_shift;
} else {
// No card mark barriers
cardtable_start_address = 0;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -426,7 +426,7 @@
declare_constant(BitData::null_seen_flag) \
declare_constant(BranchData::not_taken_off_set) \
\
declare_constant_with_value("CardTableModRefBS::dirty_card", CardTableModRefBS::dirty_card_val()) \
declare_constant_with_value("CardTable::dirty_card", CardTable::dirty_card_val()) \
\
declare_constant(CodeInstaller::VERIFIED_ENTRY) \
declare_constant(CodeInstaller::UNVERIFIED_ENTRY) \
@ -653,7 +653,7 @@
static_field(HeapRegion, LogOfHRGrainBytes, int)
#define VM_INT_CONSTANTS_G1(declare_constant, declare_constant_with_value, declare_preprocessor_constant) \
declare_constant_with_value("G1SATBCardTableModRefBS::g1_young_gen", G1SATBCardTableModRefBS::g1_young_card_val())
declare_constant_with_value("G1CardTable::g1_young_gen", G1CardTable::g1_young_card_val())
#endif // INCLUDE_ALL_GCS

View File

@ -23,10 +23,13 @@
*/
#include "precompiled.hpp"
#include "ci/ciUtilities.hpp"
#include "compiler/compileLog.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "memory/resourceArea.hpp"
@ -1562,9 +1565,7 @@ void GraphKit::pre_barrier(bool do_load,
g1_write_barrier_pre(do_load, obj, adr, adr_idx, val, val_type, pre_val, bt);
break;
case BarrierSet::CardTableForRS:
case BarrierSet::CardTableExtension:
case BarrierSet::ModRef:
case BarrierSet::CardTableModRef:
break;
default :
@ -1579,9 +1580,7 @@ bool GraphKit::can_move_pre_barrier() const {
case BarrierSet::G1SATBCTLogging:
return true; // Can move it if no safepoint
case BarrierSet::CardTableForRS:
case BarrierSet::CardTableExtension:
case BarrierSet::ModRef:
case BarrierSet::CardTableModRef:
return true; // There is no pre-barrier
default :
@ -1605,14 +1604,10 @@ void GraphKit::post_barrier(Node* ctl,
g1_write_barrier_post(store, obj, adr, adr_idx, val, bt, use_precise);
break;
case BarrierSet::CardTableForRS:
case BarrierSet::CardTableExtension:
case BarrierSet::CardTableModRef:
write_barrier_post(store, obj, adr, adr_idx, val, use_precise);
break;
case BarrierSet::ModRef:
break;
default :
ShouldNotReachHere();
@ -3827,11 +3822,9 @@ void GraphKit::final_sync(IdealKit& ideal) {
Node* GraphKit::byte_map_base_node() {
// Get base of card map
CardTableModRefBS* ct =
barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust users of this code");
if (ct->byte_map_base != NULL) {
return makecon(TypeRawPtr::make((address)ct->byte_map_base));
jbyte* card_table_base = ci_card_table_address();
if (card_table_base != NULL) {
return makecon(TypeRawPtr::make((address)card_table_base));
} else {
return null();
}
@ -3883,7 +3876,7 @@ void GraphKit::write_barrier_post(Node* oop_store,
// Divide by card size
assert(Universe::heap()->barrier_set()->is_a(BarrierSet::CardTableModRef),
"Only one we handle so far.");
Node* card_offset = __ URShiftX( cast, __ ConI(CardTableModRefBS::card_shift) );
Node* card_offset = __ URShiftX( cast, __ ConI(CardTable::card_shift) );
// Combine card table base and card offset
Node* card_adr = __ AddP(__ top(), byte_map_base_node(), card_offset );
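// [Editorial sketch, not part of this change] The ideal-graph nodes built above
// compute, in effect, the classic card-mark post barrier shown below: shift the
// store address right by card_shift and dirty the corresponding entry of the
// biased card table.  This is a standalone C++ rendition of what the generated
// code does (the real code may use an ordered/releasing store when the collector
// scans cards concurrently); the names are invented for the illustration.
#include <cstdint>

namespace c2_card_mark_sketch {

const int    card_shift     = 9;   // CardTable::card_shift
const int8_t dirty_card_val = 0;   // CardTable::dirty_card_val()

// 'byte_map_base' is the biased card-table base that byte_map_base_node()
// materializes as a constant in the graph.
inline void post_barrier_model(int8_t* byte_map_base, const void* store_addr) {
  int8_t* card_addr = byte_map_base +
      (reinterpret_cast<uintptr_t>(store_addr) >> card_shift);
  *card_addr = dirty_card_val;     // mark the card covering the updated field
}

}  // namespace c2_card_mark_sketch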
@ -4275,8 +4268,8 @@ void GraphKit::g1_write_barrier_post(Node* oop_store,
Node* no_base = __ top();
float likely = PROB_LIKELY(0.999);
float unlikely = PROB_UNLIKELY(0.999);
Node* young_card = __ ConI((jint)G1SATBCardTableModRefBS::g1_young_card_val());
Node* dirty_card = __ ConI((jint)CardTableModRefBS::dirty_card_val());
Node* young_card = __ ConI((jint)G1CardTable::g1_young_card_val());
Node* dirty_card = __ ConI((jint)CardTable::dirty_card_val());
Node* zeroX = __ ConX(0);
// Get the alias_index for raw card-mark memory
@ -4306,7 +4299,7 @@ void GraphKit::g1_write_barrier_post(Node* oop_store,
Node* cast = __ CastPX(__ ctrl(), adr);
// Divide pointer by card size
Node* card_offset = __ URShiftX( cast, __ ConI(CardTableModRefBS::card_shift) );
Node* card_offset = __ URShiftX( cast, __ ConI(CardTable::card_shift) );
// Combine card table base and card offset
Node* card_adr = __ AddP(no_base, byte_map_base_node(), card_offset );
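// [Editorial sketch, not part of this change] A standalone model of the filtering
// sequence the G1 post-barrier graph above implements: skip null and same-region
// stores, skip cards of young regions, then dirty-and-enqueue after a StoreLoad
// fence.  All helpers and constants below (region_index_of, enqueue_dirty_card,
// the concrete card and region-size values) are invented stand-ins; only the
// overall shape follows the barrier being built here.
#include <atomic>
#include <cstdint>

namespace g1_post_barrier_sketch {

const int    card_shift        = 9;    // CardTable::card_shift
const int8_t dirty_card_val    = 0;    // CardTable::dirty_card_val()
const int8_t g1_young_card_val = 32;   // placeholder for G1CardTable::g1_young_card_val()
const int    log_region_size   = 20;   // placeholder: 1 MiB heap regions

inline uintptr_t region_index_of(const void* p) {
  return reinterpret_cast<uintptr_t>(p) >> log_region_size;
}

inline void enqueue_dirty_card(volatile int8_t* /*card*/) {
  // Hand the card to the concurrent refinement threads (not modeled here).
}

inline void g1_post_barrier_model(int8_t* byte_map_base,
                                  const void* field_addr, const void* new_val) {
  if (new_val == nullptr) return;                                       // nothing to remember
  if (region_index_of(field_addr) == region_index_of(new_val)) return;  // same-region store
  volatile int8_t* card = byte_map_base +
      (reinterpret_cast<uintptr_t>(field_addr) >> card_shift);
  if (*card == g1_young_card_val) return;   // young regions are always scanned anyway
  std::atomic_thread_fence(std::memory_order_seq_cst);                  // the StoreLoad membar
  if (*card != dirty_card_val) {
    *card = dirty_card_val;                  // dirty the card and
    enqueue_dirty_card(card);                //   queue it for refinement
  }
}

}  // namespace g1_post_barrier_sketch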
