8198949: Modularize arraycopy stub routine GC barriers

Reviewed-by: rkennke, pliden
Erik Österlund 2018-03-21 14:38:32 +01:00
parent 55233a42f3
commit b7aa9d3975
69 changed files with 3031 additions and 1126 deletions
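In short: the per-platform arraycopy stubs no longer switch on BarrierSet::kind() to emit GC-specific pre/post write barriers. Each stub now asks the active barrier set for its BarrierSetAssembler and dispatches through virtual arraycopy_prologue()/arraycopy_epilogue() hooks, so adding a collector no longer means touching every stub generator. A minimal sketch of the new call-site pattern (modeled on the AArch64 stub changes below; the register names and copy_memory helper stand in for the surrounding stub code):

  // Sketch only, not a complete stub.
  DecoratorSet decorators = ARRAYCOPY_DISJOINT;
  if (dest_uninitialized) decorators |= AS_DEST_NOT_INITIALIZED;
  if (aligned)            decorators |= ARRAYCOPY_ALIGNED;
  // Dispatch through the GC-provided assembler instead of switching on the barrier set kind.
  BarrierSetAssembler* bs = Universe::heap()->barrier_set()->barrier_set_assembler();
  bs->arraycopy_prologue(_masm, decorators, is_oop, d, count, saved_reg);           // e.g. G1 SATB pre-barrier
  copy_memory(aligned, s, d, count, rscratch1, size);                               // the actual element copy
  bs->arraycopy_epilogue(_masm, decorators, is_oop, d, count, rscratch1, RegSet()); // e.g. card marking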

View File

@ -0,0 +1,77 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1BarrierSetAssembler.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "runtime/thread.hpp"
#include "interpreter/interp_masm.hpp"
#define __ masm->
void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register addr, Register count, RegSet saved_regs) {
bool dest_uninitialized = (decorators & AS_DEST_NOT_INITIALIZED) != 0;
if (!dest_uninitialized) {
__ push(saved_regs, sp);
if (count == c_rarg0) {
if (addr == c_rarg1) {
// exactly backwards!!
__ mov(rscratch1, c_rarg0);
__ mov(c_rarg0, c_rarg1);
__ mov(c_rarg1, rscratch1);
} else {
__ mov(c_rarg1, count);
__ mov(c_rarg0, addr);
}
} else {
__ mov(c_rarg0, addr);
__ mov(c_rarg1, count);
}
if (UseCompressedOops) {
__ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_narrow_oop_entry), 2);
} else {
__ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_oop_entry), 2);
}
__ pop(saved_regs, sp);
}
}
void G1BarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register start, Register end, Register scratch, RegSet saved_regs) {
__ push(saved_regs, sp);
// must compute element count unless barrier set interface is changed (other platforms supply count)
assert_different_registers(start, end, scratch);
__ lea(scratch, Address(end, BytesPerHeapOop));
__ sub(scratch, scratch, start); // subtract start to get #bytes
__ lsr(scratch, scratch, LogBytesPerHeapOop); // convert to element count
__ mov(c_rarg0, start);
__ mov(c_rarg1, scratch);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_post_entry), 2);
__ pop(saved_regs, sp);
}

View File

@ -0,0 +1,39 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_AARCH64_GC_G1_G1BARRIERSETASSEMBLER_AARCH64_HPP
#define CPU_AARCH64_GC_G1_G1BARRIERSETASSEMBLER_AARCH64_HPP
#include "asm/macroAssembler.hpp"
#include "gc/shared/modRefBarrierSetAssembler.hpp"
class G1BarrierSetAssembler: public ModRefBarrierSetAssembler {
protected:
void gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register addr, Register count, RegSet saved_regs);
void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register start, Register end, Register tmp, RegSet saved_regs);
};
#endif // CPU_AARCH64_GC_G1_G1BARRIERSETASSEMBLER_AARCH64_HPP

View File

@ -0,0 +1,40 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_AARCH64_GC_SHARED_BARRIERSETASSEMBLER_AARCH64_HPP
#define CPU_AARCH64_GC_SHARED_BARRIERSETASSEMBLER_AARCH64_HPP
#include "asm/macroAssembler.hpp"
#include "memory/allocation.hpp"
#include "oops/access.hpp"
class BarrierSetAssembler: public CHeapObj<mtGC> {
public:
virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
Register addr, Register count, RegSet saved_regs) {}
virtual void arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
Register start, Register end, Register tmp, RegSet saved_regs) {}
};
#endif // CPU_AARCH64_GC_SHARED_BARRIERSETASSEMBLER_AARCH64_HPP
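The two empty virtual hooks above are the whole contract a collector has to satisfy for arraycopy stubs: a GC that needs no arraycopy barriers inherits the no-op defaults, and barrier-emitting GCs override them, as the ModRef/CardTable/G1 assemblers in the following files do. A hypothetical override skeleton, for illustration only (MyGCBarrierSetAssembler is not part of this commit):

  class MyGCBarrierSetAssembler: public BarrierSetAssembler {
  public:
    virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
                                    Register addr, Register count, RegSet saved_regs) {
      if (is_oop) {
        // emit this GC's pre-write barrier for the destination range here
      }
    }
    virtual void arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
                                    Register start, Register end, Register tmp, RegSet saved_regs) {
      if (is_oop) {
        // emit this GC's post-write barrier (e.g. card dirtying) here
      }
    }
  };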

View File

@ -0,0 +1,60 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/cardTableBarrierSetAssembler.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/interp_masm.hpp"
#define __ masm->
void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register start, Register end, Register scratch, RegSet saved_regs) {
BarrierSet* bs = Universe::heap()->barrier_set();
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
CardTable* ct = ctbs->card_table();
assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
Label L_loop;
__ lsr(start, start, CardTable::card_shift);
__ lsr(end, end, CardTable::card_shift);
__ sub(end, end, start); // number of bytes to copy
const Register count = end; // 'end' register contains bytes count now
__ load_byte_map_base(scratch);
__ add(start, start, scratch);
if (UseConcMarkSweepGC) {
__ membar(__ StoreStore);
}
__ bind(L_loop);
__ strb(zr, Address(start, count));
__ subs(count, count, 1);
__ br(Assembler::GE, L_loop);
}

View File

@ -0,0 +1,37 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_AARCH64_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_AARCH64_HPP
#define CPU_AARCH64_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_AARCH64_HPP
#include "asm/macroAssembler.hpp"
#include "gc/shared/modRefBarrierSetAssembler.hpp"
class CardTableBarrierSetAssembler: public ModRefBarrierSetAssembler {
protected:
virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register start, Register end, Register tmp, RegSet saved_regs);
};
#endif // #ifndef CPU_AARCH64_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_AARCH64_HPP

View File

@ -0,0 +1,45 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/shared/modRefBarrierSetAssembler.hpp"
#define __ masm->
void ModRefBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
Register addr, Register count, RegSet saved_regs) {
if (is_oop) {
gen_write_ref_array_pre_barrier(masm, decorators, addr, count, saved_regs);
}
}
void ModRefBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
Register start, Register end, Register tmp,
RegSet saved_regs) {
if (is_oop) {
gen_write_ref_array_post_barrier(masm, decorators, start, end, tmp, saved_regs);
}
}

View File

@ -0,0 +1,45 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_AARCH64_GC_SHARED_MODREFBARRIERSETASSEMBLER_AARCH64_HPP
#define CPU_AARCH64_GC_SHARED_MODREFBARRIERSETASSEMBLER_AARCH64_HPP
#include "asm/macroAssembler.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
class ModRefBarrierSetAssembler: public BarrierSetAssembler {
protected:
virtual void gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register addr, Register count, RegSet saved_regs) {}
virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register start, Register end, Register tmp, RegSet saved_regs) {}
public:
virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
Register addr, Register count, RegSet saved_regs);
virtual void arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
Register start, Register end, Register tmp, RegSet saved_regs);
};
#endif // CPU_AARCH64_GC_SHARED_MODREFBARRIERSETASSEMBLER_AARCH64_HPP

View File

@ -26,8 +26,8 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/instanceOop.hpp"
@ -620,111 +620,6 @@ class StubGenerator: public StubCodeGenerator {
void array_overlap_test(Label& L_no_overlap, Address::sxtw sf) { __ b(L_no_overlap); }
// Generate code for an array write pre barrier
//
// addr - starting address
// count - element count
// tmp - scratch register
// saved_regs - registers to be saved before calling static_write_ref_array_pre
//
// Callers must specify which registers to preserve in saved_regs.
// Clobbers: r0-r18, v0-v7, v16-v31, except saved_regs.
//
void gen_write_ref_array_pre_barrier(Register addr, Register count, bool dest_uninitialized, RegSet saved_regs) {
BarrierSet* bs = Universe::heap()->barrier_set();
switch (bs->kind()) {
case BarrierSet::G1BarrierSet:
// With G1, don't generate the call if we statically know that the target in uninitialized
if (!dest_uninitialized) {
__ push(saved_regs, sp);
if (count == c_rarg0) {
if (addr == c_rarg1) {
// exactly backwards!!
__ mov(rscratch1, c_rarg0);
__ mov(c_rarg0, c_rarg1);
__ mov(c_rarg1, rscratch1);
} else {
__ mov(c_rarg1, count);
__ mov(c_rarg0, addr);
}
} else {
__ mov(c_rarg0, addr);
__ mov(c_rarg1, count);
}
__ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), 2);
__ pop(saved_regs, sp);
break;
case BarrierSet::CardTableBarrierSet:
break;
default:
ShouldNotReachHere();
}
}
}
//
// Generate code for an array write post barrier
//
// Input:
// start - register containing starting address of destination array
// end - register containing ending address of destination array
// scratch - scratch register
// saved_regs - registers to be saved before calling static_write_ref_array_post
//
// The input registers are overwritten.
// The ending address is inclusive.
// Callers must specify which registers to preserve in saved_regs.
// Clobbers: r0-r18, v0-v7, v16-v31, except saved_regs.
void gen_write_ref_array_post_barrier(Register start, Register end, Register scratch, RegSet saved_regs) {
assert_different_registers(start, end, scratch);
BarrierSet* bs = Universe::heap()->barrier_set();
switch (bs->kind()) {
case BarrierSet::G1BarrierSet:
{
__ push(saved_regs, sp);
// must compute element count unless barrier set interface is changed (other platforms supply count)
assert_different_registers(start, end, scratch);
__ lea(scratch, Address(end, BytesPerHeapOop));
__ sub(scratch, scratch, start); // subtract start to get #bytes
__ lsr(scratch, scratch, LogBytesPerHeapOop); // convert to element count
__ mov(c_rarg0, start);
__ mov(c_rarg1, scratch);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), 2);
__ pop(saved_regs, sp);
}
break;
case BarrierSet::CardTableBarrierSet:
{
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
CardTable* ct = ctbs->card_table();
assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
Label L_loop;
__ lsr(start, start, CardTable::card_shift);
__ lsr(end, end, CardTable::card_shift);
__ sub(end, end, start); // number of bytes to copy
const Register count = end; // 'end' register contains bytes count now
__ load_byte_map_base(scratch);
__ add(start, start, scratch);
if (UseConcMarkSweepGC) {
__ membar(__ StoreStore);
}
__ BIND(L_loop);
__ strb(zr, Address(start, count));
__ subs(count, count, 1);
__ br(Assembler::GE, L_loop);
}
break;
default:
ShouldNotReachHere();
}
}
// The inner part of zero_words(). This is the bulk operation,
// zeroing words in blocks, possibly using DC ZVA to do it. The
// caller is responsible for zeroing the last few words.
@ -1456,20 +1351,33 @@ class StubGenerator: public StubCodeGenerator {
BLOCK_COMMENT("Entry:");
}
DecoratorSet decorators = ARRAYCOPY_DISJOINT;
if (dest_uninitialized) {
decorators |= AS_DEST_NOT_INITIALIZED;
}
if (aligned) {
decorators |= ARRAYCOPY_ALIGNED;
}
BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler();
bs->arraycopy_prologue(_masm, decorators, is_oop, d, count, saved_reg);
if (is_oop) {
gen_write_ref_array_pre_barrier(d, count, dest_uninitialized, saved_reg);
// save regs before copy_memory
__ push(RegSet::of(d, count), sp);
}
copy_memory(aligned, s, d, count, rscratch1, size);
if (is_oop) {
__ pop(RegSet::of(d, count), sp);
if (VerifyOops)
verify_oop_array(size, d, count, r16);
__ sub(count, count, 1); // make an inclusive end pointer
__ lea(count, Address(d, count, Address::lsl(exact_log2(size))));
gen_write_ref_array_post_barrier(d, count, rscratch1, RegSet());
}
bs->arraycopy_epilogue(_masm, decorators, is_oop, d, count, rscratch1, RegSet());
__ leave();
__ mov(r0, zr); // return 0
__ ret(lr);
@ -1517,8 +1425,18 @@ class StubGenerator: public StubCodeGenerator {
__ cmp(rscratch1, count, Assembler::LSL, exact_log2(size));
__ br(Assembler::HS, nooverlap_target);
DecoratorSet decorators = 0;
if (dest_uninitialized) {
decorators |= AS_DEST_NOT_INITIALIZED;
}
if (aligned) {
decorators |= ARRAYCOPY_ALIGNED;
}
BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler();
bs->arraycopy_prologue(_masm, decorators, is_oop, d, count, saved_regs);
if (is_oop) {
gen_write_ref_array_pre_barrier(d, count, dest_uninitialized, saved_regs);
// save regs before copy_memory
__ push(RegSet::of(d, count), sp);
}
@ -1529,8 +1447,8 @@ class StubGenerator: public StubCodeGenerator {
verify_oop_array(size, d, count, r16);
__ sub(count, count, 1); // make an inclusive end pointer
__ lea(count, Address(d, count, Address::lsl(exact_log2(size))));
gen_write_ref_array_post_barrier(d, count, rscratch1, RegSet());
}
bs->arraycopy_epilogue(_masm, decorators, is_oop, d, count, rscratch1, RegSet());
__ leave();
__ mov(r0, zr); // return 0
__ ret(lr);
@ -1871,7 +1789,14 @@ class StubGenerator: public StubCodeGenerator {
}
#endif //ASSERT
gen_write_ref_array_pre_barrier(to, count, dest_uninitialized, wb_pre_saved_regs);
DecoratorSet decorators = ARRAYCOPY_CHECKCAST;
bool is_oop = true;
if (dest_uninitialized) {
decorators |= AS_DEST_NOT_INITIALIZED;
}
BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler();
bs->arraycopy_prologue(_masm, decorators, is_oop, to, count, wb_pre_saved_regs);
// save the original count
__ mov(count_save, count);
@ -1915,7 +1840,7 @@ class StubGenerator: public StubCodeGenerator {
__ BIND(L_do_card_marks);
__ add(to, to, -heapOopSize); // make an inclusive end pointer
gen_write_ref_array_post_barrier(start_to, to, rscratch1, wb_post_saved_regs);
bs->arraycopy_epilogue(_masm, decorators, is_oop, start_to, to, rscratch1, wb_post_saved_regs);
__ bind(L_done_pop);
__ pop(RegSet::of(r18, r19, r20, r21), sp);

View File

@ -0,0 +1,123 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BarrierSetAssembler.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/interp_masm.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/thread.hpp"
#include "utilities/macros.hpp"
#define __ masm->
#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif
#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register addr, Register count, int callee_saved_regs) {
bool dest_uninitialized = (decorators & AS_DEST_NOT_INITIALIZED) != 0;
if (!dest_uninitialized) {
assert( addr->encoding() < callee_saved_regs, "addr must be saved");
assert(count->encoding() < callee_saved_regs, "count must be saved");
BLOCK_COMMENT("PreBarrier");
#ifdef AARCH64
callee_saved_regs = align_up(callee_saved_regs, 2);
for (int i = 0; i < callee_saved_regs; i += 2) {
__ raw_push(as_Register(i), as_Register(i+1));
}
#else
RegisterSet saved_regs = RegisterSet(R0, as_Register(callee_saved_regs-1));
__ push(saved_regs | R9ifScratched);
#endif // AARCH64
if (addr != R0) {
assert_different_registers(count, R0);
__ mov(R0, addr);
}
#ifdef AARCH64
__ zero_extend(R1, count, 32); // G1BarrierSet::write_ref_array_pre_*_entry takes size_t
#else
if (count != R1) {
__ mov(R1, count);
}
#endif // AARCH64
if (UseCompressedOops) {
__ call(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_narrow_oop_entry));
} else {
__ call(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_oop_entry));
}
#ifdef AARCH64
for (int i = callee_saved_regs - 2; i >= 0; i -= 2) {
__ raw_pop(as_Register(i), as_Register(i+1));
}
#else
__ pop(saved_regs | R9ifScratched);
#endif // AARCH64
}
}
void G1BarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register addr, Register count, Register tmp) {
BLOCK_COMMENT("G1PostBarrier");
if (addr != R0) {
assert_different_registers(count, R0);
__ mov(R0, addr);
}
#ifdef AARCH64
__ zero_extend(R1, count, 32); // G1BarrierSet::write_ref_array_post_entry takes size_t
#else
if (count != R1) {
__ mov(R1, count);
}
#if R9_IS_SCRATCHED
// Safer to save R9 here since callers may have been written
// assuming R9 survives. This is suboptimal but is not in
// general worth optimizing for the few platforms where R9
// is scratched. Note that the optimization might not be too
// difficult for this particular call site.
__ push(R9);
#endif // !R9_IS_SCRATCHED
#endif // !AARCH64
__ call(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_post_entry));
#ifndef AARCH64
#if R9_IS_SCRATCHED
__ pop(R9);
#endif // !R9_IS_SCRATCHED
#endif // !AARCH64
}

View File

@ -0,0 +1,39 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_ARM_GC_G1_G1BARRIERSETASSEMBLER_ARM_HPP
#define CPU_ARM_GC_G1_G1BARRIERSETASSEMBLER_ARM_HPP
#include "asm/macroAssembler.hpp"
#include "gc/shared/modRefBarrierSetAssembler.hpp"
class G1BarrierSetAssembler: public ModRefBarrierSetAssembler {
protected:
void gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register addr, Register count, int callee_saved_regs);
void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register addr, Register count, Register tmp);
};
#endif // CPU_ARM_GC_G1_G1BARRIERSETASSEMBLER_ARM_HPP

View File

@ -0,0 +1,40 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_ARM_GC_SHARED_BARRIERSETASSEMBLER_ARM_HPP
#define CPU_ARM_GC_SHARED_BARRIERSETASSEMBLER_ARM_HPP
#include "asm/macroAssembler.hpp"
#include "memory/allocation.hpp"
#include "oops/access.hpp"
class BarrierSetAssembler: public CHeapObj<mtGC> {
public:
virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
Register addr, Register count, int callee_saved_regs) {}
virtual void arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
Register addr, Register count, Register tmp) {}
};
#endif // CPU_ARM_GC_SHARED_BARRIERSETASSEMBLER_ARM_HPP

View File

@ -0,0 +1,73 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/cardTableBarrierSetAssembler.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "runtime/globals.hpp"
#define __ masm->
#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif
#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register addr, Register count, Register tmp) {
BLOCK_COMMENT("CardTablePostBarrier");
BarrierSet* bs = Universe::heap()->barrier_set();
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
CardTable* ct = ctbs->card_table();
assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
Label L_cardtable_loop, L_done;
__ cbz_32(count, L_done); // zero count - nothing to do
__ add_ptr_scaled_int32(count, addr, count, LogBytesPerHeapOop);
__ sub(count, count, BytesPerHeapOop); // last addr
__ logical_shift_right(addr, addr, CardTable::card_shift);
__ logical_shift_right(count, count, CardTable::card_shift);
__ sub(count, count, addr); // nb of cards
// warning: Rthread has not been preserved
__ mov_address(tmp, (address) ct->byte_map_base(), symbolic_Relocation::card_table_reference);
__ add(addr,tmp, addr);
Register zero = __ zero_register(tmp);
__ BIND(L_cardtable_loop);
__ strb(zero, Address(addr, 1, post_indexed));
__ subs(count, count, 1);
__ b(L_cardtable_loop, ge);
__ BIND(L_done);
}

View File

@ -0,0 +1,37 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_ARM_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_ARM_HPP
#define CPU_ARM_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_ARM_HPP
#include "asm/macroAssembler.hpp"
#include "gc/shared/modRefBarrierSetAssembler.hpp"
class CardTableBarrierSetAssembler: public ModRefBarrierSetAssembler {
protected:
virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register addr, Register count, Register tmp);
};
#endif // #ifndef CPU_ARM_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_ARM_HPP

View File

@ -0,0 +1,44 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/shared/modRefBarrierSetAssembler.hpp"
#define __ masm->
void ModRefBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
Register addr, Register count, int callee_saved_regs) {
if (is_oop) {
gen_write_ref_array_pre_barrier(masm, decorators, addr, count, callee_saved_regs);
}
}
void ModRefBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
Register addr, Register count, Register tmp) {
if (is_oop) {
gen_write_ref_array_post_barrier(masm, decorators, addr, count, tmp);
}
}

View File

@ -0,0 +1,45 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_ARM_GC_SHARED_MODREFBARRIERSETASSEMBLER_ARM_HPP
#define CPU_ARM_GC_SHARED_MODREFBARRIERSETASSEMBLER_ARM_HPP
#include "asm/macroAssembler.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
class ModRefBarrierSetAssembler: public BarrierSetAssembler {
protected:
virtual void gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register addr, Register count, int callee_saved_regs) {}
virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register addr, Register count, Register tmp) {}
public:
virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
Register addr, Register count, int callee_saved_regs);
virtual void arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
Register addr, Register count, Register tmp);
};
#endif // CPU_ARM_GC_SHARED_MODREFBARRIERSETASSEMBLER_ARM_HPP

View File

@ -24,7 +24,7 @@
#include "precompiled.hpp"
#include "jvm.h"
#include "gc/shared/barrierSet.inline.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.inline.hpp"
#include "gc/shared/collectedHeap.hpp"

View File

@ -25,8 +25,8 @@
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "assembler_arm.inline.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_arm.hpp"
#include "oops/instanceOop.hpp"
@ -2855,148 +2855,6 @@ class StubGenerator: public StubCodeGenerator {
return start;
}
#if INCLUDE_ALL_GCS
//
// Generate pre-write barrier for array.
//
// Input:
// addr - register containing starting address
// count - register containing element count, 32-bit int
// callee_saved_regs -
// the call must preserve this number of registers: R0, R1, ..., R[callee_saved_regs-1]
//
// callee_saved_regs must include addr and count
// Blows all volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR) except for callee_saved_regs.
void gen_write_ref_array_pre_barrier(Register addr, Register count, int callee_saved_regs) {
BarrierSet* bs = Universe::heap()->barrier_set();
switch (bs->kind()) {
case BarrierSet::G1BarrierSet:
{
assert( addr->encoding() < callee_saved_regs, "addr must be saved");
assert(count->encoding() < callee_saved_regs, "count must be saved");
BLOCK_COMMENT("PreBarrier");
#ifdef AARCH64
callee_saved_regs = align_up(callee_saved_regs, 2);
for (int i = 0; i < callee_saved_regs; i += 2) {
__ raw_push(as_Register(i), as_Register(i+1));
}
#else
RegisterSet saved_regs = RegisterSet(R0, as_Register(callee_saved_regs-1));
__ push(saved_regs | R9ifScratched);
#endif // AARCH64
if (addr != R0) {
assert_different_registers(count, R0);
__ mov(R0, addr);
}
#ifdef AARCH64
__ zero_extend(R1, count, 32); // BarrierSet::static_write_ref_array_pre takes size_t
#else
if (count != R1) {
__ mov(R1, count);
}
#endif // AARCH64
__ call(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre));
#ifdef AARCH64
for (int i = callee_saved_regs - 2; i >= 0; i -= 2) {
__ raw_pop(as_Register(i), as_Register(i+1));
}
#else
__ pop(saved_regs | R9ifScratched);
#endif // AARCH64
}
case BarrierSet::CardTableBarrierSet:
break;
default:
ShouldNotReachHere();
}
}
#endif // INCLUDE_ALL_GCS
//
// Generate post-write barrier for array.
//
// Input:
// addr - register containing starting address (can be scratched)
// count - register containing element count, 32-bit int (can be scratched)
// tmp - scratch register
//
// Note: LR can be scratched but might be equal to addr, count or tmp
// Blows all volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR).
void gen_write_ref_array_post_barrier(Register addr, Register count, Register tmp) {
assert_different_registers(addr, count, tmp);
BarrierSet* bs = Universe::heap()->barrier_set();
switch (bs->kind()) {
case BarrierSet::G1BarrierSet:
{
BLOCK_COMMENT("G1PostBarrier");
if (addr != R0) {
assert_different_registers(count, R0);
__ mov(R0, addr);
}
#ifdef AARCH64
__ zero_extend(R1, count, 32); // BarrierSet::static_write_ref_array_post takes size_t
#else
if (count != R1) {
__ mov(R1, count);
}
#if R9_IS_SCRATCHED
// Safer to save R9 here since callers may have been written
// assuming R9 survives. This is suboptimal but is not in
// general worth optimizing for the few platforms where R9
// is scratched. Note that the optimization might not be to
// difficult for this particular call site.
__ push(R9);
#endif
#endif // !AARCH64
__ call(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post));
#ifndef AARCH64
#if R9_IS_SCRATCHED
__ pop(R9);
#endif
#endif // !AARCH64
}
break;
case BarrierSet::CardTableBarrierSet:
{
BLOCK_COMMENT("CardTablePostBarrier");
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
CardTable* ct = ctbs->card_table();
assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
Label L_cardtable_loop, L_done;
__ cbz_32(count, L_done); // zero count - nothing to do
__ add_ptr_scaled_int32(count, addr, count, LogBytesPerHeapOop);
__ sub(count, count, BytesPerHeapOop); // last addr
__ logical_shift_right(addr, addr, CardTable::card_shift);
__ logical_shift_right(count, count, CardTable::card_shift);
__ sub(count, count, addr); // nb of cards
// warning: Rthread has not been preserved
__ mov_address(tmp, (address) ct->byte_map_base(), symbolic_Relocation::card_table_reference);
__ add(addr,tmp, addr);
Register zero = __ zero_register(tmp);
__ BIND(L_cardtable_loop);
__ strb(zero, Address(addr, 1, post_indexed));
__ subs(count, count, 1);
__ b(L_cardtable_loop, ge);
__ BIND(L_done);
}
break;
default:
ShouldNotReachHere();
}
}
// Generates pattern of code to be placed after raw data copying in generate_oop_copy
// Includes return from arraycopy stub.
@ -3007,7 +2865,7 @@ class StubGenerator: public StubCodeGenerator {
// count: total number of copied elements, 32-bit int
//
// Blows all volatile (R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR) and 'to', 'count', 'tmp' registers.
void oop_arraycopy_stub_epilogue_helper(Register to, Register count, Register tmp, bool status, bool forward) {
void oop_arraycopy_stub_epilogue_helper(Register to, Register count, Register tmp, bool status, bool forward, DecoratorSet decorators) {
assert_different_registers(to, count, tmp);
if (forward) {
@ -3018,7 +2876,8 @@ class StubGenerator: public StubCodeGenerator {
// 'to' is the beginning of the region
gen_write_ref_array_post_barrier(to, count, tmp);
BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler();
bs->arraycopy_epilogue(_masm, decorators, true, to, count, tmp);
if (status) {
__ mov(R0, 0); // OK
@ -3086,9 +2945,16 @@ class StubGenerator: public StubCodeGenerator {
__ push(LR);
#endif // AARCH64
#if INCLUDE_ALL_GCS
gen_write_ref_array_pre_barrier(to, count, callee_saved_regs);
#endif // INCLUDE_ALL_GCS
DecoratorSet decorators = 0;
if (disjoint) {
decorators |= ARRAYCOPY_DISJOINT;
}
if (aligned) {
decorators |= ARRAYCOPY_ALIGNED;
}
BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler();
bs->arraycopy_prologue(_masm, decorators, true, to, count, callee_saved_regs);
// save arguments for barrier generation (after the pre barrier)
__ mov(saved_count, count);
@ -3146,12 +3012,12 @@ class StubGenerator: public StubCodeGenerator {
}
assert(small_copy_limit >= count_required_to_align + min_copy, "first loop might exhaust count");
oop_arraycopy_stub_epilogue_helper(to, saved_count, /* tmp */ tmp1, status, forward);
oop_arraycopy_stub_epilogue_helper(to, saved_count, /* tmp */ tmp1, status, forward, decorators);
{
copy_small_array(from, to, count, tmp1, noreg, bytes_per_count, forward, L_small_array);
oop_arraycopy_stub_epilogue_helper(to, saved_count, /* tmp */ tmp1, status, forward);
oop_arraycopy_stub_epilogue_helper(to, saved_count, /* tmp */ tmp1, status, forward, decorators);
}
if (!to_is_aligned) {
@ -3165,7 +3031,7 @@ class StubGenerator: public StubCodeGenerator {
int min_copy_shifted = align_dst_and_generate_shifted_copy_loop(from, to, count, bytes_per_count, forward);
assert (small_copy_limit >= count_required_to_align + min_copy_shifted, "first loop might exhaust count");
oop_arraycopy_stub_epilogue_helper(to, saved_count, /* tmp */ tmp1, status, forward);
oop_arraycopy_stub_epilogue_helper(to, saved_count, /* tmp */ tmp1, status, forward, decorators);
}
return start;
@ -3336,7 +3202,7 @@ class StubGenerator: public StubCodeGenerator {
const int callee_saved_regs = AARCH64_ONLY(5) NOT_AARCH64(4); // LR saved differently
Label load_element, store_element, do_card_marks, fail;
Label load_element, store_element, do_epilogue, fail;
BLOCK_COMMENT("Entry:");
@ -3351,9 +3217,10 @@ class StubGenerator: public StubCodeGenerator {
pushed+=1;
#endif // AARCH64
#if INCLUDE_ALL_GCS
gen_write_ref_array_pre_barrier(to, count, callee_saved_regs);
#endif // INCLUDE_ALL_GCS
DecoratorSet decorators = ARRAYCOPY_CHECKCAST;
BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler();
bs->arraycopy_prologue(_masm, decorators, true, to, count, callee_saved_regs);
#ifndef AARCH64
const RegisterSet caller_saved_regs = RegisterSet(R4,R6) | RegisterSet(R8,R9) | altFP_7_11;
@ -3399,7 +3266,7 @@ class StubGenerator: public StubCodeGenerator {
__ subs_32(count,count,1);
__ str(R5, Address(to, BytesPerHeapOop, post_indexed)); // store the oop
}
__ b(do_card_marks, eq); // count exhausted
__ b(do_epilogue, eq); // count exhausted
// ======== loop entry is here ========
__ BIND(load_element);
@ -3421,7 +3288,7 @@ class StubGenerator: public StubCodeGenerator {
// Note: fail marked by the fact that count differs from saved_count
__ BIND(do_card_marks);
__ BIND(do_epilogue);
Register copied = AARCH64_ONLY(R20) NOT_AARCH64(R4); // saved
Label L_not_copied;
@ -3431,7 +3298,7 @@ class StubGenerator: public StubCodeGenerator {
__ sub(to, to, AsmOperand(copied, lsl, LogBytesPerHeapOop)); // initial to value
__ mov(R12, copied); // count arg scratched by post barrier
gen_write_ref_array_post_barrier(to, R12, R3);
bs->arraycopy_epilogue(_masm, decorators, true, to, R12, R3);
assert_different_registers(R3,R12,LR,copied,saved_count);
inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr, R3, R12);

View File

@ -0,0 +1,100 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1BarrierSetAssembler.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "runtime/thread.hpp"
#include "interpreter/interp_masm.hpp"
#define __ masm->
void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register from, Register to, Register count,
Register preserve1, Register preserve2) {
bool dest_uninitialized = (decorators & AS_DEST_NOT_INITIALIZED) != 0;
// With G1, don't generate the call if we statically know that the target is uninitialized
if (!dest_uninitialized) {
int spill_slots = 3;
if (preserve1 != noreg) { spill_slots++; }
if (preserve2 != noreg) { spill_slots++; }
const int frame_size = align_up(frame::abi_reg_args_size + spill_slots * BytesPerWord, frame::alignment_in_bytes);
Label filtered;
// Is marking active?
if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
__ lwz(R0, in_bytes(JavaThread::satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_active()), R16_thread);
} else {
guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
__ lbz(R0, in_bytes(JavaThread::satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_active()), R16_thread);
}
__ cmpdi(CCR0, R0, 0);
__ beq(CCR0, filtered);
__ save_LR_CR(R0);
__ push_frame(frame_size, R0);
int slot_nr = 0;
__ std(from, frame_size - (++slot_nr) * wordSize, R1_SP);
__ std(to, frame_size - (++slot_nr) * wordSize, R1_SP);
__ std(count, frame_size - (++slot_nr) * wordSize, R1_SP);
if (preserve1 != noreg) { __ std(preserve1, frame_size - (++slot_nr) * wordSize, R1_SP); }
if (preserve2 != noreg) { __ std(preserve2, frame_size - (++slot_nr) * wordSize, R1_SP); }
if (UseCompressedOops) {
__ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_narrow_oop_entry), to, count);
} else {
__ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_oop_entry), to, count);
}
slot_nr = 0;
__ ld(from, frame_size - (++slot_nr) * wordSize, R1_SP);
__ ld(to, frame_size - (++slot_nr) * wordSize, R1_SP);
__ ld(count, frame_size - (++slot_nr) * wordSize, R1_SP);
if (preserve1 != noreg) { __ ld(preserve1, frame_size - (++slot_nr) * wordSize, R1_SP); }
if (preserve2 != noreg) { __ ld(preserve2, frame_size - (++slot_nr) * wordSize, R1_SP); }
__ addi(R1_SP, R1_SP, frame_size); // pop_frame()
__ restore_LR_CR(R0);
__ bind(filtered);
}
}
void G1BarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register addr, Register count, Register preserve) {
int spill_slots = (preserve != noreg) ? 1 : 0;
const int frame_size = align_up(frame::abi_reg_args_size + spill_slots * BytesPerWord, frame::alignment_in_bytes);
__ save_LR_CR(R0);
__ push_frame(frame_size, R0);
if (preserve != noreg) { __ std(preserve, frame_size - 1 * wordSize, R1_SP); }
__ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_post_entry), addr, count);
if (preserve != noreg) { __ ld(preserve, frame_size - 1 * wordSize, R1_SP); }
__ addi(R1_SP, R1_SP, frame_size); // pop_frame();
__ restore_LR_CR(R0);
}

View File

@ -0,0 +1,39 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_PPC_GC_G1_G1BARRIERSETASSEMBLER_PPC_HPP
#define CPU_PPC_GC_G1_G1BARRIERSETASSEMBLER_PPC_HPP
#include "asm/macroAssembler.hpp"
#include "gc/shared/modRefBarrierSetAssembler.hpp"
class G1BarrierSetAssembler: public ModRefBarrierSetAssembler {
protected:
virtual void gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators, Register from, Register to, Register count,
Register preserve1, Register preserve2);
virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, Register addr, Register count, Register preserve);
};
#endif // CPU_PPC_GC_G1_G1BARRIERSETASSEMBLER_PPC_HPP

View File

@ -0,0 +1,43 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_PPC_GC_SHARED_BARRIERSETASSEMBLER_PPC_HPP
#define CPU_PPC_GC_SHARED_BARRIERSETASSEMBLER_PPC_HPP
#include "asm/macroAssembler.hpp"
#include "memory/allocation.hpp"
#include "oops/access.hpp"
class InterpreterMacroAssembler;
class BarrierSetAssembler: public CHeapObj<mtGC> {
public:
virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Register src, Register dst, Register count, Register preserve1, Register preserve2) {}
virtual void arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Register dst, Register count, Register preserve) {}
};
#endif // CPU_PPC_GC_SHARED_BARRIERSETASSEMBLER_PPC_HPP

View File

@ -0,0 +1,74 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/cardTableBarrierSetAssembler.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/interp_masm.hpp"
#define __ masm->
#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif
#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, Register addr,
Register count, Register preserve) {
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(Universe::heap()->barrier_set());
CardTable* ct = ctbs->card_table();
assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
assert_different_registers(addr, count, R0);
Label Lskip_loop, Lstore_loop;
if (UseConcMarkSweepGC) { __ membar(Assembler::StoreStore); }
__ sldi_(count, count, LogBytesPerHeapOop);
__ beq(CCR0, Lskip_loop); // zero length
__ addi(count, count, -BytesPerHeapOop);
__ add(count, addr, count);
// Use two shifts to clear out those low order two bits! (Cannot opt. into 1.)
__ srdi(addr, addr, CardTable::card_shift);
__ srdi(count, count, CardTable::card_shift);
__ subf(count, addr, count);
__ add_const_optimized(addr, addr, (address)ct->byte_map_base(), R0);
__ addi(count, count, 1);
__ li(R0, 0);
__ mtctr(count);
// Byte store loop
__ bind(Lstore_loop);
__ stb(R0, 0, addr);
__ addi(addr, addr, 1);
__ bdnz(Lstore_loop);
__ bind(Lskip_loop);
}

View File

@ -0,0 +1,38 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_PPC_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_PPC_HPP
#define CPU_PPC_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_PPC_HPP
#include "asm/macroAssembler.hpp"
#include "gc/shared/modRefBarrierSetAssembler.hpp"
class CardTableBarrierSetAssembler: public ModRefBarrierSetAssembler {
protected:
virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, Register addr,
Register count, Register preserve);
};
#endif // CPU_PPC_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_PPC_HPP

View File

@ -0,0 +1,57 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/shared/modRefBarrierSetAssembler.hpp"
#define __ masm->
void ModRefBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Register src, Register dst, Register count, Register preserve1, Register preserve2) {
if (type == T_OBJECT) {
gen_write_ref_array_pre_barrier(masm, decorators, src, dst, count, preserve1, preserve2);
bool checkcast = (decorators & ARRAYCOPY_CHECKCAST) != 0;
if (!checkcast) {
assert_different_registers(dst, count, R9_ARG7, R10_ARG8);
// Save some arguments for epilogue, e.g. disjoint_long_copy_core destroys them.
__ mr(R9_ARG7, dst);
__ mr(R10_ARG8, count);
}
}
}
void ModRefBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Register dst, Register count, Register preserve) {
if (type == T_OBJECT) {
bool checkcast = (decorators & ARRAYCOPY_CHECKCAST) != 0;
if (!checkcast) {
gen_write_ref_array_post_barrier(masm, decorators, R9_ARG7, R10_ARG8, preserve);
} else {
gen_write_ref_array_post_barrier(masm, decorators, dst, count, preserve);
}
}
}

View File

@ -0,0 +1,45 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_PPC_GC_SHARED_MODREFBARRIERSETASSEMBLER_PPC_HPP
#define CPU_PPC_GC_SHARED_MODREFBARRIERSETASSEMBLER_PPC_HPP
#include "asm/macroAssembler.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
class ModRefBarrierSetAssembler: public BarrierSetAssembler {
protected:
virtual void gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators, Register from, Register to, Register count,
Register preserve1, Register preserve2) {}
virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, Register addr, Register count, Register preserve) {}
public:
virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Register src, Register dst, Register count, Register preserve1, Register preserve2);
virtual void arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Register dst, Register count, Register preserve);
};
#endif // CPU_PPC_GC_SHARED_MODREFBARRIERSETASSEMBLER_PPC_HPP
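A concrete collector plugs into this interface by subclassing and overriding only the two hooks; the arraycopy_prologue/epilogue plumbing above stays shared. A hypothetical subclass (illustrative only; MyGCBarrierSetAssembler is not part of this change) would look like:

class MyGCBarrierSetAssembler : public ModRefBarrierSetAssembler {
protected:
  virtual void gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                               Register from, Register to, Register count,
                                               Register preserve1, Register preserve2) {
    // emit the collector's pre-write barrier for the destination range here
  }
  virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                Register addr, Register count, Register preserve) {
    // emit the collector's post-write barrier for the destination range here
  }
};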

View File

@ -25,8 +25,8 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_ppc.hpp"
#include "oops/instanceOop.hpp"
@ -612,137 +612,6 @@ class StubGenerator: public StubCodeGenerator {
#undef __
#define __ _masm->
// Generate G1 pre-write barrier for array.
//
// Input:
// from - register containing src address (only needed for spilling)
// to - register containing starting address
// count - register containing element count
// tmp - scratch register
//
// Kills:
// nothing
//
void gen_write_ref_array_pre_barrier(Register from, Register to, Register count, bool dest_uninitialized, Register Rtmp1,
Register preserve1 = noreg, Register preserve2 = noreg) {
BarrierSet* const bs = Universe::heap()->barrier_set();
switch (bs->kind()) {
case BarrierSet::G1BarrierSet:
// With G1, don't generate the call if we statically know that the target is uninitialized
if (!dest_uninitialized) {
int spill_slots = 3;
if (preserve1 != noreg) { spill_slots++; }
if (preserve2 != noreg) { spill_slots++; }
const int frame_size = align_up(frame::abi_reg_args_size + spill_slots * BytesPerWord, frame::alignment_in_bytes);
Label filtered;
// Is marking active?
if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
__ lwz(Rtmp1, in_bytes(JavaThread::satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_active()), R16_thread);
} else {
guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
__ lbz(Rtmp1, in_bytes(JavaThread::satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_active()), R16_thread);
}
__ cmpdi(CCR0, Rtmp1, 0);
__ beq(CCR0, filtered);
__ save_LR_CR(R0);
__ push_frame(frame_size, R0);
int slot_nr = 0;
__ std(from, frame_size - (++slot_nr) * wordSize, R1_SP);
__ std(to, frame_size - (++slot_nr) * wordSize, R1_SP);
__ std(count, frame_size - (++slot_nr) * wordSize, R1_SP);
if (preserve1 != noreg) { __ std(preserve1, frame_size - (++slot_nr) * wordSize, R1_SP); }
if (preserve2 != noreg) { __ std(preserve2, frame_size - (++slot_nr) * wordSize, R1_SP); }
__ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), to, count);
slot_nr = 0;
__ ld(from, frame_size - (++slot_nr) * wordSize, R1_SP);
__ ld(to, frame_size - (++slot_nr) * wordSize, R1_SP);
__ ld(count, frame_size - (++slot_nr) * wordSize, R1_SP);
if (preserve1 != noreg) { __ ld(preserve1, frame_size - (++slot_nr) * wordSize, R1_SP); }
if (preserve2 != noreg) { __ ld(preserve2, frame_size - (++slot_nr) * wordSize, R1_SP); }
__ addi(R1_SP, R1_SP, frame_size); // pop_frame()
__ restore_LR_CR(R0);
__ bind(filtered);
}
break;
case BarrierSet::CardTableBarrierSet:
break;
default:
ShouldNotReachHere();
}
}
// Generate CMS/G1 post-write barrier for array.
//
// Input:
// addr - register containing starting address
// count - register containing element count
// tmp - scratch register
//
// The input registers and R0 are overwritten.
//
void gen_write_ref_array_post_barrier(Register addr, Register count, Register tmp, Register preserve = noreg) {
BarrierSet* const bs = Universe::heap()->barrier_set();
switch (bs->kind()) {
case BarrierSet::G1BarrierSet:
{
int spill_slots = (preserve != noreg) ? 1 : 0;
const int frame_size = align_up(frame::abi_reg_args_size + spill_slots * BytesPerWord, frame::alignment_in_bytes);
__ save_LR_CR(R0);
__ push_frame(frame_size, R0);
if (preserve != noreg) { __ std(preserve, frame_size - 1 * wordSize, R1_SP); }
__ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), addr, count);
if (preserve != noreg) { __ ld(preserve, frame_size - 1 * wordSize, R1_SP); }
__ addi(R1_SP, R1_SP, frame_size); // pop_frame();
__ restore_LR_CR(R0);
}
break;
case BarrierSet::CardTableBarrierSet:
{
Label Lskip_loop, Lstore_loop;
if (UseConcMarkSweepGC) {
// TODO PPC port: contribute optimization / requires shared changes
__ release();
}
CardTableBarrierSet* const ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
CardTable* const ct = ctbs->card_table();
assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
assert_different_registers(addr, count, tmp);
__ sldi(count, count, LogBytesPerHeapOop);
__ addi(count, count, -BytesPerHeapOop);
__ add(count, addr, count);
// Use two shifts to clear out those low order two bits! (Cannot opt. into 1.)
__ srdi(addr, addr, CardTable::card_shift);
__ srdi(count, count, CardTable::card_shift);
__ subf(count, addr, count);
assert_different_registers(R0, addr, count, tmp);
__ load_const(tmp, (address)ct->byte_map_base());
__ addic_(count, count, 1);
__ beq(CCR0, Lskip_loop);
__ li(R0, 0);
__ mtctr(count);
// Byte store loop
__ bind(Lstore_loop);
__ stbx(R0, tmp, addr);
__ addi(addr, addr, 1);
__ bdnz(Lstore_loop);
__ bind(Lskip_loop);
}
break;
case BarrierSet::ModRef:
break;
default:
ShouldNotReachHere();
}
}
// Support for void zero_words_aligned8(HeapWord* to, size_t count)
//
@ -2155,11 +2024,16 @@ class StubGenerator: public StubCodeGenerator {
STUB_ENTRY(arrayof_oop_disjoint_arraycopy) :
STUB_ENTRY(oop_disjoint_arraycopy);
gen_write_ref_array_pre_barrier(R3_ARG1, R4_ARG2, R5_ARG3, dest_uninitialized, R9_ARG7);
DecoratorSet decorators = 0;
if (dest_uninitialized) {
decorators |= AS_DEST_NOT_INITIALIZED;
}
if (aligned) {
decorators |= ARRAYCOPY_ALIGNED;
}
// Save arguments.
__ mr(R9_ARG7, R4_ARG2);
__ mr(R10_ARG8, R5_ARG3);
BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler();
bs->arraycopy_prologue(_masm, decorators, T_OBJECT, R3_ARG1, R4_ARG2, R5_ARG3, noreg, noreg);
if (UseCompressedOops) {
array_overlap_test(nooverlap_target, 2);
@ -2169,7 +2043,7 @@ class StubGenerator: public StubCodeGenerator {
generate_conjoint_long_copy_core(aligned);
}
gen_write_ref_array_post_barrier(R9_ARG7, R10_ARG8, R11_scratch1);
bs->arraycopy_epilogue(_masm, decorators, T_OBJECT, R4_ARG2, R5_ARG3, noreg);
__ li(R3_RET, 0); // return 0
__ blr();
return start;
@ -2188,12 +2062,17 @@ class StubGenerator: public StubCodeGenerator {
StubCodeMark mark(this, "StubRoutines", name);
address start = __ function_entry();
assert_positive_int(R5_ARG3);
gen_write_ref_array_pre_barrier(R3_ARG1, R4_ARG2, R5_ARG3, dest_uninitialized, R9_ARG7);
// save some arguments, disjoint_long_copy_core destroys them.
// needed for post barrier
__ mr(R9_ARG7, R4_ARG2);
__ mr(R10_ARG8, R5_ARG3);
DecoratorSet decorators = ARRAYCOPY_DISJOINT;
if (dest_uninitialized) {
decorators |= AS_DEST_NOT_INITIALIZED;
}
if (aligned) {
decorators |= ARRAYCOPY_ALIGNED;
}
BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler();
bs->arraycopy_prologue(_masm, decorators, T_OBJECT, R3_ARG1, R4_ARG2, R5_ARG3, noreg, noreg);
if (UseCompressedOops) {
generate_disjoint_int_copy_core(aligned);
@ -2201,7 +2080,7 @@ class StubGenerator: public StubCodeGenerator {
generate_disjoint_long_copy_core(aligned);
}
gen_write_ref_array_post_barrier(R9_ARG7, R10_ARG8, R11_scratch1);
bs->arraycopy_epilogue(_masm, decorators, T_OBJECT, R4_ARG2, R5_ARG3, noreg);
__ li(R3_RET, 0); // return 0
__ blr();
@ -2280,11 +2159,17 @@ class StubGenerator: public StubCodeGenerator {
}
#endif
gen_write_ref_array_pre_barrier(R3_from, R4_to, R5_count, dest_uninitialized, R12_tmp, /* preserve: */ R6_ckoff, R7_ckval);
DecoratorSet decorators = ARRAYCOPY_CHECKCAST;
if (dest_uninitialized) {
decorators |= AS_DEST_NOT_INITIALIZED;
}
BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler();
bs->arraycopy_prologue(_masm, decorators, T_OBJECT, R3_from, R4_to, R5_count, /* preserve: */ R6_ckoff, R7_ckval);
//inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr, R12_tmp, R3_RET);
Label load_element, store_element, store_null, success, do_card_marks;
Label load_element, store_element, store_null, success, do_epilogue;
__ or_(R9_remain, R5_count, R5_count); // Initialize loop index, and test it.
__ li(R8_offset, 0); // Offset from start of arrays.
__ li(R2_minus1, -1);
@ -2328,15 +2213,15 @@ class StubGenerator: public StubCodeGenerator {
// and report their number to the caller.
__ subf_(R5_count, R9_remain, R5_count);
__ nand(R3_RET, R5_count, R5_count); // report (-1^K) to caller
__ bne(CCR0, do_card_marks);
__ bne(CCR0, do_epilogue);
__ blr();
__ bind(success);
__ li(R3_RET, 0);
__ bind(do_card_marks);
// Store check on R4_to[0..R5_count-1].
gen_write_ref_array_post_barrier(R4_to, R5_count, R12_tmp, /* preserve: */ R3_RET);
__ bind(do_epilogue);
bs->arraycopy_epilogue(_masm, decorators, T_OBJECT, R4_to, R5_count, /* preserve */ R3_RET);
__ blr();
return start;
}
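All of the stubs above build their DecoratorSet the same way from the existing boolean parameters. A small helper (illustrative only, not part of the patch) captures the mapping:

static DecoratorSet arraycopy_decorators(bool disjoint, bool dest_uninitialized,
                                         bool aligned, bool checkcast) {
  DecoratorSet decorators = 0;
  if (disjoint)           { decorators |= ARRAYCOPY_DISJOINT; }
  if (dest_uninitialized) { decorators |= AS_DEST_NOT_INITIALIZED; }
  if (aligned)            { decorators |= ARRAYCOPY_ALIGNED; }
  if (checkcast)          { decorators |= ARRAYCOPY_CHECKCAST; }
  return decorators;
}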

View File

@ -0,0 +1,92 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "registerSaver_s390.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BarrierSetAssembler.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "runtime/thread.hpp"
#include "interpreter/interp_masm.hpp"
#define __ masm->
#define BLOCK_COMMENT(str) if (PrintAssembly) __ block_comment(str)
void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register addr, Register count) {
bool dest_uninitialized = (decorators & AS_DEST_NOT_INITIALIZED) != 0;
// With G1, don't generate the call if we statically know that the target is uninitialized.
if (!dest_uninitialized) {
// Is marking active?
Label filtered;
assert_different_registers(addr, Z_R0_scratch); // would be destroyed by push_frame()
assert_different_registers(count, Z_R0_scratch); // would be destroyed by push_frame()
Register Rtmp1 = Z_R0_scratch;
const int active_offset = in_bytes(JavaThread::satb_mark_queue_offset() +
SATBMarkQueue::byte_offset_of_active());
if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
__ load_and_test_int(Rtmp1, Address(Z_thread, active_offset));
} else {
guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
__ load_and_test_byte(Rtmp1, Address(Z_thread, active_offset));
}
__ z_bre(filtered); // Activity indicator is zero, so there is no marking going on currently.
RegisterSaver::save_live_registers(masm, RegisterSaver::arg_registers); // Creates frame.
if (UseCompressedOops) {
__ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_narrow_oop_entry), addr, count);
} else {
__ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_oop_entry), addr, count);
}
RegisterSaver::restore_live_registers(masm, RegisterSaver::arg_registers);
__ bind(filtered);
}
}
void G1BarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register addr, Register count, bool do_return) {
address entry_point = CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_post_entry);
if (!do_return) {
assert_different_registers(addr, Z_R0_scratch); // would be destroyed by push_frame()
assert_different_registers(count, Z_R0_scratch); // would be destroyed by push_frame()
RegisterSaver::save_live_registers(masm, RegisterSaver::arg_registers); // Creates frame.
__ call_VM_leaf(entry_point, addr, count);
RegisterSaver::restore_live_registers(masm, RegisterSaver::arg_registers);
} else {
// Tail call: call c and return to stub caller.
__ lgr_if_needed(Z_ARG1, addr);
__ lgr_if_needed(Z_ARG2, count);
__ load_const(Z_R1, entry_point);
__ z_br(Z_R1); // Branch without linking, callee will return to stub caller.
}
}
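At run time the code emitted by gen_write_ref_array_pre_barrier reduces to a cheap filter followed by a leaf call. A self-contained sketch of that decision (illustrative only; pre_entry_fn and array_pre_barrier are not HotSpot names, they stand in for the write_ref_array_pre_*_entry routines reached through call_VM_leaf):

#include <cstddef>

typedef void (*pre_entry_fn)(void* dst, size_t count);

static void array_pre_barrier(bool dest_uninitialized, bool satb_marking_active,
                              pre_entry_fn entry, void* dst, size_t count) {
  if (dest_uninitialized) return;   // filtered at code-generation time via AS_DEST_NOT_INITIALIZED
  if (!satb_marking_active) return; // the load_and_test + z_bre(filtered) fast path
  entry(dst, count);                // save_live_registers + call_VM_leaf + restore in the stub
}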

View File

@ -0,0 +1,40 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_S390_GC_G1_G1BARRIERSETASSEMBLER_S390_HPP
#define CPU_S390_GC_G1_G1BARRIERSETASSEMBLER_S390_HPP
#include "asm/macroAssembler.hpp"
#include "gc/shared/modRefBarrierSetAssembler.hpp"
class G1BarrierSetAssembler: public ModRefBarrierSetAssembler {
protected:
virtual void gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register addr, Register count);
virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register addr, Register count, bool do_return);
};
#endif // CPU_S390_GC_G1_G1BARRIERSETASSEMBLER_S390_HPP

View File

@ -0,0 +1,43 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_S390_GC_SHARED_BARRIERSETASSEMBLER_S390_HPP
#define CPU_S390_GC_SHARED_BARRIERSETASSEMBLER_S390_HPP
#include "asm/macroAssembler.hpp"
#include "memory/allocation.hpp"
#include "oops/access.hpp"
class InterpreterMacroAssembler;
class BarrierSetAssembler: public CHeapObj<mtGC> {
public:
virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Register src, Register dst, Register count) {}
virtual void arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Register dst, Register count, bool do_return = false);
};
#endif // CPU_S390_GC_SHARED_BARRIERSETASSEMBLER_S390_HPP

View File

@ -0,0 +1,141 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/cardTableBarrierSetAssembler.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/interp_masm.hpp"
#define __ masm->
#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif
#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
#define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8)
void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, Register addr, Register count,
bool do_return) {
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(Universe::heap()->barrier_set());
CardTable* ct = ctbs->card_table();
assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
NearLabel doXC, done;
assert_different_registers(Z_R0, Z_R1, addr, count);
// Nothing to do if count <= 0.
if (!do_return) {
__ compare64_and_branch(count, (intptr_t) 0, Assembler::bcondNotHigh, done);
} else {
__ z_ltgr(count, count);
__ z_bcr(Assembler::bcondNotPositive, Z_R14);
}
// Note: We can't combine the shifts. We could lose a carry
// from calculating the array end address.
// count = (count-1)*BytesPerHeapOop + addr
// Count holds addr of last oop in array then.
__ z_sllg(count, count, LogBytesPerHeapOop);
__ add2reg_with_index(count, -BytesPerHeapOop, count, addr);
// Get base address of card table.
__ load_const_optimized(Z_R1, (address)ct->byte_map_base());
// count = (count>>shift) - (addr>>shift)
__ z_srlg(addr, addr, CardTable::card_shift);
__ z_srlg(count, count, CardTable::card_shift);
// Prefetch first elements of card table for update.
if (VM_Version::has_Prefetch()) {
__ z_pfd(0x02, 0, addr, Z_R1);
}
// Special case: clear just one byte.
__ clear_reg(Z_R0, true, false); // Used for doOneByte.
__ z_sgr(count, addr); // Count = n-1 now, CC used for brc below.
__ z_stc(Z_R0, 0, addr, Z_R1); // Must preserve CC from z_sgr.
if (!do_return) {
__ z_brz(done);
} else {
__ z_bcr(Assembler::bcondZero, Z_R14);
}
__ z_cghi(count, 255);
__ z_brnh(doXC);
// MVCLE: clear a long area.
// Start addr of card table range = base + addr.
// # bytes in card table range = (count + 1)
__ add2reg_with_index(Z_R0, 0, Z_R1, addr);
__ add2reg(Z_R1, 1, count);
// dirty hack:
// There are just two callers. Both pass
// count in Z_ARG3 = Z_R4
// addr in Z_ARG2 = Z_R3
// ==> use Z_ARG2 as src len reg = 0
// Z_ARG1 as src addr (ignored)
assert(count == Z_ARG3, "count: unexpected register number");
assert(addr == Z_ARG2, "addr: unexpected register number");
__ clear_reg(Z_ARG2, true, false);
__ MacroAssembler::move_long_ext(Z_R0, Z_ARG1, 0);
if (!do_return) {
__ z_bru(done);
} else {
__ z_bcr(Assembler::bcondAlways, Z_R14);
}
// XC: clear a short area.
Label XC_template; // Instr template, never exec directly!
__ bind(XC_template);
__ z_xc(0, 0, addr, 0, addr);
__ bind(doXC);
// start addr of card table range = base + addr
// end addr of card table range = base + addr + count
__ add2reg_with_index(addr, 0, Z_R1, addr);
if (VM_Version::has_ExecuteExtensions()) {
__ z_exrl(count, XC_template); // Execute XC with var. len.
} else {
__ z_larl(Z_R1, XC_template);
__ z_ex(count, 0, Z_R0, Z_R1); // Execute XC with var. len.
}
if (do_return) {
__ z_br(Z_R14);
}
__ bind(done);
}
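The generated code picks one of three clearing strategies depending on how many card table bytes fall in the range; a short illustrative helper (not HotSpot code) spells out the thresholds:

#include <cstddef>

// n = number of card table bytes to dirty, i.e. last_card - first_card + 1.
static const char* card_clear_strategy(size_t n) {
  if (n == 0)   return "nothing to do";       // count <= 0 early exit
  if (n == 1)   return "single z_stc";        // the unconditional store, then branch to done
  if (n <= 256) return "XC via EXRL/EX";      // length fits the executed XC's length field
  return "MVCLE";                             // long-area clear
}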

View File

@ -0,0 +1,38 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_S390_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_S390_HPP
#define CPU_S390_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_S390_HPP
#include "asm/macroAssembler.hpp"
#include "gc/shared/modRefBarrierSetAssembler.hpp"
class CardTableBarrierSetAssembler: public ModRefBarrierSetAssembler {
protected:
virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, Register addr, Register count,
bool do_return);
};
#endif // CPU_S390_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_S390_HPP

View File

@ -0,0 +1,51 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/shared/modRefBarrierSetAssembler.hpp"
#define __ masm->
void ModRefBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, Register addr, Register count,
bool do_return) {
if (do_return) { __ z_br(Z_R14); }
}
void ModRefBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Register src, Register dst, Register count) {
if (type == T_OBJECT || type == T_ARRAY) {
gen_write_ref_array_pre_barrier(masm, decorators, dst, count);
}
}
void ModRefBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Register dst, Register count, bool do_return) {
if (type == T_OBJECT || type == T_ARRAY) {
gen_write_ref_array_post_barrier(masm, decorators, dst, count, do_return);
} else {
if (do_return) { __ z_br(Z_R14); }
}
}

View File

@ -0,0 +1,45 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_S390_GC_SHARED_MODREFBARRIERSETASSEMBLER_S390_HPP
#define CPU_S390_GC_SHARED_MODREFBARRIERSETASSEMBLER_S390_HPP
#include "asm/macroAssembler.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
class ModRefBarrierSetAssembler: public BarrierSetAssembler {
protected:
virtual void gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators, Register addr, Register count) {}
virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, Register addr, Register count,
bool do_return);
public:
virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Register src, Register dst, Register count);
virtual void arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Register dst, Register count, bool do_return = false);
};
#endif // CPU_S390_GC_SHARED_MODREFBARRIERSETASSEMBLER_S390_HPP

View File

@ -26,8 +26,8 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "registerSaver_s390.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interp_masm.hpp"
#include "nativeInst_s390.hpp"
@ -686,188 +686,6 @@ class StubGenerator: public StubCodeGenerator {
return start;
}
// Generate pre-write barrier for array.
//
// Input:
// addr - register containing starting address
// count - register containing element count
//
// The input registers are overwritten.
void gen_write_ref_array_pre_barrier(Register addr, Register count, bool dest_uninitialized) {
BarrierSet* const bs = Universe::heap()->barrier_set();
switch (bs->kind()) {
case BarrierSet::G1BarrierSet:
// With G1, don't generate the call if we statically know that the target is uninitialized.
if (!dest_uninitialized) {
// Is marking active?
Label filtered;
assert_different_registers(addr, Z_R0_scratch); // would be destroyed by push_frame()
assert_different_registers(count, Z_R0_scratch); // would be destroyed by push_frame()
Register Rtmp1 = Z_R0_scratch;
const int active_offset = in_bytes(JavaThread::satb_mark_queue_offset() +
SATBMarkQueue::byte_offset_of_active());
if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
__ load_and_test_int(Rtmp1, Address(Z_thread, active_offset));
} else {
guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
__ load_and_test_byte(Rtmp1, Address(Z_thread, active_offset));
}
__ z_bre(filtered); // Activity indicator is zero, so there is no marking going on currently.
// __ push_frame_abi160(0); // implicitly done in save_live_registers()
(void) RegisterSaver::save_live_registers(_masm, RegisterSaver::arg_registers);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), addr, count);
(void) RegisterSaver::restore_live_registers(_masm, RegisterSaver::arg_registers);
// __ pop_frame(); // implicitly done in restore_live_registers()
__ bind(filtered);
}
break;
case BarrierSet::CardTableBarrierSet:
case BarrierSet::ModRef:
break;
default:
ShouldNotReachHere();
}
}
// Generate post-write barrier for array.
//
// Input:
// addr - register containing starting address
// count - register containing element count
//
// The input registers are overwritten.
void gen_write_ref_array_post_barrier(Register addr, Register count, bool branchToEnd) {
BarrierSet* const bs = Universe::heap()->barrier_set();
switch (bs->kind()) {
case BarrierSet::G1BarrierSet:
{
if (branchToEnd) {
assert_different_registers(addr, Z_R0_scratch); // would be destroyed by push_frame()
assert_different_registers(count, Z_R0_scratch); // would be destroyed by push_frame()
// __ push_frame_abi160(0); // implicitly done in save_live_registers()
(void) RegisterSaver::save_live_registers(_masm, RegisterSaver::arg_registers);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), addr, count);
(void) RegisterSaver::restore_live_registers(_masm, RegisterSaver::arg_registers);
// __ pop_frame(); // implicitly done in restore_live_registers()
} else {
// Tail call: call c and return to stub caller.
address entry_point = CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post);
__ lgr_if_needed(Z_ARG1, addr);
__ lgr_if_needed(Z_ARG2, count);
__ load_const(Z_R1, entry_point);
__ z_br(Z_R1); // Branch without linking, callee will return to stub caller.
}
}
break;
case BarrierSet::CardTableBarrierSet:
// These cases formerly known as
// void array_store_check(Register addr, Register count, bool branchToEnd).
{
NearLabel doXC, done;
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
CardTable* ct = ctbs->card_table();
assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
assert_different_registers(Z_R0, Z_R1, addr, count);
// Nothing to do if count <= 0.
if (branchToEnd) {
__ compare64_and_branch(count, (intptr_t) 0, Assembler::bcondNotHigh, done);
} else {
__ z_ltgr(count, count);
__ z_bcr(Assembler::bcondNotPositive, Z_R14);
}
// Note: We can't combine the shifts. We could lose a carry
// from calculating the array end address.
// count = (count-1)*BytesPerHeapOop + addr
// Count holds addr of last oop in array then.
__ z_sllg(count, count, LogBytesPerHeapOop);
__ add2reg_with_index(count, -BytesPerHeapOop, count, addr);
// Get base address of card table.
__ load_const_optimized(Z_R1, (address)ct->byte_map_base());
// count = (count>>shift) - (addr>>shift)
__ z_srlg(addr, addr, CardTable::card_shift);
__ z_srlg(count, count, CardTable::card_shift);
// Prefetch first elements of card table for update.
if (VM_Version::has_Prefetch()) {
__ z_pfd(0x02, 0, addr, Z_R1);
}
// Special case: clear just one byte.
__ clear_reg(Z_R0, true, false); // Used for doOneByte.
__ z_sgr(count, addr); // Count = n-1 now, CC used for brc below.
__ z_stc(Z_R0, 0, addr, Z_R1); // Must preserve CC from z_sgr.
if (branchToEnd) {
__ z_brz(done);
} else {
__ z_bcr(Assembler::bcondZero, Z_R14);
}
__ z_cghi(count, 255);
__ z_brnh(doXC);
// MVCLE: clear a long area.
// Start addr of card table range = base + addr.
// # bytes in card table range = (count + 1)
__ add2reg_with_index(Z_R0, 0, Z_R1, addr);
__ add2reg(Z_R1, 1, count);
// dirty hack:
// There are just two callers. Both pass
// count in Z_ARG3 = Z_R4
// addr in Z_ARG2 = Z_R3
// ==> use Z_ARG2 as src len reg = 0
// Z_ARG1 as src addr (ignored)
assert(count == Z_ARG3, "count: unexpected register number");
assert(addr == Z_ARG2, "addr: unexpected register number");
__ clear_reg(Z_ARG2, true, false);
__ MacroAssembler::move_long_ext(Z_R0, Z_ARG1, 0);
if (branchToEnd) {
__ z_bru(done);
} else {
__ z_bcr(Assembler::bcondAlways, Z_R14);
}
// XC: clear a short area.
Label XC_template; // Instr template, never exec directly!
__ bind(XC_template);
__ z_xc(0, 0, addr, 0, addr);
__ bind(doXC);
// start addr of card table range = base + addr
// end addr of card table range = base + addr + count
__ add2reg_with_index(addr, 0, Z_R1, addr);
if (VM_Version::has_ExecuteExtensions()) {
__ z_exrl(count, XC_template); // Execute XC with var. len.
} else {
__ z_larl(Z_R1, XC_template);
__ z_ex(count, 0, Z_R0, Z_R1); // Execute XC with var. len.
}
if (!branchToEnd) {
__ z_br(Z_R14);
}
__ bind(done);
}
break;
case BarrierSet::ModRef:
if (!branchToEnd) { __ z_br(Z_R14); }
break;
default:
ShouldNotReachHere();
}
}
// This is to test that the count register contains a positive int value.
// Required because C2 does not respect int to long conversion for stub calls.
void assert_positive_int(Register count) {
@ -1482,11 +1300,20 @@ class StubGenerator: public StubCodeGenerator {
unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
unsigned int size = UseCompressedOops ? 4 : 8;
gen_write_ref_array_pre_barrier(Z_ARG2, Z_ARG3, dest_uninitialized);
DecoratorSet decorators = ARRAYCOPY_DISJOINT;
if (dest_uninitialized) {
decorators |= AS_DEST_NOT_INITIALIZED;
}
if (aligned) {
decorators |= ARRAYCOPY_ALIGNED;
}
BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler();
bs->arraycopy_prologue(_masm, decorators, T_OBJECT, Z_ARG1, Z_ARG2, Z_ARG3);
generate_disjoint_copy(aligned, size, true, true);
gen_write_ref_array_post_barrier(Z_ARG2, Z_ARG3, false);
bs->arraycopy_epilogue(_masm, decorators, T_OBJECT, Z_ARG2, Z_ARG3, true);
return __ addr_at(start_off);
}
@ -1565,11 +1392,20 @@ class StubGenerator: public StubCodeGenerator {
// Branch to disjoint_copy (if applicable) before pre_barrier to avoid double pre_barrier.
array_overlap_test(nooverlap_target, shift); // Branch away to nooverlap_target if disjoint.
gen_write_ref_array_pre_barrier(Z_ARG2, Z_ARG3, dest_uninitialized);
DecoratorSet decorators = 0;
if (dest_uninitialized) {
decorators |= AS_DEST_NOT_INITIALIZED;
}
if (aligned) {
decorators |= ARRAYCOPY_ALIGNED;
}
BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler();
bs->arraycopy_prologue(_masm, decorators, T_OBJECT, Z_ARG1, Z_ARG2, Z_ARG3);
generate_conjoint_copy(aligned, size, true); // Must preserve ARG2, ARG3.
gen_write_ref_array_post_barrier(Z_ARG2, Z_ARG3, false);
bs->arraycopy_epilogue(_masm, decorators, T_OBJECT, Z_ARG2, Z_ARG3, true);
return __ addr_at(start_off);
}

View File

@ -0,0 +1,93 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1BarrierSetAssembler.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/interp_masm.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/thread.hpp"
#include "utilities/macros.hpp"
#define __ masm->
void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register addr, Register count) {
bool dest_uninitialized = (decorators & AS_DEST_NOT_INITIALIZED) != 0;
// With G1, don't generate the call if we statically know that the target is uninitialized
if (!dest_uninitialized) {
Register tmp = O5;
assert_different_registers(addr, count, tmp);
Label filtered;
// Is marking active?
if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
__ ld(G2, in_bytes(JavaThread::satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_active()), tmp);
} else {
guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1,
"Assumption");
__ ldsb(G2, in_bytes(JavaThread::satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_active()), tmp);
}
// Is marking active?
__ cmp_and_br_short(tmp, G0, Assembler::equal, Assembler::pt, filtered);
__ save_frame(0);
// Save the necessary global regs... will be used after.
if (addr->is_global()) {
__ mov(addr, L0);
}
if (count->is_global()) {
__ mov(count, L1);
}
__ mov(addr->after_save(), O0);
// Get the count into O1
address slowpath = UseCompressedOops ? CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_narrow_oop_entry)
: CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_oop_entry);
__ call(slowpath);
__ delayed()->mov(count->after_save(), O1);
if (addr->is_global()) {
__ mov(L0, addr);
}
if (count->is_global()) {
__ mov(L1, count);
}
__ restore();
__ bind(filtered);
DEBUG_ONLY(__ set(0xDEADC0DE, tmp);) // we have killed tmp
}
}
void G1BarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register addr, Register count, Register tmp) {
// Get some new fresh output registers.
__ save_frame(0);
__ mov(addr->after_save(), O0);
__ call(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_post_entry));
__ delayed()->mov(count->after_save(), O1);
__ restore();
}

View File

@ -0,0 +1,39 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_SPARC_GC_G1_G1BARRIERSETASSEMBLER_SPARC_HPP
#define CPU_SPARC_GC_G1_G1BARRIERSETASSEMBLER_SPARC_HPP
#include "asm/macroAssembler.hpp"
#include "gc/shared/modRefBarrierSetAssembler.hpp"
class G1BarrierSetAssembler: public ModRefBarrierSetAssembler {
protected:
virtual void gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register addr, Register count);
virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register addr, Register count, Register tmp);
};
#endif // CPU_SPARC_GC_G1_G1BARRIERSETASSEMBLER_SPARC_HPP

View File

@ -0,0 +1,42 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_SPARC_GC_SHARED_BARRIERSETASSEMBLER_SPARC_HPP
#define CPU_SPARC_GC_SHARED_BARRIERSETASSEMBLER_SPARC_HPP
#include "asm/macroAssembler.hpp"
#include "memory/allocation.hpp"
#include "oops/access.hpp"
class InterpreterMacroAssembler;
class BarrierSetAssembler: public CHeapObj<mtGC> {
public:
virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Register src, Register dst, Register count) {}
virtual void arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Register src, Register dst, Register count) {}
};
#endif // CPU_SPARC_GC_SHARED_BARRIERSETASSEMBLER_SPARC_HPP

View File

@ -0,0 +1,72 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/cardTableBarrierSetAssembler.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/interp_masm.hpp"
#define __ masm->
#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif
#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register addr, Register count, Register tmp) {
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(Universe::heap()->barrier_set());
CardTable* ct = ctbs->card_table();
assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
assert_different_registers(addr, count, tmp);
Label L_loop, L_done;
__ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_done); // zero count - nothing to do
__ sll_ptr(count, LogBytesPerHeapOop, count);
__ sub(count, BytesPerHeapOop, count);
__ add(count, addr, count);
// Use two shifts to clear out those low order two bits! (Cannot opt. into 1.)
__ srl_ptr(addr, CardTable::card_shift, addr);
__ srl_ptr(count, CardTable::card_shift, count);
__ sub(count, addr, count);
AddressLiteral rs(ct->byte_map_base());
__ set(rs, tmp);
__ BIND(L_loop);
__ stb(G0, tmp, addr);
__ subcc(count, 1, count);
__ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
__ delayed()->add(addr, 1, addr);
__ BIND(L_done);
}

View File

@ -0,0 +1,37 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_SPARC_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_SPARC_HPP
#define CPU_SPARC_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_SPARC_HPP
#include "asm/macroAssembler.hpp"
#include "gc/shared/modRefBarrierSetAssembler.hpp"
class CardTableBarrierSetAssembler: public ModRefBarrierSetAssembler {
protected:
virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register addr, Register count, Register tmp);
};
#endif // CPU_SPARC_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_SPARC_HPP

View File

@ -0,0 +1,57 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/shared/modRefBarrierSetAssembler.hpp"
#define __ masm->
void ModRefBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Register src, Register dst, Register count) {
if (type == T_OBJECT) {
bool checkcast = (decorators & ARRAYCOPY_CHECKCAST) != 0;
if (!checkcast) {
// save arguments for barrier generation
__ mov(dst, G1);
__ mov(count, G5);
gen_write_ref_array_pre_barrier(masm, decorators, G1, G5);
} else {
gen_write_ref_array_pre_barrier(masm, decorators, dst, count);
}
}
}
void ModRefBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Register src, Register dst, Register count) {
if (type == T_OBJECT) {
bool checkcast = (decorators & ARRAYCOPY_CHECKCAST) != 0;
if (!checkcast) {
// O0 is used as temp register
gen_write_ref_array_post_barrier(masm, decorators, G1, G5, O0);
} else {
gen_write_ref_array_post_barrier(masm, decorators, dst, count, O3);
}
}
}

View File

@ -0,0 +1,43 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_SPARC_GC_SHARED_MODREFBARRIERSETASSEMBLER_SPARC_HPP
#define CPU_SPARC_GC_SHARED_MODREFBARRIERSETASSEMBLER_SPARC_HPP
#include "asm/macroAssembler.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
class ModRefBarrierSetAssembler: public BarrierSetAssembler {
protected:
virtual void gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators, Register addr, Register count) {}
virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, Register addr, Register count, Register tmp) {}
public:
virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Register src, Register dst, Register count);
virtual void arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Register src, Register dst, Register count);
};
#endif // CPU_SPARC_GC_SHARED_MODREFBARRIERSETASSEMBLER_SPARC_HPP

View File

@ -24,8 +24,8 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_sparc.hpp"
#include "oops/instanceOop.hpp"
@ -823,125 +823,6 @@ class StubGenerator: public StubCodeGenerator {
__ delayed()->nop();
}
//
// Generate pre-write barrier for array.
//
// Input:
// addr - register containing starting address
// count - register containing element count
// tmp - scratch register
//
// The input registers are overwritten.
//
void gen_write_ref_array_pre_barrier(Register addr, Register count, bool dest_uninitialized) {
BarrierSet* bs = Universe::heap()->barrier_set();
switch (bs->kind()) {
case BarrierSet::G1BarrierSet:
// With G1, don't generate the call if we statically know that the target is uninitialized
if (!dest_uninitialized) {
Register tmp = O5;
assert_different_registers(addr, count, tmp);
Label filtered;
// Is marking active?
if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
__ ld(G2, in_bytes(JavaThread::satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_active()), tmp);
} else {
guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1,
"Assumption");
__ ldsb(G2, in_bytes(JavaThread::satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_active()), tmp);
}
// Is marking active?
__ cmp_and_br_short(tmp, G0, Assembler::equal, Assembler::pt, filtered);
__ save_frame(0);
// Save the necessary global regs... will be used after.
if (addr->is_global()) {
__ mov(addr, L0);
}
if (count->is_global()) {
__ mov(count, L1);
}
__ mov(addr->after_save(), O0);
// Get the count into O1
__ call(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre));
__ delayed()->mov(count->after_save(), O1);
if (addr->is_global()) {
__ mov(L0, addr);
}
if (count->is_global()) {
__ mov(L1, count);
}
__ restore();
__ bind(filtered);
DEBUG_ONLY(__ set(0xDEADC0DE, tmp);) // we have killed tmp
}
break;
case BarrierSet::CardTableBarrierSet:
break;
default:
ShouldNotReachHere();
}
}
//
// Generate post-write barrier for array.
//
// Input:
// addr - register containing starting address
// count - register containing element count
// tmp - scratch register
//
// The input registers are overwritten.
//
void gen_write_ref_array_post_barrier(Register addr, Register count,
Register tmp) {
BarrierSet* bs = Universe::heap()->barrier_set();
switch (bs->kind()) {
case BarrierSet::G1BarrierSet:
{
// Get some new fresh output registers.
__ save_frame(0);
__ mov(addr->after_save(), O0);
__ call(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post));
__ delayed()->mov(count->after_save(), O1);
__ restore();
}
break;
case BarrierSet::CardTableBarrierSet:
{
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
CardTable* ct = ctbs->card_table();
assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
assert_different_registers(addr, count, tmp);
Label L_loop, L_done;
__ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_done); // zero count - nothing to do
__ sll_ptr(count, LogBytesPerHeapOop, count);
__ sub(count, BytesPerHeapOop, count);
__ add(count, addr, count);
// Use two shifts to clear out those low order two bits! (Cannot opt. into 1.)
__ srl_ptr(addr, CardTable::card_shift, addr);
__ srl_ptr(count, CardTable::card_shift, count);
__ sub(count, addr, count);
AddressLiteral rs(ct->byte_map_base());
__ set(rs, tmp);
__ BIND(L_loop);
__ stb(G0, tmp, addr);
__ subcc(count, 1, count);
__ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
__ delayed()->add(addr, 1, addr);
__ BIND(L_done);
}
break;
case BarrierSet::ModRef:
break;
default:
ShouldNotReachHere();
}
}
//
// Generate main code for disjoint arraycopy
@ -2388,18 +2269,25 @@ class StubGenerator: public StubCodeGenerator {
BLOCK_COMMENT("Entry:");
}
// save arguments for barrier generation
__ mov(to, G1);
__ mov(count, G5);
gen_write_ref_array_pre_barrier(G1, G5, dest_uninitialized);
DecoratorSet decorators = ARRAYCOPY_DISJOINT;
if (dest_uninitialized) {
decorators |= AS_DEST_NOT_INITIALIZED;
}
if (aligned) {
decorators |= ARRAYCOPY_ALIGNED;
}
BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler();
bs->arraycopy_prologue(_masm, decorators, T_OBJECT, from, to, count);
assert_clean_int(count, O3); // Make sure 'count' is clean int.
if (UseCompressedOops) {
generate_disjoint_int_copy_core(aligned);
} else {
generate_disjoint_long_copy_core(aligned);
}
// O0 is used as temp register
gen_write_ref_array_post_barrier(G1, G5, O0);
bs->arraycopy_epilogue(_masm, decorators, T_OBJECT, from, to, count);
// O3, O4 are used as temp registers
inc_counter_np(SharedRuntime::_oop_array_copy_ctr, O3, O4);
@ -2438,10 +2326,16 @@ class StubGenerator: public StubCodeGenerator {
array_overlap_test(nooverlap_target, LogBytesPerHeapOop);
// save arguments for barrier generation
__ mov(to, G1);
__ mov(count, G5);
gen_write_ref_array_pre_barrier(G1, G5, dest_uninitialized);
DecoratorSet decorators = 0;
if (dest_uninitialized) {
decorators |= AS_DEST_NOT_INITIALIZED;
}
if (aligned) {
decorators |= ARRAYCOPY_ALIGNED;
}
BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler();
bs->arraycopy_prologue(_masm, decorators, T_OBJECT, from, to, count);
if (UseCompressedOops) {
generate_conjoint_int_copy_core(aligned);
@ -2449,8 +2343,7 @@ class StubGenerator: public StubCodeGenerator {
generate_conjoint_long_copy_core(aligned);
}
// O0 is used as temp register
gen_write_ref_array_post_barrier(G1, G5, O0);
bs->arraycopy_epilogue(_masm, decorators, T_OBJECT, from, to, count);
// O3, O4 are used as temp registers
inc_counter_np(SharedRuntime::_oop_array_copy_ctr, O3, O4);
@ -2552,9 +2445,16 @@ class StubGenerator: public StubCodeGenerator {
// caller can pass a 64-bit byte count here (from generic stub)
BLOCK_COMMENT("Entry:");
}
gen_write_ref_array_pre_barrier(O1_to, O2_count, dest_uninitialized);
Label load_element, store_element, do_card_marks, fail, done;
DecoratorSet decorators = ARRAYCOPY_CHECKCAST;
if (dest_uninitialized) {
decorators |= AS_DEST_NOT_INITIALIZED;
}
BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler();
bs->arraycopy_prologue(_masm, decorators, T_OBJECT, O0_from, O1_to, O2_count);
Label load_element, store_element, do_epilogue, fail, done;
__ addcc(O2_count, 0, G1_remain); // initialize loop index, and test it
__ brx(Assembler::notZero, false, Assembler::pt, load_element);
__ delayed()->mov(G0, O5_offset); // offset from start of arrays
@ -2576,7 +2476,7 @@ class StubGenerator: public StubCodeGenerator {
__ deccc(G1_remain); // decrement the count
__ store_heap_oop(G3_oop, O1_to, O5_offset); // store the oop
__ inc(O5_offset, heapOopSize); // step to next offset
__ brx(Assembler::zero, true, Assembler::pt, do_card_marks);
__ brx(Assembler::zero, true, Assembler::pt, do_epilogue);
__ delayed()->set(0, O0); // return -1 on success
// ======== loop entry is here ========
@ -2600,8 +2500,8 @@ class StubGenerator: public StubCodeGenerator {
__ brx(Assembler::zero, false, Assembler::pt, done);
__ delayed()->not1(O2_count, O0); // report (-1^K) to caller
__ BIND(do_card_marks);
gen_write_ref_array_post_barrier(O1_to, O2_count, O3); // store check on O1[0..O2]
__ BIND(do_epilogue);
bs->arraycopy_epilogue(_masm, decorators, T_OBJECT, O0_from, O1_to, O2_count);
__ BIND(done);
inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr, O3, O4);


@ -0,0 +1,113 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BarrierSetAssembler.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/interp_masm.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/thread.hpp"
#include "utilities/macros.hpp"
#define __ masm->
void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register addr, Register count) {
bool dest_uninitialized = (decorators & AS_DEST_NOT_INITIALIZED) != 0;
if (!dest_uninitialized) {
Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
#ifndef _LP64
__ push(thread);
__ get_thread(thread);
#endif
Label filtered;
Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
SATBMarkQueue::byte_offset_of_active()));
// Is marking active?
if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
__ cmpl(in_progress, 0);
} else {
assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
__ cmpb(in_progress, 0);
}
NOT_LP64(__ pop(thread);)
__ jcc(Assembler::equal, filtered);
__ pusha(); // push registers
#ifdef _LP64
if (count == c_rarg0) {
if (addr == c_rarg1) {
// exactly backwards!!
__ xchgptr(c_rarg1, c_rarg0);
} else {
__ movptr(c_rarg1, count);
__ movptr(c_rarg0, addr);
}
} else {
__ movptr(c_rarg0, addr);
__ movptr(c_rarg1, count);
}
if (UseCompressedOops) {
__ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_narrow_oop_entry), 2);
} else {
__ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_oop_entry), 2);
}
#else
__ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_oop_entry),
addr, count);
#endif
__ popa();
__ bind(filtered);
}
}
void G1BarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register addr, Register count, Register tmp) {
__ pusha(); // push registers (overkill)
#ifdef _LP64
if (c_rarg0 == count) { // On win64 c_rarg0 == rcx
assert_different_registers(c_rarg1, addr);
__ mov(c_rarg1, count);
__ mov(c_rarg0, addr);
} else {
assert_different_registers(c_rarg0, count);
__ mov(c_rarg0, addr);
__ mov(c_rarg1, count);
}
__ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_post_entry), 2);
#else
__ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_post_entry),
addr, count);
#endif
__ popa();
}
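The call_VM_leaf targets used above are new static entry points on G1BarrierSet, added to the shared GC code later in this patch. For reference, their declarations:

static void write_ref_array_pre_oop_entry(oop* dst, size_t length);
static void write_ref_array_pre_narrow_oop_entry(narrowOop* dst, size_t length);
static void write_ref_array_post_entry(HeapWord* dst, size_t length);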


@ -0,0 +1,39 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_X86_GC_G1_G1BARRIERSETASSEMBLER_X86_HPP
#define CPU_X86_GC_G1_G1BARRIERSETASSEMBLER_X86_HPP
#include "asm/macroAssembler.hpp"
#include "gc/shared/modRefBarrierSetAssembler.hpp"
class G1BarrierSetAssembler: public ModRefBarrierSetAssembler {
protected:
virtual void gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register addr, Register count);
virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register addr, Register count, Register tmp);
};
#endif // CPU_X86_GC_G1_G1BARRIERSETASSEMBLER_X86_HPP


@ -0,0 +1,43 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_X86_GC_G1_BARRIERSETASSEMBLER_X86_HPP
#define CPU_X86_GC_G1_BARRIERSETASSEMBLER_X86_HPP
#include "asm/macroAssembler.hpp"
#include "memory/allocation.hpp"
#include "oops/access.hpp"
class InterpreterMacroAssembler;
class BarrierSetAssembler: public CHeapObj<mtGC> {
protected:
public:
virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Register src, Register dst, Register count) {}
virtual void arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Register src, Register dst, Register count) {}
};
#endif // CPU_X86_GC_G1_BARRIERSETASSEMBLER_X86_HPP
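This is the entire platform-facing surface: stub generators no longer switch on the barrier set kind, they fetch the heap's barrier set assembler and bracket the copy loop with these two calls. A sketch of the calling pattern, matching the stub generator changes later in this patch (register names follow the x86 stubs):

DecoratorSet decorators = ARRAYCOPY_DISJOINT;
if (dest_uninitialized) {
  decorators |= AS_DEST_NOT_INITIALIZED;
}
if (aligned) {
  decorators |= ARRAYCOPY_ALIGNED;
}
BarrierSetAssembler* bs = Universe::heap()->barrier_set()->barrier_set_assembler();
bs->arraycopy_prologue(_masm, decorators, T_OBJECT, from, to, count);
// ... emit the copy loop ...
bs->arraycopy_epilogue(_masm, decorators, T_OBJECT, from, to, count);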


@ -0,0 +1,87 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/cardTableBarrierSetAssembler.hpp"
#include "gc/shared/collectedHeap.hpp"
#define __ masm->
#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif
#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
#define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8)
void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register addr, Register count, Register tmp) {
BarrierSet *bs = Universe::heap()->barrier_set();
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
CardTable* ct = ctbs->card_table();
assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
intptr_t disp = (intptr_t) ct->byte_map_base();
Label L_loop, L_done;
const Register end = count;
assert_different_registers(addr, end);
__ testl(count, count);
__ jcc(Assembler::zero, L_done); // zero count - nothing to do
#ifdef _LP64
__ leaq(end, Address(addr, count, TIMES_OOP, 0)); // end == addr+count*oop_size
__ subptr(end, BytesPerHeapOop); // end - 1 to make inclusive
__ shrptr(addr, CardTable::card_shift);
__ shrptr(end, CardTable::card_shift);
__ subptr(end, addr); // end --> cards count
__ mov64(tmp, disp);
__ addptr(addr, tmp);
__ BIND(L_loop);
__ movb(Address(addr, count, Address::times_1), 0);
__ decrement(count);
__ jcc(Assembler::greaterEqual, L_loop);
#else
__ lea(end, Address(addr, count, Address::times_ptr, -wordSize));
__ shrptr(addr, CardTable::card_shift);
__ shrptr(end, CardTable::card_shift);
__ subptr(end, addr); // end --> count
__ BIND(L_loop);
Address cardtable(addr, count, Address::times_1, disp);
__ movb(cardtable, 0);
__ decrement(count);
__ jcc(Assembler::greaterEqual, L_loop);
#endif
__ BIND(L_done);
}
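An illustrative plain-C++ equivalent of the card-dirtying loop above, not part of the patch: it relies on HotSpot's BytesPerHeapOop and CardTable::card_shift, and the helper name dirty_cards is invented. Every card overlapping the copied oop range is set to 0, the dirty value.

void dirty_cards(jbyte* byte_map_base, uintptr_t addr, size_t count) {
  if (count == 0) return;                                            // zero count - nothing to do
  uintptr_t last = addr + count * BytesPerHeapOop - BytesPerHeapOop; // address of the last oop (inclusive)
  uintptr_t first_card = addr >> CardTable::card_shift;
  uintptr_t last_card  = last >> CardTable::card_shift;
  for (uintptr_t card = first_card; card <= last_card; card++) {
    byte_map_base[card] = 0;                                         // 0 marks the card dirty
  }
}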


@ -0,0 +1,37 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_X86_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_X86_HPP
#define CPU_X86_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_X86_HPP
#include "asm/macroAssembler.hpp"
#include "gc/shared/modRefBarrierSetAssembler.hpp"
class CardTableBarrierSetAssembler: public ModRefBarrierSetAssembler {
protected:
virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, Register addr,
Register count, Register tmp);
};
#endif // CPU_X86_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_X86_HPP


@ -0,0 +1,80 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/shared/modRefBarrierSetAssembler.hpp"
#define __ masm->
void ModRefBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Register src, Register dst, Register count) {
bool checkcast = (decorators & ARRAYCOPY_CHECKCAST) != 0;
bool disjoint = (decorators & ARRAYCOPY_DISJOINT) != 0;
bool obj_int = type == T_OBJECT LP64_ONLY(&& UseCompressedOops);
if (type == T_OBJECT || type == T_ARRAY) {
#ifdef _LP64
if (!checkcast && !obj_int) {
// Save count for barrier
__ movptr(r11, count);
} else if (disjoint && obj_int) {
// Save dst in r11 in the disjoint case
__ movq(r11, dst);
}
#else
if (disjoint) {
__ mov(rdx, dst); // save 'to'
}
#endif
gen_write_ref_array_pre_barrier(masm, decorators, dst, count);
}
}
void ModRefBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Register src, Register dst, Register count) {
bool checkcast = (decorators & ARRAYCOPY_CHECKCAST) != 0;
bool disjoint = (decorators & ARRAYCOPY_DISJOINT) != 0;
bool obj_int = type == T_OBJECT LP64_ONLY(&& UseCompressedOops);
Register tmp = rax;
if (type == T_OBJECT || type == T_ARRAY) {
#ifdef _LP64
if (!checkcast && !obj_int) {
// Save count for barrier
count = r11;
} else if (disjoint && obj_int) {
// Use the saved dst in the disjoint case
dst = r11;
} else if (checkcast) {
tmp = rscratch1;
}
#else
if (disjoint) {
__ mov(dst, rdx); // restore 'to'
}
#endif
gen_write_ref_array_post_barrier(masm, decorators, dst, count, tmp);
}
}


@ -0,0 +1,43 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_X86_GC_SHARED_MODREFBARRIERSETASSEMBLER_X86_HPP
#define CPU_X86_GC_SHARED_MODREFBARRIERSETASSEMBLER_X86_HPP
#include "asm/macroAssembler.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
class ModRefBarrierSetAssembler: public BarrierSetAssembler {
protected:
virtual void gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators, Register addr, Register count) {}
virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, Register addr, Register count, Register tmp) {}
public:
virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Register src, Register dst, Register count);
virtual void arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Register src, Register dst, Register count);
};
#endif // CPU_X86_GC_SHARED_MODREFBARRIERSETASSEMBLER_X86_HPP


@ -25,8 +25,8 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_x86.hpp"
#include "oops/instanceOop.hpp"
@ -668,107 +668,6 @@ class StubGenerator: public StubCodeGenerator {
return start;
}
//
// Generate pre-barrier for array stores
//
// Input:
// start - starting address
// count - element count
void gen_write_ref_array_pre_barrier(Register start, Register count, bool uninitialized_target) {
assert_different_registers(start, count);
BarrierSet* bs = Universe::heap()->barrier_set();
switch (bs->kind()) {
#if INCLUDE_ALL_GCS
case BarrierSet::G1BarrierSet:
// With G1, don't generate the call if we statically know that the target is uninitialized
if (!uninitialized_target) {
Register thread = rax;
Label filtered;
__ push(thread);
__ get_thread(thread);
Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
SATBMarkQueue::byte_offset_of_active()));
// Is marking active?
if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
__ cmpl(in_progress, 0);
} else {
assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
__ cmpb(in_progress, 0);
}
__ pop(thread);
__ jcc(Assembler::equal, filtered);
__ pusha(); // push registers
__ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre),
start, count);
__ popa();
__ bind(filtered);
}
break;
#endif // INCLUDE_ALL_GCS
case BarrierSet::CardTableBarrierSet:
break;
default :
ShouldNotReachHere();
}
}
//
// Generate a post-barrier for an array store
//
// start - starting address
// count - element count
//
// The two input registers are overwritten.
//
void gen_write_ref_array_post_barrier(Register start, Register count) {
BarrierSet* bs = Universe::heap()->barrier_set();
assert_different_registers(start, count);
switch (bs->kind()) {
#if INCLUDE_ALL_GCS
case BarrierSet::G1BarrierSet:
{
__ pusha(); // push registers
__ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post),
start, count);
__ popa();
}
break;
#endif // INCLUDE_ALL_GCS
case BarrierSet::CardTableBarrierSet:
{
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
CardTable* ct = ctbs->card_table();
assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
Label L_loop;
const Register end = count; // elements count; end == start+count-1
assert_different_registers(start, end);
__ lea(end, Address(start, count, Address::times_ptr, -wordSize));
__ shrptr(start, CardTable::card_shift);
__ shrptr(end, CardTable::card_shift);
__ subptr(end, start); // end --> count
__ BIND(L_loop);
intptr_t disp = (intptr_t) ct->byte_map_base();
Address cardtable(start, count, Address::times_1, disp);
__ movb(cardtable, 0);
__ decrement(count);
__ jcc(Assembler::greaterEqual, L_loop);
}
break;
case BarrierSet::ModRef:
break;
default :
ShouldNotReachHere();
}
}
// Copy 64-byte chunks
//
@ -936,10 +835,19 @@ class StubGenerator: public StubCodeGenerator {
if (t == T_OBJECT) {
__ testl(count, count);
__ jcc(Assembler::zero, L_0_count);
gen_write_ref_array_pre_barrier(to, count, dest_uninitialized);
__ mov(saved_to, to); // save 'to'
}
DecoratorSet decorators = ARRAYCOPY_DISJOINT;
if (dest_uninitialized) {
decorators |= AS_DEST_NOT_INITIALIZED;
}
if (aligned) {
decorators |= ARRAYCOPY_ALIGNED;
}
BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler();
bs->arraycopy_prologue(_masm, decorators, t, from, to, count);
__ subptr(to, from); // to --> to_from
__ cmpl(count, 2<<shift); // Short arrays (< 8 bytes) copy by element
__ jcc(Assembler::below, L_copy_4_bytes); // use unsigned cmp
@ -1024,10 +932,10 @@ class StubGenerator: public StubCodeGenerator {
__ BIND(L_copy_2_bytes);
}
__ movl(count, Address(rsp, 12+12)); // reread 'count'
bs->arraycopy_epilogue(_masm, decorators, t, from, to, count);
if (t == T_OBJECT) {
__ movl(count, Address(rsp, 12+12)); // reread 'count'
__ mov(to, saved_to); // restore 'to'
gen_write_ref_array_post_barrier(to, count);
__ BIND(L_0_count);
}
inc_copy_counter_np(t);
@ -1116,9 +1024,19 @@ class StubGenerator: public StubCodeGenerator {
if (t == T_OBJECT) {
__ testl(count, count);
__ jcc(Assembler::zero, L_0_count);
gen_write_ref_array_pre_barrier(dst, count, dest_uninitialized);
}
DecoratorSet decorators = 0;
if (dest_uninitialized) {
decorators |= AS_DEST_NOT_INITIALIZED;
}
if (aligned) {
decorators |= ARRAYCOPY_ALIGNED;
}
BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler();
bs->arraycopy_prologue(_masm, decorators, t, from, to, count);
// copy from high to low
__ cmpl(count, 2<<shift); // Short arrays (< 8 bytes) copy by element
__ jcc(Assembler::below, L_copy_4_bytes); // use unsigned cmp
@ -1216,9 +1134,11 @@ class StubGenerator: public StubCodeGenerator {
} else {
__ BIND(L_copy_2_bytes);
}
__ movl2ptr(count, Address(rsp, 12+12)); // reread count
bs->arraycopy_epilogue(_masm, decorators, t, from, to, count);
if (t == T_OBJECT) {
__ movl2ptr(count, Address(rsp, 12+12)); // reread count
gen_write_ref_array_post_barrier(to, count);
__ BIND(L_0_count);
}
inc_copy_counter_np(t);
@ -1463,8 +1383,16 @@ class StubGenerator: public StubCodeGenerator {
Address to_element_addr(end_to, count, Address::times_ptr, 0);
Address elem_klass_addr(elem, oopDesc::klass_offset_in_bytes());
DecoratorSet decorators = ARRAYCOPY_CHECKCAST;
if (dest_uninitialized) {
decorators |= AS_DEST_NOT_INITIALIZED;
}
BasicType type = T_OBJECT;
BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler();
bs->arraycopy_prologue(_masm, decorators, type, from, to, count);
// Copy from low to high addresses, indexed from the end of each array.
gen_write_ref_array_pre_barrier(to, count, dest_uninitialized);
__ lea(end_from, end_from_addr);
__ lea(end_to, end_to_addr);
assert(length == count, ""); // else fix next line:
@ -1521,7 +1449,7 @@ class StubGenerator: public StubCodeGenerator {
__ BIND(L_post_barrier);
__ movptr(to, to_arg); // reload
gen_write_ref_array_post_barrier(to, count);
bs->arraycopy_epilogue(_masm, decorators, type, from, to, count);
// Common exit point (success or failure).
__ BIND(L_done);


@ -26,8 +26,8 @@
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "ci/ciUtilities.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_x86.hpp"
#include "oops/instanceOop.hpp"
@ -1190,119 +1190,6 @@ class StubGenerator: public StubCodeGenerator {
#endif
}
// Generate code for an array write pre barrier
//
// addr - starting address
// count - element count
// tmp - scratch register
//
// Destroy no registers!
//
void gen_write_ref_array_pre_barrier(Register addr, Register count, bool dest_uninitialized) {
BarrierSet* bs = Universe::heap()->barrier_set();
switch (bs->kind()) {
case BarrierSet::G1BarrierSet:
// With G1, don't generate the call if we statically know that the target is uninitialized
if (!dest_uninitialized) {
Label filtered;
Address in_progress(r15_thread, in_bytes(JavaThread::satb_mark_queue_offset() +
SATBMarkQueue::byte_offset_of_active()));
// Is marking active?
if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
__ cmpl(in_progress, 0);
} else {
assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
__ cmpb(in_progress, 0);
}
__ jcc(Assembler::equal, filtered);
__ pusha(); // push registers
if (count == c_rarg0) {
if (addr == c_rarg1) {
// exactly backwards!!
__ xchgptr(c_rarg1, c_rarg0);
} else {
__ movptr(c_rarg1, count);
__ movptr(c_rarg0, addr);
}
} else {
__ movptr(c_rarg0, addr);
__ movptr(c_rarg1, count);
}
__ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), 2);
__ popa();
__ bind(filtered);
}
break;
case BarrierSet::CardTableBarrierSet:
break;
default:
ShouldNotReachHere();
}
}
//
// Generate code for an array write post barrier
//
// Input:
// start - register containing starting address of destination array
// count - elements count
// scratch - scratch register
//
// The input registers are overwritten.
//
void gen_write_ref_array_post_barrier(Register start, Register count, Register scratch) {
assert_different_registers(start, count, scratch);
BarrierSet* bs = Universe::heap()->barrier_set();
switch (bs->kind()) {
case BarrierSet::G1BarrierSet:
{
__ pusha(); // push registers (overkill)
if (c_rarg0 == count) { // On win64 c_rarg0 == rcx
assert_different_registers(c_rarg1, start);
__ mov(c_rarg1, count);
__ mov(c_rarg0, start);
} else {
assert_different_registers(c_rarg0, count);
__ mov(c_rarg0, start);
__ mov(c_rarg1, count);
}
__ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), 2);
__ popa();
}
break;
case BarrierSet::CardTableBarrierSet:
{
Label L_loop, L_done;
const Register end = count;
__ testl(count, count);
__ jcc(Assembler::zero, L_done); // zero count - nothing to do
__ leaq(end, Address(start, count, TIMES_OOP, 0)); // end == start+count*oop_size
__ subptr(end, BytesPerHeapOop); // end - 1 to make inclusive
__ shrptr(start, CardTable::card_shift);
__ shrptr(end, CardTable::card_shift);
__ subptr(end, start); // end --> cards count
int64_t disp = ci_card_table_address_as<int64_t>();
__ mov64(scratch, disp);
__ addptr(start, scratch);
__ BIND(L_loop);
__ movb(Address(start, count, Address::times_1), 0);
__ decrement(count);
__ jcc(Assembler::greaterEqual, L_loop);
__ BIND(L_done);
}
break;
default:
ShouldNotReachHere();
}
}
// Copy big chunks forward
//
@ -1918,7 +1805,6 @@ class StubGenerator: public StubCodeGenerator {
const Register qword_count = count;
const Register end_from = from; // source array end address
const Register end_to = to; // destination array end address
const Register saved_to = r11; // saved destination array address
// End pointers are inclusive, and if count is not zero they point
// to the last unit copied: end_to[0] := end_from[0]
@ -1933,10 +1819,18 @@ class StubGenerator: public StubCodeGenerator {
setup_arg_regs(); // from => rdi, to => rsi, count => rdx
// r9 and r10 may be used to save non-volatile registers
if (is_oop) {
__ movq(saved_to, to);
gen_write_ref_array_pre_barrier(to, count, dest_uninitialized);
DecoratorSet decorators = ARRAYCOPY_DISJOINT;
if (dest_uninitialized) {
decorators |= AS_DEST_NOT_INITIALIZED;
}
if (aligned) {
decorators |= ARRAYCOPY_ALIGNED;
}
BasicType type = is_oop ? T_OBJECT : T_INT;
BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler();
bs->arraycopy_prologue(_masm, decorators, type, from, to, count);
// 'from', 'to' and 'count' are now valid
__ movptr(dword_count, count);
@ -1963,9 +1857,7 @@ class StubGenerator: public StubCodeGenerator {
__ movl(Address(end_to, 8), rax);
__ BIND(L_exit);
if (is_oop) {
gen_write_ref_array_post_barrier(saved_to, dword_count, rax);
}
bs->arraycopy_epilogue(_masm, decorators, type, from, to, dword_count);
restore_arg_regs();
inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free
__ vzeroupper();
@ -2022,10 +1914,18 @@ class StubGenerator: public StubCodeGenerator {
setup_arg_regs(); // from => rdi, to => rsi, count => rdx
// r9 and r10 may be used to save non-volatile registers
if (is_oop) {
// no registers are destroyed by this call
gen_write_ref_array_pre_barrier(to, count, dest_uninitialized);
DecoratorSet decorators = 0;
if (dest_uninitialized) {
decorators |= AS_DEST_NOT_INITIALIZED;
}
if (aligned) {
decorators |= ARRAYCOPY_ALIGNED;
}
BasicType type = is_oop ? T_OBJECT : T_INT;
BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler();
// no registers are destroyed by this call
bs->arraycopy_prologue(_masm, decorators, type, from, to, count);
assert_clean_int(count, rax); // Make sure 'count' is clean int.
// 'from', 'to' and 'count' are now valid
@ -2062,9 +1962,7 @@ class StubGenerator: public StubCodeGenerator {
copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
__ BIND(L_exit);
if (is_oop) {
gen_write_ref_array_post_barrier(to, dword_count, rax);
}
bs->arraycopy_epilogue(_masm, decorators, type, from, to, dword_count);
restore_arg_regs();
inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free
__ xorptr(rax, rax); // return 0
@ -2102,7 +2000,6 @@ class StubGenerator: public StubCodeGenerator {
const Register qword_count = rdx; // elements count
const Register end_from = from; // source array end address
const Register end_to = rcx; // destination array end address
const Register saved_to = to;
const Register saved_count = r11;
// End pointers are inclusive, and if count is not zero they point
// to the last unit copied: end_to[0] := end_from[0]
@ -2120,12 +2017,18 @@ class StubGenerator: public StubCodeGenerator {
setup_arg_regs(); // from => rdi, to => rsi, count => rdx
// r9 and r10 may be used to save non-volatile registers
// 'from', 'to' and 'qword_count' are now valid
if (is_oop) {
// Save to and count for store barrier
__ movptr(saved_count, qword_count);
// no registers are destroyed by this call
gen_write_ref_array_pre_barrier(to, qword_count, dest_uninitialized);
DecoratorSet decorators = ARRAYCOPY_DISJOINT;
if (dest_uninitialized) {
decorators |= AS_DEST_NOT_INITIALIZED;
}
if (aligned) {
decorators |= ARRAYCOPY_ALIGNED;
}
BasicType type = is_oop ? T_OBJECT : T_LONG;
BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler();
bs->arraycopy_prologue(_masm, decorators, type, from, to, qword_count);
// Copy from low to high addresses. Use 'to' as scratch.
__ lea(end_from, Address(from, qword_count, Address::times_8, -8));
@ -2154,10 +2057,8 @@ class StubGenerator: public StubCodeGenerator {
// Copy in multi-byte chunks
copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
if (is_oop) {
__ BIND(L_exit);
gen_write_ref_array_post_barrier(saved_to, saved_count, rax);
}
bs->arraycopy_epilogue(_masm, decorators, type, from, to, qword_count);
restore_arg_regs();
if (is_oop) {
inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free
@ -2209,12 +2110,18 @@ class StubGenerator: public StubCodeGenerator {
setup_arg_regs(); // from => rdi, to => rsi, count => rdx
// r9 and r10 may be used to save non-volatile registers
// 'from', 'to' and 'qword_count' are now valid
if (is_oop) {
// Save to and count for store barrier
__ movptr(saved_count, qword_count);
// No registers are destroyed by this call
gen_write_ref_array_pre_barrier(to, saved_count, dest_uninitialized);
DecoratorSet decorators = ARRAYCOPY_DISJOINT;
if (dest_uninitialized) {
decorators |= AS_DEST_NOT_INITIALIZED;
}
if (aligned) {
decorators |= ARRAYCOPY_ALIGNED;
}
BasicType type = is_oop ? T_OBJECT : T_LONG;
BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler();
bs->arraycopy_prologue(_masm, decorators, type, from, to, qword_count);
__ jmp(L_copy_bytes);
@ -2239,10 +2146,8 @@ class StubGenerator: public StubCodeGenerator {
// Copy in multi-byte chunks
copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
if (is_oop) {
__ BIND(L_exit);
gen_write_ref_array_post_barrier(to, saved_count, rax);
}
bs->arraycopy_epilogue(_masm, decorators, type, from, to, qword_count);
restore_arg_regs();
if (is_oop) {
inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free
@ -2389,7 +2294,14 @@ class StubGenerator: public StubCodeGenerator {
Address from_element_addr(end_from, count, TIMES_OOP, 0);
Address to_element_addr(end_to, count, TIMES_OOP, 0);
gen_write_ref_array_pre_barrier(to, count, dest_uninitialized);
DecoratorSet decorators = ARRAYCOPY_CHECKCAST;
if (dest_uninitialized) {
decorators |= AS_DEST_NOT_INITIALIZED;
}
BasicType type = T_OBJECT;
BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler();
bs->arraycopy_prologue(_masm, decorators, type, from, to, count);
// Copy from low to high addresses, indexed from the end of each array.
__ lea(end_from, end_from_addr);
@ -2442,7 +2354,7 @@ class StubGenerator: public StubCodeGenerator {
__ xorptr(rax, rax); // return 0 on success
__ BIND(L_post_barrier);
gen_write_ref_array_post_barrier(to, r14_length, rscratch1);
bs->arraycopy_epilogue(_masm, decorators, type, from, to, r14_length);
// Common exit point (success or failure).
__ BIND(L_done);


@ -0,0 +1,30 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_ZERO_GC_G1_G1BARRIERSETASSEMBLER_ZERO_HPP
#define CPU_ZERO_GC_G1_G1BARRIERSETASSEMBLER_ZERO_HPP
class G1BarrierSetAssembler;
#endif // CPU_ZERO_GC_G1_G1BARRIERSETASSEMBLER_ZERO_HPP


@ -0,0 +1,30 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_ZERO_GC_G1_BARRIERSETASSEMBLER_ZERO_HPP
#define CPU_ZERO_GC_G1_BARRIERSETASSEMBLER_ZERO_HPP
class BarrierSetAssembler;
#endif // CPU_ZERO_GC_G1_BARRIERSETASSEMBLER_ZERO_HPP


@ -0,0 +1,30 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_ZERO_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_ZERO_HPP
#define CPU_ZERO_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_ZERO_HPP
class CardTableBarrierSetAssembler;
#endif // CPU_ZERO_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_ZERO_HPP


@ -0,0 +1,30 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_ZERO_GC_SHARED_MODREFBARRIERSETASSEMBLER_ZERO_HPP
#define CPU_ZERO_GC_SHARED_MODREFBARRIERSETASSEMBLER_ZERO_HPP
class ModRefBarrierSetAssembler;
#endif // CPU_ZERO_GC_SHARED_MODREFBARRIERSETASSEMBLER_ZERO_HPP
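The Zero variants only forward-declare the types because the Zero port generates no machine code; the factory that BarrierSet gains later in this patch therefore returns NULL on Zero builds:

template <class BarrierSetAssemblerT>
BarrierSetAssembler* make_barrier_set_assembler() {
  return NOT_ZERO(new BarrierSetAssemblerT()) ZERO_ONLY(NULL);
}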


@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
#include "gc/shared/barrierSet.inline.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.inline.hpp"
#include "gc/shared/collectedHeap.hpp"


@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "gc/g1/g1BarrierSet.inline.hpp"
#include "gc/g1/g1BarrierSetAssembler.hpp"
#include "gc/g1/g1CardTable.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/heapRegion.hpp"
@ -32,9 +33,12 @@
#include "oops/oop.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/macros.hpp"
G1BarrierSet::G1BarrierSet(G1CardTable* card_table) :
CardTableBarrierSet(card_table, BarrierSet::FakeRtti(BarrierSet::G1BarrierSet)),
CardTableBarrierSet(make_barrier_set_assembler<G1BarrierSetAssembler>(),
card_table,
BarrierSet::FakeRtti(BarrierSet::G1BarrierSet)),
_dcqs(JavaThread::dirty_card_queue_set())
{ }
@ -53,11 +57,26 @@ void G1BarrierSet::enqueue(oop pre_val) {
}
}
void G1BarrierSet::write_ref_array_pre_oop_entry(oop* dst, size_t length) {
G1BarrierSet *bs = barrier_set_cast<G1BarrierSet>(BarrierSet::barrier_set());
bs->write_ref_array_pre(dst, length, false);
}
void G1BarrierSet::write_ref_array_pre_narrow_oop_entry(narrowOop* dst, size_t length) {
G1BarrierSet *bs = barrier_set_cast<G1BarrierSet>(BarrierSet::barrier_set());
bs->write_ref_array_pre(dst, length, false);
}
void G1BarrierSet::write_ref_array_post_entry(HeapWord* dst, size_t length) {
G1BarrierSet *bs = barrier_set_cast<G1BarrierSet>(BarrierSet::barrier_set());
bs->G1BarrierSet::write_ref_array(dst, length);
}
template <class T> void
G1BarrierSet::write_ref_array_pre_work(T* dst, int count) {
G1BarrierSet::write_ref_array_pre_work(T* dst, size_t count) {
if (!JavaThread::satb_mark_queue_set().is_active()) return;
T* elem_ptr = dst;
for (int i = 0; i < count; i++, elem_ptr++) {
for (size_t i = 0; i < count; i++, elem_ptr++) {
T heap_oop = oopDesc::load_heap_oop(elem_ptr);
if (!oopDesc::is_null(heap_oop)) {
enqueue(oopDesc::decode_heap_oop_not_null(heap_oop));
@ -65,13 +84,13 @@ G1BarrierSet::write_ref_array_pre_work(T* dst, int count) {
}
}
void G1BarrierSet::write_ref_array_pre(oop* dst, int count, bool dest_uninitialized) {
void G1BarrierSet::write_ref_array_pre(oop* dst, size_t count, bool dest_uninitialized) {
if (!dest_uninitialized) {
write_ref_array_pre_work(dst, count);
}
}
void G1BarrierSet::write_ref_array_pre(narrowOop* dst, int count, bool dest_uninitialized) {
void G1BarrierSet::write_ref_array_pre(narrowOop* dst, size_t count, bool dest_uninitialized) {
if (!dest_uninitialized) {
write_ref_array_pre_work(dst, count);
}


@ -49,9 +49,13 @@ class G1BarrierSet: public CardTableBarrierSet {
static void enqueue_if_weak_or_archive(DecoratorSet decorators, oop value);
template <class T> void write_ref_array_pre_work(T* dst, int count);
virtual void write_ref_array_pre(oop* dst, int count, bool dest_uninitialized);
virtual void write_ref_array_pre(narrowOop* dst, int count, bool dest_uninitialized);
template <class T> void write_ref_array_pre_work(T* dst, size_t count);
virtual void write_ref_array_pre(oop* dst, size_t count, bool dest_uninitialized);
virtual void write_ref_array_pre(narrowOop* dst, size_t count, bool dest_uninitialized);
static void write_ref_array_pre_oop_entry(oop* dst, size_t length);
static void write_ref_array_pre_narrow_oop_entry(narrowOop* dst, size_t length);
static void write_ref_array_post_entry(HeapWord* dst, size_t length);
template <DecoratorSet decorators, typename T>
void write_ref_field_pre(T* field);


@ -0,0 +1,32 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_GC_SHARED_G1BARRIERSETASSEMBLER_HPP
#define SHARE_GC_SHARED_G1BARRIERSETASSEMBLER_HPP
#include "utilities/macros.hpp"
#include CPU_HEADER(gc/g1/g1BarrierSetAssembler)
#endif // SHARE_GC_SHARED_G1BARRIERSETASSEMBLER_HPP


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -23,24 +23,6 @@
*/
#include "precompiled.hpp"
#include "gc/shared/barrierSet.inline.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "memory/universe.hpp"
#include "gc/shared/barrierSet.hpp"
BarrierSet* BarrierSet::_bs = NULL;
// count is number of array elements being written
void BarrierSet::static_write_ref_array_pre(HeapWord* start, size_t count) {
assert(count <= (size_t)max_intx, "count too large");
if (UseCompressedOops) {
Universe::heap()->barrier_set()->write_ref_array_pre((narrowOop*)start, (int)count, false);
} else {
Universe::heap()->barrier_set()->write_ref_array_pre( (oop*)start, (int)count, false);
}
}
// count is number of array elements being written
void BarrierSet::static_write_ref_array_post(HeapWord* start, size_t count) {
// simply delegate to instance method
Universe::heap()->barrier_set()->write_ref_array(start, count);
}


@ -31,8 +31,10 @@
#include "oops/accessBackend.hpp"
#include "oops/oopsHierarchy.hpp"
#include "utilities/fakeRttiSupport.hpp"
#include "utilities/macros.hpp"
class JavaThread;
class BarrierSetAssembler;
// This class provides the interface between a barrier implementation and
// the rest of the system.
@ -67,6 +69,7 @@ protected:
private:
FakeRtti _fake_rtti;
BarrierSetAssembler* _barrier_set_assembler;
public:
// Metafunction mapping a class derived from BarrierSet to the
@ -87,28 +90,17 @@ public:
// End of fake RTTI support.
protected:
BarrierSet(const FakeRtti& fake_rtti) : _fake_rtti(fake_rtti) { }
BarrierSet(BarrierSetAssembler* barrier_set_assembler, const FakeRtti& fake_rtti) :
_fake_rtti(fake_rtti),
_barrier_set_assembler(barrier_set_assembler) { }
~BarrierSet() { }
template <class BarrierSetAssemblerT>
BarrierSetAssembler* make_barrier_set_assembler() {
return NOT_ZERO(new BarrierSetAssemblerT()) ZERO_ONLY(NULL);
}
public:
// Operations on arrays, or general regions (e.g., for "clone") may be
// optimized by some barriers.
// Below length is the # array elements being written
virtual void write_ref_array_pre(oop* dst, int length,
bool dest_uninitialized = false) {}
virtual void write_ref_array_pre(narrowOop* dst, int length,
bool dest_uninitialized = false) {}
// Below count is the # array elements being written, starting
// at the address "start", which may not necessarily be HeapWord-aligned
inline void write_ref_array(HeapWord* start, size_t count);
// Static versions, suitable for calling from generated code;
// count is # array elements being written, starting with "start",
// which may not necessarily be HeapWord-aligned.
static void static_write_ref_array_pre(HeapWord* start, size_t count);
static void static_write_ref_array_post(HeapWord* start, size_t count);
// Support for optimizing compilers to call the barrier set on slow path allocations
// that did not enter a TLAB. Used for e.g. ReduceInitialCardMarks.
// The allocation is safe to use iff it returns true. If not, the slow-path allocation
@ -119,15 +111,17 @@ public:
virtual void on_thread_detach(JavaThread* thread) {}
virtual void make_parsable(JavaThread* thread) {}
protected:
virtual void write_ref_array_work(MemRegion mr) = 0;
public:
// Print a description of the memory for the barrier set
virtual void print_on(outputStream* st) const = 0;
static void set_bs(BarrierSet* bs) { _bs = bs; }
BarrierSetAssembler* barrier_set_assembler() {
assert(_barrier_set_assembler != NULL, "should be set");
return _barrier_set_assembler;
}
// The AccessBarrier of a BarrierSet subclass is called by the Access API
// (cf. oops/access.hpp) to perform decorated accesses. GC implementations
// may override these default access operations by declaring an


@ -1,54 +0,0 @@
/*
* Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_SHARED_BARRIERSET_INLINE_HPP
#define SHARE_VM_GC_SHARED_BARRIERSET_INLINE_HPP
#include "gc/shared/barrierSet.hpp"
#include "utilities/align.hpp"
// count is number of array elements being written
void BarrierSet::write_ref_array(HeapWord* start, size_t count) {
assert(count <= (size_t)max_intx, "count too large");
HeapWord* end = (HeapWord*)((char*)start + (count*heapOopSize));
// In the case of compressed oops, start and end may potentially be misaligned;
// so we need to conservatively align the first downward (this is not
// strictly necessary for current uses, but a case of good hygiene and,
// if you will, aesthetics) and the second upward (this is essential for
// current uses) to a HeapWord boundary, so we mark all cards overlapping
// this write. If this evolves in the future to calling a
// logging barrier of narrow oop granularity, like the pre-barrier for G1
// (mentioned here merely by way of example), we will need to change this
// interface, so it is "exactly precise" (if I may be allowed the adverbial
// redundancy for emphasis) and does not include narrow oop slots not
// included in the original write interval.
HeapWord* aligned_start = align_down(start, HeapWordSize);
HeapWord* aligned_end = align_up (end, HeapWordSize);
// If compressed oops were not being used, these should already be aligned
assert(UseCompressedOops || (aligned_start == start && aligned_end == end),
"Expected heap word alignment of start and end");
write_ref_array_work(MemRegion(aligned_start, aligned_end));
}
#endif // SHARE_VM_GC_SHARED_BARRIERSET_INLINE_HPP
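A worked example of the alignment described above (illustrative only, assuming 8-byte HeapWords and 4-byte narrow oops): writing three narrow oops starting at address 0x1004 covers bytes 0x1004 up to 0x1010; align_down(0x1004, 8) yields 0x1000 and align_up(0x1010, 8) yields 0x1010, so write_ref_array_work receives the MemRegion [0x1000, 0x1010) and every card overlapping the narrow-oop slots gets dirtied.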


@ -0,0 +1,32 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_GC_SHARED_BARRIERSETASSEMBLER_HPP
#define SHARE_GC_SHARED_BARRIERSETASSEMBLER_HPP
#include "utilities/macros.hpp"
#include CPU_HEADER(gc/shared/barrierSetAssembler)
#endif // SHARE_GC_SHARED_BARRIERSETASSEMBLER_HPP
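CPU_HEADER comes from utilities/macros.hpp and expands to the platform-suffixed header name, so this umbrella include resolves to the per-CPU file added by this patch; written out by hand for illustration:

#include "gc/shared/barrierSetAssembler_x86.hpp"   // on x86 builds
#include "gc/shared/barrierSetAssembler_zero.hpp"  // on Zero builds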


@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
#include "gc/shared/cardTableBarrierSetAssembler.hpp"
#include "gc/shared/cardTableBarrierSet.inline.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/genCollectedHeap.hpp"
@ -39,16 +40,18 @@
// enumerate ref fields that have been modified (since the last
// enumeration.)
CardTableBarrierSet::CardTableBarrierSet(
CardTable* card_table,
const BarrierSet::FakeRtti& fake_rtti) :
ModRefBarrierSet(fake_rtti.add_tag(BarrierSet::CardTableBarrierSet)),
CardTableBarrierSet::CardTableBarrierSet(BarrierSetAssembler* barrier_set_assembler,
CardTable* card_table,
const BarrierSet::FakeRtti& fake_rtti) :
ModRefBarrierSet(barrier_set_assembler,
fake_rtti.add_tag(BarrierSet::CardTableBarrierSet)),
_defer_initial_card_mark(false),
_card_table(card_table)
{}
CardTableBarrierSet::CardTableBarrierSet(CardTable* card_table) :
ModRefBarrierSet(BarrierSet::FakeRtti(BarrierSet::CardTableBarrierSet)),
ModRefBarrierSet(make_barrier_set_assembler<CardTableBarrierSetAssembler>(),
BarrierSet::FakeRtti(BarrierSet::CardTableBarrierSet)),
_defer_initial_card_mark(false),
_card_table(card_table)
{}
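The second constructor shows the new wiring: the concrete barrier set builds its own assembler via make_barrier_set_assembler<CardTableBarrierSetAssembler>() and hands it up the hierarchy. A self-contained sketch of that ownership pattern follows, with illustrative class names rather than the real barrierSet.hpp declarations (the real factory also has to cope with builds that carry no assembler, which is glossed over here).
class BarrierSetAssembler {
 public:
  virtual ~BarrierSetAssembler() {}
};
class CardTableBarrierSetAssembler : public BarrierSetAssembler {};
class BarrierSetSketch {
  BarrierSetAssembler* _barrier_set_assembler;  // owned by the barrier set
 protected:
  explicit BarrierSetSketch(BarrierSetAssembler* bsa) : _barrier_set_assembler(bsa) {}
  // Factory used by subclass constructors, standing in for make_barrier_set_assembler<>.
  template <class AssemblerT>
  static BarrierSetAssembler* make_assembler() { return new AssemblerT(); }
 public:
  virtual ~BarrierSetSketch() { delete _barrier_set_assembler; }
  // Stub generators ask the active barrier set for its assembler through this accessor.
  BarrierSetAssembler* barrier_set_assembler() const { return _barrier_set_assembler; }
};
class CardTableBarrierSetSketch : public BarrierSetSketch {
 public:
  CardTableBarrierSetSketch()
    : BarrierSetSketch(make_assembler<CardTableBarrierSetAssembler>()) {}
};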

View File

@ -52,7 +52,9 @@ class CardTableBarrierSet: public ModRefBarrierSet {
bool _defer_initial_card_mark;
CardTable* _card_table;
CardTableBarrierSet(CardTable* card_table, const BarrierSet::FakeRtti& fake_rtti);
CardTableBarrierSet(BarrierSetAssembler* barrier_set_assembler,
CardTable* card_table,
const BarrierSet::FakeRtti& fake_rtti);
public:
CardTableBarrierSet(CardTable* card_table);
@ -66,7 +68,6 @@ class CardTableBarrierSet: public ModRefBarrierSet {
invalidate(mr);
}
protected:
void write_ref_array_work(MemRegion mr);
public:

View File

@ -0,0 +1,32 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_HPP
#define SHARE_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_HPP
#include "utilities/macros.hpp"
#include CPU_HEADER(gc/shared/cardTableBarrierSetAssembler)
#endif // SHARE_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_HPP

View File

@ -25,7 +25,7 @@
#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/allocTracer.hpp"
#include "gc/shared/barrierSet.inline.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/gcLocker.inline.hpp"

View File

@ -32,8 +32,10 @@ class Klass;
class ModRefBarrierSet: public BarrierSet {
protected:
ModRefBarrierSet(const BarrierSet::FakeRtti& fake_rtti)
: BarrierSet(fake_rtti.add_tag(BarrierSet::ModRef)) { }
ModRefBarrierSet(BarrierSetAssembler* barrier_set_assembler,
const BarrierSet::FakeRtti& fake_rtti)
: BarrierSet(barrier_set_assembler,
fake_rtti.add_tag(BarrierSet::ModRef)) { }
~ModRefBarrierSet() { }
public:
@ -47,6 +49,22 @@ public:
virtual void invalidate(MemRegion mr) = 0;
virtual void write_region(MemRegion mr) = 0;
// Operations on arrays, or general regions (e.g., for "clone") may be
// optimized by some barriers.
// Below length is the # array elements being written
virtual void write_ref_array_pre(oop* dst, size_t length,
bool dest_uninitialized = false) {}
virtual void write_ref_array_pre(narrowOop* dst, size_t length,
bool dest_uninitialized = false) {}
// Below count is the # array elements being written, starting
// at the address "start", which may not necessarily be HeapWord-aligned
inline void write_ref_array(HeapWord* start, size_t count);
protected:
virtual void write_ref_array_work(MemRegion mr) = 0;
public:
// The ModRef abstraction introduces pre and post barriers
template <DecoratorSet decorators, typename BarrierSetT>
class AccessBarrier: public BarrierSet::AccessBarrier<decorators, BarrierSetT> {
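This hunk spells out the contract a concrete ModRef collector has to meet: the two write_ref_array_pre hooks default to no-ops, write_ref_array is the shared post-barrier entry point, and write_ref_array_work is the one pure virtual left to the subclass. A toy, self-contained illustration of that split follows; the card-marking body is only a placeholder, not the real CardTableBarrierSet code.
#include <cstddef>
#include <cstdio>
// Stand-in for MemRegion: a [start, end) range of bytes in the heap.
struct ToyMemRegion { char* start; char* end; };
class ToyModRefBarrierSet {
 public:
  virtual ~ToyModRefBarrierSet() {}
  // Pre hooks default to doing nothing; a SATB collector such as G1 overrides them.
  virtual void write_ref_array_pre(void** /*dst*/, size_t /*length*/, bool /*dest_uninitialized*/) {}
  // Shared post-barrier entry point: compute the written region, then delegate.
  void write_ref_array(char* start, size_t count, size_t oop_size) {
    char* end = start + count * oop_size;  // widening to word boundaries omitted here
    write_ref_array_work(ToyMemRegion{start, end});
  }
 protected:
  // The one piece every concrete ModRef collector must supply.
  virtual void write_ref_array_work(ToyMemRegion mr) = 0;
};
class ToyCardMarkingBarrierSet : public ToyModRefBarrierSet {
 protected:
  virtual void write_ref_array_work(ToyMemRegion mr) {
    printf("dirty cards covering %zu bytes\n", (size_t)(mr.end - mr.start));
  }
};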

View File

@ -25,12 +25,34 @@
#ifndef SHARE_VM_GC_SHARED_MODREFBARRIERSET_INLINE_HPP
#define SHARE_VM_GC_SHARED_MODREFBARRIERSET_INLINE_HPP
#include "gc/shared/barrierSet.inline.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/modRefBarrierSet.hpp"
#include "oops/klass.inline.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.hpp"
// count is number of array elements being written
void ModRefBarrierSet::write_ref_array(HeapWord* start, size_t count) {
HeapWord* end = (HeapWord*)((char*)start + (count*heapOopSize));
// In the case of compressed oops, start and end may potentially be misaligned;
// so we need to conservatively align the first downward (this is not
// strictly necessary for current uses, but a case of good hygiene and,
// if you will, aesthetics) and the second upward (this is essential for
// current uses) to a HeapWord boundary, so we mark all cards overlapping
// this write. If this evolves in the future to calling a
// logging barrier of narrow oop granularity, like the pre-barrier for G1
// (mentioned here merely by way of example), we will need to change this
// interface, so it is "exactly precise" (if I may be allowed the adverbial
// redundancy for emphasis) and does not include narrow oop slots not
// included in the original write interval.
HeapWord* aligned_start = align_down(start, HeapWordSize);
HeapWord* aligned_end = align_up (end, HeapWordSize);
// If compressed oops were not being used, these should already be aligned
assert(UseCompressedOops || (aligned_start == start && aligned_end == end),
"Expected heap word alignment of start and end");
write_ref_array_work(MemRegion(aligned_start, aligned_end));
}
template <DecoratorSet decorators, typename BarrierSetT>
template <typename T>
inline void ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT>::
@ -73,7 +95,7 @@ oop_arraycopy_in_heap(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t
if (!HasDecorator<decorators, ARRAYCOPY_CHECKCAST>::value) {
// Optimized covariant case
bs->write_ref_array_pre(dst, (int)length,
bs->write_ref_array_pre(dst, length,
HasDecorator<decorators, AS_DEST_NOT_INITIALIZED>::value);
Raw::oop_arraycopy(src_obj, dst_obj, src, dst, length);
bs->write_ref_array((HeapWord*)dst, length);
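This covariant fast path is the caller the new interface is shaped for: pre-barrier on the destination slots, raw element copy, post-barrier over the written region. A minimal standalone rendering of that ordering, with toy types instead of the Access machinery:
#include <cstddef>
#include <cstring>
struct ToyBarrier {
  void write_ref_array_pre(void** /*dst*/, size_t /*length*/, bool /*dest_uninitialized*/) {
    // Before the copy: a SATB collector would record the about-to-be-overwritten
    // references here; skipped when the destination is known to be uninitialized.
  }
  void write_ref_array(void* /*start*/, size_t /*count*/) {
    // After the copy: remember the written region (card marking in the real code).
  }
};
static void toy_oop_arraycopy(ToyBarrier& bs, void** src, void** dst, size_t length) {
  bs.write_ref_array_pre(dst, length, /*dest_uninitialized=*/false);  // 1. pre barrier
  std::memcpy(dst, src, length * sizeof(void*));                      // 2. the raw element copy
  bs.write_ref_array(dst, length);                                    // 3. post barrier
}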

View File

@ -0,0 +1,32 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_GC_SHARED_MODREFBARRIERSETASSEMBLER_HPP
#define SHARE_GC_SHARED_MODREFBARRIERSETASSEMBLER_HPP
#include "utilities/macros.hpp"
#include CPU_HEADER(gc/shared/modRefBarrierSetAssembler)
#endif // SHARE_GC_SHARED_MODREFBARRIERSETASSEMBLER_HPP