8189355: Cleanup of BarrierSet barrier functions

Reviewed-by: shade, kbarrett, pliden, rkennke
Erik Österlund 2017-10-19 11:25:55 +02:00
parent 9c5e52d73d
commit 29eea5d4a1
11 changed files with 64 additions and 221 deletions


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -2867,46 +2867,51 @@ class StubGenerator: public StubCodeGenerator {
   // Blows all volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR) except for callee_saved_regs.
   void gen_write_ref_array_pre_barrier(Register addr, Register count, int callee_saved_regs) {
     BarrierSet* bs = Universe::heap()->barrier_set();
-    if (bs->has_write_ref_pre_barrier()) {
-      assert(bs->has_write_ref_array_pre_opt(),
-             "Else unsupported barrier set.");
+    switch (bs->kind()) {
+    case BarrierSet::G1SATBCTLogging:
+      {
-      assert( addr->encoding() < callee_saved_regs, "addr must be saved");
-      assert(count->encoding() < callee_saved_regs, "count must be saved");
+        assert( addr->encoding() < callee_saved_regs, "addr must be saved");
+        assert(count->encoding() < callee_saved_regs, "count must be saved");
-      BLOCK_COMMENT("PreBarrier");
+        BLOCK_COMMENT("PreBarrier");
 #ifdef AARCH64
-      callee_saved_regs = align_up(callee_saved_regs, 2);
-      for (int i = 0; i < callee_saved_regs; i += 2) {
-        __ raw_push(as_Register(i), as_Register(i+1));
-      }
+        callee_saved_regs = align_up(callee_saved_regs, 2);
+        for (int i = 0; i < callee_saved_regs; i += 2) {
+          __ raw_push(as_Register(i), as_Register(i+1));
+        }
 #else
-      RegisterSet saved_regs = RegisterSet(R0, as_Register(callee_saved_regs-1));
-      __ push(saved_regs | R9ifScratched);
+        RegisterSet saved_regs = RegisterSet(R0, as_Register(callee_saved_regs-1));
+        __ push(saved_regs | R9ifScratched);
 #endif // AARCH64
-      if (addr != R0) {
-        assert_different_registers(count, R0);
-        __ mov(R0, addr);
-      }
+        if (addr != R0) {
+          assert_different_registers(count, R0);
+          __ mov(R0, addr);
+        }
 #ifdef AARCH64
-      __ zero_extend(R1, count, 32); // BarrierSet::static_write_ref_array_pre takes size_t
+        __ zero_extend(R1, count, 32); // BarrierSet::static_write_ref_array_pre takes size_t
 #else
-      if (count != R1) {
-        __ mov(R1, count);
-      }
+        if (count != R1) {
+          __ mov(R1, count);
+        }
 #endif // AARCH64
-      __ call(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre));
+        __ call(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre));
 #ifdef AARCH64
-      for (int i = callee_saved_regs - 2; i >= 0; i -= 2) {
-        __ raw_pop(as_Register(i), as_Register(i+1));
-      }
+        for (int i = callee_saved_regs - 2; i >= 0; i -= 2) {
+          __ raw_pop(as_Register(i), as_Register(i+1));
+        }
 #else
-      __ pop(saved_regs | R9ifScratched);
+        __ pop(saved_regs | R9ifScratched);
 #endif // AARCH64
+      }
+    case BarrierSet::CardTableForRS:
+    case BarrierSet::CardTableExtension:
+      break;
+    default:
+      ShouldNotReachHere();
     }
   }
 #endif // INCLUDE_ALL_GCS
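Note: the hunk above swaps capability queries (has_write_ref_pre_barrier(), has_write_ref_array_pre_opt()) for an exhaustive switch over BarrierSet::kind(). A minimal standalone sketch of that dispatch shape follows; the enum values mirror the ones in the hunk, but the types and bodies are illustrative, not HotSpot's:

    #include <cassert>
    #include <cstdio>

    // Stand-in for HotSpot's BarrierSet kind tag (illustrative only).
    struct BarrierSet {
      enum Name { G1SATBCTLogging, CardTableForRS, CardTableExtension };
      explicit BarrierSet(Name n) : _name(n) {}
      Name kind() const { return _name; }
    private:
      Name _name;
    };

    // Only G1 needs a pre-write barrier for oop-array copies; plain card
    // tables need nothing; any other kind is a stub-generation-time bug.
    void gen_write_ref_array_pre_barrier(const BarrierSet* bs) {
      switch (bs->kind()) {
      case BarrierSet::G1SATBCTLogging:
        std::puts("emit a call to the G1 SATB pre-barrier runtime entry");
        break;
      case BarrierSet::CardTableForRS:
      case BarrierSet::CardTableExtension:
        break; // no pre-barrier required
      default:
        assert(false && "unsupported barrier set");
      }
    }

    int main() {
      BarrierSet g1(BarrierSet::G1SATBCTLogging);
      BarrierSet ct(BarrierSet::CardTableExtension);
      gen_write_ref_array_pre_barrier(&g1);
      gen_write_ref_array_pre_barrier(&ct);
    }

The gain is that an unexpected barrier kind now trips ShouldNotReachHere() in the stub generator instead of silently passing a capability-flag test.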


@@ -1372,8 +1372,6 @@ template <class T> int obj_arraycopy_work(oopDesc* src, T* src_addr,
   // barrier. The assert will fail if this is not the case.
   // Note that we use the non-virtual inlineable variant of write_ref_array.
   BarrierSet* bs = Universe::heap()->barrier_set();
-  assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
-  assert(bs->has_write_ref_array_pre_opt(), "For pre-barrier as well.");
   if (src == dst) {
     // same object, no check
     bs->write_ref_array_pre(dst_addr, length);
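Note: both asserts can go because write_ref_array_pre() and write_ref_array() stop being optional optimizations and become part of every BarrierSet's contract. The sketch below shows the ordering contract the copy routine relies on: the pre-barrier runs before any element is overwritten, the post-barrier after all elements are written (stand-in types, not HotSpot's):

    #include <cstddef>
    #include <cstdio>

    struct Oop { int id; };

    void write_ref_array_pre(Oop** dst, int count) {
      // a G1-style barrier would log the about-to-be-overwritten elements here
      (void)dst;
      std::printf("pre-barrier over %d slots\n", count);
    }

    void write_ref_array(Oop** dst, size_t count) {
      // a card-table barrier would dirty the cards covering dst[0..count) here
      (void)dst;
      std::printf("post-barrier over %zu slots\n", count);
    }

    void oop_arraycopy(Oop** src, Oop** dst, int length) {
      write_ref_array_pre(dst, length);   // unconditional after this cleanup
      for (int i = 0; i < length; i++) {
        dst[i] = src[i];                  // the actual element stores
      }
      write_ref_array(dst, (size_t)length);
    }

    int main() {
      Oop a{1}, b{2};
      Oop* src[2] = {&a, &b};
      Oop* dst[2] = {nullptr, nullptr};
      oop_arraycopy(src, dst, 2);
    }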


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -54,8 +54,6 @@ public:
   // pre-marking object graph.
   static void enqueue(oop pre_val);
 
-  virtual bool has_write_ref_pre_barrier() { return true; }
-
   // We export this to make it available in cases where the static
   // type of the barrier set is known. Note that it is non-virtual.
   template <class T> inline void inline_write_ref_field_pre(T* field, oop newVal);
@@ -63,9 +61,6 @@ public:
   // These are the more general virtual versions.
   inline virtual void write_ref_field_pre_work(oop* field, oop new_val);
   inline virtual void write_ref_field_pre_work(narrowOop* field, oop new_val);
-  virtual void write_ref_field_pre_work(void* field, oop new_val) {
-    guarantee(false, "Not needed");
-  }
 
   template <class T> void write_ref_array_pre_work(T* dst, int count);
   virtual void write_ref_array_pre(oop* dst, int count, bool dest_uninitialized);
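Note: for context, G1's pre-barrier is a snapshot-at-the-beginning (SATB) barrier: enqueue(pre_val) records the reference value about to be overwritten so the concurrent marker still traverses the object graph as it stood when marking began. A toy model follows; a single global queue stands in for G1's per-thread SATB queues, and the real barrier also checks whether marking is active:

    #include <cassert>
    #include <vector>

    struct Oop { int id; };

    static std::vector<Oop*> satb_queue;  // stand-in for per-thread SATB queues

    void enqueue(Oop* pre_val) {
      if (pre_val != nullptr) {           // null previous values need no logging
        satb_queue.push_back(pre_val);
      }
    }

    void write_ref_field_pre(Oop** field, Oop* new_val) {
      enqueue(*field);  // log the old value; the new value is irrelevant here
      (void)new_val;
    }

    int main() {
      Oop a{1}, b{2};
      Oop* field = &a;
      write_ref_field_pre(&field, &b);    // enqueues &a
      field = &b;                         // the actual store
      assert(satb_queue.size() == 1 && satb_queue[0] == &a);
    }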


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -80,50 +80,11 @@
   // End of fake RTTI support.
 
 public:
   enum Flags {
     None                = 0,
     TargetUninitialized = 1
   };
 
 protected:
-  // Some barrier sets create tables whose elements correspond to parts of
-  // the heap; the CardTableModRefBS is an example.  Such barrier sets will
-  // normally reserve space for such tables, and commit parts of the table
-  // "covering" parts of the heap that are committed.  At most one covered
-  // region per generation is needed.
-  static const int _max_covered_regions = 2;
-
   BarrierSet(const FakeRtti& fake_rtti) : _fake_rtti(fake_rtti) { }
   ~BarrierSet() { }
 
 public:
-  // These operations indicate what kind of barriers the BarrierSet has.
-  virtual bool has_read_ref_barrier() = 0;
-  virtual bool has_read_prim_barrier() = 0;
-  virtual bool has_write_ref_barrier() = 0;
-  virtual bool has_write_ref_pre_barrier() = 0;
-  virtual bool has_write_prim_barrier() = 0;
-
-  // These functions indicate whether a particular access of the given
-  // kinds requires a barrier.
-  virtual bool read_ref_needs_barrier(void* field) = 0;
-  virtual bool read_prim_needs_barrier(HeapWord* field, size_t bytes) = 0;
-  virtual bool write_prim_needs_barrier(HeapWord* field, size_t bytes,
-                                        juint val1, juint val2) = 0;
-
-  // The first four operations provide a direct implementation of the
-  // barrier set.  An interpreter loop, for example, could call these
-  // directly, as appropriate.
-
-  // Invoke the barrier, if any, necessary when reading the given ref field.
-  virtual void read_ref_field(void* field) = 0;
-
-  // Invoke the barrier, if any, necessary when reading the given primitive
-  // "field" of "bytes" bytes in "obj".
-  virtual void read_prim_field(HeapWord* field, size_t bytes) = 0;
-
   // Invoke the barrier, if any, necessary when writing "new_val" into the
   // ref field at "offset" in "obj".
   // (For efficiency reasons, this operation is specialized for certain
@@ -131,48 +92,19 @@
   // virtual "_work" function below, which must implement the barrier.)
 
   // First the pre-write versions...
   template <class T> inline void write_ref_field_pre(T* field, oop new_val);
-private:
-  // Helper for write_ref_field_pre and friends, testing for specialized cases.
-  bool devirtualize_reference_writes() const;
-
-  // Keep this private so as to catch violations at build time.
-  virtual void write_ref_field_pre_work(     void* field, oop new_val) { guarantee(false, "Not needed"); };
-protected:
-  virtual void write_ref_field_pre_work(      oop* field, oop new_val) {};
-  virtual void write_ref_field_pre_work(narrowOop* field, oop new_val) {};
-public:
 
   // ...then the post-write version.
   inline void write_ref_field(void* field, oop new_val, bool release = false);
 protected:
+  virtual void write_ref_field_pre_work(      oop* field, oop new_val) {};
+  virtual void write_ref_field_pre_work(narrowOop* field, oop new_val) {};
   virtual void write_ref_field_work(void* field, oop new_val, bool release) = 0;
 public:
 
-  // Invoke the barrier, if any, necessary when writing the "bytes"-byte
-  // value(s) "val1" (and "val2") into the primitive "field".
-  virtual void write_prim_field(HeapWord* field, size_t bytes,
-                                juint val1, juint val2) = 0;
-
   // Operations on arrays, or general regions (e.g., for "clone") may be
   // optimized by some barriers.
 
-  // The first six operations tell whether such an optimization exists for
-  // the particular barrier.
-  virtual bool has_read_ref_array_opt() = 0;
-  virtual bool has_read_prim_array_opt() = 0;
-  virtual bool has_write_ref_array_pre_opt() { return true; }
-  virtual bool has_write_ref_array_opt() = 0;
-  virtual bool has_write_prim_array_opt() = 0;
-  virtual bool has_read_region_opt() = 0;
-  virtual bool has_write_region_opt() = 0;
-
-  // These operations should assert false unless the corresponding operation
-  // above returns true.  Otherwise, they should perform an appropriate
-  // barrier for an array whose elements are all in the given memory region.
-  virtual void read_ref_array(MemRegion mr) = 0;
-  virtual void read_prim_array(MemRegion mr) = 0;
-
   // Below length is the # array elements being written
   virtual void write_ref_array_pre(oop* dst, int length,
                                    bool dest_uninitialized = false) {}
@@ -193,17 +125,16 @@
 protected:
   virtual void write_ref_array_work(MemRegion mr) = 0;
 public:
-  virtual void write_prim_array(MemRegion mr) = 0;
-
-  virtual void read_region(MemRegion mr) = 0;
 
   // (For efficiency reasons, this operation is specialized for certain
   // barrier types.  Semantically, it should be thought of as a call to the
   // virtual "_work" function below, which must implement the barrier.)
   void write_region(MemRegion mr);
 protected:
   virtual void write_region_work(MemRegion mr) = 0;
 public:
   // Inform the BarrierSet that the covered heap region that starts
   // with "base" has been changed to have the given size (possibly from 0,
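Note: what remains of BarrierSet after these hunks is a textbook non-virtual-interface shape: public non-virtual entry points (write_ref_field(), write_region(), ...) that forward unconditionally to protected virtual *_work() hooks each concrete barrier overrides. A compilable sketch of the pattern, with simplified stand-in types rather than the real HotSpot declarations:

    #include <cstdio>

    struct MemRegion { /* start/size elided */ };

    class BarrierSet {
    public:
      // Public, non-virtual entry point: always legal to call.
      void write_region(MemRegion mr) { write_region_work(mr); }
    protected:
      // The per-collector hook; pure virtual, so every barrier must decide.
      virtual void write_region_work(MemRegion mr) = 0;
      ~BarrierSet() {}
    };

    class CardTableLike : public BarrierSet {
    protected:
      void write_region_work(MemRegion) override {
        std::puts("dirty the cards covering mr");
      }
    };

    int main() {
      CardTableLike bs;
      bs.write_region(MemRegion{});  // no has_write_region_opt() probe needed
    }

Because write_region_work() is now pure virtual, callers such as CollectedHeap and JVM_Clone (below) no longer need has_write_region_opt() guards; every barrier is obliged to implement it.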


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,37 +26,15 @@
 #define SHARE_VM_GC_SHARED_BARRIERSET_INLINE_HPP
 
 #include "gc/shared/barrierSet.hpp"
-#include "gc/shared/cardTableModRefBS.inline.hpp"
 #include "utilities/align.hpp"
 
-// Inline functions of BarrierSet, which de-virtualize certain
-// performance-critical calls when the barrier is the most common
-// card-table kind.
-
-inline bool BarrierSet::devirtualize_reference_writes() const {
-  switch (kind()) {
-  case CardTableForRS:
-  case CardTableExtension:
-    return true;
-  default:
-    return false;
-  }
-}
-
 template <class T> void BarrierSet::write_ref_field_pre(T* field, oop new_val) {
-  if (devirtualize_reference_writes()) {
-    barrier_set_cast<CardTableModRefBS>(this)->inline_write_ref_field_pre(field, new_val);
-  } else {
-    write_ref_field_pre_work(field, new_val);
-  }
+  write_ref_field_pre_work(field, new_val);
 }
 
 void BarrierSet::write_ref_field(void* field, oop new_val, bool release) {
-  if (devirtualize_reference_writes()) {
-    barrier_set_cast<CardTableModRefBS>(this)->inline_write_ref_field(field, new_val, release);
-  } else {
-    write_ref_field_work(field, new_val, release);
-  }
+  write_ref_field_work(field, new_val, release);
 }
 
 // count is number of array elements being written
@@ -84,11 +62,7 @@ void BarrierSet::write_ref_array(HeapWord* start, size_t count) {
 
 inline void BarrierSet::write_region(MemRegion mr) {
-  if (devirtualize_reference_writes()) {
-    barrier_set_cast<CardTableModRefBS>(this)->inline_write_region(mr);
-  } else {
-    write_region_work(mr);
-  }
+  write_region_work(mr);
 }
 
 #endif // SHARE_VM_GC_SHARED_BARRIERSET_INLINE_HPP
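Note: the deleted devirtualize_reference_writes() path was a manual devirtualization: test a kind tag, static_cast to the common card-table subclass, and call its non-virtual inline_* method, with a virtual call as fallback. The sketch below models the idiom being removed (illustrative names, not HotSpot's); after the cleanup every caller simply takes the virtual route:

    #include <cstdio>

    struct Base {
      enum Kind { CardTable, Other };
      explicit Base(Kind k) : _kind(k) {}
      virtual ~Base() {}
      void write(int* field);                   // public entry point
      virtual void write_work(int* field) = 0;  // general virtual path
      Kind _kind;
    };

    struct CardTableLike : Base {
      CardTableLike() : Base(CardTable) {}
      void inline_write(int* field) { *field = 1; }  // non-virtual fast path
      void write_work(int* field) override { inline_write(field); }
    };

    void Base::write(int* field) {
      if (_kind == CardTable) {
        // the deleted idiom: bypass virtual dispatch for the common case
        static_cast<CardTableLike*>(this)->inline_write(field);
      } else {
        write_work(field);  // what all callers do after the cleanup
      }
    }

    int main() {
      CardTableLike ct;
      int x = 0;
      ct.write(&x);
      std::printf("x = %d\n", x);  // prints x = 1 via the fast path
    }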


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -73,7 +73,15 @@ class CardTableModRefBS: public ModRefBarrierSet {
   size_t          _byte_map_size;  // in bytes
   jbyte*          _byte_map;       // the card marking array
 
+  // Some barrier sets create tables whose elements correspond to parts of
+  // the heap; the CardTableModRefBS is an example.  Such barrier sets will
+  // normally reserve space for such tables, and commit parts of the table
+  // "covering" parts of the heap that are committed.  At most one covered
+  // region per generation is needed.
+  static const int _max_covered_regions = 2;
+
   int _cur_covered_regions;
+
   // The covered regions should be in address order.
   MemRegion* _covered;
@@ -89,7 +97,6 @@ class CardTableModRefBS: public ModRefBarrierSet {
   // uncommit the MemRegion for that page.
   MemRegion _guard_region;
 
- protected:
   inline size_t compute_byte_map_size();
 
   // Finds and returns the index of the region, if any, to which the given
@@ -135,7 +142,6 @@ class CardTableModRefBS: public ModRefBarrierSet {
     return byte_for(p) + 1;
   }
 
- protected:
   // Dirty the bytes corresponding to "mr" (not all of which must be
   // covered.)
   void dirty_MemRegion(MemRegion mr);
@@ -144,7 +150,7 @@ class CardTableModRefBS: public ModRefBarrierSet {
   // all of which must be covered.)
   void clear_MemRegion(MemRegion mr);
 
-public:
+ public:
   // Constants
   enum SomePublicConstants {
     card_shift = 9,
@@ -163,8 +169,6 @@
   // *** Barrier set functions.
 
-  bool has_write_ref_pre_barrier() { return false; }
-
   // Initialization utilities; covered_words is the size of the covered region
   // in, um, words.
   inline size_t cards_required(size_t covered_words) {
@@ -173,8 +177,7 @@
     return words / card_size_in_words + 1;
   }
 
-protected:
-
+ protected:
   CardTableModRefBS(MemRegion whole_heap, const BarrierSet::FakeRtti& fake_rtti);
   ~CardTableModRefBS();
@@ -185,29 +188,18 @@
   void write_ref_field_work(oop obj, size_t offset, oop newVal);
   virtual void write_ref_field_work(void* field, oop newVal, bool release);
 public:
 
-  bool has_write_ref_array_opt() { return true; }
-  bool has_write_region_opt() { return true; }
-
   inline void inline_write_region(MemRegion mr) {
     dirty_MemRegion(mr);
   }
-protected:
+ protected:
   void write_region_work(MemRegion mr) {
     inline_write_region(mr);
   }
 public:
 
   inline void inline_write_ref_array(MemRegion mr) {
     dirty_MemRegion(mr);
   }
-protected:
-  void write_ref_array_work(MemRegion mr) {
-    inline_write_ref_array(mr);
-  }
-public:
+ protected:
+  void write_ref_array_work(MemRegion mr) {
+    dirty_MemRegion(mr);
+  }
+
+ public:
   bool is_aligned(HeapWord* addr) {
     return is_card_aligned(addr);
   }
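Note: with the has_* flags gone, both *_work bodies reduce to dirty_MemRegion(), i.e., ordinary card marking. A self-contained sketch of the arithmetic implied by the card_shift = 9 constant above (one card byte covers 512 heap bytes); the layout is simplified, as the real table is biased by the heap base, and HotSpot encodes dirty as 0 and clean as -1:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    const int     card_shift = 9;
    const size_t  card_size  = size_t(1) << card_shift;  // 512 bytes per card
    const uint8_t dirty_card = 0;
    const uint8_t clean_card = 0xff;  // jbyte -1 in HotSpot

    struct CardTable {
      uintptr_t heap_base;
      uint8_t   cards[1024];
      explicit CardTable(uintptr_t base) : heap_base(base) {
        std::memset(cards, clean_card, sizeof(cards));
      }
      uint8_t* byte_for(uintptr_t addr) {
        return &cards[(addr - heap_base) >> card_shift];
      }
      // dirty_MemRegion analogue: mark every card intersecting [start, start+len)
      void dirty_region(uintptr_t start, size_t len) {
        for (uint8_t* c = byte_for(start); c <= byte_for(start + len - 1); c++) {
          *c = dirty_card;
        }
      }
    };

    int main() {
      assert(card_size == 512);
      CardTable ct(0x10000);
      ct.dirty_region(0x10000 + 500, 24);  // straddles the first card boundary
      assert(ct.cards[0] == dirty_card && ct.cards[1] == dirty_card);
      assert(ct.cards[2] == clean_card);
    }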


@@ -347,7 +347,6 @@ void CollectedHeap::flush_deferred_store_barrier(JavaThread* thread) {
            "Mismatch: multiple objects?");
   }
   BarrierSet* bs = barrier_set();
-  assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
   bs->write_region(deferred);
   // "Clear" the deferred_card_mark field
   thread->set_deferred_card_mark(MemRegion());
@@ -430,7 +429,6 @@ oop CollectedHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
     } else {
       // Do the card mark
       BarrierSet* bs = barrier_set();
-      assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
       bs->write_region(mr);
     }
   }


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -35,57 +35,12 @@ class OopClosure;
 class Generation;
 
 class ModRefBarrierSet: public BarrierSet {
-public:
-  // Barriers only on ref writes.
-  bool has_read_ref_barrier() { return false; }
-  bool has_read_prim_barrier() { return false; }
-  bool has_write_ref_barrier() { return true; }
-  bool has_write_prim_barrier() { return false; }
-
-  bool read_ref_needs_barrier(void* field) { return false; }
-  bool read_prim_needs_barrier(HeapWord* field, size_t bytes) { return false; }
-  bool write_prim_needs_barrier(HeapWord* field, size_t bytes,
-                                juint val1, juint val2) { return false; }
-
-  void write_prim_field(oop obj, size_t offset, size_t bytes,
-                        juint val1, juint val2) {}
-
-  void read_ref_field(void* field) {}
-  void read_prim_field(HeapWord* field, size_t bytes) {}
-
 protected:
   ModRefBarrierSet(const BarrierSet::FakeRtti& fake_rtti)
     : BarrierSet(fake_rtti.add_tag(BarrierSet::ModRef)) { }
   ~ModRefBarrierSet() { }
 
 public:
-  void write_prim_field(HeapWord* field, size_t bytes,
-                        juint val1, juint val2) {}
-
-  bool has_read_ref_array_opt() { return false; }
-  bool has_read_prim_array_opt() { return false; }
-  bool has_write_prim_array_opt() { return false; }
-
-  bool has_read_region_opt() { return false; }
-
-  // These operations should assert false unless the corresponding operation
-  // above returns true.
-  void read_ref_array(MemRegion mr) {
-    assert(false, "can't call");
-  }
-  void read_prim_array(MemRegion mr) {
-    assert(false, "can't call");
-  }
-  void write_prim_array(MemRegion mr) {
-    assert(false, "can't call");
-  }
-
-  void read_region(MemRegion mr) {
-    assert(false, "can't call");
-  }
-
   // Causes all refs in "mr" to be assumed to be modified.
   virtual void invalidate(MemRegion mr) = 0;


@@ -226,8 +226,6 @@ template <class T> void ObjArrayKlass::do_copy(arrayOop s, T* src,
   // For performance reasons, we assume that the write barrier we are using
   // has optimized modes for arrays of references. At least one of the
   // asserts below will fail if this is not the case.
-  assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
-  assert(bs->has_write_ref_array_pre_opt(), "For pre-barrier as well.");
 
   if (s == d) {
     // since source and destination are equal we do not need conversion checks.


@@ -669,7 +669,6 @@ JVM_ENTRY(jobject, JVM_Clone(JNIEnv* env, jobject handle))
 
   // Store check (mark entire object and let gc sort it out)
   BarrierSet* bs = Universe::heap()->barrier_set();
-  assert(bs->has_write_region_opt(), "Barrier set does not have write_region");
   bs->write_region(MemRegion((HeapWord*)new_obj_oop, size));
 
   Handle new_obj(THREAD, new_obj_oop);
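Note: JVM_Clone takes the coarse option: after the raw payload copy it hands the entire new object to write_region(), so the collector treats every reference field of the copy as modified instead of tracking individual stores. A minimal model of that call shape (HeapWord and MemRegion simplified to bare types, not the HotSpot definitions):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    typedef uintptr_t HeapWord;

    struct MemRegion {
      const HeapWord* start;
      size_t          word_size;
    };

    void write_region(MemRegion mr) {
      std::printf("mark %zu heap words at %p as modified\n",
                  mr.word_size, (const void*)mr.start);
    }

    int main() {
      HeapWord new_obj[8] = {0};            // stand-in for the cloned object
      write_region(MemRegion{new_obj, 8});  // one barrier call covers all fields
    }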


@@ -381,14 +381,12 @@ static void gen_arraycopy_barrier_pre(oop* dest, size_t count, bool dest_uninitialized) {
   assert(count != 0, "count should be non-zero");
   assert(count <= (size_t)max_intx, "count too large");
   BarrierSet* bs = Universe::heap()->barrier_set();
-  assert(bs->has_write_ref_array_pre_opt(), "Must have pre-barrier opt");
   bs->write_ref_array_pre(dest, (int)count, dest_uninitialized);
 }
 
 static void gen_arraycopy_barrier(oop* dest, size_t count) {
   assert(count != 0, "count should be non-zero");
   BarrierSet* bs = Universe::heap()->barrier_set();
-  assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
   bs->write_ref_array((HeapWord*)dest, count);
 }