8031818: Experimental VM flag for enforcing safe object construction
-XX:+AlwaysSafeConstructors to unconditionally emit the trailing constructor barrier.
Reviewed-by: kvn, roland
commit 1d10b6813e
parent c4bd0f58d3
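For illustration only (not part of the changeset): the flag targets constructors whose fields are neither final nor volatile, where the Java memory model gives a racy reader no visibility guarantee. The class and field names below are hypothetical.

    // Hypothetical sketch: without safe construction, a racy reader of `shared`
    // may observe a Point whose fields still hold their default value 0.
    class Point {
        int x;   // plain fields: not final, not volatile
        int y;
        Point(int x, int y) {
            this.x = x;
            this.y = y;
            // With -XX:+AlwaysSafeConstructors the compilers emit the same trailing
            // release barrier here that they already emit when a final field is written.
        }
    }

    class Publisher {
        static Point shared;   // racy, non-volatile publication
        static void writer() { shared = new Point(1, 2); }
        static void reader() {
            Point p = shared;
            if (p != null) {
                System.out.println(p.x + "," + p.y);   // may print "0,0" without the barrier
            }
        }
    }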
@@ -1436,7 +1436,7 @@ void GraphBuilder::method_return(Value x) {
   bool need_mem_bar = false;
   if (method()->name() == ciSymbol::object_initializer_name() &&
-      scope()->wrote_final()) {
+      (scope()->wrote_final() || (AlwaysSafeConstructors && scope()->wrote_fields()))) {
     need_mem_bar = true;
   }
@@ -1550,6 +1550,10 @@ void GraphBuilder::access_field(Bytecodes::Code code) {
     scope()->set_wrote_final();
   }
 
+  if (code == Bytecodes::_putfield) {
+    scope()->set_wrote_fields();
+  }
+
   const int offset = !needs_patching ? field->offset() : -1;
   switch (code) {
     case Bytecodes::_getstatic: {
@@ -142,6 +142,7 @@ IRScope::IRScope(Compilation* compilation, IRScope* caller, int caller_bci, ciMe
   _number_of_locks = 0;
   _monitor_pairing_ok = method->has_balanced_monitors();
   _wrote_final = false;
+  _wrote_fields = false;
   _start = NULL;
 
   if (osr_bci == -1) {
@@ -150,6 +150,7 @@ class IRScope: public CompilationResourceObj {
   int _number_of_locks;          // the number of monitor lock slots needed
   bool _monitor_pairing_ok;      // the monitor pairing info
   bool _wrote_final;             // has written final field
+  bool _wrote_fields;            // has written fields
   BlockBegin* _start;            // the start block, successsors are method entries
 
   BitMap _requires_phi_function; // bit is set if phi functions at loop headers are necessary for a local variable
@@ -184,6 +185,9 @@ class IRScope: public CompilationResourceObj {
   BlockBegin* start() const { return _start; }
 
   void set_wrote_final() { _wrote_final = true; }
   bool wrote_final () const { return _wrote_final; }
+  void set_wrote_fields() { _wrote_fields = true; }
+  bool wrote_fields () const { return _wrote_fields; }
+
 };
@@ -338,6 +338,8 @@ class Parse : public GraphKit {
   GraphKit _exits;            // Record all normal returns and throws here.
   bool _wrote_final;          // Did we write a final field?
   bool _wrote_volatile;       // Did we write a volatile field?
+  bool _wrote_stable;         // Did we write a @Stable field?
+  bool _wrote_fields;         // Did we write any field?
   bool _count_invocations;    // update and test invocation counter
   bool _method_data_update;   // update method data oop
   Node* _alloc_with_final;    // An allocation node with final field
@@ -383,6 +385,10 @@ class Parse : public GraphKit {
   void set_wrote_final(bool z) { _wrote_final = z; }
   bool wrote_volatile() const { return _wrote_volatile; }
   void set_wrote_volatile(bool z) { _wrote_volatile = z; }
+  bool wrote_stable() const { return _wrote_stable; }
+  void set_wrote_stable(bool z) { _wrote_stable = z; }
+  bool wrote_fields() const { return _wrote_fields; }
+  void set_wrote_fields(bool z) { _wrote_fields = z; }
   bool count_invocations() const { return _count_invocations; }
   bool method_data_update() const { return _method_data_update; }
   Node* alloc_with_final() const { return _alloc_with_final; }
@@ -391,6 +391,8 @@ Parse::Parse(JVMState* caller, ciMethod* parse_method, float expected_uses, Pars
   _depth = 1 + (caller->has_method() ? caller->depth() : 0);
   _wrote_final = false;
   _wrote_volatile = false;
+  _wrote_stable = false;
+  _wrote_fields = false;
   _alloc_with_final = NULL;
   _entry_bci = InvocationEntryBci;
   _tf = NULL;
@@ -908,26 +910,35 @@ void Parse::do_exits() {
   Node* iophi = _exits.i_o();
   _exits.set_i_o(gvn().transform(iophi));
 
-  // On PPC64, also add MemBarRelease for constructors which write
-  // volatile fields. As support_IRIW_for_not_multiple_copy_atomic_cpu
-  // is set on PPC64, no sync instruction is issued after volatile
-  // stores. We want to quarantee the same behaviour as on platforms
-  // with total store order, although this is not required by the Java
-  // memory model. So as with finals, we add a barrier here.
-  if (wrote_final() PPC64_ONLY(|| (wrote_volatile() && method()->is_initializer()))) {
-    // This method (which must be a constructor by the rules of Java)
-    // wrote a final.  The effects of all initializations must be
-    // committed to memory before any code after the constructor
-    // publishes the reference to the newly constructed object.
-    // Rather than wait for the publication, we simply block the
-    // writes here.  Rather than put a barrier on only those writes
-    // which are required to complete, we force all writes to complete.
-    //
-    // "All bets are off" unless the first publication occurs after a
-    // normal return from the constructor.  We do not attempt to detect
-    // such unusual early publications.  But no barrier is needed on
-    // exceptional returns, since they cannot publish normally.
-    //
+  // Figure out if we need to emit the trailing barrier. The barrier is only
+  // needed in the constructors, and only in three cases:
+  //
+  // 1. The constructor wrote a final. The effects of all initializations
+  //    must be committed to memory before any code after the constructor
+  //    publishes the reference to the newly constructed object. Rather
+  //    than wait for the publication, we simply block the writes here.
+  //    Rather than put a barrier on only those writes which are required
+  //    to complete, we force all writes to complete.
+  //
+  // 2. On PPC64, also add MemBarRelease for constructors which write
+  //    volatile fields. As support_IRIW_for_not_multiple_copy_atomic_cpu
+  //    is set on PPC64, no sync instruction is issued after volatile
+  //    stores. We want to guarantee the same behavior as on platforms
+  //    with total store order, although this is not required by the Java
+  //    memory model. So as with finals, we add a barrier here.
+  //
+  // 3. Experimental VM option is used to force the barrier if any field
+  //    was written out in the constructor.
+  //
+  // "All bets are off" unless the first publication occurs after a
+  // normal return from the constructor. We do not attempt to detect
+  // such unusual early publications. But no barrier is needed on
+  // exceptional returns, since they cannot publish normally.
+  //
+  if (method()->is_initializer() &&
+       (wrote_final() ||
+         PPC64_ONLY(wrote_volatile() ||)
+         (AlwaysSafeConstructors && wrote_fields()))) {
     _exits.insert_mem_bar(Op_MemBarRelease, alloc_with_final());
 #ifndef PRODUCT
     if (PrintOpto && (Verbose || WizardMode)) {
@@ -937,6 +948,19 @@ void Parse::do_exits() {
 #endif
   }
 
+  // Any method can write a @Stable field; insert memory barriers after
+  // those also. If there is a predecessor allocation node, bind the
+  // barrier there.
+  if (wrote_stable()) {
+    _exits.insert_mem_bar(Op_MemBarRelease, alloc_with_final());
+#ifndef PRODUCT
+    if (PrintOpto && (Verbose || WizardMode)) {
+      method()->print_name();
+      tty->print_cr(" writes @Stable and needs a memory barrier");
+    }
+#endif
+  }
+
   for (MergeMemStream mms(_exits.merged_memory()); mms.next_non_empty(); ) {
     // transform each slice of the original memphi:
     mms.set_memory(_gvn.transform(mms.memory()));
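A hedged illustration of the "early publication" caveat in the comment above (hypothetical class, not part of the changeset): the trailing barrier only helps when the reference escapes after the constructor returns normally; leaking `this` from inside the constructor is still unsafe.

    class Leaky {
        static Leaky shared;
        int value;
        Leaky(int v) {
            shared = this;   // early publication: another thread can already see the object
            this.value = v;  // ...before this write completes, barrier or not
        }
    }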
@@ -334,13 +334,23 @@ void Parse::do_put_xxx(Node* obj, ciField* field, bool is_field) {
     }
   }
 
+  if (is_field) {
+    set_wrote_fields(true);
+  }
+
   // If the field is final, the rules of Java say we are in <init> or <clinit>.
   // Note the presence of writes to final non-static fields, so that we
   // can insert a memory barrier later on to keep the writes from floating
   // out of the constructor.
   // Any method can write a @Stable field; insert memory barriers after those also.
   if (is_field && (field->is_final() || field->is_stable())) {
-    set_wrote_final(true);
+    if (field->is_final()) {
+      set_wrote_final(true);
+    }
+    if (field->is_stable()) {
+      set_wrote_stable(true);
+    }
+
     // Preserve allocation ptr to create precedent edge to it in membar
     // generated on exit from constructor.
     if (C->eliminate_boxing() &&
@@ -535,6 +535,9 @@ class CommandLineFlags {
   develop(bool, CleanChunkPoolAsync, falseInEmbedded,                       \
           "Clean the chunk pool asynchronously")                            \
                                                                             \
+  experimental(bool, AlwaysSafeConstructors, false,                         \
+          "Force safe construction, as if all fields are final.")           \
+                                                                            \
   /* Temporary: See 6948537 */                                              \
   experimental(bool, UseMemSetInBOT, true,                                  \
           "(Unstable) uses memset in BOT updates in GC code")               \
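A usage note, not part of the changeset: because the flag is declared with experimental(), it has to be unlocked before it can be set, along the lines of

    java -XX:+UnlockExperimentalVMOptions -XX:+AlwaysSafeConstructors MainClass

where MainClass is a placeholder for the application's main class.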