Andrew Haley 2016-02-24 12:38:50 +00:00
commit 087b888832
41 changed files with 1960 additions and 559 deletions

View File

@ -7236,6 +7236,7 @@ instruct storeLConditional( memory mem, eADXRegL oldval, eBCXRegL newval, eFlags
instruct compareAndSwapL( rRegI res, eSIRegP mem_ptr, eADXRegL oldval, eBCXRegL newval, eFlagsReg cr ) %{
predicate(VM_Version::supports_cx8());
match(Set res (CompareAndSwapL mem_ptr (Binary oldval newval)));
match(Set res (WeakCompareAndSwapL mem_ptr (Binary oldval newval)));
effect(KILL cr, KILL oldval);
format %{ "CMPXCHG8 [$mem_ptr],$newval\t# If EDX:EAX==[$mem_ptr] Then store $newval into [$mem_ptr]\n\t"
"MOV $res,0\n\t"
@ -7249,6 +7250,7 @@ instruct compareAndSwapL( rRegI res, eSIRegP mem_ptr, eADXRegL oldval, eBCXRegL
instruct compareAndSwapP( rRegI res, pRegP mem_ptr, eAXRegP oldval, eCXRegP newval, eFlagsReg cr) %{
match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval)));
match(Set res (WeakCompareAndSwapP mem_ptr (Binary oldval newval)));
effect(KILL cr, KILL oldval);
format %{ "CMPXCHG [$mem_ptr],$newval\t# If EAX==[$mem_ptr] Then store $newval into [$mem_ptr]\n\t"
"MOV $res,0\n\t"
@ -7261,6 +7263,7 @@ instruct compareAndSwapP( rRegI res, pRegP mem_ptr, eAXRegP oldval, eCXRegP new
instruct compareAndSwapI( rRegI res, pRegP mem_ptr, eAXRegI oldval, eCXRegI newval, eFlagsReg cr) %{
match(Set res (CompareAndSwapI mem_ptr (Binary oldval newval)));
match(Set res (WeakCompareAndSwapI mem_ptr (Binary oldval newval)));
effect(KILL cr, KILL oldval);
format %{ "CMPXCHG [$mem_ptr],$newval\t# If EAX==[$mem_ptr] Then store $newval into [$mem_ptr]\n\t"
"MOV $res,0\n\t"
@ -7271,6 +7274,31 @@ instruct compareAndSwapI( rRegI res, pRegP mem_ptr, eAXRegI oldval, eCXRegI newv
ins_pipe( pipe_cmpxchg );
%}
instruct compareAndExchangeL( eSIRegP mem_ptr, eADXRegL oldval, eBCXRegL newval, eFlagsReg cr ) %{
predicate(VM_Version::supports_cx8());
match(Set oldval (CompareAndExchangeL mem_ptr (Binary oldval newval)));
effect(KILL cr);
format %{ "CMPXCHG8 [$mem_ptr],$newval\t# If EDX:EAX==[$mem_ptr] Then store $newval into [$mem_ptr]\n\t" %}
ins_encode( enc_cmpxchg8(mem_ptr) );
ins_pipe( pipe_cmpxchg );
%}
instruct compareAndExchangeP( pRegP mem_ptr, eAXRegP oldval, eCXRegP newval, eFlagsReg cr) %{
match(Set oldval (CompareAndExchangeP mem_ptr (Binary oldval newval)));
effect(KILL cr);
format %{ "CMPXCHG [$mem_ptr],$newval\t# If EAX==[$mem_ptr] Then store $newval into [$mem_ptr]\n\t" %}
ins_encode( enc_cmpxchg(mem_ptr) );
ins_pipe( pipe_cmpxchg );
%}
instruct compareAndExchangeI( pRegP mem_ptr, eAXRegI oldval, eCXRegI newval, eFlagsReg cr) %{
match(Set oldval (CompareAndExchangeI mem_ptr (Binary oldval newval)));
effect(KILL cr);
format %{ "CMPXCHG [$mem_ptr],$newval\t# If EAX==[$mem_ptr] Then store $newval into [$mem_ptr]\n\t" %}
ins_encode( enc_cmpxchg(mem_ptr) );
ins_pipe( pipe_cmpxchg );
%}
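Note (illustration, not part of this change): the new CompareAndExchange rules differ from the existing CompareAndSwap rules in what they produce. The swap forms return a success flag, which is why those instructs follow CMPXCHG with extra moves to materialize a boolean $res, while the exchange forms return the value actually observed in memory, which LOCK CMPXCHG already leaves in EAX (EDX:EAX for the 8-byte form), so no separate result register is needed. The same distinction, sketched with the GCC/Clang __atomic builtins rather than HotSpot ADL:

#include <cstdint>

// Illustration only (not HotSpot code): the semantic difference between the
// compareAndSwap and compareAndExchange shapes, written with __atomic builtins.

// compareAndSwap-style: the caller only learns whether the update happened.
bool cas_int(int32_t* addr, int32_t expected, int32_t desired) {
  return __atomic_compare_exchange_n(addr, &expected, desired, /*weak=*/false,
                                     __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}

// compareAndExchange-style: the caller gets back the value that was actually
// in memory (the witness value); on x86 LOCK CMPXCHG leaves it in EAX anyway.
int32_t cmpxchg_int(int32_t* addr, int32_t expected, int32_t desired) {
  __atomic_compare_exchange_n(addr, &expected, desired, /*weak=*/false,
                              __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
  return expected;  // on failure this was updated to the observed value
}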
instruct xaddI_no_res( memory mem, Universe dummy, immI add, eFlagsReg cr) %{
predicate(n->as_LoadStore()->result_not_used());
match(Set dummy (GetAndAddI mem add));

View File

@ -7281,6 +7281,7 @@ instruct compareAndSwapP(rRegI res,
%{
predicate(VM_Version::supports_cx8());
match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval)));
match(Set res (WeakCompareAndSwapP mem_ptr (Binary oldval newval)));
effect(KILL cr, KILL oldval);
format %{ "cmpxchgq $mem_ptr,$newval\t# "
@ -7305,6 +7306,7 @@ instruct compareAndSwapL(rRegI res,
%{
predicate(VM_Version::supports_cx8());
match(Set res (CompareAndSwapL mem_ptr (Binary oldval newval)));
match(Set res (WeakCompareAndSwapL mem_ptr (Binary oldval newval)));
effect(KILL cr, KILL oldval);
format %{ "cmpxchgq $mem_ptr,$newval\t# "
@ -7328,6 +7330,7 @@ instruct compareAndSwapI(rRegI res,
rFlagsReg cr)
%{
match(Set res (CompareAndSwapI mem_ptr (Binary oldval newval)));
match(Set res (WeakCompareAndSwapI mem_ptr (Binary oldval newval)));
effect(KILL cr, KILL oldval);
format %{ "cmpxchgl $mem_ptr,$newval\t# "
@ -7351,6 +7354,7 @@ instruct compareAndSwapN(rRegI res,
rax_RegN oldval, rRegN newval,
rFlagsReg cr) %{
match(Set res (CompareAndSwapN mem_ptr (Binary oldval newval)));
match(Set res (WeakCompareAndSwapN mem_ptr (Binary oldval newval)));
effect(KILL cr, KILL oldval);
format %{ "cmpxchgl $mem_ptr,$newval\t# "
@ -7368,6 +7372,83 @@ instruct compareAndSwapN(rRegI res,
ins_pipe( pipe_cmpxchg );
%}
instruct compareAndExchangeI(
memory mem_ptr,
rax_RegI oldval, rRegI newval,
rFlagsReg cr)
%{
match(Set oldval (CompareAndExchangeI mem_ptr (Binary oldval newval)));
effect(KILL cr);
format %{ "cmpxchgl $mem_ptr,$newval\t# "
"If rax == $mem_ptr then store $newval into $mem_ptr\n\t" %}
opcode(0x0F, 0xB1);
ins_encode(lock_prefix,
REX_reg_mem(newval, mem_ptr),
OpcP, OpcS,
reg_mem(newval, mem_ptr) // lock cmpxchg
);
ins_pipe( pipe_cmpxchg );
%}
instruct compareAndExchangeL(
memory mem_ptr,
rax_RegL oldval, rRegL newval,
rFlagsReg cr)
%{
predicate(VM_Version::supports_cx8());
match(Set oldval (CompareAndExchangeL mem_ptr (Binary oldval newval)));
effect(KILL cr);
format %{ "cmpxchgq $mem_ptr,$newval\t# "
"If rax == $mem_ptr then store $newval into $mem_ptr\n\t" %}
opcode(0x0F, 0xB1);
ins_encode(lock_prefix,
REX_reg_mem_wide(newval, mem_ptr),
OpcP, OpcS,
reg_mem(newval, mem_ptr) // lock cmpxchg
);
ins_pipe( pipe_cmpxchg );
%}
instruct compareAndExchangeN(
memory mem_ptr,
rax_RegN oldval, rRegN newval,
rFlagsReg cr) %{
match(Set oldval (CompareAndExchangeN mem_ptr (Binary oldval newval)));
effect(KILL cr);
format %{ "cmpxchgl $mem_ptr,$newval\t# "
"If rax == $mem_ptr then store $newval into $mem_ptr\n\t" %}
opcode(0x0F, 0xB1);
ins_encode(lock_prefix,
REX_reg_mem(newval, mem_ptr),
OpcP, OpcS,
reg_mem(newval, mem_ptr) // lock cmpxchg
);
ins_pipe( pipe_cmpxchg );
%}
instruct compareAndExchangeP(
memory mem_ptr,
rax_RegP oldval, rRegP newval,
rFlagsReg cr)
%{
predicate(VM_Version::supports_cx8());
match(Set oldval (CompareAndExchangeP mem_ptr (Binary oldval newval)));
effect(KILL cr);
format %{ "cmpxchgq $mem_ptr,$newval\t# "
"If rax == $mem_ptr then store $newval into $mem_ptr\n\t" %}
opcode(0x0F, 0xB1);
ins_encode(lock_prefix,
REX_reg_mem_wide(newval, mem_ptr),
OpcP, OpcS,
reg_mem(newval, mem_ptr) // lock cmpxchg
);
ins_pipe( pipe_cmpxchg );
%}
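Note (illustration, not part of this change): the WeakCompareAndSwap* matches are added to the existing strong CAS instructs because on x86 a weak CAS is no cheaper than a strong one; both become LOCK CMPXCHG. The separate node types still matter for callers, and for ports where CAS is built from LL/SC a weak form may fail spuriously, so weak CAS is normally driven from a retry loop, roughly:

#include <atomic>

// Illustration only: typical retry loop around a weak CAS. On x86 weak and
// strong both map to LOCK CMPXCHG, but compare_exchange_weak is allowed to
// fail spuriously on LL/SC targets, so it is used inside a loop like this.
void atomic_increment(std::atomic<long>& counter) {
  long observed = counter.load(std::memory_order_relaxed);
  while (!counter.compare_exchange_weak(observed, observed + 1,
                                        std::memory_order_seq_cst)) {
    // 'observed' now holds the current value; retry with it.
  }
}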
instruct xaddI_no_res( memory mem, Universe dummy, immI add, rFlagsReg cr) %{
predicate(n->as_LoadStore()->result_not_used());
match(Set dummy (GetAndAddI mem add));

View File

@ -3491,6 +3491,8 @@ int MatchNode::needs_ideal_memory_edge(FormDict &globals) const {
"LoadPLocked",
"StorePConditional", "StoreIConditional", "StoreLConditional",
"CompareAndSwapI", "CompareAndSwapL", "CompareAndSwapP", "CompareAndSwapN",
"WeakCompareAndSwapI", "WeakCompareAndSwapL", "WeakCompareAndSwapP", "WeakCompareAndSwapN",
"CompareAndExchangeI", "CompareAndExchangeL", "CompareAndExchangeP", "CompareAndExchangeN",
"StoreCM",
"ClearArray",
"GetAndAddI", "GetAndSetI", "GetAndSetP",

View File

@ -41,58 +41,37 @@ ciConstantPoolCache::ciConstantPoolCache(Arena* arena,
_keys = new (arena) GrowableArray<int>(arena, expected_size, 0, 0);
}
int ciConstantPoolCache::key_compare(const int& key, const int& elt) {
if (key < elt) return -1;
else if (key > elt) return 1;
else return 0;
}
// ------------------------------------------------------------------
// ciConstantPoolCache::get
//
// Get the entry at some index
void* ciConstantPoolCache::get(int index) {
ASSERT_IN_VM;
int pos = find(index);
if (pos >= _keys->length() ||
_keys->at(pos) != index) {
bool found = false;
int pos = _keys->find_sorted<int, ciConstantPoolCache::key_compare>(index, found);
if (!found) {
// This element is not present in the cache.
return NULL;
}
return _elements->at(pos);
}
// ------------------------------------------------------------------
// ciConstantPoolCache::find
//
// Use binary search to find the position of this index in the cache.
// If there is no entry in the cache corresponding to this oop, return
// the position at which the index would be inserted.
int ciConstantPoolCache::find(int key) {
int min = 0;
int max = _keys->length()-1;
while (max >= min) {
int mid = (max + min) / 2;
int value = _keys->at(mid);
if (value < key) {
min = mid + 1;
} else if (value > key) {
max = mid - 1;
} else {
return mid;
}
}
return min;
}
// ------------------------------------------------------------------
// ciConstantPoolCache::insert
//
// Insert a ciObject into the table at some index.
void ciConstantPoolCache::insert(int index, void* elem) {
int i;
int pos = find(index);
for (i = _keys->length()-1; i >= pos; i--) {
_keys->at_put_grow(i+1, _keys->at(i));
_elements->at_put_grow(i+1, _elements->at(i));
}
_keys->at_put_grow(pos, index);
_elements->at_put_grow(pos, elem);
bool found = false;
int pos = _keys->find_sorted<int, ciConstantPoolCache::key_compare>(index, found);
assert(!found, "duplicate");
_keys->insert_before(pos, index);
_elements->insert_before(pos, elem);
}
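Note (illustration, not part of this change): after this rewrite both get() and insert() delegate the binary search to GrowableArray::find_sorted, which takes key_compare and reports through 'found' whether the key is present, returning either the match position or the insertion point. A standalone sketch of the same sorted parallel-array pattern, using std::vector and std::lower_bound in place of GrowableArray:

#include <algorithm>
#include <vector>

// Minimal sketch of the sorted-key cache above; std::lower_bound stands in
// for GrowableArray::find_sorted. Names are illustrative, not HotSpot APIs.
struct IndexCache {
  std::vector<int>   keys;      // kept sorted ascending
  std::vector<void*> elements;  // parallel to keys

  int find_sorted(int key, bool& found) const {
    auto it = std::lower_bound(keys.begin(), keys.end(), key);
    found = (it != keys.end() && *it == key);
    return static_cast<int>(it - keys.begin());
  }

  void* get(int key) const {
    bool found = false;
    int pos = find_sorted(key, found);
    return found ? elements[pos] : nullptr;   // NULL when not cached
  }

  void insert(int key, void* elem) {
    bool found = false;
    int pos = find_sorted(key, found);
    // The real code asserts !found: duplicate keys are never inserted.
    keys.insert(keys.begin() + pos, key);
    elements.insert(elements.begin() + pos, elem);
  }
};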
// ------------------------------------------------------------------

View File

@ -38,7 +38,7 @@ private:
GrowableArray<int>* _keys;
GrowableArray<void*>* _elements;
int find(int index);
static int key_compare(const int& key, const int& elt);
public:
ciConstantPoolCache(Arena* arena, int expected_size);

View File

@ -260,6 +260,13 @@ ciObject* ciObjectFactory::get(oop key) {
return new_object;
}
int ciObjectFactory::metadata_compare(Metadata* const& key, ciMetadata* const& elt) {
Metadata* value = elt->constant_encoding();
if (key < value) return -1;
else if (key > value) return 1;
else return 0;
}
// ------------------------------------------------------------------
// ciObjectFactory::get_metadata
//
@ -280,7 +287,8 @@ ciMetadata* ciObjectFactory::get_metadata(Metadata* key) {
}
#endif // ASSERT
int len = _ci_metadata->length();
int index = find(key, _ci_metadata);
bool found = false;
int index = _ci_metadata->find_sorted<Metadata*, ciObjectFactory::metadata_compare>(key, found);
#ifdef ASSERT
if (CIObjectFactoryVerify) {
for (int i=0; i<_ci_metadata->length(); i++) {
@ -290,7 +298,8 @@ ciMetadata* ciObjectFactory::get_metadata(Metadata* key) {
}
}
#endif
if (!is_found_at(index, key, _ci_metadata)) {
if (!found) {
// The ciMetadata does not yet exist. Create it and insert it
// into the cache.
ciMetadata* new_object = create_new_metadata(key);
@ -300,10 +309,10 @@ ciMetadata* ciObjectFactory::get_metadata(Metadata* key) {
if (len != _ci_metadata->length()) {
// creating the new object has recursively entered new objects
// into the table. We need to recompute our index.
index = find(key, _ci_metadata);
index = _ci_metadata->find_sorted<Metadata*, ciObjectFactory::metadata_compare>(key, found);
}
assert(!is_found_at(index, key, _ci_metadata), "no double insert");
insert(index, new_object, _ci_metadata);
assert(!found, "no double insert");
_ci_metadata->insert_before(index, new_object);
return new_object;
}
return _ci_metadata->at(index)->as_metadata();
@ -655,60 +664,6 @@ void ciObjectFactory::init_ident_of(ciBaseObject* obj) {
obj->set_ident(_next_ident++);
}
// ------------------------------------------------------------------
// ciObjectFactory::find
//
// Use binary search to find the position of this oop in the cache.
// If there is no entry in the cache corresponding to this oop, return
// the position at which the oop should be inserted.
int ciObjectFactory::find(Metadata* key, GrowableArray<ciMetadata*>* objects) {
int min = 0;
int max = objects->length()-1;
// print_contents();
while (max >= min) {
int mid = (max + min) / 2;
Metadata* value = objects->at(mid)->constant_encoding();
if (value < key) {
min = mid + 1;
} else if (value > key) {
max = mid - 1;
} else {
return mid;
}
}
return min;
}
// ------------------------------------------------------------------
// ciObjectFactory::is_found_at
//
// Verify that the binary search found the given key.
bool ciObjectFactory::is_found_at(int index, Metadata* key, GrowableArray<ciMetadata*>* objects) {
return (index < objects->length() &&
objects->at(index)->constant_encoding() == key);
}
// ------------------------------------------------------------------
// ciObjectFactory::insert
//
// Insert a ciObject into the table at some index.
void ciObjectFactory::insert(int index, ciMetadata* obj, GrowableArray<ciMetadata*>* objects) {
int len = objects->length();
if (len == index) {
objects->append(obj);
} else {
objects->append(objects->at(len-1));
int pos;
for (pos = len-2; pos >= index; pos--) {
objects->at_put(pos+1,objects->at(pos));
}
objects->at_put(index, obj);
}
}
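Note (illustration, not part of this change): the subtle point in get_metadata() above is the re-lookup. create_new_metadata() can recursively enter other objects into the sorted table, so the index computed before the creation may be stale and is recomputed before insert_before(). The shape of that pattern, with hypothetical stand-in types:

#include <algorithm>
#include <vector>

// Sketch of the create-then-recheck pattern; Entry stands in for ciMetadata
// and the int key for the Metadata* used by metadata_compare.
struct Entry { int key; };

struct MetaTable {
  std::vector<Entry*> entries;  // sorted by key

  int find_sorted(int key, bool& found) const {
    auto it = std::lower_bound(entries.begin(), entries.end(), key,
                               [](const Entry* e, int k) { return e->key < k; });
    found = (it != entries.end() && (*it)->key == key);
    return static_cast<int>(it - entries.begin());
  }

  Entry* get_or_create(int key) {
    bool found = false;
    int index = find_sorted(key, found);
    if (found) return entries[index];

    size_t len_before = entries.size();
    Entry* created = new Entry{key};      // the real factory may recurse here
    if (entries.size() != len_before) {
      index = find_sorted(key, found);    // table grew: recompute the slot
    }
    entries.insert(entries.begin() + index, created);
    return created;
  }
};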
static ciObjectFactory::NonPermObject* emptyBucket = NULL;
// ------------------------------------------------------------------

View File

@ -68,9 +68,7 @@ private:
NonPermObject* _non_perm_bucket[NON_PERM_BUCKETS];
int _non_perm_count;
int find(Metadata* key, GrowableArray<ciMetadata*>* objects);
bool is_found_at(int index, Metadata* key, GrowableArray<ciMetadata*>* objects);
void insert(int index, ciMetadata* obj, GrowableArray<ciMetadata*>* objects);
static int metadata_compare(Metadata* const& key, ciMetadata* const& elt);
ciObject* create_new_object(oop o);
ciMetadata* create_new_metadata(Metadata* o);

View File

@ -544,6 +544,42 @@ bool vmIntrinsics::is_disabled_by_flags(const methodHandle& method) {
case vmIntrinsics::_putLongVolatile:
case vmIntrinsics::_putFloatVolatile:
case vmIntrinsics::_putDoubleVolatile:
case vmIntrinsics::_getObjectAcquire:
case vmIntrinsics::_getBooleanAcquire:
case vmIntrinsics::_getByteAcquire:
case vmIntrinsics::_getShortAcquire:
case vmIntrinsics::_getCharAcquire:
case vmIntrinsics::_getIntAcquire:
case vmIntrinsics::_getLongAcquire:
case vmIntrinsics::_getFloatAcquire:
case vmIntrinsics::_getDoubleAcquire:
case vmIntrinsics::_putObjectRelease:
case vmIntrinsics::_putBooleanRelease:
case vmIntrinsics::_putByteRelease:
case vmIntrinsics::_putShortRelease:
case vmIntrinsics::_putCharRelease:
case vmIntrinsics::_putIntRelease:
case vmIntrinsics::_putLongRelease:
case vmIntrinsics::_putFloatRelease:
case vmIntrinsics::_putDoubleRelease:
case vmIntrinsics::_getObjectOpaque:
case vmIntrinsics::_getBooleanOpaque:
case vmIntrinsics::_getByteOpaque:
case vmIntrinsics::_getShortOpaque:
case vmIntrinsics::_getCharOpaque:
case vmIntrinsics::_getIntOpaque:
case vmIntrinsics::_getLongOpaque:
case vmIntrinsics::_getFloatOpaque:
case vmIntrinsics::_getDoubleOpaque:
case vmIntrinsics::_putObjectOpaque:
case vmIntrinsics::_putBooleanOpaque:
case vmIntrinsics::_putByteOpaque:
case vmIntrinsics::_putShortOpaque:
case vmIntrinsics::_putCharOpaque:
case vmIntrinsics::_putIntOpaque:
case vmIntrinsics::_putLongOpaque:
case vmIntrinsics::_putFloatOpaque:
case vmIntrinsics::_putDoubleOpaque:
case vmIntrinsics::_getByte_raw:
case vmIntrinsics::_getShort_raw:
case vmIntrinsics::_getChar_raw:
@ -569,9 +605,27 @@ bool vmIntrinsics::is_disabled_by_flags(const methodHandle& method) {
case vmIntrinsics::_loadFence:
case vmIntrinsics::_storeFence:
case vmIntrinsics::_fullFence:
case vmIntrinsics::_compareAndSwapObject:
case vmIntrinsics::_compareAndSwapLong:
case vmIntrinsics::_weakCompareAndSwapLong:
case vmIntrinsics::_weakCompareAndSwapLongAcquire:
case vmIntrinsics::_weakCompareAndSwapLongRelease:
case vmIntrinsics::_compareAndSwapInt:
case vmIntrinsics::_weakCompareAndSwapInt:
case vmIntrinsics::_weakCompareAndSwapIntAcquire:
case vmIntrinsics::_weakCompareAndSwapIntRelease:
case vmIntrinsics::_compareAndSwapObject:
case vmIntrinsics::_weakCompareAndSwapObject:
case vmIntrinsics::_weakCompareAndSwapObjectAcquire:
case vmIntrinsics::_weakCompareAndSwapObjectRelease:
case vmIntrinsics::_compareAndExchangeIntVolatile:
case vmIntrinsics::_compareAndExchangeIntAcquire:
case vmIntrinsics::_compareAndExchangeIntRelease:
case vmIntrinsics::_compareAndExchangeLongVolatile:
case vmIntrinsics::_compareAndExchangeLongAcquire:
case vmIntrinsics::_compareAndExchangeLongRelease:
case vmIntrinsics::_compareAndExchangeObjectVolatile:
case vmIntrinsics::_compareAndExchangeObjectAcquire:
case vmIntrinsics::_compareAndExchangeObjectRelease:
if (!InlineUnsafeOps) return true;
break;
case vmIntrinsics::_getShortUnaligned:

View File

@ -1146,6 +1146,64 @@
do_intrinsic(_putFloatVolatile, jdk_internal_misc_Unsafe, putFloatVolatile_name, putFloat_signature, F_RN) \
do_intrinsic(_putDoubleVolatile, jdk_internal_misc_Unsafe, putDoubleVolatile_name, putDouble_signature, F_RN) \
\
do_name(getObjectOpaque_name,"getObjectOpaque") do_name(putObjectOpaque_name,"putObjectOpaque") \
do_name(getBooleanOpaque_name,"getBooleanOpaque") do_name(putBooleanOpaque_name,"putBooleanOpaque") \
do_name(getByteOpaque_name,"getByteOpaque") do_name(putByteOpaque_name,"putByteOpaque") \
do_name(getShortOpaque_name,"getShortOpaque") do_name(putShortOpaque_name,"putShortOpaque") \
do_name(getCharOpaque_name,"getCharOpaque") do_name(putCharOpaque_name,"putCharOpaque") \
do_name(getIntOpaque_name,"getIntOpaque") do_name(putIntOpaque_name,"putIntOpaque") \
do_name(getLongOpaque_name,"getLongOpaque") do_name(putLongOpaque_name,"putLongOpaque") \
do_name(getFloatOpaque_name,"getFloatOpaque") do_name(putFloatOpaque_name,"putFloatOpaque") \
do_name(getDoubleOpaque_name,"getDoubleOpaque") do_name(putDoubleOpaque_name,"putDoubleOpaque") \
\
do_intrinsic(_getObjectOpaque, jdk_internal_misc_Unsafe, getObjectOpaque_name, getObject_signature, F_R) \
do_intrinsic(_getBooleanOpaque, jdk_internal_misc_Unsafe, getBooleanOpaque_name, getBoolean_signature, F_R) \
do_intrinsic(_getByteOpaque, jdk_internal_misc_Unsafe, getByteOpaque_name, getByte_signature, F_R) \
do_intrinsic(_getShortOpaque, jdk_internal_misc_Unsafe, getShortOpaque_name, getShort_signature, F_R) \
do_intrinsic(_getCharOpaque, jdk_internal_misc_Unsafe, getCharOpaque_name, getChar_signature, F_R) \
do_intrinsic(_getIntOpaque, jdk_internal_misc_Unsafe, getIntOpaque_name, getInt_signature, F_R) \
do_intrinsic(_getLongOpaque, jdk_internal_misc_Unsafe, getLongOpaque_name, getLong_signature, F_R) \
do_intrinsic(_getFloatOpaque, jdk_internal_misc_Unsafe, getFloatOpaque_name, getFloat_signature, F_R) \
do_intrinsic(_getDoubleOpaque, jdk_internal_misc_Unsafe, getDoubleOpaque_name, getDouble_signature, F_R) \
do_intrinsic(_putObjectOpaque, jdk_internal_misc_Unsafe, putObjectOpaque_name, putObject_signature, F_R) \
do_intrinsic(_putBooleanOpaque, jdk_internal_misc_Unsafe, putBooleanOpaque_name, putBoolean_signature, F_R) \
do_intrinsic(_putByteOpaque, jdk_internal_misc_Unsafe, putByteOpaque_name, putByte_signature, F_R) \
do_intrinsic(_putShortOpaque, jdk_internal_misc_Unsafe, putShortOpaque_name, putShort_signature, F_R) \
do_intrinsic(_putCharOpaque, jdk_internal_misc_Unsafe, putCharOpaque_name, putChar_signature, F_R) \
do_intrinsic(_putIntOpaque, jdk_internal_misc_Unsafe, putIntOpaque_name, putInt_signature, F_R) \
do_intrinsic(_putLongOpaque, jdk_internal_misc_Unsafe, putLongOpaque_name, putLong_signature, F_R) \
do_intrinsic(_putFloatOpaque, jdk_internal_misc_Unsafe, putFloatOpaque_name, putFloat_signature, F_R) \
do_intrinsic(_putDoubleOpaque, jdk_internal_misc_Unsafe, putDoubleOpaque_name, putDouble_signature, F_R) \
\
do_name(getObjectAcquire_name, "getObjectAcquire") do_name(putObjectRelease_name, "putObjectRelease") \
do_name(getBooleanAcquire_name, "getBooleanAcquire") do_name(putBooleanRelease_name, "putBooleanRelease") \
do_name(getByteAcquire_name, "getByteAcquire") do_name(putByteRelease_name, "putByteRelease") \
do_name(getShortAcquire_name, "getShortAcquire") do_name(putShortRelease_name, "putShortRelease") \
do_name(getCharAcquire_name, "getCharAcquire") do_name(putCharRelease_name, "putCharRelease") \
do_name(getIntAcquire_name, "getIntAcquire") do_name(putIntRelease_name, "putIntRelease") \
do_name(getLongAcquire_name, "getLongAcquire") do_name(putLongRelease_name, "putLongRelease") \
do_name(getFloatAcquire_name, "getFloatAcquire") do_name(putFloatRelease_name, "putFloatRelease") \
do_name(getDoubleAcquire_name, "getDoubleAcquire") do_name(putDoubleRelease_name, "putDoubleRelease") \
\
do_intrinsic(_getObjectAcquire, jdk_internal_misc_Unsafe, getObjectAcquire_name, getObject_signature, F_R) \
do_intrinsic(_getBooleanAcquire, jdk_internal_misc_Unsafe, getBooleanAcquire_name, getBoolean_signature, F_R) \
do_intrinsic(_getByteAcquire, jdk_internal_misc_Unsafe, getByteAcquire_name, getByte_signature, F_R) \
do_intrinsic(_getShortAcquire, jdk_internal_misc_Unsafe, getShortAcquire_name, getShort_signature, F_R) \
do_intrinsic(_getCharAcquire, jdk_internal_misc_Unsafe, getCharAcquire_name, getChar_signature, F_R) \
do_intrinsic(_getIntAcquire, jdk_internal_misc_Unsafe, getIntAcquire_name, getInt_signature, F_R) \
do_intrinsic(_getLongAcquire, jdk_internal_misc_Unsafe, getLongAcquire_name, getLong_signature, F_R) \
do_intrinsic(_getFloatAcquire, jdk_internal_misc_Unsafe, getFloatAcquire_name, getFloat_signature, F_R) \
do_intrinsic(_getDoubleAcquire, jdk_internal_misc_Unsafe, getDoubleAcquire_name, getDouble_signature, F_R) \
do_intrinsic(_putObjectRelease, jdk_internal_misc_Unsafe, putObjectRelease_name, putObject_signature, F_R) \
do_intrinsic(_putBooleanRelease, jdk_internal_misc_Unsafe, putBooleanRelease_name, putBoolean_signature, F_R) \
do_intrinsic(_putByteRelease, jdk_internal_misc_Unsafe, putByteRelease_name, putByte_signature, F_R) \
do_intrinsic(_putShortRelease, jdk_internal_misc_Unsafe, putShortRelease_name, putShort_signature, F_R) \
do_intrinsic(_putCharRelease, jdk_internal_misc_Unsafe, putCharRelease_name, putChar_signature, F_R) \
do_intrinsic(_putIntRelease, jdk_internal_misc_Unsafe, putIntRelease_name, putInt_signature, F_R) \
do_intrinsic(_putLongRelease, jdk_internal_misc_Unsafe, putLongRelease_name, putLong_signature, F_R) \
do_intrinsic(_putFloatRelease, jdk_internal_misc_Unsafe, putFloatRelease_name, putFloat_signature, F_R) \
do_intrinsic(_putDoubleRelease, jdk_internal_misc_Unsafe, putDoubleRelease_name, putDouble_signature, F_R) \
\
do_name(getShortUnaligned_name,"getShortUnaligned") do_name(putShortUnaligned_name,"putShortUnaligned") \
do_name(getCharUnaligned_name,"getCharUnaligned") do_name(putCharUnaligned_name,"putCharUnaligned") \
do_name(getIntUnaligned_name,"getIntUnaligned") do_name(putIntUnaligned_name,"putIntUnaligned") \
@ -1197,24 +1255,68 @@
do_intrinsic(_putDouble_raw, jdk_internal_misc_Unsafe, putDouble_name, putDouble_raw_signature, F_R) \
do_intrinsic(_putAddress_raw, jdk_internal_misc_Unsafe, putAddress_name, putAddress_raw_signature, F_R) \
\
do_intrinsic(_compareAndSwapObject, jdk_internal_misc_Unsafe, compareAndSwapObject_name, compareAndSwapObject_signature, F_R) \
do_name( compareAndSwapObject_name, "compareAndSwapObject") \
do_signature(compareAndSwapObject_signature, "(Ljava/lang/Object;JLjava/lang/Object;Ljava/lang/Object;)Z") \
do_intrinsic(_compareAndSwapLong, jdk_internal_misc_Unsafe, compareAndSwapLong_name, compareAndSwapLong_signature, F_R) \
do_name( compareAndSwapLong_name, "compareAndSwapLong") \
do_signature(compareAndSwapLong_signature, "(Ljava/lang/Object;JJJ)Z") \
do_intrinsic(_compareAndSwapInt, jdk_internal_misc_Unsafe, compareAndSwapInt_name, compareAndSwapInt_signature, F_R) \
do_name( compareAndSwapInt_name, "compareAndSwapInt") \
do_signature(compareAndSwapInt_signature, "(Ljava/lang/Object;JII)Z") \
do_intrinsic(_putOrderedObject, jdk_internal_misc_Unsafe, putOrderedObject_name, putOrderedObject_signature, F_R) \
do_name( putOrderedObject_name, "putOrderedObject") \
do_alias( putOrderedObject_signature, /*(LObject;JLObject;)V*/ putObject_signature) \
do_intrinsic(_putOrderedLong, jdk_internal_misc_Unsafe, putOrderedLong_name, putOrderedLong_signature, F_R) \
do_name( putOrderedLong_name, "putOrderedLong") \
do_alias( putOrderedLong_signature, /*(Ljava/lang/Object;JJ)V*/ putLong_signature) \
do_intrinsic(_putOrderedInt, jdk_internal_misc_Unsafe, putOrderedInt_name, putOrderedInt_signature, F_R) \
do_name( putOrderedInt_name, "putOrderedInt") \
do_alias( putOrderedInt_signature, /*(Ljava/lang/Object;JI)V*/ putInt_signature) \
do_signature(compareAndSwapObject_signature, "(Ljava/lang/Object;JLjava/lang/Object;Ljava/lang/Object;)Z") \
do_signature(compareAndExchangeObject_signature, "(Ljava/lang/Object;JLjava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;") \
do_signature(compareAndSwapLong_signature, "(Ljava/lang/Object;JJJ)Z") \
do_signature(compareAndExchangeLong_signature, "(Ljava/lang/Object;JJJ)J") \
do_signature(compareAndSwapInt_signature, "(Ljava/lang/Object;JII)Z") \
do_signature(compareAndExchangeInt_signature, "(Ljava/lang/Object;JII)I") \
\
do_name(compareAndSwapObject_name, "compareAndSwapObject") \
do_name(compareAndExchangeObjectVolatile_name, "compareAndExchangeObjectVolatile") \
do_name(compareAndExchangeObjectAcquire_name, "compareAndExchangeObjectAcquire") \
do_name(compareAndExchangeObjectRelease_name, "compareAndExchangeObjectRelease") \
do_name(compareAndSwapLong_name, "compareAndSwapLong") \
do_name(compareAndExchangeLongVolatile_name, "compareAndExchangeLongVolatile") \
do_name(compareAndExchangeLongAcquire_name, "compareAndExchangeLongAcquire") \
do_name(compareAndExchangeLongRelease_name, "compareAndExchangeLongRelease") \
do_name(compareAndSwapInt_name, "compareAndSwapInt") \
do_name(compareAndExchangeIntVolatile_name, "compareAndExchangeIntVolatile") \
do_name(compareAndExchangeIntAcquire_name, "compareAndExchangeIntAcquire") \
do_name(compareAndExchangeIntRelease_name, "compareAndExchangeIntRelease") \
\
do_name(weakCompareAndSwapObject_name, "weakCompareAndSwapObject") \
do_name(weakCompareAndSwapObjectAcquire_name, "weakCompareAndSwapObjectAcquire") \
do_name(weakCompareAndSwapObjectRelease_name, "weakCompareAndSwapObjectRelease") \
do_name(weakCompareAndSwapLong_name, "weakCompareAndSwapLong") \
do_name(weakCompareAndSwapLongAcquire_name, "weakCompareAndSwapLongAcquire") \
do_name(weakCompareAndSwapLongRelease_name, "weakCompareAndSwapLongRelease") \
do_name(weakCompareAndSwapInt_name, "weakCompareAndSwapInt") \
do_name(weakCompareAndSwapIntAcquire_name, "weakCompareAndSwapIntAcquire") \
do_name(weakCompareAndSwapIntRelease_name, "weakCompareAndSwapIntRelease") \
\
do_intrinsic(_compareAndSwapObject, jdk_internal_misc_Unsafe, compareAndSwapObject_name, compareAndSwapObject_signature, F_RN) \
do_intrinsic(_compareAndExchangeObjectVolatile, jdk_internal_misc_Unsafe, compareAndExchangeObjectVolatile_name, compareAndExchangeObject_signature, F_RN) \
do_intrinsic(_compareAndExchangeObjectAcquire, jdk_internal_misc_Unsafe, compareAndExchangeObjectAcquire_name, compareAndExchangeObject_signature, F_R) \
do_intrinsic(_compareAndExchangeObjectRelease, jdk_internal_misc_Unsafe, compareAndExchangeObjectRelease_name, compareAndExchangeObject_signature, F_R) \
do_intrinsic(_compareAndSwapLong, jdk_internal_misc_Unsafe, compareAndSwapLong_name, compareAndSwapLong_signature, F_RN) \
do_intrinsic(_compareAndExchangeLongVolatile, jdk_internal_misc_Unsafe, compareAndExchangeLongVolatile_name, compareAndExchangeLong_signature, F_RN) \
do_intrinsic(_compareAndExchangeLongAcquire, jdk_internal_misc_Unsafe, compareAndExchangeLongAcquire_name, compareAndExchangeLong_signature, F_R) \
do_intrinsic(_compareAndExchangeLongRelease, jdk_internal_misc_Unsafe, compareAndExchangeLongRelease_name, compareAndExchangeLong_signature, F_R) \
do_intrinsic(_compareAndSwapInt, jdk_internal_misc_Unsafe, compareAndSwapInt_name, compareAndSwapInt_signature, F_RN) \
do_intrinsic(_compareAndExchangeIntVolatile, jdk_internal_misc_Unsafe, compareAndExchangeIntVolatile_name, compareAndExchangeInt_signature, F_RN) \
do_intrinsic(_compareAndExchangeIntAcquire, jdk_internal_misc_Unsafe, compareAndExchangeIntAcquire_name, compareAndExchangeInt_signature, F_R) \
do_intrinsic(_compareAndExchangeIntRelease, jdk_internal_misc_Unsafe, compareAndExchangeIntRelease_name, compareAndExchangeInt_signature, F_R) \
\
do_intrinsic(_weakCompareAndSwapObject, jdk_internal_misc_Unsafe, weakCompareAndSwapObject_name, compareAndSwapObject_signature, F_R) \
do_intrinsic(_weakCompareAndSwapObjectAcquire, jdk_internal_misc_Unsafe, weakCompareAndSwapObjectAcquire_name, compareAndSwapObject_signature, F_R) \
do_intrinsic(_weakCompareAndSwapObjectRelease, jdk_internal_misc_Unsafe, weakCompareAndSwapObjectRelease_name, compareAndSwapObject_signature, F_R) \
do_intrinsic(_weakCompareAndSwapLong, jdk_internal_misc_Unsafe, weakCompareAndSwapLong_name, compareAndSwapLong_signature, F_R) \
do_intrinsic(_weakCompareAndSwapLongAcquire, jdk_internal_misc_Unsafe, weakCompareAndSwapLongAcquire_name, compareAndSwapLong_signature, F_R) \
do_intrinsic(_weakCompareAndSwapLongRelease, jdk_internal_misc_Unsafe, weakCompareAndSwapLongRelease_name, compareAndSwapLong_signature, F_R) \
do_intrinsic(_weakCompareAndSwapInt, jdk_internal_misc_Unsafe, weakCompareAndSwapInt_name, compareAndSwapInt_signature, F_R) \
do_intrinsic(_weakCompareAndSwapIntAcquire, jdk_internal_misc_Unsafe, weakCompareAndSwapIntAcquire_name, compareAndSwapInt_signature, F_R) \
do_intrinsic(_weakCompareAndSwapIntRelease, jdk_internal_misc_Unsafe, weakCompareAndSwapIntRelease_name, compareAndSwapInt_signature, F_R) \
\
do_intrinsic(_putOrderedObject, jdk_internal_misc_Unsafe, putOrderedObject_name, putOrderedObject_signature, F_RN) \
do_name( putOrderedObject_name, "putOrderedObject") \
do_alias( putOrderedObject_signature, /*(LObject;JLObject;)V*/ putObject_signature) \
do_intrinsic(_putOrderedLong, jdk_internal_misc_Unsafe, putOrderedLong_name, putOrderedLong_signature, F_RN) \
do_name( putOrderedLong_name, "putOrderedLong") \
do_alias( putOrderedLong_signature, /*(Ljava/lang/Object;JJ)V*/ putLong_signature) \
do_intrinsic(_putOrderedInt, jdk_internal_misc_Unsafe, putOrderedInt_name, putOrderedInt_signature, F_RN) \
do_name( putOrderedInt_name, "putOrderedInt") \
do_alias( putOrderedInt_signature, /*(Ljava/lang/Object;JI)V*/ putInt_signature) \
\
do_intrinsic(_getAndAddInt, jdk_internal_misc_Unsafe, getAndAddInt_name, getAndAddInt_signature, F_R) \
do_name( getAndAddInt_name, "getAndAddInt") \

View File

@ -1,51 +0,0 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "jvmci/commandLineFlagConstraintsJVMCI.hpp"
#include "runtime/arguments.hpp"
#include "runtime/globals.hpp"
#include "utilities/defaultStream.hpp"
Flag::Error EnableJVMCIMustBeEnabledConstraintFunc(bool value, bool verbose) {
if (!EnableJVMCI) {
if (verbose == true) {
jio_fprintf(defaultStream::error_stream(), "EnableJVMCI must be enabled\n");
}
return Flag::VIOLATES_CONSTRAINT;
} else {
return Flag::SUCCESS;
}
}
Flag::Error EnableJVMCIMustBeEnabledConstraintFunc(intx value, bool verbose) {
if (!EnableJVMCI) {
if (verbose == true) {
jio_fprintf(defaultStream::error_stream(), "EnableJVMCI must be enabled\n");
}
return Flag::VIOLATES_CONSTRAINT;
} else {
return Flag::SUCCESS;
}
}

View File

@ -1,40 +0,0 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_JVMCI_COMMANDLINEFLAGCONSTRAINTSJVMCI_HPP
#define SHARE_VM_JVMCI_COMMANDLINEFLAGCONSTRAINTSJVMCI_HPP
#include "runtime/globals.hpp"
#include "utilities/globalDefinitions.hpp"
/*
* Here we have JVMCI arguments constraints functions, which are called automatically
* whenever flag's value changes. If the constraint fails the function should return
* an appropriate error value.
*/
Flag::Error EnableJVMCIMustBeEnabledConstraintFunc(bool value, bool verbose);
Flag::Error EnableJVMCIMustBeEnabledConstraintFunc(intx value, bool verbose);
#endif /* SHARE_VM_JVMCI_COMMANDLINEFLAGCONSTRAINTSJVMCI_HPP */

View File

@ -24,6 +24,8 @@
#include "precompiled.hpp"
#include "jvmci/jvmci_globals.hpp"
#include "utilities/defaultStream.hpp"
#include "runtime/globals_extension.hpp"
JVMCI_FLAGS(MATERIALIZE_DEVELOPER_FLAG, \
MATERIALIZE_PD_DEVELOPER_FLAG, \
@ -34,3 +36,185 @@ JVMCI_FLAGS(MATERIALIZE_DEVELOPER_FLAG, \
MATERIALIZE_NOTPRODUCT_FLAG, \
IGNORE_RANGE, \
IGNORE_CONSTRAINT)
#define JVMCI_IGNORE_FLAG_FOUR_PARAM(type, name, value, doc)
#define JVMCI_IGNORE_FLAG_THREE_PARAM(type, name, doc)
// Return true if jvmci flags are consistent.
bool JVMCIGlobals::check_jvmci_flags_are_consistent() {
if (EnableJVMCI) {
return true;
}
// "FLAG_IS_DEFAULT" fail count.
int fail_count = 0;
// Number of "FLAG_IS_DEFAULT" failures that may be skipped before the code
// treats the situation as a real consistency failure.
int skip_fail_count;
// EnableJVMCI is false here.
// If any other JVMCI flag has been changed, the consistency check should fail.
// The JVMCI_FLAGS macros expanded below cover all JVMCI flags automatically,
// but that expansion also includes EnableJVMCI itself, which must not count
// against the check and cannot easily be excluded from the expansion.
// So when EnableJVMCI has been changed, the code requires at least two
// changed flags before reporting a consistency failure; otherwise a single
// changed flag is sufficient.
// Set skip_fail_count to 0 if EnableJVMCI is at its default value,
// and to 1 if EnableJVMCI has been changed.
// This value is used to skip failures in the macro-expanded code below.
if (!FLAG_IS_DEFAULT(EnableJVMCI)) {
skip_fail_count = 1;
} else {
skip_fail_count = 0;
}
#define EMIT_FLAG_VALUE_CHANGED_CHECK_CODE(FLAG) \
if (!FLAG_IS_DEFAULT(FLAG)) { \
fail_count++; \
if (fail_count > skip_fail_count) { \
return false; \
} \
}
#define JVMCI_DIAGNOSTIC_FLAG_VALUE_CHANGED_CHECK_CODE(type, name, value, doc) EMIT_FLAG_VALUE_CHANGED_CHECK_CODE(name)
#define JVMCI_EXPERIMENTAL_FLAG_VALUE_CHANGED_CHECK_CODE(type, name, value, doc) EMIT_FLAG_VALUE_CHANGED_CHECK_CODE(name)
// Check consistency of diagnostic flags if UnlockDiagnosticVMOptions is true
// or not default. UnlockDiagnosticVMOptions is default true in debug builds.
if (UnlockDiagnosticVMOptions || !FLAG_IS_DEFAULT(UnlockDiagnosticVMOptions)) {
JVMCI_FLAGS(JVMCI_IGNORE_FLAG_FOUR_PARAM, \
JVMCI_IGNORE_FLAG_THREE_PARAM, \
JVMCI_IGNORE_FLAG_FOUR_PARAM, \
JVMCI_IGNORE_FLAG_THREE_PARAM, \
JVMCI_DIAGNOSTIC_FLAG_VALUE_CHANGED_CHECK_CODE, \
JVMCI_IGNORE_FLAG_FOUR_PARAM, \
JVMCI_IGNORE_FLAG_FOUR_PARAM, \
IGNORE_RANGE, \
IGNORE_CONSTRAINT)
}
// Check consistency of experimental flags if UnlockExperimentalVMOptions is
// true or not default.
if (UnlockExperimentalVMOptions || !FLAG_IS_DEFAULT(UnlockExperimentalVMOptions)) {
JVMCI_FLAGS(JVMCI_IGNORE_FLAG_FOUR_PARAM, \
JVMCI_IGNORE_FLAG_THREE_PARAM, \
JVMCI_IGNORE_FLAG_FOUR_PARAM, \
JVMCI_IGNORE_FLAG_THREE_PARAM, \
JVMCI_IGNORE_FLAG_FOUR_PARAM, \
JVMCI_EXPERIMENTAL_FLAG_VALUE_CHANGED_CHECK_CODE, \
JVMCI_IGNORE_FLAG_FOUR_PARAM, \
IGNORE_RANGE, \
IGNORE_CONSTRAINT)
}
#ifndef PRODUCT
#define JVMCI_DEVELOP_FLAG_VALUE_CHANGED_CHECK_CODE(type, name, value, doc) EMIT_FLAG_VALUE_CHANGED_CHECK_CODE(name)
#define JVMCI_PD_DEVELOP_FLAG_VALUE_CHANGED_CHECK_CODE(type, name, doc) EMIT_FLAG_VALUE_CHANGED_CHECK_CODE(name)
#define JVMCI_NOTPRODUCT_FLAG_VALUE_CHANGED_CHECK_CODE(type, name, value, doc) EMIT_FLAG_VALUE_CHANGED_CHECK_CODE(name)
#else
#define JVMCI_DEVELOP_FLAG_VALUE_CHANGED_CHECK_CODE(type, name, value, doc)
#define JVMCI_PD_DEVELOP_FLAG_VALUE_CHANGED_CHECK_CODE(type, name, doc)
#define JVMCI_NOTPRODUCT_FLAG_VALUE_CHANGED_CHECK_CODE(type, name, value, doc)
#endif
#define JVMCI_PD_PRODUCT_FLAG_VALUE_CHANGED_CHECK_CODE(type, name, doc) EMIT_FLAG_VALUE_CHANGED_CHECK_CODE(name)
#define JVMCI_PRODUCT_FLAG_VALUE_CHANGED_CHECK_CODE(type, name, value, doc) EMIT_FLAG_VALUE_CHANGED_CHECK_CODE(name)
JVMCI_FLAGS(JVMCI_DEVELOP_FLAG_VALUE_CHANGED_CHECK_CODE, \
JVMCI_PD_DEVELOP_FLAG_VALUE_CHANGED_CHECK_CODE, \
JVMCI_PRODUCT_FLAG_VALUE_CHANGED_CHECK_CODE, \
JVMCI_PD_PRODUCT_FLAG_VALUE_CHANGED_CHECK_CODE, \
JVMCI_IGNORE_FLAG_FOUR_PARAM, \
JVMCI_IGNORE_FLAG_FOUR_PARAM, \
JVMCI_NOTPRODUCT_FLAG_VALUE_CHANGED_CHECK_CODE, \
IGNORE_RANGE, \
IGNORE_CONSTRAINT)
#undef EMIT_FLAG_VALUE_CHANGED_CHECK_CODE
#undef JVMCI_DEVELOP_FLAG_VALUE_CHANGED_CHECK_CODE
#undef JVMCI_PD_DEVELOP_FLAG_VALUE_CHANGED_CHECK_CODE
#undef JVMCI_NOTPRODUCT_FLAG_VALUE_CHANGED_CHECK_CODE
#undef JVMCI_DIAGNOSTIC_FLAG_VALUE_CHANGED_CHECK_CODE
#undef JVMCI_PD_PRODUCT_FLAG_VALUE_CHANGED_CHECK_CODE
#undef JVMCI_PRODUCT_FLAG_VALUE_CHANGED_CHECK_CODE
#undef JVMCI_EXPERIMENTAL_FLAG_VALUE_CHANGED_CHECK_CODE
return true;
}
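Note (illustration, not part of this change): the counting scheme above works because, with EnableJVMCI false, no other JVMCI flag may be set on the command line; EnableJVMCI itself is swept up by the same JVMCI_FLAGS expansion, so exactly one non-default hit is tolerated when it is the flag that was changed. Stripped of the macro machinery, the idea is:

#include <string>
#include <vector>

// Illustration of the skip_fail_count idea above. Flag names and the
// is_default field are hypothetical stand-ins for FLAG_IS_DEFAULT checks.
struct FlagState {
  std::string name;
  bool is_default;
};

bool jvmci_flags_consistent(bool enable_jvmci,
                            bool enable_jvmci_is_default,
                            const std::vector<FlagState>& jvmci_flags) {
  if (enable_jvmci) {
    return true;                      // anything goes once JVMCI is enabled
  }
  int skip_fail_count = enable_jvmci_is_default ? 0 : 1;
  int fail_count = 0;
  for (const FlagState& f : jvmci_flags) {   // includes EnableJVMCI itself
    if (!f.is_default) {
      fail_count++;
      if (fail_count > skip_fail_count) {
        return false;                 // some other JVMCI flag was changed
      }
    }
  }
  return true;
}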
// Print jvmci arguments inconsistency error message.
void JVMCIGlobals::print_jvmci_args_inconsistency_error_message() {
const char* error_msg = "Improperly specified VM option '%s'\n";
jio_fprintf(defaultStream::error_stream(), "EnableJVMCI must be enabled\n");
#define EMIT_CHECK_PRINT_ERR_MSG_CODE(FLAG) \
if (!FLAG_IS_DEFAULT(FLAG)) { \
if (strcmp(#FLAG, "EnableJVMCI")) { \
jio_fprintf(defaultStream::error_stream(), error_msg, #FLAG); \
} \
}
#define JVMCI_DIAGNOSTIC_FLAG_CHECK_PRINT_ERR_MSG_CODE(type, name, value, doc) EMIT_CHECK_PRINT_ERR_MSG_CODE(name)
#define JVMCI_EXPERIMENTAL_FLAG_CHECK_PRINT_ERR_MSG_CODE(type, name, value, doc) EMIT_CHECK_PRINT_ERR_MSG_CODE(name)
if (UnlockDiagnosticVMOptions || !FLAG_IS_DEFAULT(UnlockDiagnosticVMOptions)) {
JVMCI_FLAGS(JVMCI_IGNORE_FLAG_FOUR_PARAM, \
JVMCI_IGNORE_FLAG_THREE_PARAM, \
JVMCI_IGNORE_FLAG_FOUR_PARAM, \
JVMCI_IGNORE_FLAG_THREE_PARAM, \
JVMCI_DIAGNOSTIC_FLAG_CHECK_PRINT_ERR_MSG_CODE, \
JVMCI_IGNORE_FLAG_FOUR_PARAM, \
JVMCI_IGNORE_FLAG_FOUR_PARAM, \
IGNORE_RANGE, \
IGNORE_CONSTRAINT)
}
if (UnlockExperimentalVMOptions || !FLAG_IS_DEFAULT(UnlockExperimentalVMOptions)) {
JVMCI_FLAGS(JVMCI_IGNORE_FLAG_FOUR_PARAM, \
JVMCI_IGNORE_FLAG_THREE_PARAM, \
JVMCI_IGNORE_FLAG_FOUR_PARAM, \
JVMCI_IGNORE_FLAG_THREE_PARAM, \
JVMCI_IGNORE_FLAG_FOUR_PARAM, \
JVMCI_EXPERIMENTAL_FLAG_CHECK_PRINT_ERR_MSG_CODE, \
JVMCI_IGNORE_FLAG_FOUR_PARAM, \
IGNORE_RANGE, \
IGNORE_CONSTRAINT)
}
#ifndef PRODUCT
#define JVMCI_DEVELOP_FLAG_CHECK_PRINT_ERR_MSG_CODE(type, name, value, doc) EMIT_CHECK_PRINT_ERR_MSG_CODE(name)
#define JVMCI_PD_DEVELOP_FLAG_CHECK_PRINT_ERR_MSG_CODE(type, name, doc) EMIT_CHECK_PRINT_ERR_MSG_CODE(name)
#define JVMCI_NOTPRODUCT_FLAG_CHECK_PRINT_ERR_MSG_CODE(type, name, value, doc) EMIT_CHECK_PRINT_ERR_MSG_CODE(name)
#else
#define JVMCI_DEVELOP_FLAG_CHECK_PRINT_ERR_MSG_CODE(type, name, value, doc)
#define JVMCI_PD_DEVELOP_FLAG_CHECK_PRINT_ERR_MSG_CODE(type, name, doc)
#define JVMCI_NOTPRODUCT_FLAG_CHECK_PRINT_ERR_MSG_CODE(type, name, value, doc)
#endif
#define JVMCI_PD_PRODUCT_FLAG_CHECK_PRINT_ERR_MSG_CODE(type, name, doc) EMIT_CHECK_PRINT_ERR_MSG_CODE(name)
#define JVMCI_PRODUCT_FLAG_CHECK_PRINT_ERR_MSG_CODE(type, name, value, doc) EMIT_CHECK_PRINT_ERR_MSG_CODE(name)
JVMCI_FLAGS(JVMCI_DEVELOP_FLAG_CHECK_PRINT_ERR_MSG_CODE, \
JVMCI_PD_DEVELOP_FLAG_CHECK_PRINT_ERR_MSG_CODE, \
JVMCI_PRODUCT_FLAG_CHECK_PRINT_ERR_MSG_CODE, \
JVMCI_PD_PRODUCT_FLAG_CHECK_PRINT_ERR_MSG_CODE, \
JVMCI_IGNORE_FLAG_FOUR_PARAM, \
JVMCI_IGNORE_FLAG_FOUR_PARAM, \
JVMCI_NOTPRODUCT_FLAG_CHECK_PRINT_ERR_MSG_CODE, \
IGNORE_RANGE, \
IGNORE_CONSTRAINT)
#undef EMIT_CHECK_PRINT_ERR_MSG_CODE
#undef JVMCI_DEVELOP_FLAG_CHECK_PRINT_ERR_MSG_CODE
#undef JVMCI_PD_DEVELOP_FLAG_CHECK_PRINT_ERR_MSG_CODE
#undef JVMCI_NOTPRODUCT_FLAG_CHECK_PRINT_ERR_MSG_CODE
#undef JVMCI_PD_PRODUCT_FLAG_CHECK_PRINT_ERR_MSG_CODE
#undef JVMCI_PRODUCT_FLAG_CHECK_PRINT_ERR_MSG_CODE
#undef JVMCI_DIAGNOSTIC_FLAG_CHECK_PRINT_ERR_MSG_CODE
#undef JVMCI_EXPERIMENTAL_FLAG_CHECK_PRINT_ERR_MSG_CODE
}
#undef JVMCI_IGNORE_FLAG_FOUR_PARAM
#undef JVMCI_IGNORE_FLAG_THREE_PARAM

View File

@ -39,29 +39,23 @@
\
experimental(bool, UseJVMCICompiler, false, \
"Use JVMCI as the default compiler") \
constraint(EnableJVMCIMustBeEnabledConstraintFunc,AtParse) \
\
experimental(bool, BootstrapJVMCI, false, \
"Bootstrap JVMCI before running Java main method") \
constraint(EnableJVMCIMustBeEnabledConstraintFunc,AtParse) \
\
experimental(bool, PrintBootstrap, true, \
"Print JVMCI bootstrap progress and summary") \
constraint(EnableJVMCIMustBeEnabledConstraintFunc,AtParse) \
\
experimental(intx, JVMCIThreads, 1, \
"Force number of JVMCI compiler threads to use") \
range(1, max_jint) \
constraint(EnableJVMCIMustBeEnabledConstraintFunc,AtParse) \
\
experimental(intx, JVMCIHostThreads, 1, \
"Force number of compiler threads for JVMCI host compiler") \
range(1, max_jint) \
constraint(EnableJVMCIMustBeEnabledConstraintFunc,AtParse) \
\
experimental(bool, CodeInstallSafepointChecks, true, \
"Perform explicit safepoint checks while installing code") \
constraint(EnableJVMCIMustBeEnabledConstraintFunc,AtParse) \
\
NOT_COMPILER2(product(intx, MaxVectorSize, 64, \
"Max vector size in bytes, " \
@ -74,28 +68,22 @@
"Trace level for JVMCI: " \
"1 means emit a message for each CompilerToVM call," \
"levels greater than 1 provide progressively greater detail") \
constraint(EnableJVMCIMustBeEnabledConstraintFunc,AtParse) \
\
experimental(intx, JVMCICounterSize, 0, \
"Reserved size for benchmark counters") \
range(0, max_jint) \
constraint(EnableJVMCIMustBeEnabledConstraintFunc,AtParse) \
\
experimental(bool, JVMCICountersExcludeCompiler, true, \
"Exclude JVMCI compiler threads from benchmark counters") \
constraint(EnableJVMCIMustBeEnabledConstraintFunc,AtParse) \
\
develop(bool, JVMCIUseFastLocking, true, \
"Use fast inlined locking code") \
constraint(EnableJVMCIMustBeEnabledConstraintFunc,AtParse) \
\
experimental(intx, JVMCINMethodSizeLimit, (80*K)*wordSize, \
"Maximum size of a compiled method.") \
constraint(EnableJVMCIMustBeEnabledConstraintFunc,AtParse) \
\
develop(bool, TraceUncollectedSpeculations, false, \
"Print message when a failed speculation was not collected") \
constraint(EnableJVMCIMustBeEnabledConstraintFunc,AtParse) \
"Print message when a failed speculation was not collected")
// Read default values for JVMCI globals
@ -110,4 +98,11 @@ JVMCI_FLAGS(DECLARE_DEVELOPER_FLAG, \
IGNORE_RANGE, \
IGNORE_CONSTRAINT)
class JVMCIGlobals {
public:
// Return true if jvmci flags are consistent.
static bool check_jvmci_flags_are_consistent();
// Print jvmci arguments inconsistency error message.
static void print_jvmci_args_inconsistency_error_message();
};
#endif // SHARE_VM_JVMCI_JVMCIGLOBALS_HPP

View File

@ -243,14 +243,72 @@ bool C2Compiler::is_intrinsic_supported(const methodHandle& method, bool is_virt
case vmIntrinsics::_reverseBytes_l:
if (!Matcher::match_rule_supported(Op_ReverseBytesL)) return false;
break;
/* CompareAndSwap, Object: */
case vmIntrinsics::_compareAndSwapObject:
#ifdef _LP64
if ( UseCompressedOops && !Matcher::match_rule_supported(Op_CompareAndSwapN)) return false;
if (!UseCompressedOops && !Matcher::match_rule_supported(Op_CompareAndSwapP)) return false;
#else
if (!Matcher::match_rule_supported(Op_CompareAndSwapP)) return false;
#endif
break;
case vmIntrinsics::_weakCompareAndSwapObject:
case vmIntrinsics::_weakCompareAndSwapObjectAcquire:
case vmIntrinsics::_weakCompareAndSwapObjectRelease:
#ifdef _LP64
if ( UseCompressedOops && !Matcher::match_rule_supported(Op_WeakCompareAndSwapN)) return false;
if (!UseCompressedOops && !Matcher::match_rule_supported(Op_WeakCompareAndSwapP)) return false;
#else
if (!Matcher::match_rule_supported(Op_WeakCompareAndSwapP)) return false;
#endif
break;
/* CompareAndSwap, Long: */
case vmIntrinsics::_compareAndSwapLong:
if (!Matcher::match_rule_supported(Op_CompareAndSwapL)) return false;
break;
case vmIntrinsics::_weakCompareAndSwapLong:
case vmIntrinsics::_weakCompareAndSwapLongAcquire:
case vmIntrinsics::_weakCompareAndSwapLongRelease:
if (!Matcher::match_rule_supported(Op_WeakCompareAndSwapL)) return false;
break;
/* CompareAndSwap, Int: */
case vmIntrinsics::_compareAndSwapInt:
if (!Matcher::match_rule_supported(Op_CompareAndSwapI)) return false;
break;
case vmIntrinsics::_weakCompareAndSwapInt:
case vmIntrinsics::_weakCompareAndSwapIntAcquire:
case vmIntrinsics::_weakCompareAndSwapIntRelease:
if (!Matcher::match_rule_supported(Op_WeakCompareAndSwapI)) return false;
break;
/* CompareAndExchange, Object: */
case vmIntrinsics::_compareAndExchangeObjectVolatile:
case vmIntrinsics::_compareAndExchangeObjectAcquire:
case vmIntrinsics::_compareAndExchangeObjectRelease:
#ifdef _LP64
if ( UseCompressedOops && !Matcher::match_rule_supported(Op_CompareAndExchangeN)) return false;
if (!UseCompressedOops && !Matcher::match_rule_supported(Op_CompareAndExchangeP)) return false;
#else
if (!Matcher::match_rule_supported(Op_CompareAndExchangeP)) return false;
#endif
break;
/* CompareAndExchange, Long: */
case vmIntrinsics::_compareAndExchangeLongVolatile:
case vmIntrinsics::_compareAndExchangeLongAcquire:
case vmIntrinsics::_compareAndExchangeLongRelease:
if (!Matcher::match_rule_supported(Op_CompareAndExchangeL)) return false;
break;
/* CompareAndExchange, Int: */
case vmIntrinsics::_compareAndExchangeIntVolatile:
case vmIntrinsics::_compareAndExchangeIntAcquire:
case vmIntrinsics::_compareAndExchangeIntRelease:
if (!Matcher::match_rule_supported(Op_CompareAndExchangeI)) return false;
break;
case vmIntrinsics::_getAndAddInt:
if (!Matcher::match_rule_supported(Op_GetAndAddI)) return false;
break;
@ -382,6 +440,42 @@ bool C2Compiler::is_intrinsic_supported(const methodHandle& method, bool is_virt
case vmIntrinsics::_putLongVolatile:
case vmIntrinsics::_putFloatVolatile:
case vmIntrinsics::_putDoubleVolatile:
case vmIntrinsics::_getObjectAcquire:
case vmIntrinsics::_getBooleanAcquire:
case vmIntrinsics::_getByteAcquire:
case vmIntrinsics::_getShortAcquire:
case vmIntrinsics::_getCharAcquire:
case vmIntrinsics::_getIntAcquire:
case vmIntrinsics::_getLongAcquire:
case vmIntrinsics::_getFloatAcquire:
case vmIntrinsics::_getDoubleAcquire:
case vmIntrinsics::_putObjectRelease:
case vmIntrinsics::_putBooleanRelease:
case vmIntrinsics::_putByteRelease:
case vmIntrinsics::_putShortRelease:
case vmIntrinsics::_putCharRelease:
case vmIntrinsics::_putIntRelease:
case vmIntrinsics::_putLongRelease:
case vmIntrinsics::_putFloatRelease:
case vmIntrinsics::_putDoubleRelease:
case vmIntrinsics::_getObjectOpaque:
case vmIntrinsics::_getBooleanOpaque:
case vmIntrinsics::_getByteOpaque:
case vmIntrinsics::_getShortOpaque:
case vmIntrinsics::_getCharOpaque:
case vmIntrinsics::_getIntOpaque:
case vmIntrinsics::_getLongOpaque:
case vmIntrinsics::_getFloatOpaque:
case vmIntrinsics::_getDoubleOpaque:
case vmIntrinsics::_putObjectOpaque:
case vmIntrinsics::_putBooleanOpaque:
case vmIntrinsics::_putByteOpaque:
case vmIntrinsics::_putShortOpaque:
case vmIntrinsics::_putCharOpaque:
case vmIntrinsics::_putIntOpaque:
case vmIntrinsics::_putLongOpaque:
case vmIntrinsics::_putFloatOpaque:
case vmIntrinsics::_putDoubleOpaque:
case vmIntrinsics::_getShortUnaligned:
case vmIntrinsics::_getCharUnaligned:
case vmIntrinsics::_getIntUnaligned:
@ -390,7 +484,6 @@ bool C2Compiler::is_intrinsic_supported(const methodHandle& method, bool is_virt
case vmIntrinsics::_putCharUnaligned:
case vmIntrinsics::_putIntUnaligned:
case vmIntrinsics::_putLongUnaligned:
case vmIntrinsics::_compareAndSwapInt:
case vmIntrinsics::_putOrderedObject:
case vmIntrinsics::_putOrderedInt:
case vmIntrinsics::_putOrderedLong:

View File

@ -85,6 +85,14 @@ macro(CompareAndSwapI)
macro(CompareAndSwapL)
macro(CompareAndSwapP)
macro(CompareAndSwapN)
macro(WeakCompareAndSwapI)
macro(WeakCompareAndSwapL)
macro(WeakCompareAndSwapP)
macro(WeakCompareAndSwapN)
macro(CompareAndExchangeI)
macro(CompareAndExchangeL)
macro(CompareAndExchangeP)
macro(CompareAndExchangeN)
macro(GetAndAddI)
macro(GetAndAddL)
macro(GetAndSetI)

View File

@ -88,7 +88,27 @@ MachConstantBaseNode* Compile::mach_constant_base_node() {
// Return the index at which m must be inserted (or already exists).
// The sort order is by the address of the ciMethod, with is_virtual as minor key.
int Compile::intrinsic_insertion_index(ciMethod* m, bool is_virtual) {
class IntrinsicDescPair {
private:
ciMethod* _m;
bool _is_virtual;
public:
IntrinsicDescPair(ciMethod* m, bool is_virtual) : _m(m), _is_virtual(is_virtual) {}
static int compare(IntrinsicDescPair* const& key, CallGenerator* const& elt) {
ciMethod* m = elt->method();
ciMethod* key_m = key->_m;
if (key_m < m) return -1;
else if (key_m > m) return 1;
else {
bool is_virtual = elt->is_virtual();
bool key_virtual = key->_is_virtual;
if (key_virtual < is_virtual) return -1;
else if (key_virtual > is_virtual) return 1;
else return 0;
}
}
};
int Compile::intrinsic_insertion_index(ciMethod* m, bool is_virtual, bool& found) {
#ifdef ASSERT
for (int i = 1; i < _intrinsics->length(); i++) {
CallGenerator* cg1 = _intrinsics->at(i-1);
@ -99,63 +119,28 @@ int Compile::intrinsic_insertion_index(ciMethod* m, bool is_virtual) {
"compiler intrinsics list must stay sorted");
}
#endif
// Binary search sorted list, in decreasing intervals [lo, hi].
int lo = 0, hi = _intrinsics->length()-1;
while (lo <= hi) {
int mid = (uint)(hi + lo) / 2;
ciMethod* mid_m = _intrinsics->at(mid)->method();
if (m < mid_m) {
hi = mid-1;
} else if (m > mid_m) {
lo = mid+1;
} else {
// look at minor sort key
bool mid_virt = _intrinsics->at(mid)->is_virtual();
if (is_virtual < mid_virt) {
hi = mid-1;
} else if (is_virtual > mid_virt) {
lo = mid+1;
} else {
return mid; // exact match
}
}
}
return lo; // inexact match
IntrinsicDescPair pair(m, is_virtual);
return _intrinsics->find_sorted<IntrinsicDescPair*, IntrinsicDescPair::compare>(&pair, found);
}
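Note (illustration, not part of this change): IntrinsicDescPair bundles the two sort keys, the ciMethod address as the major key and is_virtual as the minor key, so a single comparator can drive find_sorted over the CallGenerator list. The same two-key lower-bound lookup with std types and a hypothetical Gen element:

#include <algorithm>
#include <utility>
#include <vector>

// Two-key search in the spirit of IntrinsicDescPair::compare; 'Gen' is a
// hypothetical stand-in for CallGenerator.
struct Gen {
  const void* method;     // major key (compared by address)
  bool        is_virtual; // minor key
};

int find_slot(const std::vector<Gen*>& gens,
              const void* method, bool is_virtual, bool& found) {
  const std::pair<const void*, bool> key(method, is_virtual);
  auto it = std::lower_bound(gens.begin(), gens.end(), key,
      [](const Gen* g, const std::pair<const void*, bool>& k) {
        return std::make_pair(g->method, g->is_virtual) < k;
      });
  found = (it != gens.end() &&
           (*it)->method == method && (*it)->is_virtual == is_virtual);
  return static_cast<int>(it - gens.begin());
}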
void Compile::register_intrinsic(CallGenerator* cg) {
if (_intrinsics == NULL) {
_intrinsics = new (comp_arena())GrowableArray<CallGenerator*>(comp_arena(), 60, 0, NULL);
}
// This code is stolen from ciObjectFactory::insert.
// Really, GrowableArray should have methods for
// insert_at, remove_at, and binary_search.
int len = _intrinsics->length();
int index = intrinsic_insertion_index(cg->method(), cg->is_virtual());
if (index == len) {
_intrinsics->append(cg);
} else {
#ifdef ASSERT
CallGenerator* oldcg = _intrinsics->at(index);
assert(oldcg->method() != cg->method() || oldcg->is_virtual() != cg->is_virtual(), "don't register twice");
#endif
_intrinsics->append(_intrinsics->at(len-1));
int pos;
for (pos = len-2; pos >= index; pos--) {
_intrinsics->at_put(pos+1,_intrinsics->at(pos));
}
_intrinsics->at_put(index, cg);
}
bool found = false;
int index = intrinsic_insertion_index(cg->method(), cg->is_virtual(), found);
assert(!found, "registering twice");
_intrinsics->insert_before(index, cg);
assert(find_intrinsic(cg->method(), cg->is_virtual()) == cg, "registration worked");
}
CallGenerator* Compile::find_intrinsic(ciMethod* m, bool is_virtual) {
assert(m->is_loaded(), "don't try this on unloaded methods");
if (_intrinsics != NULL) {
int index = intrinsic_insertion_index(m, is_virtual);
if (index < _intrinsics->length()
&& _intrinsics->at(index)->method() == m
&& _intrinsics->at(index)->is_virtual() == is_virtual) {
bool found = false;
int index = intrinsic_insertion_index(m, is_virtual, found);
if (found) {
return _intrinsics->at(index);
}
}
@ -2801,6 +2786,14 @@ void Compile::final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc) {
case Op_CompareAndSwapL:
case Op_CompareAndSwapP:
case Op_CompareAndSwapN:
case Op_WeakCompareAndSwapI:
case Op_WeakCompareAndSwapL:
case Op_WeakCompareAndSwapP:
case Op_WeakCompareAndSwapN:
case Op_CompareAndExchangeI:
case Op_CompareAndExchangeL:
case Op_CompareAndExchangeP:
case Op_CompareAndExchangeN:
case Op_GetAndAddI:
case Op_GetAndAddL:
case Op_GetAndSetI:

View File

@ -1250,7 +1250,7 @@ class Compile : public Phase {
// Intrinsic setup.
void register_library_intrinsics(); // initializer
CallGenerator* make_vm_intrinsic(ciMethod* m, bool is_virtual); // constructor
int intrinsic_insertion_index(ciMethod* m, bool is_virtual); // helper
int intrinsic_insertion_index(ciMethod* m, bool is_virtual, bool& found); // helper
CallGenerator* find_intrinsic(ciMethod* m, bool is_virtual); // query fn
void register_intrinsic(CallGenerator* cg); // update fn

View File

@ -490,6 +490,8 @@ void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *de
}
break;
}
case Op_CompareAndExchangeP:
case Op_CompareAndExchangeN:
case Op_GetAndSetP:
case Op_GetAndSetN: {
add_objload_to_connection_graph(n, delayed_worklist);
@ -499,6 +501,8 @@ void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *de
case Op_StoreN:
case Op_StoreNKlass:
case Op_StorePConditional:
case Op_WeakCompareAndSwapP:
case Op_WeakCompareAndSwapN:
case Op_CompareAndSwapP:
case Op_CompareAndSwapN: {
Node* adr = n->in(MemNode::Address);
@ -698,8 +702,12 @@ void ConnectionGraph::add_final_edges(Node *n) {
case Op_StoreN:
case Op_StoreNKlass:
case Op_StorePConditional:
case Op_CompareAndExchangeP:
case Op_CompareAndExchangeN:
case Op_CompareAndSwapP:
case Op_CompareAndSwapN:
case Op_WeakCompareAndSwapP:
case Op_WeakCompareAndSwapN:
case Op_GetAndSetP:
case Op_GetAndSetN: {
Node* adr = n->in(MemNode::Address);

View File

@ -241,7 +241,9 @@ class LibraryCallKit : public GraphKit {
// Generates the guards that check whether the result of
// Unsafe.getObject should be recorded in an SATB log buffer.
void insert_pre_barrier(Node* base_oop, Node* offset, Node* pre_val, bool need_mem_bar);
bool inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile, bool is_unaligned);
typedef enum { Relaxed, Opaque, Volatile, Acquire, Release } AccessKind;
bool inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, AccessKind kind, bool is_unaligned);
static bool klass_needs_init_guard(Node* kls);
bool inline_unsafe_allocate();
bool inline_unsafe_copyMemory();
@ -274,9 +276,10 @@ class LibraryCallKit : public GraphKit {
JVMState* arraycopy_restore_alloc_state(AllocateArrayNode* alloc, int& saved_reexecute_sp);
void arraycopy_move_allocation_here(AllocateArrayNode* alloc, Node* dest, JVMState* saved_jvms, int saved_reexecute_sp);
typedef enum { LS_xadd, LS_xchg, LS_cmpxchg } LoadStoreKind;
bool inline_unsafe_load_store(BasicType type, LoadStoreKind kind);
bool inline_unsafe_ordered_store(BasicType type);
typedef enum { LS_get_add, LS_get_set, LS_cmp_swap, LS_cmp_swap_weak, LS_cmp_exchange } LoadStoreKind;
MemNode::MemOrd access_kind_to_memord_LS(AccessKind access_kind, bool is_store);
MemNode::MemOrd access_kind_to_memord(AccessKind access_kind);
bool inline_unsafe_load_store(BasicType type, LoadStoreKind kind, AccessKind access_kind);
bool inline_unsafe_fence(vmIntrinsics::ID id);
bool inline_fp_conversions(vmIntrinsics::ID id);
bool inline_number_methods(vmIntrinsics::ID id);
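Note (illustration, not part of this change): the boolean is_volatile parameter is replaced by the five-valued AccessKind, and LoadStoreKind gains weak-CAS and compare-exchange variants. As rough intuition only (the real translation is done by access_kind_to_memord(_LS), whose bodies are not shown in this hunk), the access kinds line up with the C++11 memory orders roughly as follows:

#include <atomic>

// Rough intuition only: approximate C++11 counterparts of the new AccessKind
// values. The exact C2 mapping is in access_kind_to_memord(_LS).
enum AccessKind { Relaxed, Opaque, Volatile, Acquire, Release };

std::memory_order approximate_order(AccessKind kind) {
  switch (kind) {
    case Relaxed:                                     // plain field access
    case Opaque:  return std::memory_order_relaxed;   // atomic, no ordering
    case Acquire: return std::memory_order_acquire;   // for loads
    case Release: return std::memory_order_release;   // for stores
    case Volatile:
    default:      return std::memory_order_seq_cst;   // Java volatile
  }
}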
@ -553,86 +556,147 @@ bool LibraryCallKit::try_to_inline(int predicate) {
case vmIntrinsics::_inflateStringC:
case vmIntrinsics::_inflateStringB: return inline_string_copy(!is_compress);
case vmIntrinsics::_getObject: return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT, !is_volatile, false);
case vmIntrinsics::_getBoolean: return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN, !is_volatile, false);
case vmIntrinsics::_getByte: return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE, !is_volatile, false);
case vmIntrinsics::_getShort: return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT, !is_volatile, false);
case vmIntrinsics::_getChar: return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR, !is_volatile, false);
case vmIntrinsics::_getInt: return inline_unsafe_access(!is_native_ptr, !is_store, T_INT, !is_volatile, false);
case vmIntrinsics::_getLong: return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG, !is_volatile, false);
case vmIntrinsics::_getFloat: return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT, !is_volatile, false);
case vmIntrinsics::_getDouble: return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE, !is_volatile, false);
case vmIntrinsics::_putObject: return inline_unsafe_access(!is_native_ptr, is_store, T_OBJECT, !is_volatile, false);
case vmIntrinsics::_putBoolean: return inline_unsafe_access(!is_native_ptr, is_store, T_BOOLEAN, !is_volatile, false);
case vmIntrinsics::_putByte: return inline_unsafe_access(!is_native_ptr, is_store, T_BYTE, !is_volatile, false);
case vmIntrinsics::_putShort: return inline_unsafe_access(!is_native_ptr, is_store, T_SHORT, !is_volatile, false);
case vmIntrinsics::_putChar: return inline_unsafe_access(!is_native_ptr, is_store, T_CHAR, !is_volatile, false);
case vmIntrinsics::_putInt: return inline_unsafe_access(!is_native_ptr, is_store, T_INT, !is_volatile, false);
case vmIntrinsics::_putLong: return inline_unsafe_access(!is_native_ptr, is_store, T_LONG, !is_volatile, false);
case vmIntrinsics::_putFloat: return inline_unsafe_access(!is_native_ptr, is_store, T_FLOAT, !is_volatile, false);
case vmIntrinsics::_putDouble: return inline_unsafe_access(!is_native_ptr, is_store, T_DOUBLE, !is_volatile, false);
case vmIntrinsics::_getObject: return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT, Relaxed, false);
case vmIntrinsics::_getBoolean: return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN, Relaxed, false);
case vmIntrinsics::_getByte: return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE, Relaxed, false);
case vmIntrinsics::_getShort: return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT, Relaxed, false);
case vmIntrinsics::_getChar: return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR, Relaxed, false);
case vmIntrinsics::_getInt: return inline_unsafe_access(!is_native_ptr, !is_store, T_INT, Relaxed, false);
case vmIntrinsics::_getLong: return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG, Relaxed, false);
case vmIntrinsics::_getFloat: return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT, Relaxed, false);
case vmIntrinsics::_getDouble: return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE, Relaxed, false);
case vmIntrinsics::_getByte_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_BYTE, !is_volatile, false);
case vmIntrinsics::_getShort_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_SHORT, !is_volatile, false);
case vmIntrinsics::_getChar_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_CHAR, !is_volatile, false);
case vmIntrinsics::_getInt_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_INT, !is_volatile, false);
case vmIntrinsics::_getLong_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_LONG, !is_volatile, false);
case vmIntrinsics::_getFloat_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_FLOAT, !is_volatile, false);
case vmIntrinsics::_getDouble_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_DOUBLE, !is_volatile, false);
case vmIntrinsics::_getAddress_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_ADDRESS, !is_volatile, false);
case vmIntrinsics::_putObject: return inline_unsafe_access(!is_native_ptr, is_store, T_OBJECT, Relaxed, false);
case vmIntrinsics::_putBoolean: return inline_unsafe_access(!is_native_ptr, is_store, T_BOOLEAN, Relaxed, false);
case vmIntrinsics::_putByte: return inline_unsafe_access(!is_native_ptr, is_store, T_BYTE, Relaxed, false);
case vmIntrinsics::_putShort: return inline_unsafe_access(!is_native_ptr, is_store, T_SHORT, Relaxed, false);
case vmIntrinsics::_putChar: return inline_unsafe_access(!is_native_ptr, is_store, T_CHAR, Relaxed, false);
case vmIntrinsics::_putInt: return inline_unsafe_access(!is_native_ptr, is_store, T_INT, Relaxed, false);
case vmIntrinsics::_putLong: return inline_unsafe_access(!is_native_ptr, is_store, T_LONG, Relaxed, false);
case vmIntrinsics::_putFloat: return inline_unsafe_access(!is_native_ptr, is_store, T_FLOAT, Relaxed, false);
case vmIntrinsics::_putDouble: return inline_unsafe_access(!is_native_ptr, is_store, T_DOUBLE, Relaxed, false);
case vmIntrinsics::_putByte_raw: return inline_unsafe_access( is_native_ptr, is_store, T_BYTE, !is_volatile, false);
case vmIntrinsics::_putShort_raw: return inline_unsafe_access( is_native_ptr, is_store, T_SHORT, !is_volatile, false);
case vmIntrinsics::_putChar_raw: return inline_unsafe_access( is_native_ptr, is_store, T_CHAR, !is_volatile, false);
case vmIntrinsics::_putInt_raw: return inline_unsafe_access( is_native_ptr, is_store, T_INT, !is_volatile, false);
case vmIntrinsics::_putLong_raw: return inline_unsafe_access( is_native_ptr, is_store, T_LONG, !is_volatile, false);
case vmIntrinsics::_putFloat_raw: return inline_unsafe_access( is_native_ptr, is_store, T_FLOAT, !is_volatile, false);
case vmIntrinsics::_putDouble_raw: return inline_unsafe_access( is_native_ptr, is_store, T_DOUBLE, !is_volatile, false);
case vmIntrinsics::_putAddress_raw: return inline_unsafe_access( is_native_ptr, is_store, T_ADDRESS, !is_volatile, false);
case vmIntrinsics::_getByte_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_BYTE, Relaxed, false);
case vmIntrinsics::_getShort_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_SHORT, Relaxed, false);
case vmIntrinsics::_getChar_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_CHAR, Relaxed, false);
case vmIntrinsics::_getInt_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_INT, Relaxed, false);
case vmIntrinsics::_getLong_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_LONG, Relaxed, false);
case vmIntrinsics::_getFloat_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_FLOAT, Relaxed, false);
case vmIntrinsics::_getDouble_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_DOUBLE, Relaxed, false);
case vmIntrinsics::_getAddress_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_ADDRESS, Relaxed, false);
case vmIntrinsics::_getObjectVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT, is_volatile, false);
case vmIntrinsics::_getBooleanVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN, is_volatile, false);
case vmIntrinsics::_getByteVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE, is_volatile, false);
case vmIntrinsics::_getShortVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT, is_volatile, false);
case vmIntrinsics::_getCharVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR, is_volatile, false);
case vmIntrinsics::_getIntVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_INT, is_volatile, false);
case vmIntrinsics::_getLongVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG, is_volatile, false);
case vmIntrinsics::_getFloatVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT, is_volatile, false);
case vmIntrinsics::_getDoubleVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE, is_volatile, false);
case vmIntrinsics::_putByte_raw: return inline_unsafe_access( is_native_ptr, is_store, T_BYTE, Relaxed, false);
case vmIntrinsics::_putShort_raw: return inline_unsafe_access( is_native_ptr, is_store, T_SHORT, Relaxed, false);
case vmIntrinsics::_putChar_raw: return inline_unsafe_access( is_native_ptr, is_store, T_CHAR, Relaxed, false);
case vmIntrinsics::_putInt_raw: return inline_unsafe_access( is_native_ptr, is_store, T_INT, Relaxed, false);
case vmIntrinsics::_putLong_raw: return inline_unsafe_access( is_native_ptr, is_store, T_LONG, Relaxed, false);
case vmIntrinsics::_putFloat_raw: return inline_unsafe_access( is_native_ptr, is_store, T_FLOAT, Relaxed, false);
case vmIntrinsics::_putDouble_raw: return inline_unsafe_access( is_native_ptr, is_store, T_DOUBLE, Relaxed, false);
case vmIntrinsics::_putAddress_raw: return inline_unsafe_access( is_native_ptr, is_store, T_ADDRESS, Relaxed, false);
case vmIntrinsics::_putObjectVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_OBJECT, is_volatile, false);
case vmIntrinsics::_putBooleanVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_BOOLEAN, is_volatile, false);
case vmIntrinsics::_putByteVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_BYTE, is_volatile, false);
case vmIntrinsics::_putShortVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_SHORT, is_volatile, false);
case vmIntrinsics::_putCharVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_CHAR, is_volatile, false);
case vmIntrinsics::_putIntVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_INT, is_volatile, false);
case vmIntrinsics::_putLongVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_LONG, is_volatile, false);
case vmIntrinsics::_putFloatVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_FLOAT, is_volatile, false);
case vmIntrinsics::_putDoubleVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_DOUBLE, is_volatile, false);
case vmIntrinsics::_getObjectVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT, Volatile, false);
case vmIntrinsics::_getBooleanVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN, Volatile, false);
case vmIntrinsics::_getByteVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE, Volatile, false);
case vmIntrinsics::_getShortVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT, Volatile, false);
case vmIntrinsics::_getCharVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR, Volatile, false);
case vmIntrinsics::_getIntVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_INT, Volatile, false);
case vmIntrinsics::_getLongVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG, Volatile, false);
case vmIntrinsics::_getFloatVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT, Volatile, false);
case vmIntrinsics::_getDoubleVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE, Volatile, false);
case vmIntrinsics::_getShortUnaligned: return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT, !is_volatile, true);
case vmIntrinsics::_getCharUnaligned: return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR, !is_volatile, true);
case vmIntrinsics::_getIntUnaligned: return inline_unsafe_access(!is_native_ptr, !is_store, T_INT, !is_volatile, true);
case vmIntrinsics::_getLongUnaligned: return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG, !is_volatile, true);
case vmIntrinsics::_putObjectVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_OBJECT, Volatile, false);
case vmIntrinsics::_putBooleanVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_BOOLEAN, Volatile, false);
case vmIntrinsics::_putByteVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_BYTE, Volatile, false);
case vmIntrinsics::_putShortVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_SHORT, Volatile, false);
case vmIntrinsics::_putCharVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_CHAR, Volatile, false);
case vmIntrinsics::_putIntVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_INT, Volatile, false);
case vmIntrinsics::_putLongVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_LONG, Volatile, false);
case vmIntrinsics::_putFloatVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_FLOAT, Volatile, false);
case vmIntrinsics::_putDoubleVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_DOUBLE, Volatile, false);
case vmIntrinsics::_putShortUnaligned: return inline_unsafe_access(!is_native_ptr, is_store, T_SHORT, !is_volatile, true);
case vmIntrinsics::_putCharUnaligned: return inline_unsafe_access(!is_native_ptr, is_store, T_CHAR, !is_volatile, true);
case vmIntrinsics::_putIntUnaligned: return inline_unsafe_access(!is_native_ptr, is_store, T_INT, !is_volatile, true);
case vmIntrinsics::_putLongUnaligned: return inline_unsafe_access(!is_native_ptr, is_store, T_LONG, !is_volatile, true);
case vmIntrinsics::_getShortUnaligned: return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT, Relaxed, true);
case vmIntrinsics::_getCharUnaligned: return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR, Relaxed, true);
case vmIntrinsics::_getIntUnaligned: return inline_unsafe_access(!is_native_ptr, !is_store, T_INT, Relaxed, true);
case vmIntrinsics::_getLongUnaligned: return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG, Relaxed, true);
case vmIntrinsics::_compareAndSwapObject: return inline_unsafe_load_store(T_OBJECT, LS_cmpxchg);
case vmIntrinsics::_compareAndSwapInt: return inline_unsafe_load_store(T_INT, LS_cmpxchg);
case vmIntrinsics::_compareAndSwapLong: return inline_unsafe_load_store(T_LONG, LS_cmpxchg);
case vmIntrinsics::_putShortUnaligned: return inline_unsafe_access(!is_native_ptr, is_store, T_SHORT, Relaxed, true);
case vmIntrinsics::_putCharUnaligned: return inline_unsafe_access(!is_native_ptr, is_store, T_CHAR, Relaxed, true);
case vmIntrinsics::_putIntUnaligned: return inline_unsafe_access(!is_native_ptr, is_store, T_INT, Relaxed, true);
case vmIntrinsics::_putLongUnaligned: return inline_unsafe_access(!is_native_ptr, is_store, T_LONG, Relaxed, true);
case vmIntrinsics::_putOrderedObject: return inline_unsafe_ordered_store(T_OBJECT);
case vmIntrinsics::_putOrderedInt: return inline_unsafe_ordered_store(T_INT);
case vmIntrinsics::_putOrderedLong: return inline_unsafe_ordered_store(T_LONG);
case vmIntrinsics::_putOrderedObject: return inline_unsafe_access(!is_native_ptr, is_store, T_OBJECT, Release, false);
case vmIntrinsics::_putOrderedInt: return inline_unsafe_access(!is_native_ptr, is_store, T_INT, Release, false);
case vmIntrinsics::_putOrderedLong: return inline_unsafe_access(!is_native_ptr, is_store, T_LONG, Release, false);
case vmIntrinsics::_getAndAddInt: return inline_unsafe_load_store(T_INT, LS_xadd);
case vmIntrinsics::_getAndAddLong: return inline_unsafe_load_store(T_LONG, LS_xadd);
case vmIntrinsics::_getAndSetInt: return inline_unsafe_load_store(T_INT, LS_xchg);
case vmIntrinsics::_getAndSetLong: return inline_unsafe_load_store(T_LONG, LS_xchg);
case vmIntrinsics::_getAndSetObject: return inline_unsafe_load_store(T_OBJECT, LS_xchg);
case vmIntrinsics::_getObjectAcquire: return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT, Acquire, false);
case vmIntrinsics::_getBooleanAcquire: return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN, Acquire, false);
case vmIntrinsics::_getByteAcquire: return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE, Acquire, false);
case vmIntrinsics::_getShortAcquire: return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT, Acquire, false);
case vmIntrinsics::_getCharAcquire: return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR, Acquire, false);
case vmIntrinsics::_getIntAcquire: return inline_unsafe_access(!is_native_ptr, !is_store, T_INT, Acquire, false);
case vmIntrinsics::_getLongAcquire: return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG, Acquire, false);
case vmIntrinsics::_getFloatAcquire: return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT, Acquire, false);
case vmIntrinsics::_getDoubleAcquire: return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE, Acquire, false);
case vmIntrinsics::_putObjectRelease: return inline_unsafe_access(!is_native_ptr, is_store, T_OBJECT, Release, false);
case vmIntrinsics::_putBooleanRelease: return inline_unsafe_access(!is_native_ptr, is_store, T_BOOLEAN, Release, false);
case vmIntrinsics::_putByteRelease: return inline_unsafe_access(!is_native_ptr, is_store, T_BYTE, Release, false);
case vmIntrinsics::_putShortRelease: return inline_unsafe_access(!is_native_ptr, is_store, T_SHORT, Release, false);
case vmIntrinsics::_putCharRelease: return inline_unsafe_access(!is_native_ptr, is_store, T_CHAR, Release, false);
case vmIntrinsics::_putIntRelease: return inline_unsafe_access(!is_native_ptr, is_store, T_INT, Release, false);
case vmIntrinsics::_putLongRelease: return inline_unsafe_access(!is_native_ptr, is_store, T_LONG, Release, false);
case vmIntrinsics::_putFloatRelease: return inline_unsafe_access(!is_native_ptr, is_store, T_FLOAT, Release, false);
case vmIntrinsics::_putDoubleRelease: return inline_unsafe_access(!is_native_ptr, is_store, T_DOUBLE, Release, false);
case vmIntrinsics::_getObjectOpaque: return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT, Opaque, false);
case vmIntrinsics::_getBooleanOpaque: return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN, Opaque, false);
case vmIntrinsics::_getByteOpaque: return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE, Opaque, false);
case vmIntrinsics::_getShortOpaque: return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT, Opaque, false);
case vmIntrinsics::_getCharOpaque: return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR, Opaque, false);
case vmIntrinsics::_getIntOpaque: return inline_unsafe_access(!is_native_ptr, !is_store, T_INT, Opaque, false);
case vmIntrinsics::_getLongOpaque: return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG, Opaque, false);
case vmIntrinsics::_getFloatOpaque: return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT, Opaque, false);
case vmIntrinsics::_getDoubleOpaque: return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE, Opaque, false);
case vmIntrinsics::_putObjectOpaque: return inline_unsafe_access(!is_native_ptr, is_store, T_OBJECT, Opaque, false);
case vmIntrinsics::_putBooleanOpaque: return inline_unsafe_access(!is_native_ptr, is_store, T_BOOLEAN, Opaque, false);
case vmIntrinsics::_putByteOpaque: return inline_unsafe_access(!is_native_ptr, is_store, T_BYTE, Opaque, false);
case vmIntrinsics::_putShortOpaque: return inline_unsafe_access(!is_native_ptr, is_store, T_SHORT, Opaque, false);
case vmIntrinsics::_putCharOpaque: return inline_unsafe_access(!is_native_ptr, is_store, T_CHAR, Opaque, false);
case vmIntrinsics::_putIntOpaque: return inline_unsafe_access(!is_native_ptr, is_store, T_INT, Opaque, false);
case vmIntrinsics::_putLongOpaque: return inline_unsafe_access(!is_native_ptr, is_store, T_LONG, Opaque, false);
case vmIntrinsics::_putFloatOpaque: return inline_unsafe_access(!is_native_ptr, is_store, T_FLOAT, Opaque, false);
case vmIntrinsics::_putDoubleOpaque: return inline_unsafe_access(!is_native_ptr, is_store, T_DOUBLE, Opaque, false);
case vmIntrinsics::_compareAndSwapObject: return inline_unsafe_load_store(T_OBJECT, LS_cmp_swap, Volatile);
case vmIntrinsics::_compareAndSwapInt: return inline_unsafe_load_store(T_INT, LS_cmp_swap, Volatile);
case vmIntrinsics::_compareAndSwapLong: return inline_unsafe_load_store(T_LONG, LS_cmp_swap, Volatile);
case vmIntrinsics::_weakCompareAndSwapObject: return inline_unsafe_load_store(T_OBJECT, LS_cmp_swap_weak, Relaxed);
case vmIntrinsics::_weakCompareAndSwapObjectAcquire: return inline_unsafe_load_store(T_OBJECT, LS_cmp_swap_weak, Acquire);
case vmIntrinsics::_weakCompareAndSwapObjectRelease: return inline_unsafe_load_store(T_OBJECT, LS_cmp_swap_weak, Release);
case vmIntrinsics::_weakCompareAndSwapInt: return inline_unsafe_load_store(T_INT, LS_cmp_swap_weak, Relaxed);
case vmIntrinsics::_weakCompareAndSwapIntAcquire: return inline_unsafe_load_store(T_INT, LS_cmp_swap_weak, Acquire);
case vmIntrinsics::_weakCompareAndSwapIntRelease: return inline_unsafe_load_store(T_INT, LS_cmp_swap_weak, Release);
case vmIntrinsics::_weakCompareAndSwapLong: return inline_unsafe_load_store(T_LONG, LS_cmp_swap_weak, Relaxed);
case vmIntrinsics::_weakCompareAndSwapLongAcquire: return inline_unsafe_load_store(T_LONG, LS_cmp_swap_weak, Acquire);
case vmIntrinsics::_weakCompareAndSwapLongRelease: return inline_unsafe_load_store(T_LONG, LS_cmp_swap_weak, Release);
case vmIntrinsics::_compareAndExchangeObjectVolatile: return inline_unsafe_load_store(T_OBJECT, LS_cmp_exchange, Volatile);
case vmIntrinsics::_compareAndExchangeObjectAcquire: return inline_unsafe_load_store(T_OBJECT, LS_cmp_exchange, Acquire);
case vmIntrinsics::_compareAndExchangeObjectRelease: return inline_unsafe_load_store(T_OBJECT, LS_cmp_exchange, Release);
case vmIntrinsics::_compareAndExchangeIntVolatile: return inline_unsafe_load_store(T_INT, LS_cmp_exchange, Volatile);
case vmIntrinsics::_compareAndExchangeIntAcquire: return inline_unsafe_load_store(T_INT, LS_cmp_exchange, Acquire);
case vmIntrinsics::_compareAndExchangeIntRelease: return inline_unsafe_load_store(T_INT, LS_cmp_exchange, Release);
case vmIntrinsics::_compareAndExchangeLongVolatile: return inline_unsafe_load_store(T_LONG, LS_cmp_exchange, Volatile);
case vmIntrinsics::_compareAndExchangeLongAcquire: return inline_unsafe_load_store(T_LONG, LS_cmp_exchange, Acquire);
case vmIntrinsics::_compareAndExchangeLongRelease: return inline_unsafe_load_store(T_LONG, LS_cmp_exchange, Release);
case vmIntrinsics::_getAndAddInt: return inline_unsafe_load_store(T_INT, LS_get_add, Volatile);
case vmIntrinsics::_getAndAddLong: return inline_unsafe_load_store(T_LONG, LS_get_add, Volatile);
case vmIntrinsics::_getAndSetInt: return inline_unsafe_load_store(T_INT, LS_get_set, Volatile);
case vmIntrinsics::_getAndSetLong: return inline_unsafe_load_store(T_LONG, LS_get_set, Volatile);
case vmIntrinsics::_getAndSetObject: return inline_unsafe_load_store(T_OBJECT, LS_get_set, Volatile);
case vmIntrinsics::_loadFence:
case vmIntrinsics::_storeFence:
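As a caller-level illustration of the new entries above (a sketch only; the CasSketch class and its methods are assumed names, and Unsafe.getUnsafe() requires a privileged caller): the weak forms may fail spuriously and are meant to be retried, while compareAndExchange* returns the value actually witnessed in memory rather than a boolean.

import jdk.internal.misc.Unsafe;

class CasSketch {
  static final Unsafe U = Unsafe.getUnsafe();

  // Weak CAS may fail spuriously even when the expected value is present,
  // so callers retry in a loop.
  static void increment(Object o, long offset) {
    int v;
    do {
      v = U.getIntVolatile(o, offset);
    } while (!U.weakCompareAndSwapInt(o, offset, v, v + 1));
  }

  // Compare-and-exchange reports success through the witnessed value:
  // it equals 'expected' on success and holds the conflicting value otherwise.
  static boolean trySet(Object o, long offset, int expected, int x) {
    return U.compareAndExchangeIntVolatile(o, offset, expected, x) == expected;
  }
}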
@ -1584,6 +1648,13 @@ bool LibraryCallKit::inline_string_char_access(bool is_store) {
assert (type2aelembytes(T_CHAR) == type2aelembytes(T_BYTE)*2,
"sanity: byte[] and char[] scales agree");
// Bail when getChar over constants is requested: constant folding would
// reject folding a mismatched char access over byte[]. Normal inlining of the
// getChar Java method would constant fold nicely instead.
if (!is_store && value->is_Con() && index->is_Con()) {
return false;
}
Node* adr = array_element_address(value, index, T_CHAR);
if (is_store) {
(void) store_to_memory(control(), adr, ch, T_CHAR, TypeAryPtr::BYTES, MemNode::unordered,
@ -2277,8 +2348,10 @@ const TypeOopPtr* LibraryCallKit::sharpen_unsafe_type(Compile::AliasType* alias_
return NULL;
}
bool LibraryCallKit::inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile, bool unaligned) {
bool LibraryCallKit::inline_unsafe_access(const bool is_native_ptr, bool is_store, const BasicType type, const AccessKind kind, const bool unaligned) {
if (callee()->is_static()) return false; // caller must have the capability!
guarantee(!is_store || kind != Acquire, "Acquire accesses can be produced only for loads");
guarantee( is_store || kind != Release, "Release accesses can be produced only for stores");
#ifndef PRODUCT
{
@ -2367,7 +2440,42 @@ bool LibraryCallKit::inline_unsafe_access(bool is_native_ptr, bool is_store, Bas
// the barriers get omitted and the unsafe reference begins to "pollute"
// the alias analysis of the rest of the graph, either Compile::can_alias
// or Compile::must_alias will throw a diagnostic assert.)
bool need_mem_bar = (alias_type->adr_type() == TypeOopPtr::BOTTOM);
bool need_mem_bar;
switch (kind) {
case Relaxed:
need_mem_bar = (alias_type->adr_type() == TypeOopPtr::BOTTOM);
break;
case Opaque:
// Opaque uses CPUOrder membars for protection against code movement.
case Acquire:
case Release:
case Volatile:
need_mem_bar = true;
break;
default:
ShouldNotReachHere();
}
// Some accesses require access atomicity for all types, notably longs and doubles.
// When AlwaysAtomicAccesses is enabled, all accesses are atomic.
bool requires_atomic_access = false;
switch (kind) {
case Relaxed:
case Opaque:
requires_atomic_access = AlwaysAtomicAccesses;
break;
case Acquire:
case Release:
case Volatile:
requires_atomic_access = true;
break;
default:
ShouldNotReachHere();
}
// Figure out the memory ordering.
// Acquire/Release/Volatile accesses require marking the loads/stores with MemOrd
MemNode::MemOrd mo = access_kind_to_memord_LS(kind, is_store);
// If we are reading the value of the referent field of a Reference
// object (either by using Unsafe directly or through reflection)
@ -2394,22 +2502,30 @@ bool LibraryCallKit::inline_unsafe_access(bool is_native_ptr, bool is_store, Bas
// and it is not possible to fully distinguish unintended nulls
// from intended ones in this API.
if (is_volatile) {
// We need to emit leading and trailing CPU membars (see below) in
// addition to memory membars when is_volatile. This is a little
// too strong, but avoids the need to insert per-alias-type
// volatile membars (for stores; compare Parse::do_put_xxx), which
// we cannot do effectively here because we probably only have a
// rough approximation of type.
need_mem_bar = true;
// For Stores, place a memory ordering barrier now.
if (is_store) {
insert_mem_bar(Op_MemBarRelease);
} else {
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
insert_mem_bar(Op_MemBarVolatile);
// We need to emit leading and trailing CPU membars (see below) in
// addition to memory membars for special access modes. This is a little
// too strong, but avoids the need to insert per-alias-type
// volatile membars (for stores; compare Parse::do_put_xxx), which
// we cannot do effectively here because we probably only have a
// rough approximation of type.
switch(kind) {
case Relaxed:
case Opaque:
case Acquire:
break;
case Release:
case Volatile:
if (is_store) {
insert_mem_bar(Op_MemBarRelease);
} else {
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
insert_mem_bar(Op_MemBarVolatile);
}
}
}
break;
default:
ShouldNotReachHere();
}
// Memory barrier to prevent normal and 'unsafe' accesses from
@ -2453,10 +2569,9 @@ bool LibraryCallKit::inline_unsafe_access(bool is_native_ptr, bool is_store, Bas
}
}
if (p == NULL) {
MemNode::MemOrd mo = is_volatile ? MemNode::acquire : MemNode::unordered;
// To be valid, unsafe loads may depend on other conditions than
// the one that guards them: pin the Load node
p = make_load(control(), adr, value_type, type, adr_type, mo, LoadNode::Pinned, is_volatile, unaligned, mismatched);
p = make_load(control(), adr, value_type, type, adr_type, mo, LoadNode::Pinned, requires_atomic_access, unaligned, mismatched);
// load value
switch (type) {
case T_BOOLEAN:
@ -2470,7 +2585,9 @@ bool LibraryCallKit::inline_unsafe_access(bool is_native_ptr, bool is_store, Bas
break;
case T_OBJECT:
if (need_read_barrier) {
insert_pre_barrier(heap_base_oop, offset, p, !(is_volatile || need_mem_bar));
// We do not require a mem bar inside pre_barrier if need_mem_bar
// is set: the barriers would be emitted by us.
insert_pre_barrier(heap_base_oop, offset, p, !need_mem_bar);
}
break;
case T_ADDRESS:
@ -2501,9 +2618,8 @@ bool LibraryCallKit::inline_unsafe_access(bool is_native_ptr, bool is_store, Bas
break;
}
MemNode::MemOrd mo = is_volatile ? MemNode::release : MemNode::unordered;
if (type != T_OBJECT ) {
(void) store_to_memory(control(), adr, val, type, adr_type, mo, is_volatile, unaligned, mismatched);
if (type != T_OBJECT) {
(void) store_to_memory(control(), adr, val, type, adr_type, mo, requires_atomic_access, unaligned, mismatched);
} else {
// Possibly an oop being stored to Java heap or native memory
if (!TypePtr::NULL_PTR->higher_equal(_gvn.type(heap_base_oop))) {
@ -2524,7 +2640,7 @@ bool LibraryCallKit::inline_unsafe_access(bool is_native_ptr, bool is_store, Bas
// Update IdealKit memory.
__ sync_kit(this);
} __ else_(); {
__ store(__ ctrl(), adr, val, type, alias_type->index(), mo, is_volatile, mismatched);
__ store(__ ctrl(), adr, val, type, alias_type->index(), mo, requires_atomic_access, mismatched);
} __ end_if();
// Final sync IdealKit and GraphKit.
final_sync(ideal);
@ -2533,14 +2649,23 @@ bool LibraryCallKit::inline_unsafe_access(bool is_native_ptr, bool is_store, Bas
}
}
if (is_volatile) {
if (!is_store) {
insert_mem_bar(Op_MemBarAcquire);
} else {
if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
insert_mem_bar(Op_MemBarVolatile);
switch(kind) {
case Relaxed:
case Opaque:
case Release:
break;
case Acquire:
case Volatile:
if (!is_store) {
insert_mem_bar(Op_MemBarAcquire);
} else {
if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
insert_mem_bar(Op_MemBarVolatile);
}
}
}
break;
default:
ShouldNotReachHere();
}
if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder);
@ -2551,21 +2676,52 @@ bool LibraryCallKit::inline_unsafe_access(bool is_native_ptr, bool is_store, Bas
//----------------------------inline_unsafe_load_store----------------------------
// This method serves a couple of different customers (depending on LoadStoreKind):
//
// LS_cmpxchg:
// public final native boolean compareAndSwapObject(Object o, long offset, Object expected, Object x);
// public final native boolean compareAndSwapInt( Object o, long offset, int expected, int x);
// public final native boolean compareAndSwapLong( Object o, long offset, long expected, long x);
// LS_cmp_swap:
//
// LS_xadd:
// public int getAndAddInt( Object o, long offset, int delta)
// public long getAndAddLong(Object o, long offset, long delta)
// boolean compareAndSwapObject(Object o, long offset, Object expected, Object x);
// boolean compareAndSwapInt( Object o, long offset, int expected, int x);
// boolean compareAndSwapLong( Object o, long offset, long expected, long x);
//
// LS_cmp_swap_weak:
//
// boolean weakCompareAndSwapObject( Object o, long offset, Object expected, Object x);
// boolean weakCompareAndSwapObjectAcquire(Object o, long offset, Object expected, Object x);
// boolean weakCompareAndSwapObjectRelease(Object o, long offset, Object expected, Object x);
//
// boolean weakCompareAndSwapInt( Object o, long offset, int expected, int x);
// boolean weakCompareAndSwapIntAcquire( Object o, long offset, int expected, int x);
// boolean weakCompareAndSwapIntRelease( Object o, long offset, int expected, int x);
//
// boolean weakCompareAndSwapLong( Object o, long offset, long expected, long x);
// boolean weakCompareAndSwapLongAcquire( Object o, long offset, long expected, long x);
// boolean weakCompareAndSwapLongRelease( Object o, long offset, long expected, long x);
//
// LS_cmp_exchange:
//
// Object compareAndExchangeObjectVolatile(Object o, long offset, Object expected, Object x);
// Object compareAndExchangeObjectAcquire( Object o, long offset, Object expected, Object x);
// Object compareAndExchangeObjectRelease( Object o, long offset, Object expected, Object x);
//
// int    compareAndExchangeIntVolatile( Object o, long offset, int expected, int x);
// int    compareAndExchangeIntAcquire(  Object o, long offset, int expected, int x);
// int    compareAndExchangeIntRelease(  Object o, long offset, int expected, int x);
//
// long   compareAndExchangeLongVolatile(Object o, long offset, long expected, long x);
// long   compareAndExchangeLongAcquire( Object o, long offset, long expected, long x);
// long   compareAndExchangeLongRelease( Object o, long offset, long expected, long x);
//
// LS_get_add:
//
// int getAndAddInt( Object o, long offset, int delta)
// long getAndAddLong(Object o, long offset, long delta)
//
// LS_get_set:
//
// LS_xchg:
// int    getAndSetInt(   Object o, long offset, int    newValue)
// long   getAndSetLong(  Object o, long offset, long   newValue)
// Object getAndSetObject(Object o, long offset, Object newValue)
//
bool LibraryCallKit::inline_unsafe_load_store(BasicType type, LoadStoreKind kind) {
bool LibraryCallKit::inline_unsafe_load_store(const BasicType type, const LoadStoreKind kind, const AccessKind access_kind) {
// This basic scheme here is the same as inline_unsafe_access, but
// differs in enough details that combining them would make the code
// overly confusing. (This is a true fact! I originally combined
@ -2582,7 +2738,9 @@ bool LibraryCallKit::inline_unsafe_load_store(BasicType type, LoadStoreKind kind
// Check the signatures.
ciSignature* sig = callee()->signature();
rtype = sig->return_type()->basic_type();
if (kind == LS_xadd || kind == LS_xchg) {
switch(kind) {
case LS_get_add:
case LS_get_set: {
// Check the signatures.
#ifdef ASSERT
assert(rtype == type, "get and set must return the expected type");
@ -2591,7 +2749,10 @@ bool LibraryCallKit::inline_unsafe_load_store(BasicType type, LoadStoreKind kind
assert(sig->type_at(1)->basic_type() == T_LONG, "get and set offset is long");
assert(sig->type_at(2)->basic_type() == type, "get and set must take expected type as new value/delta");
#endif // ASSERT
} else if (kind == LS_cmpxchg) {
break;
}
case LS_cmp_swap:
case LS_cmp_swap_weak: {
// Check the signatures.
#ifdef ASSERT
assert(rtype == T_BOOLEAN, "CAS must return boolean");
@ -2599,8 +2760,20 @@ bool LibraryCallKit::inline_unsafe_load_store(BasicType type, LoadStoreKind kind
assert(sig->type_at(0)->basic_type() == T_OBJECT, "CAS base is object");
assert(sig->type_at(1)->basic_type() == T_LONG, "CAS offset is long");
#endif // ASSERT
} else {
ShouldNotReachHere();
break;
}
case LS_cmp_exchange: {
// Check the signatures.
#ifdef ASSERT
assert(rtype == type, "CAS must return the expected type");
assert(sig->count() == 4, "CAS has 4 arguments");
assert(sig->type_at(0)->basic_type() == T_OBJECT, "CAS base is object");
assert(sig->type_at(1)->basic_type() == T_LONG, "CAS offset is long");
#endif // ASSERT
break;
}
default:
ShouldNotReachHere();
}
}
#endif //PRODUCT
@ -2613,19 +2786,29 @@ bool LibraryCallKit::inline_unsafe_load_store(BasicType type, LoadStoreKind kind
Node* offset = NULL;
Node* oldval = NULL;
Node* newval = NULL;
if (kind == LS_cmpxchg) {
const bool two_slot_type = type2size[type] == 2;
receiver = argument(0); // type: oop
base = argument(1); // type: oop
offset = argument(2); // type: long
oldval = argument(4); // type: oop, int, or long
newval = argument(two_slot_type ? 6 : 5); // type: oop, int, or long
} else if (kind == LS_xadd || kind == LS_xchg){
receiver = argument(0); // type: oop
base = argument(1); // type: oop
offset = argument(2); // type: long
oldval = NULL;
newval = argument(4); // type: oop, int, or long
switch(kind) {
case LS_cmp_swap:
case LS_cmp_swap_weak:
case LS_cmp_exchange: {
const bool two_slot_type = type2size[type] == 2;
receiver = argument(0); // type: oop
base = argument(1); // type: oop
offset = argument(2); // type: long
oldval = argument(4); // type: oop, int, or long
newval = argument(two_slot_type ? 6 : 5); // type: oop, int, or long
break;
}
case LS_get_add:
case LS_get_set: {
receiver = argument(0); // type: oop
base = argument(1); // type: oop
offset = argument(2); // type: long
oldval = NULL;
newval = argument(4); // type: oop, int, or long
break;
}
default:
ShouldNotReachHere();
}
// Null check receiver.
@ -2650,11 +2833,23 @@ bool LibraryCallKit::inline_unsafe_load_store(BasicType type, LoadStoreKind kind
Compile::AliasType* alias_type = C->alias_type(adr_type);
assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
if (kind == LS_xchg && type == T_OBJECT) {
const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
if (tjp != NULL) {
value_type = tjp;
switch (kind) {
case LS_get_set:
case LS_cmp_exchange: {
if (type == T_OBJECT) {
const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
if (tjp != NULL) {
value_type = tjp;
}
}
break;
}
case LS_cmp_swap:
case LS_cmp_swap_weak:
case LS_get_add:
break;
default:
ShouldNotReachHere();
}
int alias_idx = C->get_alias_index(adr_type);
@ -2664,9 +2859,22 @@ bool LibraryCallKit::inline_unsafe_load_store(BasicType type, LoadStoreKind kind
// into actual barriers on most machines, but we still need rest of
// compiler to respect ordering.
insert_mem_bar(Op_MemBarRelease);
switch (access_kind) {
case Relaxed:
case Acquire:
break;
case Release:
case Volatile:
insert_mem_bar(Op_MemBarRelease);
break;
default:
ShouldNotReachHere();
}
insert_mem_bar(Op_MemBarCPUOrder);
// Figure out the memory ordering.
MemNode::MemOrd mo = access_kind_to_memord(access_kind);
// 4984716: MemBars must be inserted before this
// memory node in order to avoid a false
// dependency which will confuse the scheduler.
@ -2677,25 +2885,45 @@ bool LibraryCallKit::inline_unsafe_load_store(BasicType type, LoadStoreKind kind
Node* load_store = NULL;
switch(type) {
case T_INT:
if (kind == LS_xadd) {
load_store = _gvn.transform(new GetAndAddINode(control(), mem, adr, newval, adr_type));
} else if (kind == LS_xchg) {
load_store = _gvn.transform(new GetAndSetINode(control(), mem, adr, newval, adr_type));
} else if (kind == LS_cmpxchg) {
load_store = _gvn.transform(new CompareAndSwapINode(control(), mem, adr, newval, oldval));
} else {
ShouldNotReachHere();
switch(kind) {
case LS_get_add:
load_store = _gvn.transform(new GetAndAddINode(control(), mem, adr, newval, adr_type));
break;
case LS_get_set:
load_store = _gvn.transform(new GetAndSetINode(control(), mem, adr, newval, adr_type));
break;
case LS_cmp_swap_weak:
load_store = _gvn.transform(new WeakCompareAndSwapINode(control(), mem, adr, newval, oldval, mo));
break;
case LS_cmp_swap:
load_store = _gvn.transform(new CompareAndSwapINode(control(), mem, adr, newval, oldval, mo));
break;
case LS_cmp_exchange:
load_store = _gvn.transform(new CompareAndExchangeINode(control(), mem, adr, newval, oldval, adr_type, mo));
break;
default:
ShouldNotReachHere();
}
break;
case T_LONG:
if (kind == LS_xadd) {
load_store = _gvn.transform(new GetAndAddLNode(control(), mem, adr, newval, adr_type));
} else if (kind == LS_xchg) {
load_store = _gvn.transform(new GetAndSetLNode(control(), mem, adr, newval, adr_type));
} else if (kind == LS_cmpxchg) {
load_store = _gvn.transform(new CompareAndSwapLNode(control(), mem, adr, newval, oldval));
} else {
ShouldNotReachHere();
switch(kind) {
case LS_get_add:
load_store = _gvn.transform(new GetAndAddLNode(control(), mem, adr, newval, adr_type));
break;
case LS_get_set:
load_store = _gvn.transform(new GetAndSetLNode(control(), mem, adr, newval, adr_type));
break;
case LS_cmp_swap_weak:
load_store = _gvn.transform(new WeakCompareAndSwapLNode(control(), mem, adr, newval, oldval, mo));
break;
case LS_cmp_swap:
load_store = _gvn.transform(new CompareAndSwapLNode(control(), mem, adr, newval, oldval, mo));
break;
case LS_cmp_exchange:
load_store = _gvn.transform(new CompareAndExchangeLNode(control(), mem, adr, newval, oldval, adr_type, mo));
break;
default:
ShouldNotReachHere();
}
break;
case T_OBJECT:
@ -2706,65 +2934,109 @@ bool LibraryCallKit::inline_unsafe_load_store(BasicType type, LoadStoreKind kind
newval = _gvn.makecon(TypePtr::NULL_PTR);
// Reference stores need a store barrier.
if (kind == LS_xchg) {
// If pre-barrier must execute before the oop store, old value will require do_load here.
if (!can_move_pre_barrier()) {
pre_barrier(true /* do_load*/,
control(), base, adr, alias_idx, newval, value_type->make_oopptr(),
NULL /* pre_val*/,
T_OBJECT);
} // Else move pre_barrier to use load_store value, see below.
} else if (kind == LS_cmpxchg) {
// Same as for newval above:
if (_gvn.type(oldval) == TypePtr::NULL_PTR) {
oldval = _gvn.makecon(TypePtr::NULL_PTR);
switch(kind) {
case LS_get_set: {
// If pre-barrier must execute before the oop store, old value will require do_load here.
if (!can_move_pre_barrier()) {
pre_barrier(true /* do_load*/,
control(), base, adr, alias_idx, newval, value_type->make_oopptr(),
NULL /* pre_val*/,
T_OBJECT);
} // Else move pre_barrier to use load_store value, see below.
break;
}
// The only known value which might get overwritten is oldval.
pre_barrier(false /* do_load */,
control(), NULL, NULL, max_juint, NULL, NULL,
oldval /* pre_val */,
T_OBJECT);
} else {
ShouldNotReachHere();
case LS_cmp_swap_weak:
case LS_cmp_swap:
case LS_cmp_exchange: {
// Same as for newval above:
if (_gvn.type(oldval) == TypePtr::NULL_PTR) {
oldval = _gvn.makecon(TypePtr::NULL_PTR);
}
// The only known value which might get overwritten is oldval.
pre_barrier(false /* do_load */,
control(), NULL, NULL, max_juint, NULL, NULL,
oldval /* pre_val */,
T_OBJECT);
break;
}
default:
ShouldNotReachHere();
}
#ifdef _LP64
if (adr->bottom_type()->is_ptr_to_narrowoop()) {
Node *newval_enc = _gvn.transform(new EncodePNode(newval, newval->bottom_type()->make_narrowoop()));
if (kind == LS_xchg) {
load_store = _gvn.transform(new GetAndSetNNode(control(), mem, adr,
newval_enc, adr_type, value_type->make_narrowoop()));
} else {
assert(kind == LS_cmpxchg, "wrong LoadStore operation");
Node *oldval_enc = _gvn.transform(new EncodePNode(oldval, oldval->bottom_type()->make_narrowoop()));
load_store = _gvn.transform(new CompareAndSwapNNode(control(), mem, adr,
newval_enc, oldval_enc));
switch(kind) {
case LS_get_set:
load_store = _gvn.transform(new GetAndSetNNode(control(), mem, adr, newval_enc, adr_type, value_type->make_narrowoop()));
break;
case LS_cmp_swap_weak: {
Node *oldval_enc = _gvn.transform(new EncodePNode(oldval, oldval->bottom_type()->make_narrowoop()));
load_store = _gvn.transform(new WeakCompareAndSwapNNode(control(), mem, adr, newval_enc, oldval_enc, mo));
break;
}
case LS_cmp_swap: {
Node *oldval_enc = _gvn.transform(new EncodePNode(oldval, oldval->bottom_type()->make_narrowoop()));
load_store = _gvn.transform(new CompareAndSwapNNode(control(), mem, adr, newval_enc, oldval_enc, mo));
break;
}
case LS_cmp_exchange: {
Node *oldval_enc = _gvn.transform(new EncodePNode(oldval, oldval->bottom_type()->make_narrowoop()));
load_store = _gvn.transform(new CompareAndExchangeNNode(control(), mem, adr, newval_enc, oldval_enc, adr_type, value_type->make_narrowoop(), mo));
break;
}
default:
ShouldNotReachHere();
}
} else
#endif
{
if (kind == LS_xchg) {
switch (kind) {
case LS_get_set:
load_store = _gvn.transform(new GetAndSetPNode(control(), mem, adr, newval, adr_type, value_type->is_oopptr()));
} else {
assert(kind == LS_cmpxchg, "wrong LoadStore operation");
load_store = _gvn.transform(new CompareAndSwapPNode(control(), mem, adr, newval, oldval));
}
break;
case LS_cmp_swap_weak:
load_store = _gvn.transform(new WeakCompareAndSwapPNode(control(), mem, adr, newval, oldval, mo));
break;
case LS_cmp_swap:
load_store = _gvn.transform(new CompareAndSwapPNode(control(), mem, adr, newval, oldval, mo));
break;
case LS_cmp_exchange:
load_store = _gvn.transform(new CompareAndExchangePNode(control(), mem, adr, newval, oldval, adr_type, value_type->is_oopptr(), mo));
break;
default:
ShouldNotReachHere();
}
if (kind == LS_cmpxchg) {
// Emit the post barrier only when the actual store happened.
// This makes sense to check only for compareAndSet that can fail to set the value.
// CAS success path is marked more likely since we anticipate this is a performance
// critical path, while CAS failure path can use the penalty for going through unlikely
// path as backoff. Which is still better than doing a store barrier there.
IdealKit ideal(this);
ideal.if_then(load_store, BoolTest::ne, ideal.ConI(0), PROB_STATIC_FREQUENT); {
sync_kit(ideal);
post_barrier(ideal.ctrl(), load_store, base, adr, alias_idx, newval, T_OBJECT, true);
ideal.sync_kit(this);
} ideal.end_if();
final_sync(ideal);
} else {
post_barrier(control(), load_store, base, adr, alias_idx, newval, T_OBJECT, true);
// Emit the post barrier only when the actual store happened. This makes sense
// to check only for LS_cmp_* that can fail to set the value.
// LS_cmp_exchange does not produce any branches by default, so there is no
// boolean result to piggyback on. TODO: When we merge CompareAndSwap with
// CompareAndExchange and move branches here, it would make sense to conditionalize
// post_barriers for LS_cmp_exchange as well.
//
// The CAS success path is marked more likely since we anticipate it is a performance-
// critical path, while the CAS failure path can absorb the penalty of going through the
// unlikely path as backoff; that is still better than doing a store barrier there.
switch (kind) {
case LS_get_set:
case LS_cmp_exchange: {
post_barrier(control(), load_store, base, adr, alias_idx, newval, T_OBJECT, true);
break;
}
case LS_cmp_swap_weak:
case LS_cmp_swap: {
IdealKit ideal(this);
ideal.if_then(load_store, BoolTest::ne, ideal.ConI(0), PROB_STATIC_FREQUENT); {
sync_kit(ideal);
post_barrier(ideal.ctrl(), load_store, base, adr, alias_idx, newval, T_OBJECT, true);
ideal.sync_kit(this);
} ideal.end_if();
final_sync(ideal);
break;
}
default:
ShouldNotReachHere();
}
break;
default:
@ -2778,7 +3050,7 @@ bool LibraryCallKit::inline_unsafe_load_store(BasicType type, LoadStoreKind kind
Node* proj = _gvn.transform(new SCMemProjNode(load_store));
set_memory(proj, alias_idx);
if (type == T_OBJECT && kind == LS_xchg) {
if (type == T_OBJECT && (kind == LS_get_set || kind == LS_cmp_exchange)) {
#ifdef _LP64
if (adr->bottom_type()->is_ptr_to_narrowoop()) {
load_store = _gvn.transform(new DecodeNNode(load_store, load_store->get_ptr_type()));
@ -2797,74 +3069,52 @@ bool LibraryCallKit::inline_unsafe_load_store(BasicType type, LoadStoreKind kind
// Add the trailing membar surrounding the access
insert_mem_bar(Op_MemBarCPUOrder);
insert_mem_bar(Op_MemBarAcquire);
switch (access_kind) {
case Relaxed:
case Release:
break; // do nothing
case Acquire:
case Volatile:
insert_mem_bar(Op_MemBarAcquire);
break;
default:
ShouldNotReachHere();
}
assert(type2size[load_store->bottom_type()->basic_type()] == type2size[rtype], "result type should match");
set_result(load_store);
return true;
}
//----------------------------inline_unsafe_ordered_store----------------------
// public native void Unsafe.putOrderedObject(Object o, long offset, Object x);
// public native void Unsafe.putOrderedInt(Object o, long offset, int x);
// public native void Unsafe.putOrderedLong(Object o, long offset, long x);
bool LibraryCallKit::inline_unsafe_ordered_store(BasicType type) {
// This is another variant of inline_unsafe_access, differing in
// that it always issues store-store ("release") barrier and ensures
// store-atomicity (which only matters for "long").
if (callee()->is_static()) return false; // caller must have the capability!
#ifndef PRODUCT
{
ResourceMark rm;
// Check the signatures.
ciSignature* sig = callee()->signature();
#ifdef ASSERT
BasicType rtype = sig->return_type()->basic_type();
assert(rtype == T_VOID, "must return void");
assert(sig->count() == 3, "has 3 arguments");
assert(sig->type_at(0)->basic_type() == T_OBJECT, "base is object");
assert(sig->type_at(1)->basic_type() == T_LONG, "offset is long");
#endif // ASSERT
MemNode::MemOrd LibraryCallKit::access_kind_to_memord_LS(AccessKind kind, bool is_store) {
MemNode::MemOrd mo = MemNode::unset;
switch(kind) {
case Opaque:
case Relaxed: mo = MemNode::unordered; break;
case Acquire: mo = MemNode::acquire; break;
case Release: mo = MemNode::release; break;
case Volatile: mo = is_store ? MemNode::release : MemNode::acquire; break;
default:
ShouldNotReachHere();
}
#endif //PRODUCT
guarantee(mo != MemNode::unset, "Should select memory ordering");
return mo;
}
C->set_has_unsafe_access(true); // Mark eventual nmethod as "unsafe".
// Get arguments:
Node* receiver = argument(0); // type: oop
Node* base = argument(1); // type: oop
Node* offset = argument(2); // type: long
Node* val = argument(4); // type: oop, int, or long
// Null check receiver.
receiver = null_check(receiver);
if (stopped()) {
return true;
MemNode::MemOrd LibraryCallKit::access_kind_to_memord(AccessKind kind) {
MemNode::MemOrd mo = MemNode::unset;
switch(kind) {
case Opaque:
case Relaxed: mo = MemNode::unordered; break;
case Acquire: mo = MemNode::acquire; break;
case Release: mo = MemNode::release; break;
case Volatile: mo = MemNode::seqcst; break;
default:
ShouldNotReachHere();
}
// Build field offset expression.
assert(Unsafe_field_offset_to_byte_offset(11) == 11, "fieldOffset must be byte-scaled");
// 32-bit machines ignore the high half of long offsets
offset = ConvL2X(offset);
Node* adr = make_unsafe_address(base, offset);
const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
const Type *value_type = Type::get_const_basic_type(type);
Compile::AliasType* alias_type = C->alias_type(adr_type);
insert_mem_bar(Op_MemBarRelease);
insert_mem_bar(Op_MemBarCPUOrder);
// Ensure that the store is atomic for longs:
const bool require_atomic_access = true;
Node* store;
if (type == T_OBJECT) // reference stores need a store barrier.
store = store_oop_to_unknown(control(), base, adr, adr_type, val, type, MemNode::release);
else {
store = store_to_memory(control(), adr, val, type, adr_type, MemNode::release, require_atomic_access);
}
insert_mem_bar(Op_MemBarCPUOrder);
return true;
guarantee(mo != MemNode::unset, "Should select memory ordering");
return mo;
}
bool LibraryCallKit::inline_unsafe_fence(vmIntrinsics::ID id) {

View File

@ -2417,6 +2417,14 @@ void IdealLoopTree::adjust_loop_exit_prob( PhaseIdealLoop *phase ) {
((bol->in(1)->Opcode() == Op_StorePConditional ) ||
(bol->in(1)->Opcode() == Op_StoreIConditional ) ||
(bol->in(1)->Opcode() == Op_StoreLConditional ) ||
(bol->in(1)->Opcode() == Op_CompareAndExchangeI ) ||
(bol->in(1)->Opcode() == Op_CompareAndExchangeL ) ||
(bol->in(1)->Opcode() == Op_CompareAndExchangeP ) ||
(bol->in(1)->Opcode() == Op_CompareAndExchangeN ) ||
(bol->in(1)->Opcode() == Op_WeakCompareAndSwapI ) ||
(bol->in(1)->Opcode() == Op_WeakCompareAndSwapL ) ||
(bol->in(1)->Opcode() == Op_WeakCompareAndSwapP ) ||
(bol->in(1)->Opcode() == Op_WeakCompareAndSwapN ) ||
(bol->in(1)->Opcode() == Op_CompareAndSwapI ) ||
(bol->in(1)->Opcode() == Op_CompareAndSwapL ) ||
(bol->in(1)->Opcode() == Op_CompareAndSwapP ) ||

View File

@ -2307,6 +2307,14 @@ void Matcher::find_shared( Node *n ) {
case Op_StorePConditional:
case Op_StoreIConditional:
case Op_StoreLConditional:
case Op_CompareAndExchangeI:
case Op_CompareAndExchangeL:
case Op_CompareAndExchangeP:
case Op_CompareAndExchangeN:
case Op_WeakCompareAndSwapI:
case Op_WeakCompareAndSwapL:
case Op_WeakCompareAndSwapP:
case Op_WeakCompareAndSwapN:
case Op_CompareAndSwapI:
case Op_CompareAndSwapL:
case Op_CompareAndSwapP:
@ -2522,6 +2530,14 @@ bool Matcher::post_store_load_barrier(const Node* vmb) {
// that a monitor exit operation contains a serializing instruction.
if (xop == Op_MemBarVolatile ||
xop == Op_CompareAndExchangeI ||
xop == Op_CompareAndExchangeL ||
xop == Op_CompareAndExchangeP ||
xop == Op_CompareAndExchangeN ||
xop == Op_WeakCompareAndSwapL ||
xop == Op_WeakCompareAndSwapP ||
xop == Op_WeakCompareAndSwapN ||
xop == Op_WeakCompareAndSwapI ||
xop == Op_CompareAndSwapL ||
xop == Op_CompareAndSwapP ||
xop == Op_CompareAndSwapN ||

View File

@ -56,7 +56,9 @@ public:
};
typedef enum { unordered = 0,
acquire, // Load has to acquire or be succeeded by MemBarAcquire.
release // Store has to release or be preceded by MemBarRelease.
release, // Store has to release or be preceded by MemBarRelease.
seqcst, // LoadStore has to have both acquire and release semantics.
unset // The memory ordering is not set (used for testing)
} MemOrd;
protected:
MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at )
@ -848,34 +850,121 @@ public:
virtual uint ideal_reg() const { return Op_RegFlags; }
};
class CompareAndSwapNode : public LoadStoreConditionalNode {
private:
const MemNode::MemOrd _mem_ord;
public:
CompareAndSwapNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : LoadStoreConditionalNode(c, mem, adr, val, ex), _mem_ord(mem_ord) {}
MemNode::MemOrd order() const {
return _mem_ord;
}
};
class CompareAndExchangeNode : public LoadStoreNode {
private:
const MemNode::MemOrd _mem_ord;
public:
enum {
ExpectedIn = MemNode::ValueIn+1 // One more input than MemNode
};
CompareAndExchangeNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord, const TypePtr* at, const Type* t) :
LoadStoreNode(c, mem, adr, val, at, t, 5), _mem_ord(mem_ord) {
init_req(ExpectedIn, ex );
}
MemNode::MemOrd order() const {
return _mem_ord;
}
};
//------------------------------CompareAndSwapLNode---------------------------
class CompareAndSwapLNode : public LoadStoreConditionalNode {
class CompareAndSwapLNode : public CompareAndSwapNode {
public:
CompareAndSwapLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreConditionalNode(c, mem, adr, val, ex) { }
CompareAndSwapLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
virtual int Opcode() const;
};
//------------------------------CompareAndSwapINode---------------------------
class CompareAndSwapINode : public LoadStoreConditionalNode {
class CompareAndSwapINode : public CompareAndSwapNode {
public:
CompareAndSwapINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreConditionalNode(c, mem, adr, val, ex) { }
CompareAndSwapINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
virtual int Opcode() const;
};
//------------------------------CompareAndSwapPNode---------------------------
class CompareAndSwapPNode : public LoadStoreConditionalNode {
class CompareAndSwapPNode : public CompareAndSwapNode {
public:
CompareAndSwapPNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreConditionalNode(c, mem, adr, val, ex) { }
CompareAndSwapPNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
virtual int Opcode() const;
};
//------------------------------CompareAndSwapNNode---------------------------
class CompareAndSwapNNode : public LoadStoreConditionalNode {
class CompareAndSwapNNode : public CompareAndSwapNode {
public:
CompareAndSwapNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreConditionalNode(c, mem, adr, val, ex) { }
CompareAndSwapNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
virtual int Opcode() const;
};
//------------------------------WeakCompareAndSwapLNode---------------------------
class WeakCompareAndSwapLNode : public CompareAndSwapNode {
public:
WeakCompareAndSwapLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
virtual int Opcode() const;
};
//------------------------------WeakCompareAndSwapINode---------------------------
class WeakCompareAndSwapINode : public CompareAndSwapNode {
public:
WeakCompareAndSwapINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
virtual int Opcode() const;
};
//------------------------------WeakCompareAndSwapPNode---------------------------
class WeakCompareAndSwapPNode : public CompareAndSwapNode {
public:
WeakCompareAndSwapPNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
virtual int Opcode() const;
};
//------------------------------WeakCompareAndSwapNNode---------------------------
class WeakCompareAndSwapNNode : public CompareAndSwapNode {
public:
WeakCompareAndSwapNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
virtual int Opcode() const;
};
//------------------------------CompareAndExchangeLNode---------------------------
class CompareAndExchangeLNode : public CompareAndExchangeNode {
public:
CompareAndExchangeLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, TypeLong::LONG) { }
virtual int Opcode() const;
};
//------------------------------CompareAndExchangeINode---------------------------
class CompareAndExchangeINode : public CompareAndExchangeNode {
public:
CompareAndExchangeINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, TypeInt::INT) { }
virtual int Opcode() const;
};
//------------------------------CompareAndExchangePNode---------------------------
class CompareAndExchangePNode : public CompareAndExchangeNode {
public:
CompareAndExchangePNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, const Type* t, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, t) { }
virtual int Opcode() const;
};
//------------------------------CompareAndExchangeNNode---------------------------
class CompareAndExchangeNNode : public CompareAndExchangeNode {
public:
CompareAndExchangeNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, const Type* t, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, t) { }
virtual int Opcode() const;
};
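Taken together, the node classes above back three flavours of atomic update that differ only in what they report to the caller. A minimal Java-level sketch of the intended semantics, assuming UNSAFE, base and offset are set up as in the JdkInternalMiscUnsafeAccessTestInt test further down (the local variable names here are illustrative only):

    // CompareAndSwap*: reports only whether the store happened.
    boolean swapped = UNSAFE.compareAndSwapInt(base, offset, 1, 2);

    // CompareAndExchange*: reports the witness value actually found in memory,
    // so even a failed exchange tells the caller the current contents.
    int witness = UNSAFE.compareAndExchangeIntVolatile(base, offset, 1, 2);
    boolean exchanged = (witness == 1);

    // WeakCompareAndSwap*: may fail spuriously even when the expected value is
    // present, so it is normally retried in a loop.
    int v;
    do {
        v = UNSAFE.getInt(base, offset);
    } while (!UNSAFE.weakCompareAndSwapInt(base, offset, v, v + 1));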

View File

@@ -60,6 +60,8 @@ class CmpNode;
class CodeBuffer;
class ConstraintCastNode;
class ConNode;
class CompareAndSwapNode;
class CompareAndExchangeNode;
class CountedLoopNode;
class CountedLoopEndNode;
class DecodeNarrowPtrNode;
@@ -679,6 +681,9 @@ public:
DEFINE_CLASS_ID(Store, Mem, 1)
DEFINE_CLASS_ID(StoreVector, Store, 0)
DEFINE_CLASS_ID(LoadStore, Mem, 2)
DEFINE_CLASS_ID(LoadStoreConditional, LoadStore, 0)
DEFINE_CLASS_ID(CompareAndSwap, LoadStoreConditional, 0)
DEFINE_CLASS_ID(CompareAndExchangeNode, LoadStore, 1)
DEFINE_CLASS_ID(Region, Node, 5)
DEFINE_CLASS_ID(Loop, Region, 0)

View File

@@ -1117,6 +1117,44 @@ UNSAFE_END
// JSR166 ------------------------------------------------------------------
UNSAFE_ENTRY(jobject, Unsafe_CompareAndExchangeObject(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jobject e_h, jobject x_h))
UnsafeWrapper("Unsafe_CompareAndExchangeObject");
oop x = JNIHandles::resolve(x_h);
oop e = JNIHandles::resolve(e_h);
oop p = JNIHandles::resolve(obj);
HeapWord* addr = (HeapWord *)index_oop_from_field_offset_long(p, offset);
oop res = oopDesc::atomic_compare_exchange_oop(x, addr, e, true);
if (res == e)
update_barrier_set((void*)addr, x);
return JNIHandles::make_local(env, res);
UNSAFE_END
UNSAFE_ENTRY(jint, Unsafe_CompareAndExchangeInt(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jint e, jint x))
UnsafeWrapper("Unsafe_CompareAndExchangeInt");
oop p = JNIHandles::resolve(obj);
jint* addr = (jint *) index_oop_from_field_offset_long(p, offset);
return (jint)(Atomic::cmpxchg(x, addr, e));
UNSAFE_END
UNSAFE_ENTRY(jlong, Unsafe_CompareAndExchangeLong(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jlong e, jlong x))
UnsafeWrapper("Unsafe_CompareAndExchangeLong");
Handle p (THREAD, JNIHandles::resolve(obj));
jlong* addr = (jlong*)(index_oop_from_field_offset_long(p(), offset));
#ifdef SUPPORTS_NATIVE_CX8
return (jlong)(Atomic::cmpxchg(x, addr, e));
#else
if (VM_Version::supports_cx8())
return (jlong)(Atomic::cmpxchg(x, addr, e));
else {
MutexLockerEx mu(UnsafeJlong_lock, Mutex::_no_safepoint_check_flag);
jlong val = Atomic::load(addr);
if (val == e)
Atomic::store(x, addr);
return val;
}
#endif
UNSAFE_END
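When the platform cannot perform a native 8-byte compare-and-swap, the long variant above falls back to the UnsafeJlong_lock mutex and does the exchange under the lock. A rough Java analogue of that fallback path, purely to illustrate the logic (the lock object and the value field are placeholders standing in for the mutex and the addressed jlong slot, not HotSpot code):

    static final Object LONG_CAS_LOCK = new Object();  // stands in for UnsafeJlong_lock
    static long value;                                  // stands in for the slot at (obj, offset)

    static long compareAndExchangeLongFallback(long expected, long update) {
        synchronized (LONG_CAS_LOCK) {
            long current = value;       // Atomic::load(addr)
            if (current == expected) {
                value = update;         // Atomic::store(x, addr)
            }
            return current;             // the witness value is returned either way
        }
    }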
UNSAFE_ENTRY(jboolean, Unsafe_CompareAndSwapObject(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jobject e_h, jobject x_h))
UnsafeWrapper("Unsafe_CompareAndSwapObject");
oop x = JNIHandles::resolve(x_h);
@@ -1384,6 +1422,10 @@ static JNINativeMethod jdk_internal_misc_Unsafe_methods[] = {
{CC "compareAndSwapObject", CC "(" OBJ "J" OBJ "" OBJ ")Z", FN_PTR(Unsafe_CompareAndSwapObject)},
{CC "compareAndSwapInt", CC "(" OBJ "J""I""I"")Z", FN_PTR(Unsafe_CompareAndSwapInt)},
{CC "compareAndSwapLong", CC "(" OBJ "J""J""J"")Z", FN_PTR(Unsafe_CompareAndSwapLong)},
{CC "compareAndExchangeObjectVolatile", CC "(" OBJ "J" OBJ "" OBJ ")" OBJ, FN_PTR(Unsafe_CompareAndExchangeObject)},
{CC "compareAndExchangeIntVolatile", CC "(" OBJ "J""I""I"")I", FN_PTR(Unsafe_CompareAndExchangeInt)},
{CC "compareAndExchangeLongVolatile", CC "(" OBJ "J""J""J"")J", FN_PTR(Unsafe_CompareAndExchangeLong)},
{CC "putOrderedObject", CC "(" OBJ "J" OBJ ")V", FN_PTR(Unsafe_SetOrderedObject)},
{CC "putOrderedInt", CC "(" OBJ "JI)V", FN_PTR(Unsafe_SetOrderedInt)},
{CC "putOrderedLong", CC "(" OBJ "JJ)V", FN_PTR(Unsafe_SetOrderedLong)},

View File

@@ -2314,6 +2314,17 @@ bool Arguments::sun_java_launcher_is_altjvm() {
//===========================================================================================================
// Parsing of main arguments
#if INCLUDE_JVMCI
// Check consistency of jvmci vm argument settings.
bool Arguments::check_jvmci_args_consistency() {
if (!EnableJVMCI && !JVMCIGlobals::check_jvmci_flags_are_consistent()) {
JVMCIGlobals::print_jvmci_args_inconsistency_error_message();
return false;
}
return true;
}
#endif //INCLUDE_JVMCI
// Check consistency of GC selection
bool Arguments::check_gc_consistency() {
// Ensure that the user has not selected conflicting sets
@@ -2410,6 +2421,9 @@ bool Arguments::check_vm_args_consistency() {
#endif
}
#if INCLUDE_JVMCI
status = status && check_jvmci_args_consistency();
if (EnableJVMCI) {
if (!ScavengeRootsInCode) {
warning("forcing ScavengeRootsInCode non-zero because JVMCI is enabled");

View File

@@ -505,7 +505,10 @@ class Arguments : AllStatic {
static void set_gc_specific_flags();
static inline bool gc_selected(); // whether a gc has been selected
static void select_gc_ergonomically();
#if INCLUDE_JVMCI
// Check consistency of jvmci vm argument settings.
static bool check_jvmci_args_consistency();
#endif
// Check for consistency in the selection of the garbage collector.
static bool check_gc_consistency(); // Check user-selected gc
// Check consistency or otherwise of VM argument settings

View File

@@ -33,9 +33,6 @@
#include "runtime/commandLineFlagConstraintsRuntime.hpp"
#include "runtime/os.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_JVMCI
#include "jvmci/commandLineFlagConstraintsJVMCI.hpp"
#endif
class CommandLineFlagConstraint_bool : public CommandLineFlagConstraint {
CommandLineFlagConstraintFunc_bool _constraint;
@@ -254,17 +251,6 @@ void CommandLineFlagConstraintList::init(void) {
IGNORE_RANGE,
EMIT_CONSTRAINT_CHECK));
#if INCLUDE_JVMCI
emit_constraint_no(NULL JVMCI_FLAGS(EMIT_CONSTRAINT_DEVELOPER_FLAG,
EMIT_CONSTRAINT_PD_DEVELOPER_FLAG,
EMIT_CONSTRAINT_PRODUCT_FLAG,
EMIT_CONSTRAINT_PD_PRODUCT_FLAG,
EMIT_CONSTRAINT_DIAGNOSTIC_FLAG,
EMIT_CONSTRAINT_EXPERIMENTAL_FLAG,
EMIT_CONSTRAINT_NOTPRODUCT_FLAG,
IGNORE_RANGE,
EMIT_CONSTRAINT_CHECK));
#endif // INCLUDE_JVMCI
#ifdef COMPILER1
emit_constraint_no(NULL C1_FLAGS(EMIT_CONSTRAINT_DEVELOPER_FLAG,

View File

@@ -2005,10 +2005,20 @@ typedef CompactHashtable<Symbol*, char> SymbolCompactHashTable;
declare_c2_type(LoadStoreNode, Node) \
declare_c2_type(StorePConditionalNode, LoadStoreNode) \
declare_c2_type(StoreLConditionalNode, LoadStoreNode) \
declare_c2_type(CompareAndSwapLNode, LoadStoreNode) \
declare_c2_type(CompareAndSwapINode, LoadStoreNode) \
declare_c2_type(CompareAndSwapPNode, LoadStoreNode) \
declare_c2_type(CompareAndSwapNNode, LoadStoreNode) \
declare_c2_type(CompareAndSwapNode, LoadStoreConditionalNode) \
declare_c2_type(CompareAndSwapLNode, CompareAndSwapNode) \
declare_c2_type(CompareAndSwapINode, CompareAndSwapNode) \
declare_c2_type(CompareAndSwapPNode, CompareAndSwapNode) \
declare_c2_type(CompareAndSwapNNode, CompareAndSwapNode) \
declare_c2_type(WeakCompareAndSwapLNode, CompareAndSwapNode) \
declare_c2_type(WeakCompareAndSwapINode, CompareAndSwapNode) \
declare_c2_type(WeakCompareAndSwapPNode, CompareAndSwapNode) \
declare_c2_type(WeakCompareAndSwapNNode, CompareAndSwapNode) \
declare_c2_type(CompareAndExchangeNode, LoadStoreNode) \
declare_c2_type(CompareAndExchangeLNode, CompareAndExchangeNode) \
declare_c2_type(CompareAndExchangeINode, CompareAndExchangeNode) \
declare_c2_type(CompareAndExchangePNode, CompareAndExchangeNode) \
declare_c2_type(CompareAndExchangeNNode, CompareAndExchangeNode) \
declare_c2_type(MulNode, Node) \
declare_c2_type(MulINode, MulNode) \
declare_c2_type(MulLNode, MulNode) \

View File

@@ -396,7 +396,7 @@ template<class E> class GrowableArray : public GenericGrowableArray {
int max = length() - 1;
while (max >= min) {
int mid = (max + min) / 2;
int mid = (int)(((uint)max + min) / 2);
E value = at(mid);
int diff = compare(key, value);
if (diff > 0) {
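The midpoint change above guards the binary search against signed overflow once max + min exceeds the int range; a small Java illustration of the failure mode and two equivalent fixes (the values are chosen only to trigger the overflow):

    int min = 1_500_000_000;
    int max = 1_600_000_000;

    int broken = (max + min) / 2;                  // sum wraps to -1194967296, midpoint becomes -597483648
    int fixed  = (int) (((long) max + min) / 2);   // widen before adding, like the (uint) cast above
    int also   = (max + min) >>> 1;                // unsigned halving gives the same result

Both corrected forms yield 1550000000, a valid index, where the original expression produced a negative one.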

View File

@@ -0,0 +1,91 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
/**
* @test
* @bug 8149797
* @summary node replaced by dominating dead cast during parsing
* @run main/othervm -XX:-UseOnStackReplacement -XX:-BackgroundCompilation -XX:TypeProfileLevel=200 -XX:CompileCommand=dontinline,TestDominatingDeadCheckCast::not_inlined TestDominatingDeadCheckCast
*
*/
public class TestDominatingDeadCheckCast {
static class A {
int f;
}
static class B extends A {
}
static A not_inlined() {
return new A();
}
static void inlined(A param) {
param.f = 42;
}
static A field;
static void test(boolean flag1, boolean flag2, boolean flag3, boolean flag4, boolean flag5) {
// Go through memory rather than through a local to defeat C2's replace_in_map
field = not_inlined();
// Speculation adds a CheckCast on entry of this inlined
// method for the parameter
inlined(field);
// The walk up the dominators is depth limited; make the CheckCast
// above unreachable from the last inlined call
if (flag1) {
if (flag2) {
if (flag3) {
// Speculation adds a CheckCast on entry of this
// inlined method for the parameter. This
// CheckCast is replaced by the CheckCast of the
// first inlined method call but the replaced
// CheckCast is still around during parsing.
inlined(field);
// Same as above, with some useless control flow
if (flag4) {
if (flag5) {
// Speculation adds a CheckCast on entry
// of this inlined method for the
// parameter. This CheckCast is replaced
// by the dead CheckCast of the previous
// inlined() call.
inlined(field);
}
}
}
}
}
}
static public void main(String[] args) {
field = new A();
for (int i = 0; i < 20000; i++) {
test(true, true, true, true, true);
}
}
}

View File

@@ -128,6 +128,20 @@ public class JdkInternalMiscUnsafeAccessTestBoolean {
}
// Lazy
{
UNSAFE.putBooleanRelease(base, offset, true);
boolean x = UNSAFE.getBooleanAcquire(base, offset);
assertEquals(x, true, "putRelease boolean value");
}
// Opaque
{
UNSAFE.putBooleanOpaque(base, offset, false);
boolean x = UNSAFE.getBooleanOpaque(base, offset);
assertEquals(x, false, "putOpaque boolean value");
}
}

View File

@@ -157,6 +157,20 @@ public class JdkInternalMiscUnsafeAccessTestByte {
}
// Lazy
{
UNSAFE.putByteRelease(base, offset, (byte)1);
byte x = UNSAFE.getByteAcquire(base, offset);
assertEquals(x, (byte)1, "putRelease byte value");
}
// Opaque
{
UNSAFE.putByteOpaque(base, offset, (byte)2);
byte x = UNSAFE.getByteOpaque(base, offset);
assertEquals(x, (byte)2, "putOpaque byte value");
}
}

View File

@@ -157,6 +157,20 @@ public class JdkInternalMiscUnsafeAccessTestChar {
}
// Lazy
{
UNSAFE.putCharRelease(base, offset, 'a');
char x = UNSAFE.getCharAcquire(base, offset);
assertEquals(x, 'a', "putRelease char value");
}
// Opaque
{
UNSAFE.putCharOpaque(base, offset, 'b');
char x = UNSAFE.getCharOpaque(base, offset);
assertEquals(x, 'b', "putOpaque char value");
}
// Unaligned
{
UNSAFE.putCharUnaligned(base, offset, 'b');

View File

@@ -157,6 +157,20 @@ public class JdkInternalMiscUnsafeAccessTestDouble {
}
// Lazy
{
UNSAFE.putDoubleRelease(base, offset, 1.0d);
double x = UNSAFE.getDoubleAcquire(base, offset);
assertEquals(x, 1.0d, "putRelease double value");
}
// Opaque
{
UNSAFE.putDoubleOpaque(base, offset, 2.0d);
double x = UNSAFE.getDoubleOpaque(base, offset);
assertEquals(x, 2.0d, "putOpaque double value");
}
}

View File

@@ -157,6 +157,20 @@ public class JdkInternalMiscUnsafeAccessTestFloat {
}
// Lazy
{
UNSAFE.putFloatRelease(base, offset, 1.0f);
float x = UNSAFE.getFloatAcquire(base, offset);
assertEquals(x, 1.0f, "putRelease float value");
}
// Opaque
{
UNSAFE.putFloatOpaque(base, offset, 2.0f);
float x = UNSAFE.getFloatOpaque(base, offset);
assertEquals(x, 2.0f, "putOpaque float value");
}
}

View File

@@ -163,6 +163,20 @@ public class JdkInternalMiscUnsafeAccessTestInt {
assertEquals(x, 1, "putRelease int value");
}
// Lazy
{
UNSAFE.putIntRelease(base, offset, 1);
int x = UNSAFE.getIntAcquire(base, offset);
assertEquals(x, 1, "putRelease int value");
}
// Opaque
{
UNSAFE.putIntOpaque(base, offset, 2);
int x = UNSAFE.getIntOpaque(base, offset);
assertEquals(x, 2, "putOpaque int value");
}
// Unaligned
{
UNSAFE.putIntUnaligned(base, offset, 2);
@@ -199,6 +213,70 @@ public class JdkInternalMiscUnsafeAccessTestInt {
assertEquals(x, 2, "failing compareAndSwap int value");
}
// Advanced compare
{
int r = UNSAFE.compareAndExchangeIntVolatile(base, offset, 2, 1);
assertEquals(r, 2, "success compareAndExchangeVolatile int");
int x = UNSAFE.getInt(base, offset);
assertEquals(x, 1, "success compareAndExchangeVolatile int value");
}
{
int r = UNSAFE.compareAndExchangeIntVolatile(base, offset, 2, 3);
assertEquals(r, 1, "failing compareAndExchangeVolatile int");
int x = UNSAFE.getInt(base, offset);
assertEquals(x, 1, "failing compareAndExchangeVolatile int value");
}
{
int r = UNSAFE.compareAndExchangeIntAcquire(base, offset, 1, 2);
assertEquals(r, 1, "success compareAndExchangeAcquire int");
int x = UNSAFE.getInt(base, offset);
assertEquals(x, 2, "success compareAndExchangeAcquire int value");
}
{
int r = UNSAFE.compareAndExchangeIntAcquire(base, offset, 1, 3);
assertEquals(r, 2, "failing compareAndExchangeAcquire int");
int x = UNSAFE.getInt(base, offset);
assertEquals(x, 2, "failing compareAndExchangeAcquire int value");
}
{
int r = UNSAFE.compareAndExchangeIntRelease(base, offset, 2, 1);
assertEquals(r, 2, "success compareAndExchangeRelease int");
int x = UNSAFE.getInt(base, offset);
assertEquals(x, 1, "success compareAndExchangeRelease int value");
}
{
int r = UNSAFE.compareAndExchangeIntRelease(base, offset, 2, 3);
assertEquals(r, 1, "failing compareAndExchangeRelease int");
int x = UNSAFE.getInt(base, offset);
assertEquals(x, 1, "failing compareAndExchangeRelease int value");
}
{
boolean r = UNSAFE.weakCompareAndSwapInt(base, offset, 1, 2);
assertEquals(r, true, "weakCompareAndSwap int");
int x = UNSAFE.getInt(base, offset);
assertEquals(x, 2, "weakCompareAndSwap int value");
}
{
boolean r = UNSAFE.weakCompareAndSwapIntAcquire(base, offset, 2, 1);
assertEquals(r, true, "weakCompareAndSwapAcquire int");
int x = UNSAFE.getInt(base, offset);
assertEquals(x, 1, "weakCompareAndSwapAcquire int");
}
{
boolean r = UNSAFE.weakCompareAndSwapIntRelease(base, offset, 1, 2);
assertEquals(r, true, "weakCompareAndSwapRelease int");
int x = UNSAFE.getInt(base, offset);
assertEquals(x, 2, "weakCompareAndSwapRelease int");
}
// Compare set and get
{
int o = UNSAFE.getAndSetInt(base, offset, 1);

View File

@@ -163,6 +163,20 @@ public class JdkInternalMiscUnsafeAccessTestLong {
assertEquals(x, 1L, "putRelease long value");
}
// Lazy
{
UNSAFE.putLongRelease(base, offset, 1L);
long x = UNSAFE.getLongAcquire(base, offset);
assertEquals(x, 1L, "putRelease long value");
}
// Opaque
{
UNSAFE.putLongOpaque(base, offset, 2L);
long x = UNSAFE.getLongOpaque(base, offset);
assertEquals(x, 2L, "putOpaque long value");
}
// Unaligned
{
UNSAFE.putLongUnaligned(base, offset, 2L);
@@ -199,6 +213,70 @@ public class JdkInternalMiscUnsafeAccessTestLong {
assertEquals(x, 2L, "failing compareAndSwap long value");
}
// Advanced compare
{
long r = UNSAFE.compareAndExchangeLongVolatile(base, offset, 2L, 1L);
assertEquals(r, 2L, "success compareAndExchangeVolatile long");
long x = UNSAFE.getLong(base, offset);
assertEquals(x, 1L, "success compareAndExchangeVolatile long value");
}
{
long r = UNSAFE.compareAndExchangeLongVolatile(base, offset, 2L, 3L);
assertEquals(r, 1L, "failing compareAndExchangeVolatile long");
long x = UNSAFE.getLong(base, offset);
assertEquals(x, 1L, "failing compareAndExchangeVolatile long value");
}
{
long r = UNSAFE.compareAndExchangeLongAcquire(base, offset, 1L, 2L);
assertEquals(r, 1L, "success compareAndExchangeAcquire long");
long x = UNSAFE.getLong(base, offset);
assertEquals(x, 2L, "success compareAndExchangeAcquire long value");
}
{
long r = UNSAFE.compareAndExchangeLongAcquire(base, offset, 1L, 3L);
assertEquals(r, 2L, "failing compareAndExchangeAcquire long");
long x = UNSAFE.getLong(base, offset);
assertEquals(x, 2L, "failing compareAndExchangeAcquire long value");
}
{
long r = UNSAFE.compareAndExchangeLongRelease(base, offset, 2L, 1L);
assertEquals(r, 2L, "success compareAndExchangeRelease long");
long x = UNSAFE.getLong(base, offset);
assertEquals(x, 1L, "success compareAndExchangeRelease long value");
}
{
long r = UNSAFE.compareAndExchangeLongRelease(base, offset, 2L, 3L);
assertEquals(r, 1L, "failing compareAndExchangeRelease long");
long x = UNSAFE.getLong(base, offset);
assertEquals(x, 1L, "failing compareAndExchangeRelease long value");
}
{
boolean r = UNSAFE.weakCompareAndSwapLong(base, offset, 1L, 2L);
assertEquals(r, true, "weakCompareAndSwap long");
long x = UNSAFE.getLong(base, offset);
assertEquals(x, 2L, "weakCompareAndSwap long value");
}
{
boolean r = UNSAFE.weakCompareAndSwapLongAcquire(base, offset, 2L, 1L);
assertEquals(r, true, "weakCompareAndSwapAcquire long");
long x = UNSAFE.getLong(base, offset);
assertEquals(x, 1L, "weakCompareAndSwapAcquire long");
}
{
boolean r = UNSAFE.weakCompareAndSwapLongRelease(base, offset, 1L, 2L);
assertEquals(r, true, "weakCompareAndSwapRelease long");
long x = UNSAFE.getLong(base, offset);
assertEquals(x, 2L, "weakCompareAndSwapRelease long");
}
// Compare set and get
{
long o = UNSAFE.getAndSetLong(base, offset, 1L);

View File

@@ -134,6 +134,20 @@ public class JdkInternalMiscUnsafeAccessTestObject {
assertEquals(x, "foo", "putRelease Object value");
}
// Lazy
{
UNSAFE.putObjectRelease(base, offset, "foo");
Object x = UNSAFE.getObjectAcquire(base, offset);
assertEquals(x, "foo", "putRelease Object value");
}
// Opaque
{
UNSAFE.putObjectOpaque(base, offset, "bar");
Object x = UNSAFE.getObjectOpaque(base, offset);
assertEquals(x, "bar", "putOpaque Object value");
}
UNSAFE.putObject(base, offset, "foo");
@@ -152,6 +166,70 @@ public class JdkInternalMiscUnsafeAccessTestObject {
assertEquals(x, "bar", "failing compareAndSwap Object value");
}
// Advanced compare
{
Object r = UNSAFE.compareAndExchangeObjectVolatile(base, offset, "bar", "foo");
assertEquals(r, "bar", "success compareAndExchangeVolatile Object");
Object x = UNSAFE.getObject(base, offset);
assertEquals(x, "foo", "success compareAndExchangeVolatile Object value");
}
{
Object r = UNSAFE.compareAndExchangeObjectVolatile(base, offset, "bar", "baz");
assertEquals(r, "foo", "failing compareAndExchangeVolatile Object");
Object x = UNSAFE.getObject(base, offset);
assertEquals(x, "foo", "failing compareAndExchangeVolatile Object value");
}
{
Object r = UNSAFE.compareAndExchangeObjectAcquire(base, offset, "foo", "bar");
assertEquals(r, "foo", "success compareAndExchangeAcquire Object");
Object x = UNSAFE.getObject(base, offset);
assertEquals(x, "bar", "success compareAndExchangeAcquire Object value");
}
{
Object r = UNSAFE.compareAndExchangeObjectAcquire(base, offset, "foo", "baz");
assertEquals(r, "bar", "failing compareAndExchangeAcquire Object");
Object x = UNSAFE.getObject(base, offset);
assertEquals(x, "bar", "failing compareAndExchangeAcquire Object value");
}
{
Object r = UNSAFE.compareAndExchangeObjectRelease(base, offset, "bar", "foo");
assertEquals(r, "bar", "success compareAndExchangeRelease Object");
Object x = UNSAFE.getObject(base, offset);
assertEquals(x, "foo", "success compareAndExchangeRelease Object value");
}
{
Object r = UNSAFE.compareAndExchangeObjectRelease(base, offset, "bar", "baz");
assertEquals(r, "foo", "failing compareAndExchangeRelease Object");
Object x = UNSAFE.getObject(base, offset);
assertEquals(x, "foo", "failing compareAndExchangeRelease Object value");
}
{
boolean r = UNSAFE.weakCompareAndSwapObject(base, offset, "foo", "bar");
assertEquals(r, true, "weakCompareAndSwap Object");
Object x = UNSAFE.getObject(base, offset);
assertEquals(x, "bar", "weakCompareAndSwap Object value");
}
{
boolean r = UNSAFE.weakCompareAndSwapObjectAcquire(base, offset, "bar", "foo");
assertEquals(r, true, "weakCompareAndSwapAcquire Object");
Object x = UNSAFE.getObject(base, offset);
assertEquals(x, "foo", "weakCompareAndSwapAcquire Object");
}
{
boolean r = UNSAFE.weakCompareAndSwapObjectRelease(base, offset, "foo", "bar");
assertEquals(r, true, "weakCompareAndSwapRelease Object");
Object x = UNSAFE.getObject(base, offset);
assertEquals(x, "bar", "weakCompareAndSwapRelease Object");
}
// Compare set and get
{
Object o = UNSAFE.getAndSetObject(base, offset, "foo");

View File

@@ -157,6 +157,20 @@ public class JdkInternalMiscUnsafeAccessTestShort {
}
// Lazy
{
UNSAFE.putShortRelease(base, offset, (short)1);
short x = UNSAFE.getShortAcquire(base, offset);
assertEquals(x, (short)1, "putRelease short value");
}
// Opaque
{
UNSAFE.putShortOpaque(base, offset, (short)2);
short x = UNSAFE.getShortOpaque(base, offset);
assertEquals(x, (short)2, "putOpaque short value");
}
// Unaligned
{
UNSAFE.putShortUnaligned(base, offset, (short)2);

View File

@@ -169,6 +169,22 @@ public class $Qualifier$UnsafeAccessTest$Type$ {
}
#end[Ordered]
#if[JdkInternalMisc]
// Lazy
{
UNSAFE.put$Type$Release(base, offset, $value1$);
$type$ x = UNSAFE.get$Type$Acquire(base, offset);
assertEquals(x, $value1$, "putRelease $type$ value");
}
// Opaque
{
UNSAFE.put$Type$Opaque(base, offset, $value2$);
$type$ x = UNSAFE.get$Type$Opaque(base, offset);
assertEquals(x, $value2$, "putOpaque $type$ value");
}
#end[JdkInternalMisc]
#if[JdkInternalMisc]
#if[Unaligned]
// Unaligned
@@ -210,6 +226,72 @@ public class $Qualifier$UnsafeAccessTest$Type$ {
assertEquals(x, $value2$, "failing compareAndSwap $type$ value");
}
#if[JdkInternalMisc]
// Advanced compare
{
$type$ r = UNSAFE.compareAndExchange$Type$Volatile(base, offset, $value2$, $value1$);
assertEquals(r, $value2$, "success compareAndExchangeVolatile $type$");
$type$ x = UNSAFE.get$Type$(base, offset);
assertEquals(x, $value1$, "success compareAndExchangeVolatile $type$ value");
}
{
$type$ r = UNSAFE.compareAndExchange$Type$Volatile(base, offset, $value2$, $value3$);
assertEquals(r, $value1$, "failing compareAndExchangeVolatile $type$");
$type$ x = UNSAFE.get$Type$(base, offset);
assertEquals(x, $value1$, "failing compareAndExchangeVolatile $type$ value");
}
{
$type$ r = UNSAFE.compareAndExchange$Type$Acquire(base, offset, $value1$, $value2$);
assertEquals(r, $value1$, "success compareAndExchangeAcquire $type$");
$type$ x = UNSAFE.get$Type$(base, offset);
assertEquals(x, $value2$, "success compareAndExchangeAcquire $type$ value");
}
{
$type$ r = UNSAFE.compareAndExchange$Type$Acquire(base, offset, $value1$, $value3$);
assertEquals(r, $value2$, "failing compareAndExchangeAcquire $type$");
$type$ x = UNSAFE.get$Type$(base, offset);
assertEquals(x, $value2$, "failing compareAndExchangeAcquire $type$ value");
}
{
$type$ r = UNSAFE.compareAndExchange$Type$Release(base, offset, $value2$, $value1$);
assertEquals(r, $value2$, "success compareAndExchangeRelease $type$");
$type$ x = UNSAFE.get$Type$(base, offset);
assertEquals(x, $value1$, "success compareAndExchangeRelease $type$ value");
}
{
$type$ r = UNSAFE.compareAndExchange$Type$Release(base, offset, $value2$, $value3$);
assertEquals(r, $value1$, "failing compareAndExchangeRelease $type$");
$type$ x = UNSAFE.get$Type$(base, offset);
assertEquals(x, $value1$, "failing compareAndExchangeRelease $type$ value");
}
{
boolean r = UNSAFE.weakCompareAndSwap$Type$(base, offset, $value1$, $value2$);
assertEquals(r, true, "weakCompareAndSwap $type$");
$type$ x = UNSAFE.get$Type$(base, offset);
assertEquals(x, $value2$, "weakCompareAndSwap $type$ value");
}
{
boolean r = UNSAFE.weakCompareAndSwap$Type$Acquire(base, offset, $value2$, $value1$);
assertEquals(r, true, "weakCompareAndSwapAcquire $type$");
$type$ x = UNSAFE.get$Type$(base, offset);
assertEquals(x, $value1$, "weakCompareAndSwapAcquire $type$");
}
{
boolean r = UNSAFE.weakCompareAndSwap$Type$Release(base, offset, $value1$, $value2$);
assertEquals(r, true, "weakCompareAndSwapRelease $type$");
$type$ x = UNSAFE.get$Type$(base, offset);
assertEquals(x, $value2$, "weakCompareAndSwapRelease $type$");
}
#end[JdkInternalMisc]
// Compare set and get
{
$type$ o = UNSAFE.getAndSet$Type$(base, offset, $value1$);
@@ -244,4 +326,5 @@ public class $Qualifier$UnsafeAccessTest$Type$ {
}
#end[!boolean]
#end[!Object]
}
}

View File

@@ -0,0 +1,97 @@
#!/bin/bash
javac -d . ../../../../jdk/make/src/classes/build/tools/spp/Spp.java
SPP=build.tools.spp.Spp
# Generates unsafe access tests for objects and all primitive types
# $1 = package name to Unsafe, sun.misc | jdk.internal.misc
# $2 = test class qualifier name, SunMisc | JdkInternalMisc
function generate {
package=$1
Qualifier=$2
for type in boolean byte short char int long float double Object
do
Type="$(tr '[:lower:]' '[:upper:]' <<< ${type:0:1})${type:1}"
args="-K$type -Dtype=$type -DType=$Type"
case $type in
Object|int|long)
args="$args -KCAS -KOrdered"
;;
esac
case $type in
int|long)
args="$args -KAtomicAdd"
;;
esac
case $type in
short|char|int|long)
args="$args -KUnaligned"
;;
esac
case $type in
boolean)
value1=true
value2=false
value3=false
;;
byte)
value1=(byte)1
value2=(byte)2
value3=(byte)3
;;
short)
value1=(short)1
value2=(short)2
value3=(short)3
;;
char)
value1=\'a\'
value2=\'b\'
value3=\'c\'
;;
int)
value1=1
value2=2
value3=3
;;
long)
value1=1L
value2=2L
value3=3L
;;
float)
value1=1.0f
value2=2.0f
value3=3.0f
;;
double)
value1=1.0d
value2=2.0d
value3=3.0d
;;
Object)
value1=\"foo\"
value2=\"bar\"
value3=\"baz\"
;;
esac
args="$args -Dvalue1=$value1 -Dvalue2=$value2 -Dvalue3=$value3"
echo $args
java $SPP -nel -K$Qualifier -Dpackage=$package -DQualifier=$Qualifier \
$args < X-UnsafeAccessTest.java.template > ${Qualifier}UnsafeAccessTest${Type}.java
done
}
generate sun.misc SunMisc
generate jdk.internal.misc JdkInternalMisc
rm -fr build