8169061: Drop os::is_MP checks from Atomics

Reviewed-by: aph, dholmes
This commit is contained in:
Aleksey Shipilev 2017-04-26 09:06:59 +02:00
parent 3e3183a5a1
commit 111afb73a5
7 changed files with 89 additions and 186 deletions

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -42,23 +42,18 @@ inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *
inline void Atomic::store_ptr(void* store_value, volatile void* dest) { *(void* volatile *)dest = store_value; }
// Adding a lock prefix to an instruction on MP machine
#define LOCK_IF_MP(mp) "cmp $0, " #mp "; je 1f; lock; 1: "
inline jint Atomic::add    (jint add_value, volatile jint* dest) {
  jint addend = add_value;
  // lock xadd leaves the previous value of *dest in addend.
  __asm__ volatile (  "lock xaddl %0,(%2)"
                    : "=r" (addend)
                    : "0" (addend), "r" (dest)
                    : "cc", "memory");
  return addend + add_value;
}
inline void Atomic::inc    (volatile jint* dest) {
  // Result is not needed, so a locked add is sufficient.
  __asm__ volatile (  "lock addl $1,(%0)" :
                    : "r" (dest) : "cc", "memory");
}
inline void Atomic::inc_ptr(volatile void* dest) {
@ -66,9 +61,8 @@ inline void Atomic::inc_ptr(volatile void* dest) {
}
inline void Atomic::dec    (volatile jint* dest) {
  // Result is not needed, so a locked sub is sufficient.
  __asm__ volatile (  "lock subl $1,(%0)" :
                    : "r" (dest) : "cc", "memory");
}
inline void Atomic::dec_ptr(volatile void* dest) {
@ -89,19 +83,17 @@ inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* des
#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
inline jbyte Atomic::cmpxchg (jbyte exchange_value, volatile jbyte* dest, jbyte compare_value, cmpxchg_memory_order order) {
  // cmpxchgb: if *dest == AL (compare_value), store exchange_value;
  // either way AL ends up holding the old value of *dest.
  __asm__ volatile (  "lock cmpxchgb %1,(%3)"
                    : "=a" (exchange_value)
                    : "q" (exchange_value), "a" (compare_value), "r" (dest)
                    : "cc", "memory");
  return exchange_value;
}
inline jint Atomic::cmpxchg (jint exchange_value, volatile jint* dest, jint compare_value, cmpxchg_memory_order order) {
  // cmpxchgl: if *dest == EAX (compare_value), store exchange_value;
  // either way EAX ends up holding the old value of *dest.
  __asm__ volatile (  "lock cmpxchgl %1,(%3)"
                    : "=a" (exchange_value)
                    : "r" (exchange_value), "a" (compare_value), "r" (dest)
                    : "cc", "memory");
  return exchange_value;
}
@ -112,10 +104,9 @@ inline void Atomic::store (jlong store_value, volatile jlong* dest) { *
inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
  intptr_t addend = add_value;
  // 64-bit locked xadd; previous value of *dest is left in addend.
  __asm__ __volatile__ (  "lock xaddq %0,(%2)"
                        : "=r" (addend)
                        : "0" (addend), "r" (dest)
                        : "cc", "memory");
  return addend + add_value;
}
@ -125,18 +116,16 @@ inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
}
inline void Atomic::inc_ptr(volatile intptr_t* dest) {
  // Result is not needed, so a locked add is sufficient.
  __asm__ __volatile__ (  "lock addq $1,(%0)"
                        :
                        : "r" (dest)
                        : "cc", "memory");
}
inline void Atomic::dec_ptr(volatile intptr_t* dest) {
  // Result is not needed, so a locked sub is sufficient.
  __asm__ __volatile__ (  "lock subq $1,(%0)"
                        :
                        : "r" (dest)
                        : "cc", "memory");
}
@ -149,10 +138,9 @@ inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* des
}
inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value, cmpxchg_memory_order order) {
  // cmpxchgq: if *dest == RAX (compare_value), store exchange_value;
  // either way RAX ends up holding the old value of *dest.
  __asm__ __volatile__ (  "lock cmpxchgq %1,(%3)"
                        : "=a" (exchange_value)
                        : "r" (exchange_value), "a" (compare_value), "r" (dest)
                        : "cc", "memory");
  return exchange_value;
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -42,23 +42,18 @@ inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *
inline void Atomic::store_ptr(void* store_value, volatile void* dest) { *(void* volatile *)dest = store_value; }
// Adding a lock prefix to an instruction on MP machine
#define LOCK_IF_MP(mp) "cmp $0, " #mp "; je 1f; lock; 1: "
inline jint Atomic::add    (jint add_value, volatile jint* dest) {
  jint addend = add_value;
  // lock xadd leaves the previous value of *dest in addend.
  __asm__ volatile (  "lock xaddl %0,(%2)"
                    : "=r" (addend)
                    : "0" (addend), "r" (dest)
                    : "cc", "memory");
  return addend + add_value;
}
inline void Atomic::inc    (volatile jint* dest) {
  // Result is not needed, so a locked add is sufficient.
  __asm__ volatile (  "lock addl $1,(%0)" :
                    : "r" (dest) : "cc", "memory");
}
inline void Atomic::inc_ptr(volatile void* dest) {
@ -66,9 +61,8 @@ inline void Atomic::inc_ptr(volatile void* dest) {
}
inline void Atomic::dec    (volatile jint* dest) {
  // Result is not needed, so a locked sub is sufficient.
  __asm__ volatile (  "lock subl $1,(%0)" :
                    : "r" (dest) : "cc", "memory");
}
inline void Atomic::dec_ptr(volatile void* dest) {
@ -89,19 +83,17 @@ inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* des
#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
inline jbyte Atomic::cmpxchg (jbyte exchange_value, volatile jbyte* dest, jbyte compare_value, cmpxchg_memory_order order) {
  // cmpxchgb: if *dest == AL (compare_value), store exchange_value;
  // either way AL ends up holding the old value of *dest.
  __asm__ volatile ("lock cmpxchgb %1,(%3)"
                    : "=a" (exchange_value)
                    : "q" (exchange_value), "a" (compare_value), "r" (dest)
                    : "cc", "memory");
  return exchange_value;
}
inline jint Atomic::cmpxchg (jint exchange_value, volatile jint* dest, jint compare_value, cmpxchg_memory_order order) {
  // cmpxchgl: if *dest == EAX (compare_value), store exchange_value;
  // either way EAX ends up holding the old value of *dest.
  __asm__ volatile ("lock cmpxchgl %1,(%3)"
                    : "=a" (exchange_value)
                    : "r" (exchange_value), "a" (compare_value), "r" (dest)
                    : "cc", "memory");
  return exchange_value;
}
@ -112,10 +104,9 @@ inline void Atomic::store (jlong store_value, volatile jlong* dest) { *
inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
  intptr_t addend = add_value;
  // 64-bit locked xadd; previous value of *dest is left in addend.
  __asm__ __volatile__ ("lock xaddq %0,(%2)"
                        : "=r" (addend)
                        : "0" (addend), "r" (dest)
                        : "cc", "memory");
  return addend + add_value;
}
@ -125,18 +116,16 @@ inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
}
inline void Atomic::inc_ptr(volatile intptr_t* dest) {
  // Result is not needed, so a locked add is sufficient.
  __asm__ __volatile__ ("lock addq $1,(%0)"
                        :
                        : "r" (dest)
                        : "cc", "memory");
}
inline void Atomic::dec_ptr(volatile intptr_t* dest) {
  // Result is not needed, so a locked sub is sufficient.
  __asm__ __volatile__ ("lock subq $1,(%0)"
                        :
                        : "r" (dest)
                        : "cc", "memory");
}
@ -149,10 +138,9 @@ inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* des
}
inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value, cmpxchg_memory_order order) {
  // cmpxchgq: if *dest == RAX (compare_value), store exchange_value;
  // either way RAX ends up holding the old value of *dest.
  __asm__ __volatile__ ("lock cmpxchgq %1,(%3)"
                        : "=a" (exchange_value)
                        : "r" (exchange_value), "a" (compare_value), "r" (dest)
                        : "cc", "memory");
  return exchange_value;
}
@ -192,12 +180,12 @@ inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* des
extern "C" {
  // defined in linux_x86.s
  jlong _Atomic_cmpxchg_long(jlong, volatile jlong*, jlong);
  void _Atomic_move_long(volatile jlong* src, volatile jlong* dst);
}
inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value, cmpxchg_memory_order order) {
  // 32-bit VM: the 8-byte cmpxchg is implemented in assembly (linux_x86.s).
  return _Atomic_cmpxchg_long(exchange_value, dest, compare_value);
}
inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2004, 2017, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -614,8 +614,7 @@ mmx_acs_CopyLeft:
# Support for jlong Atomic::cmpxchg(jlong exchange_value,
# volatile jlong* dest,
# jlong compare_value,
# bool is_MP)
# jlong compare_value)
#
.p2align 4,,15
.type _Atomic_cmpxchg_long,@function
@ -628,10 +627,7 @@ _Atomic_cmpxchg_long:
movl 24(%esp), %eax # 24(%esp) : compare_value (low)
movl 28(%esp), %edx # 28(%esp) : compare_value (high)
movl 20(%esp), %edi # 20(%esp) : dest
cmpl $0, 32(%esp) # 32(%esp) : is_MP
je 1f
lock
1: cmpxchg8b (%edi)
lock cmpxchg8b (%edi)
popl %edi
popl %ebx
ret

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -52,31 +52,19 @@ inline void Atomic::dec_ptr(volatile void* dest) { (void)add_ptr(-1, dest);
// For Sun Studio - implementation is in solaris_x86_[32/64].il.
// For gcc - implementation is just below.
// The lock prefix can be omitted for certain instructions on uniprocessors; to
// facilitate this, os::is_MP() is passed as an additional argument. 64-bit
// processors are assumed to be multi-threaded and/or multi-core, so the extra
// argument is unnecessary.
#ifndef _LP64
#define IS_MP_DECL() , int is_mp
#define IS_MP_ARG() , (int) os::is_MP()
#else
#define IS_MP_DECL()
#define IS_MP_ARG()
#endif // _LP64
extern "C" {
  jint _Atomic_add(jint add_value, volatile jint* dest);
  jint _Atomic_xchg(jint exchange_value, volatile jint* dest);
  jbyte _Atomic_cmpxchg_byte(jbyte exchange_value, volatile jbyte* dest,
                             jbyte compare_value);
  jint _Atomic_cmpxchg(jint exchange_value, volatile jint* dest,
                       jint compare_value);
  jlong _Atomic_cmpxchg_long(jlong exchange_value, volatile jlong* dest,
                             jlong compare_value);
}
inline jint Atomic::add    (jint add_value, volatile jint* dest) {
  // For Sun Studio the implementation is in solaris_x86_[32/64].il;
  // for gcc it is the inline-asm version further below.
  return _Atomic_add(add_value, dest);
}
inline jint Atomic::xchg (jint exchange_value, volatile jint* dest) {
@ -85,15 +73,15 @@ inline jint Atomic::xchg (jint exchange_value, volatile jint*
#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
inline jbyte Atomic::cmpxchg (jbyte exchange_value, volatile jbyte* dest, jbyte compare_value, cmpxchg_memory_order order) {
  // Delegate to the platform-specific assembly/inline-asm implementation.
  return _Atomic_cmpxchg_byte(exchange_value, dest, compare_value);
}
inline jint Atomic::cmpxchg (jint exchange_value, volatile jint* dest, jint compare_value, cmpxchg_memory_order order) {
  // Delegate to the platform-specific assembly/inline-asm implementation.
  return _Atomic_cmpxchg(exchange_value, dest, compare_value);
}
inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value, cmpxchg_memory_order order) {
  // Delegate to the platform-specific assembly/inline-asm implementation.
  return _Atomic_cmpxchg_long(exchange_value, dest, compare_value);
}
@ -174,25 +162,23 @@ inline void Atomic::store(jlong store_value, volatile jlong* dest) {
#endif // AMD64
#ifdef _GNU_SOURCE
// Add a lock prefix to an instruction on an MP machine
#define LOCK_IF_MP(mp) "cmp $0, " #mp "; je 1f; lock; 1: "
extern "C" {
inline jint _Atomic_add(jint add_value, volatile jint* dest, int mp) {
inline jint _Atomic_add(jint add_value, volatile jint* dest) {
jint addend = add_value;
__asm__ volatile ( LOCK_IF_MP(%3) "xaddl %0,(%2)"
__asm__ volatile ("lock xaddl %0,(%2)"
: "=r" (addend)
: "0" (addend), "r" (dest), "r" (mp)
: "0" (addend), "r" (dest)
: "cc", "memory");
return addend + add_value;
}
#ifdef AMD64
inline jlong _Atomic_add_long(jlong add_value, volatile jlong* dest, int mp) {
inline jlong _Atomic_add_long(jlong add_value, volatile jlong* dest) {
intptr_t addend = add_value;
__asm__ __volatile__ (LOCK_IF_MP(%3) "xaddq %0,(%2)"
__asm__ __volatile__ ("lock xaddq %0,(%2)"
: "=r" (addend)
: "0" (addend), "r" (dest), "r" (mp)
: "0" (addend), "r" (dest)
: "cc", "memory");
return addend + add_value;
}
@ -215,35 +201,35 @@ extern "C" {
return exchange_value;
}
inline jint _Atomic_cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value, int mp) {
__asm__ volatile (LOCK_IF_MP(%4) "cmpxchgl %1,(%3)"
inline jint _Atomic_cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value) {
__asm__ volatile ("lock cmpxchgl %1,(%3)"
: "=a" (exchange_value)
: "r" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
: "r" (exchange_value), "a" (compare_value), "r" (dest)
: "cc", "memory");
return exchange_value;
}
inline jbyte _Atomic_cmpxchg_byte(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value, int mp) {
__asm__ volatile (LOCK_IF_MP(%4) "cmpxchgb %1,(%3)"
inline jbyte _Atomic_cmpxchg_byte(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value) {
__asm__ volatile ("lock cmpxchgb %1,(%3)"
: "=a" (exchange_value)
: "q" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
: "q" (exchange_value), "a" (compare_value), "r" (dest)
: "cc", "memory");
return exchange_value;
}
// This is the interface to the atomic instruction in solaris_i486.s.
jlong _Atomic_cmpxchg_long_gcc(jlong exchange_value, volatile jlong* dest, jlong compare_value, int mp);
jlong _Atomic_cmpxchg_long_gcc(jlong exchange_value, volatile jlong* dest, jlong compare_value);
inline jlong _Atomic_cmpxchg_long(jlong exchange_value, volatile jlong* dest, jlong compare_value, int mp) {
inline jlong _Atomic_cmpxchg_long(jlong exchange_value, volatile jlong* dest, jlong compare_value) {
#ifdef AMD64
__asm__ __volatile__ (LOCK_IF_MP(%4) "cmpxchgq %1,(%3)"
__asm__ __volatile__ ("lock cmpxchgq %1,(%3)"
: "=a" (exchange_value)
: "r" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
: "r" (exchange_value), "a" (compare_value), "r" (dest)
: "cc", "memory");
return exchange_value;
#else
return _Atomic_cmpxchg_long_gcc(exchange_value, dest, compare_value, os::is_MP());
return _Atomic_cmpxchg_long_gcc(exchange_value, dest, compare_value);
#if 0
// The code below does not work presumably because of the bug in gcc
@ -255,23 +241,19 @@ extern "C" {
volatile jlong_accessor evl, cvl, rv;
evl.long_value = exchange_value;
cvl.long_value = compare_value;
int mp = os::is_MP();
__asm__ volatile ("cmp $0, %%esi\n\t"
"je 1f \n\t"
"lock\n\t"
"1: cmpxchg8b (%%edi)\n\t"
__asm__ volatile (
"lock cmpxchg8b (%%edi)\n\t"
: "=a"(cvl.words[0]), "=d"(cvl.words[1])
: "a"(cvl.words[0]), "d"(cvl.words[1]),
"b"(evl.words[0]), "c"(evl.words[1]),
"D"(dest), "S"(mp)
"D"(dest)
: "cc", "memory");
return cvl.long_value;
#endif // if 0
#endif // AMD64
}
}
#undef LOCK_IF_MP
#endif // _GNU_SOURCE

View File

@ -1,5 +1,5 @@
//
// Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
@ -55,18 +55,12 @@
.end
// Support for jint Atomic::add(jint inc, volatile jint* dest)
.inline _Atomic_add,3
movl 0(%esp), %eax // inc
movl 4(%esp), %edx // dest
movl %eax, %ecx
lock xaddl %eax, (%edx)
addl %ecx, %eax
.end
// Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest).
@ -79,41 +73,26 @@
// Support for jbyte Atomic::cmpxchg(jbyte exchange_value,
// volatile jbyte *dest,
// jbyte compare_value)
.inline _Atomic_cmpxchg_byte,4
movb 8(%esp), %al // compare_value
movb 0(%esp), %cl // exchange_value
movl 4(%esp), %edx // dest
lock cmpxchgb %cl, (%edx)
.end
// Support for jint Atomic::cmpxchg(jint exchange_value,
// volatile jint *dest,
// jint compare_value)
.inline _Atomic_cmpxchg,4
movl 8(%esp), %eax // compare_value
movl 0(%esp), %ecx // exchange_value
movl 4(%esp), %edx // dest
lock cmpxchgl %ecx, (%edx)
.end
// Support for jlong Atomic::cmpxchg(jlong exchange_value,
// volatile jlong* dest,
// jlong compare_value)
// An additional bool (os::is_MP()) is passed as the last argument.
.inline _Atomic_cmpxchg_long,6
pushl %ebx
pushl %edi
@ -122,13 +101,8 @@
movl 16(%esp), %edi // dest
movl 8(%esp), %ebx // exchange_value (low)
movl 12(%esp), %ecx // exchange_high (high)
cmp $0, 28(%esp) // MP test
jne 1f
cmpxchg8b (%edi)
jmp 2f
1: lock
cmpxchg8b (%edi)
2: popl %edi
lock cmpxchg8b (%edi)
popl %edi
popl %ebx
.end

View File

@ -1,5 +1,5 @@
//
// Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2004, 2017, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
@ -643,8 +643,7 @@ mmx_acs_CopyLeft:
/ Support for jlong Atomic::cmpxchg(jlong exchange_value,
/ volatile jlong* dest,
/ jlong compare_value,
/ bool is_MP)
/ jlong compare_value)
/ Used only for Solaris/gcc builds
.align 16
_Atomic_cmpxchg_long_gcc:
@ -656,10 +655,7 @@ _Atomic_cmpxchg_long_gcc:
movl 24(%esp), %eax / 24(%esp) : compare_value (low)
movl 28(%esp), %edx / 28(%esp) : compare_value (high)
movl 20(%esp), %edi / 20(%esp) : dest
cmpl $0, 32(%esp) / 32(%esp) : is_MP
je 1f
lock
1: cmpxchg8b (%edi)
lock cmpxchg8b (%edi)
popl %edi
popl %ebx
ret

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -57,15 +57,6 @@ inline void Atomic::store (jint store_value, volatile jint* dest) { *
inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
inline void Atomic::store_ptr(void* store_value, volatile void* dest) { *(void* volatile *)dest = store_value; }
// Adding a lock prefix to an instruction on MP machine
// VC++ doesn't like the lock prefix to be on a single line
// so we can't insert a label after the lock prefix.
// By emitting a lock prefix, we can define a label after it.
#define LOCK_IF_MP(mp) __asm cmp mp, 0 \
__asm je L0 \
__asm _emit 0xF0 \
__asm L0:
#ifdef AMD64
inline void Atomic::store (jlong store_value, jlong* dest) { *dest = store_value; }
inline void Atomic::store (jlong store_value, volatile jlong* dest) { *dest = store_value; }
@ -144,13 +135,11 @@ inline jlong Atomic::load(volatile jlong* src) { return *src; }
#else // !AMD64
inline jint Atomic::add    (jint add_value, volatile jint* dest) {
  // lock xadd leaves the previous value of *dest in eax; adding
  // ecx (the original add_value) yields the updated value as result.
  __asm {
    mov edx, dest;
    mov eax, add_value;
    mov ecx, eax;
    lock xadd dword ptr [edx], eax;
    add eax, ecx;
  }
}
@ -165,11 +154,9 @@ inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
inline void Atomic::inc    (volatile jint* dest) {
  // alternative for InterlockedIncrement
  __asm {
    mov edx, dest;
    lock add dword ptr [edx], 1;
  }
}
@ -183,11 +170,9 @@ inline void Atomic::inc_ptr(volatile void* dest) {
inline void Atomic::dec    (volatile jint* dest) {
  // alternative for InterlockedDecrement
  __asm {
    mov edx, dest;
    lock sub dword ptr [edx], 1;
  }
}
@ -219,30 +204,25 @@ inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* des
#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
inline jbyte Atomic::cmpxchg (jbyte exchange_value, volatile jbyte* dest, jbyte compare_value, cmpxchg_memory_order order) {
  // alternative for InterlockedCompareExchange
  // Old value of *dest is returned in AL.
  __asm {
    mov edx, dest
    mov cl, exchange_value
    mov al, compare_value
    lock cmpxchg byte ptr [edx], cl
  }
}
inline jint Atomic::cmpxchg (jint exchange_value, volatile jint* dest, jint compare_value, cmpxchg_memory_order order) {
  // alternative for InterlockedCompareExchange
  // Old value of *dest is returned in EAX.
  __asm {
    mov edx, dest
    mov ecx, exchange_value
    mov eax, compare_value
    lock cmpxchg dword ptr [edx], ecx
  }
}
inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value, cmpxchg_memory_order order) {
int mp = os::is_MP();
jint ex_lo = (jint)exchange_value;
jint ex_hi = *( ((jint*)&exchange_value) + 1 );
jint cmp_lo = (jint)compare_value;
@ -255,8 +235,7 @@ inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong*
mov edi, dest
mov ebx, ex_lo
mov ecx, ex_hi
LOCK_IF_MP(mp)
cmpxchg8b qword ptr [edi]
lock cmpxchg8b qword ptr [edi]
pop edi
pop ebx
}