8234740: Harmonize parameter order in Atomic - cmpxchg
Reviewed-by: rehn, dholmes
commit 0ad50c2b5c (parent 3d426623bf)
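The change below is mechanical: every Atomic::cmpxchg call site swaps from the old argument order (exchange_value, dest, compare_value) to the harmonized order (dest, compare_value, exchange_value). A minimal standalone sketch of the two orderings follows; it is illustrative only, not HotSpot code, and uses std::atomic to stand in for the real templated Atomic class (indented to set it off from the diff hunks):

    // Minimal sketch (not HotSpot code) of the parameter-order change this commit
    // applies at every call site: the destination pointer moves to the front and the
    // exchange value to the back, so a call reads as
    // cmpxchg(where, what-we-expect, what-we-want), similar to compare_exchange in <atomic>.
    #include <atomic>
    #include <cassert>

    // Old order: cmpxchg(exchange_value, dest, compare_value)
    static int cmpxchg_old_order(int exchange_value, std::atomic<int>* dest, int compare_value) {
      dest->compare_exchange_strong(compare_value, exchange_value);
      return compare_value;  // witness value found at *dest, as Atomic::cmpxchg returns it
    }

    // New order: cmpxchg(dest, compare_value, exchange_value)
    static int cmpxchg_new_order(std::atomic<int>* dest, int compare_value, int exchange_value) {
      dest->compare_exchange_strong(compare_value, exchange_value);
      return compare_value;
    }

    int main() {
      std::atomic<int> v{1};
      // Both forms perform the same CAS; only the argument order differs.
      assert(cmpxchg_old_order(2, &v, 1) == 1);  // success: witness == compare_value
      assert(cmpxchg_new_order(&v, 2, 3) == 2);  // success again, now in the new order
      assert(v.load() == 3);
      return 0;
    }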
@@ -537,7 +537,8 @@ class StubGenerator: public StubCodeGenerator {
return start;
}

-// Support for jint Atomic::cmpxchg(jint exchange_value, volatile jint *dest, jint compare_value)
+// Implementation of jint atomic_cmpxchg(jint exchange_value, volatile jint *dest, jint compare_value)
+// used by Atomic::cmpxchg(volatile jint *dest, jint compare_value, jint exchange_value)
//
// Arguments :
//

@@ -883,7 +883,7 @@ void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
//
// markWord displaced_header = obj->mark().set_unlocked();
// monitor->lock()->set_displaced_header(displaced_header);
-// if (Atomic::cmpxchg(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
+// if (Atomic::cmpxchg(/*addr*/obj->mark_addr(), /*cmp*/displaced_header, /*ex=*/monitor) == displaced_header) {
// // We stored the monitor address into the object's mark word.
// } else if (THREAD->is_lock_owned((address)displaced_header))
// // Simple recursive case.

@@ -921,7 +921,7 @@ void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
std(displaced_header, BasicObjectLock::lock_offset_in_bytes() +
BasicLock::displaced_header_offset_in_bytes(), monitor);

-// if (Atomic::cmpxchg(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
+// if (Atomic::cmpxchg(/*addr*/obj->mark_addr(), /*cmp*/displaced_header, /*ex=*/monitor) == displaced_header) {

// Store stack address of the BasicObjectLock (this is monitor) into object.
addi(object_mark_addr, object, oopDesc::mark_offset_in_bytes());

@@ -997,7 +997,7 @@ void InterpreterMacroAssembler::unlock_object(Register monitor, bool check_for_e
// if ((displaced_header = monitor->displaced_header()) == NULL) {
// // Recursive unlock. Mark the monitor unlocked by setting the object field to NULL.
// monitor->set_obj(NULL);
-// } else if (Atomic::cmpxchg(displaced_header, obj->mark_addr(), monitor) == monitor) {
+// } else if (Atomic::cmpxchg(obj->mark_addr(), monitor, displaced_header) == monitor) {
// // We swapped the unlocked mark in displaced_header into the object's mark word.
// monitor->set_obj(NULL);
// } else {

@@ -1030,7 +1030,7 @@ void InterpreterMacroAssembler::unlock_object(Register monitor, bool check_for_e
cmpdi(CCR0, displaced_header, 0);
beq(CCR0, free_slot); // recursive unlock

-// } else if (Atomic::cmpxchg(displaced_header, obj->mark_addr(), monitor) == monitor) {
+// } else if (Atomic::cmpxchg(obj->mark_addr(), monitor, displaced_header) == monitor) {
// // We swapped the unlocked mark in displaced_header into the object's mark word.
// monitor->set_obj(NULL);

@@ -976,7 +976,7 @@ void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
//
// markWord displaced_header = obj->mark().set_unlocked();
// monitor->lock()->set_displaced_header(displaced_header);
-// if (Atomic::cmpxchg(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
+// if (Atomic::cmpxchg(/*addr*/obj->mark_addr(), /*cmp*/displaced_header, /*ex=*/monitor) == displaced_header) {
// // We stored the monitor address into the object's mark word.
// } else if (THREAD->is_lock_owned((address)displaced_header))
// // Simple recursive case.

@@ -1011,7 +1011,7 @@ void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
z_stg(displaced_header, BasicObjectLock::lock_offset_in_bytes() +
BasicLock::displaced_header_offset_in_bytes(), monitor);

-// if (Atomic::cmpxchg(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
+// if (Atomic::cmpxchg(/*addr*/obj->mark_addr(), /*cmp*/displaced_header, /*ex=*/monitor) == displaced_header) {

// Store stack address of the BasicObjectLock (this is monitor) into object.
add2reg(object_mark_addr, oopDesc::mark_offset_in_bytes(), object);

@@ -1082,7 +1082,7 @@ void InterpreterMacroAssembler::unlock_object(Register monitor, Register object)
// if ((displaced_header = monitor->displaced_header()) == NULL) {
// // Recursive unlock. Mark the monitor unlocked by setting the object field to NULL.
// monitor->set_obj(NULL);
-// } else if (Atomic::cmpxchg(displaced_header, obj->mark_addr(), monitor) == monitor) {
+// } else if (Atomic::cmpxchg(obj->mark_addr(), monitor, displaced_header) == monitor) {
// // We swapped the unlocked mark in displaced_header into the object's mark word.
// monitor->set_obj(NULL);
// } else {

@@ -1123,7 +1123,7 @@ void InterpreterMacroAssembler::unlock_object(Register monitor, Register object)
BasicLock::displaced_header_offset_in_bytes()));
z_bre(done); // displaced_header == 0 -> goto done

-// } else if (Atomic::cmpxchg(displaced_header, obj->mark_addr(), monitor) == monitor) {
+// } else if (Atomic::cmpxchg(obj->mark_addr(), monitor, displaced_header) == monitor) {
// // We swapped the unlocked mark in displaced_header into the object's mark word.
// monitor->set_obj(NULL);

@@ -623,7 +623,8 @@ class StubGenerator: public StubCodeGenerator {
}


-// Support for jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value)
+// Implementation of jint atomic_cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value)
+// used by Atomic::cmpxchg(volatile jint* dest, jint compare_value, jint exchange_value)
//
// Arguments:
//

@@ -647,7 +648,8 @@ class StubGenerator: public StubCodeGenerator {
return start;
}

-// Support for jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong *dest, jlong compare_value)
+// Implementation of jlong atomic_cmpxchg_long(jlong exchange_value, volatile jlong *dest, jlong compare_value)
+// used by Atomic::cmpxchg(volatile jlong *dest, jlong compare_value, jlong exchange_value)
//
// Arguments:
//
@@ -1084,7 +1084,7 @@ jlong os::javaTimeNanos() {
if (now <= prev) {
return prev; // same or retrograde time;
}
-jlong obsv = Atomic::cmpxchg(now, &max_real_time, prev);
+jlong obsv = Atomic::cmpxchg(&max_real_time, prev, now);
assert(obsv >= prev, "invariant"); // Monotonicity
// If the CAS succeeded then we're done and return "now".
// If the CAS failed and the observed value "obsv" is >= now then

@@ -1794,7 +1794,7 @@ static int check_pending_signals() {
for (;;) {
for (int i = 0; i < NSIG + 1; i++) {
jint n = pending_signals[i];
-if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
+if (n > 0 && n == Atomic::cmpxchg(&pending_signals[i], n, n - 1)) {
return i;
}
}

@@ -930,7 +930,7 @@ jlong os::javaTimeNanos() {
if (now <= prev) {
return prev; // same or retrograde time;
}
-const uint64_t obsv = Atomic::cmpxchg(now, &Bsd::_max_abstime, prev);
+const uint64_t obsv = Atomic::cmpxchg(&Bsd::_max_abstime, prev, now);
assert(obsv >= prev, "invariant"); // Monotonicity
// If the CAS succeeded then we're done and return "now".
// If the CAS failed and the observed value "obsv" is >= now then

@@ -1833,7 +1833,7 @@ static int check_pending_signals() {
for (;;) {
for (int i = 0; i < NSIG + 1; i++) {
jint n = pending_signals[i];
-if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
+if (n > 0 && n == Atomic::cmpxchg(&pending_signals[i], n, n - 1)) {
return i;
}
}

@@ -3237,7 +3237,7 @@ static inline volatile int* get_apic_to_processor_mapping() {
mapping[i] = -1;
}

-if (!Atomic::replace_if_null(mapping, &apic_to_processor_mapping)) {
+if (!Atomic::replace_if_null(&apic_to_processor_mapping, mapping)) {
FREE_C_HEAP_ARRAY(int, mapping);
mapping = Atomic::load_acquire(&apic_to_processor_mapping);
}

@@ -3263,7 +3263,7 @@ uint os::processor_id() {
int processor_id = Atomic::load(&mapping[apic_id]);

while (processor_id < 0) {
-if (Atomic::cmpxchg(-2, &mapping[apic_id], -1) == -1) {
+if (Atomic::cmpxchg(&mapping[apic_id], -1, -2) == -1) {
Atomic::store(&mapping[apic_id], Atomic::add(&next_processor_id, 1) - 1);
}
processor_id = Atomic::load(&mapping[apic_id]);

@@ -2752,7 +2752,7 @@ static int check_pending_signals() {
for (;;) {
for (int i = 0; i < NSIG + 1; i++) {
jint n = pending_signals[i];
-if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
+if (n > 0 && n == Atomic::cmpxchg(&pending_signals[i], n, n - 1)) {
return i;
}
}
@@ -1900,7 +1900,7 @@ void os::PlatformEvent::park() { // AKA "down()"
// atomically decrement _event
for (;;) {
v = _event;
-if (Atomic::cmpxchg(v - 1, &_event, v) == v) break;
+if (Atomic::cmpxchg(&_event, v, v - 1) == v) break;
}
guarantee(v >= 0, "invariant");

@@ -1940,7 +1940,7 @@ int os::PlatformEvent::park(jlong millis) {
// atomically decrement _event
for (;;) {
v = _event;
-if (Atomic::cmpxchg(v - 1, &_event, v) == v) break;
+if (Atomic::cmpxchg(&_event, v, v - 1) == v) break;
}
guarantee(v >= 0, "invariant");

@@ -1024,7 +1024,7 @@ inline hrtime_t getTimeNanos() {
if (now <= prev) {
return prev; // same or retrograde time;
}
-const hrtime_t obsv = Atomic::cmpxchg(now, &max_hrtime, prev);
+const hrtime_t obsv = Atomic::cmpxchg(&max_hrtime, prev, now);
assert(obsv >= prev, "invariant"); // Monotonicity
// If the CAS succeeded then we're done and return "now".
// If the CAS failed and the observed value "obsv" is >= now then

@@ -1984,7 +1984,7 @@ static int check_pending_signals() {
while (true) {
for (int i = 0; i < Sigexit + 1; i++) {
jint n = pending_signals[i];
-if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
+if (n > 0 && n == Atomic::cmpxchg(&pending_signals[i], n, n - 1)) {
return i;
}
}

@@ -4710,7 +4710,7 @@ void os::PlatformEvent::park() { // AKA: down()
int v;
for (;;) {
v = _Event;
-if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
+if (Atomic::cmpxchg(&_Event, v, v-1) == v) break;
}
guarantee(v >= 0, "invariant");
if (v == 0) {

@@ -4748,7 +4748,7 @@ int os::PlatformEvent::park(jlong millis) {
int v;
for (;;) {
v = _Event;
-if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
+if (Atomic::cmpxchg(&_Event, v, v-1) == v) break;
}
guarantee(v >= 0, "invariant");
if (v != 0) return OS_OK;

@@ -2096,7 +2096,7 @@ static int check_pending_signals() {
while (true) {
for (int i = 0; i < NSIG + 1; i++) {
jint n = pending_signals[i];
-if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
+if (n > 0 && n == Atomic::cmpxchg(&pending_signals[i], n, n - 1)) {
return i;
}
}

@@ -3751,7 +3751,7 @@ int os::win32::exit_process_or_thread(Ept what, int exit_code) {
if (what != EPT_THREAD) {
// Atomically set process_exiting before the critical section
// to increase the visibility between racing threads.
-Atomic::cmpxchg(GetCurrentThreadId(), &process_exiting, (DWORD)0);
+Atomic::cmpxchg(&process_exiting, (DWORD)0, GetCurrentThreadId());
}
EnterCriticalSection(&crit_sect);

@@ -5136,7 +5136,7 @@ int os::PlatformEvent::park(jlong Millis) {
int v;
for (;;) {
v = _Event;
-if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
+if (Atomic::cmpxchg(&_Event, v, v-1) == v) break;
}
guarantee((v == 0) || (v == 1), "invariant");
if (v != 0) return OS_OK;

@@ -5198,7 +5198,7 @@ void os::PlatformEvent::park() {
int v;
for (;;) {
v = _Event;
-if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
+if (Atomic::cmpxchg(&_Event, v, v-1) == v) break;
}
guarantee((v == 0) || (v == 1), "invariant");
if (v != 0) return;

@@ -56,7 +56,7 @@ ThreadCritical::ThreadCritical() {

if (lock_owner != current_thread) {
// Grab the lock before doing anything.
-while (Atomic::cmpxchg(0, &lock_count, -1) != -1) {
+while (Atomic::cmpxchg(&lock_count, -1, 0) != -1) {
if (initialized) {
DWORD ret = WaitForSingleObject(lock_event, INFINITE);
assert(ret == WAIT_OBJECT_0, "unexpected return value from WaitForSingleObject");
@@ -232,9 +232,9 @@ inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest,

template<>
template<typename T>
-inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
-T volatile* dest,
+inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest,
T compare_value,
+T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(1 == sizeof(T));

@@ -302,9 +302,9 @@ inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,

template<>
template<typename T>
-inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
-T volatile* dest,
+inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
T compare_value,
+T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(4 == sizeof(T));

@@ -352,9 +352,9 @@ inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,

template<>
template<typename T>
-inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
-T volatile* dest,
+inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
T compare_value,
+T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(8 == sizeof(T));

@@ -64,9 +64,9 @@ inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,

template<>
template<typename T>
-inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
-T volatile* dest,
+inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest,
T compare_value,
+T exchange_value,
atomic_memory_order /* order */) const {
STATIC_ASSERT(1 == sizeof(T));
__asm__ volatile ( "lock cmpxchgb %1,(%3)"

@@ -78,9 +78,9 @@ inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,

template<>
template<typename T>
-inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
-T volatile* dest,
+inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
T compare_value,
+T exchange_value,
atomic_memory_order /* order */) const {
STATIC_ASSERT(4 == sizeof(T));
__asm__ volatile ( "lock cmpxchgl %1,(%3)"

@@ -120,9 +120,9 @@ inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest,

template<>
template<typename T>
-inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
-T volatile* dest,
+inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
T compare_value,
+T exchange_value,
atomic_memory_order /* order */) const {
STATIC_ASSERT(8 == sizeof(T));
__asm__ __volatile__ ( "lock cmpxchgq %1,(%3)"

@@ -142,12 +142,12 @@ extern "C" {

template<>
template<typename T>
-inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
-T volatile* dest,
+inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
T compare_value,
+T exchange_value,
atomic_memory_order /* order */) const {
STATIC_ASSERT(8 == sizeof(T));
-return cmpxchg_using_helper<int64_t>(_Atomic_cmpxchg_long, exchange_value, dest, compare_value);
+return cmpxchg_using_helper<int64_t>(_Atomic_cmpxchg_long, dest, compare_value, exchange_value);
}

template<>

@@ -633,9 +633,9 @@ mmx_acs_CopyLeft:
ret


-# Support for int64_t Atomic::cmpxchg(int64_t exchange_value,
+# Support for int64_t Atomic::cmpxchg(int64_t compare_value,
# volatile int64_t* dest,
-# int64_t compare_value)
+# int64_t exchange_value)
#
.p2align 4,,15
ELF_TYPE(_Atomic_cmpxchg_long,@function)

@@ -665,4 +665,3 @@ SYMBOL(_Atomic_move_long):
movl 8(%esp), %eax # dest
fistpll (%eax)
ret
-
@@ -239,16 +239,16 @@ struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {};

template<>
template<typename T>
-inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
-T volatile* dest,
+inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
T compare_value,
+T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(4 == sizeof(T));
#ifdef ARM
-return cmpxchg_using_helper<int>(arm_compare_and_swap, exchange_value, dest, compare_value);
+return cmpxchg_using_helper<int>(arm_compare_and_swap, dest, compare_value, exchange_value);
#else
#ifdef M68K
-return cmpxchg_using_helper<int>(m68k_compare_and_swap, exchange_value, dest, compare_value);
+return cmpxchg_using_helper<int>(m68k_compare_and_swap, dest, compare_value, exchange_value);
#else
return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
#endif // M68K

@@ -257,9 +257,9 @@ inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,

template<>
template<typename T>
-inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
-T volatile* dest,
+inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
T compare_value,
+T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(8 == sizeof(T));
return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
@@ -57,9 +57,9 @@ inline T Atomic::PlatformXchg<byte_size>::operator()(T volatile* dest,

template<size_t byte_size>
template<typename T>
-inline T Atomic::PlatformCmpxchg<byte_size>::operator()(T exchange_value,
-T volatile* dest,
+inline T Atomic::PlatformCmpxchg<byte_size>::operator()(T volatile* dest,
T compare_value,
+T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(byte_size == sizeof(T));
if (order == memory_order_relaxed) {

@@ -119,22 +119,22 @@ inline int64_t reorder_cmpxchg_long_func(int64_t exchange_value,

template<>
template<typename T>
-inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
-T volatile* dest,
+inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
T compare_value,
+T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(4 == sizeof(T));
-return cmpxchg_using_helper<int32_t>(reorder_cmpxchg_func, exchange_value, dest, compare_value);
+return cmpxchg_using_helper<int32_t>(reorder_cmpxchg_func, dest, compare_value, exchange_value);
}

template<>
template<typename T>
-inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
-T volatile* dest,
+inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
T compare_value,
+T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(8 == sizeof(T));
-return cmpxchg_using_helper<int64_t>(reorder_cmpxchg_long_func, exchange_value, dest, compare_value);
+return cmpxchg_using_helper<int64_t>(reorder_cmpxchg_long_func, dest, compare_value, exchange_value);
}

#endif // OS_CPU_LINUX_ARM_ATOMIC_LINUX_ARM_HPP

@@ -232,9 +232,9 @@ inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest,

template<>
template<typename T>
-inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
-T volatile* dest,
+inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest,
T compare_value,
+T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(1 == sizeof(T));

@@ -302,9 +302,9 @@ inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,

template<>
template<typename T>
-inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
-T volatile* dest,
+inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
T compare_value,
+T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(4 == sizeof(T));

@@ -352,9 +352,9 @@ inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,

template<>
template<typename T>
-inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
-T volatile* dest,
+inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
T compare_value,
+T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(8 == sizeof(T));
@@ -289,9 +289,9 @@ struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {};

template<>
template<typename T>
-inline T Atomic::PlatformCmpxchg<4>::operator()(T xchg_val,
-T volatile* dest,
+inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
T cmp_val,
+T xchg_val,
atomic_memory_order unused) const {
STATIC_ASSERT(4 == sizeof(T));
T old;

@@ -313,9 +313,9 @@ inline T Atomic::PlatformCmpxchg<4>::operator()(T xchg_val,

template<>
template<typename T>
-inline T Atomic::PlatformCmpxchg<8>::operator()(T xchg_val,
-T volatile* dest,
+inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
T cmp_val,
+T xchg_val,
atomic_memory_order unused) const {
STATIC_ASSERT(8 == sizeof(T));
T old;

@@ -124,9 +124,9 @@ struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {};

template<>
template<typename T>
-inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
-T volatile* dest,
+inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
T compare_value,
+T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(4 == sizeof(T));
T rv;

@@ -140,9 +140,9 @@ inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,

template<>
template<typename T>
-inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
-T volatile* dest,
+inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
T compare_value,
+T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(8 == sizeof(T));
T rv;
@@ -64,9 +64,9 @@ inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,

template<>
template<typename T>
-inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
-T volatile* dest,
+inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest,
T compare_value,
+T exchange_value,
atomic_memory_order /* order */) const {
STATIC_ASSERT(1 == sizeof(T));
__asm__ volatile ("lock cmpxchgb %1,(%3)"

@@ -78,9 +78,9 @@ inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,

template<>
template<typename T>
-inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
-T volatile* dest,
+inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
T compare_value,
+T exchange_value,
atomic_memory_order /* order */) const {
STATIC_ASSERT(4 == sizeof(T));
__asm__ volatile ("lock cmpxchgl %1,(%3)"

@@ -120,9 +120,9 @@ inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest, T exchange_value,

template<>
template<typename T>
-inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
-T volatile* dest,
+inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
T compare_value,
+T exchange_value,
atomic_memory_order /* order */) const {
STATIC_ASSERT(8 == sizeof(T));
__asm__ __volatile__ ("lock cmpxchgq %1,(%3)"

@@ -142,12 +142,12 @@ extern "C" {

template<>
template<typename T>
-inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
-T volatile* dest,
+inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
T compare_value,
+T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(8 == sizeof(T));
-return cmpxchg_using_helper<int64_t>(_Atomic_cmpxchg_long, exchange_value, dest, compare_value);
+return cmpxchg_using_helper<int64_t>(_Atomic_cmpxchg_long, dest, compare_value, exchange_value);
}

template<>
@@ -1,4 +1,4 @@
-#
+#
# Copyright (c) 2004, 2017, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#

@@ -19,15 +19,15 @@
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
-#
+#



# NOTE WELL! The _Copy functions are called directly
# from server-compiler-generated code via CallLeafNoFP,
# which means that they *must* either not use floating
# point or use it in the same manner as does the server
# compiler.


.globl _Copy_conjoint_bytes
.globl _Copy_arrayof_conjoint_bytes
.globl _Copy_conjoint_jshorts_atomic

@@ -174,7 +174,7 @@ _Copy_arrayof_conjoint_bytes:
leal -1(%esi,%ecx),%eax # from + count - 1
jbe acb_CopyRight
cmpl %eax,%edi
-jbe acb_CopyLeft
+jbe acb_CopyLeft
# copy from low to high
acb_CopyRight:
cmpl $3,%ecx

@@ -262,7 +262,7 @@ _Copy_conjoint_jshorts_atomic:
leal -2(%esi,%ecx,2),%eax # from + count*2 - 2
jbe cs_CopyRight
cmpl %eax,%edi
-jbe cs_CopyLeft
+jbe cs_CopyLeft
# copy from low to high
cs_CopyRight:
# align source address at dword address boundary

@@ -283,7 +283,7 @@ cs_CopyRight:
jbe 2f # <= 32 dwords
# copy aligned dwords
rep; smovl
-jmp 4f
+jmp 4f
# copy aligned dwords
2: subl %esi,%edi
.p2align 4,,15

@@ -349,7 +349,7 @@ _Copy_arrayof_conjoint_jshorts:
leal -2(%esi,%ecx,2),%eax # from + count*2 - 2
jbe acs_CopyRight
cmpl %eax,%edi
-jbe acs_CopyLeft
+jbe acs_CopyLeft
acs_CopyRight:
movl %ecx,%eax # word count
sarl %ecx # dword count

@@ -358,10 +358,10 @@ acs_CopyRight:
jbe 2f # <= 32 dwords
# copy aligned dwords
rep; smovl
-jmp 4f
+jmp 4f
# copy aligned dwords
.space 5
-2: subl %esi,%edi
+2: subl %esi,%edi
.p2align 4,,15
3: movl (%esi),%edx
movl %edx,(%edi,%esi,1)

@@ -428,7 +428,7 @@ _Copy_arrayof_conjoint_jints:
leal -4(%esi,%ecx,4),%eax # from + count*4 - 4
jbe ci_CopyRight
cmpl %eax,%edi
-jbe ci_CopyLeft
+jbe ci_CopyLeft
ci_CopyRight:
cmpl $32,%ecx
jbe 2f # <= 32 dwords

@@ -471,7 +471,7 @@ ci_CopyLeft:
popl %edi
popl %esi
ret


# Support for void Copy::conjoint_jlongs_atomic(jlong* from,
# jlong* to,
# size_t count)

@@ -537,7 +537,7 @@ mmx_acs_CopyRight:
je 5f
cmpl $33,%ecx
jae 3f
-1: subl %esi,%edi
+1: subl %esi,%edi
.p2align 4,,15
2: movl (%esi),%edx
movl %edx,(%edi,%esi,1)

@@ -545,7 +545,7 @@ mmx_acs_CopyRight:
subl $1,%ecx
jnz 2b
addl %esi,%edi
-jmp 5f
+jmp 5f
3: smovl # align to 8 bytes, we know we are 4 byte aligned to start
subl $1,%ecx
4: .p2align 4,,15

@@ -612,9 +612,9 @@ mmx_acs_CopyLeft:
ret


-# Support for jlong Atomic::cmpxchg(jlong exchange_value,
-# volatile jlong* dest,
-# jlong compare_value)
+# Support for jlong Atomic::cmpxchg(volatile jlong* dest,
+# jlong compare_value,
+# jlong exchange_value)
#
.p2align 4,,15
.type _Atomic_cmpxchg_long,@function

@@ -643,4 +643,3 @@ _Atomic_move_long:
movl 8(%esp), %eax # dest
fistpll (%eax)
ret
-
@@ -93,9 +93,9 @@ struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {};

template<>
template<typename T>
-inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
-T volatile* dest,
+inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
T compare_value,
+T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(4 == sizeof(T));
return __sync_val_compare_and_swap(dest, compare_value, exchange_value);

@@ -103,9 +103,9 @@ inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,

template<>
template<typename T>
-inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
-T volatile* dest,
+inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
T compare_value,
+T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(8 == sizeof(T));
return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
@@ -35,7 +35,7 @@ struct Atomic::PlatformAdd {
D old_value = *dest;
while (true) {
D new_value = old_value + add_value;
-D result = cmpxchg(new_value, dest, old_value);
+D result = cmpxchg(dest, old_value, new_value);
if (result == old_value) break;
old_value = result;
}

@@ -64,7 +64,7 @@ inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest,
STATIC_ASSERT(8 == sizeof(T));
T old_value = *dest;
while (true) {
-T result = cmpxchg(exchange_value, dest, old_value);
+T result = cmpxchg(dest, old_value, exchange_value);
if (result == old_value) break;
old_value = result;
}

@@ -77,9 +77,9 @@ struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {};

template<>
template<typename T>
-inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
-T volatile* dest,
+inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
T compare_value,
+T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(4 == sizeof(T));
T rv;

@@ -93,9 +93,9 @@ inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,

template<>
template<typename T>
-inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
-T volatile* dest,
+inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
T compare_value,
+T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(8 == sizeof(T));
T rv;
@@ -104,9 +104,9 @@ inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest,

template<>
template<typename T>
-inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
-T volatile* dest,
+inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest,
T compare_value,
+T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(1 == sizeof(T));
return PrimitiveConversions::cast<T>(

@@ -117,9 +117,9 @@ inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,

template<>
template<typename T>
-inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
-T volatile* dest,
+inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
T compare_value,
+T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(4 == sizeof(T));
return PrimitiveConversions::cast<T>(

@@ -130,9 +130,9 @@ inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,

template<>
template<typename T>
-inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
-T volatile* dest,
+inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
T compare_value,
+T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(8 == sizeof(T));
return PrimitiveConversions::cast<T>(
@@ -81,27 +81,27 @@
movq %rdi, %rax
.end

-// Support for jbyte Atomic::cmpxchg(jbyte exchange_value,
-// volatile jbyte *dest,
-// jbyte compare_value)
+// Support for jbyte Atomic::cmpxchg(volatile jbyte *dest,
+// jbyte compare_value,
+// jbyte exchange_value)
.inline _Atomic_cmpxchg_byte,3
movb %dl, %al // compare_value
lock
cmpxchgb %dil, (%rsi)
.end

-// Support for jint Atomic::cmpxchg(jint exchange_value,
-// volatile jint *dest,
-// jint compare_value)
+// Support for jint Atomic::cmpxchg(volatile jint *dest,
+// int compare_value,
+// jint exchange_value)
.inline _Atomic_cmpxchg,3
movl %edx, %eax // compare_value
lock
cmpxchgl %edi, (%rsi)
.end

-// Support for jlong Atomic::cmpxchg(jlong exchange_value,
-// volatile jlong* dest,
-// jlong compare_value)
+// Support for jlong Atomic::cmpxchg(volatile jlong* dest,
+// jlong compare_value,
+// jlong exchange_value)
.inline _Atomic_cmpxchg_long,3
movq %rdx, %rax // compare_value
lock
@@ -91,15 +91,15 @@ DEFINE_STUB_XCHG(8, int64_t, os::atomic_xchg_long_func)

#undef DEFINE_STUB_XCHG

-#define DEFINE_STUB_CMPXCHG(ByteSize, StubType, StubName) \
-template<> \
-template<typename T> \
-inline T Atomic::PlatformCmpxchg<ByteSize>::operator()(T exchange_value, \
-T volatile* dest, \
-T compare_value, \
+#define DEFINE_STUB_CMPXCHG(ByteSize, StubType, StubName) \
+template<> \
+template<typename T> \
+inline T Atomic::PlatformCmpxchg<ByteSize>::operator()(T volatile* dest, \
+T compare_value, \
+T exchange_value, \
atomic_memory_order order) const { \
-STATIC_ASSERT(ByteSize == sizeof(T)); \
-return cmpxchg_using_helper<StubType>(StubName, exchange_value, dest, compare_value); \
+STATIC_ASSERT(ByteSize == sizeof(T)); \
+return cmpxchg_using_helper<StubType>(StubName, dest, compare_value, exchange_value); \
}

DEFINE_STUB_CMPXCHG(1, int8_t, os::atomic_cmpxchg_byte_func)

@@ -141,9 +141,9 @@ inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,

template<>
template<typename T>
-inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
-T volatile* dest,
+inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest,
T compare_value,
+T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(1 == sizeof(T));
// alternative for InterlockedCompareExchange

@@ -157,9 +157,9 @@ inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,

template<>
template<typename T>
-inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
-T volatile* dest,
+inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
T compare_value,
+T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(4 == sizeof(T));
// alternative for InterlockedCompareExchange

@@ -173,9 +173,9 @@ inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,

template<>
template<typename T>
-inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
-T volatile* dest,
+inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
T compare_value,
+T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(8 == sizeof(T));
int32_t ex_lo = (int32_t)exchange_value;
@@ -347,7 +347,7 @@ void AOTCodeHeap::publish_aot(const methodHandle& mh, AOTMethodData* method_data
AOTCompiledMethod *aot = new AOTCompiledMethod(code, mh(), meta, metadata_table, metadata_size, state_adr, this, name, code_id, _aot_id);
assert(_code_to_aot[code_id]._aot == NULL, "should be not initialized");
_code_to_aot[code_id]._aot = aot; // Should set this first
-if (Atomic::cmpxchg(in_use, &_code_to_aot[code_id]._state, not_set) != not_set) {
+if (Atomic::cmpxchg(&_code_to_aot[code_id]._state, not_set, in_use) != not_set) {
_code_to_aot[code_id]._aot = NULL; // Clean
} else { // success
// Publish method

@@ -410,7 +410,7 @@ void AOTCodeHeap::register_stubs() {
AOTCompiledMethod* aot = new AOTCompiledMethod(entry, NULL, meta, metadata_table, metadata_size, state_adr, this, full_name, code_id, i);
assert(_code_to_aot[code_id]._aot == NULL, "should be not initialized");
_code_to_aot[code_id]._aot = aot;
-if (Atomic::cmpxchg(in_use, &_code_to_aot[code_id]._state, not_set) != not_set) {
+if (Atomic::cmpxchg(&_code_to_aot[code_id]._state, not_set, in_use) != not_set) {
fatal("stab '%s' code state is %d", full_name, _code_to_aot[code_id]._state);
}
// Adjust code buffer boundaries only for stubs because they are last in the buffer.

@@ -721,7 +721,7 @@ void AOTCodeHeap::sweep_dependent_methods(int* indexes, int methods_cnt) {
for (int i = 0; i < methods_cnt; ++i) {
int code_id = indexes[i];
// Invalidate aot code.
-if (Atomic::cmpxchg(invalid, &_code_to_aot[code_id]._state, not_set) != not_set) {
+if (Atomic::cmpxchg(&_code_to_aot[code_id]._state, not_set, invalid) != not_set) {
if (_code_to_aot[code_id]._state == in_use) {
AOTCompiledMethod* aot = _code_to_aot[code_id]._aot;
assert(aot != NULL, "aot should be set");

@@ -273,7 +273,7 @@ void ClassLoaderData::clear_claim(int claim) {
return;
}
int new_claim = old_claim & ~claim;
-if (Atomic::cmpxchg(new_claim, &_claim, old_claim) == old_claim) {
+if (Atomic::cmpxchg(&_claim, old_claim, new_claim) == old_claim) {
return;
}
}

@@ -286,7 +286,7 @@ bool ClassLoaderData::try_claim(int claim) {
return false;
}
int new_claim = old_claim | claim;
-if (Atomic::cmpxchg(new_claim, &_claim, old_claim) == old_claim) {
+if (Atomic::cmpxchg(&_claim, old_claim, new_claim) == old_claim) {
return true;
}
}

@@ -676,7 +676,7 @@ Klass* ClassLoaderDataGraphKlassIteratorAtomic::next_klass() {
while (head != NULL) {
Klass* next = next_klass_in_cldg(head);

-Klass* old_head = Atomic::cmpxchg(next, &_next_klass, head);
+Klass* old_head = Atomic::cmpxchg(&_next_klass, head, next);

if (old_head == head) {
return head; // Won the CAS.

@@ -749,7 +749,7 @@ void CodeCache::release_exception_cache(ExceptionCache* entry) {
for (;;) {
ExceptionCache* purge_list_head = Atomic::load(&_exception_cache_purge_list);
entry->set_purge_list_next(purge_list_head);
-if (Atomic::cmpxchg(entry, &_exception_cache_purge_list, purge_list_head) == purge_list_head) {
+if (Atomic::cmpxchg(&_exception_cache_purge_list, purge_list_head, entry) == purge_list_head) {
break;
}
}

@@ -133,7 +133,7 @@ void CompiledMethod::add_exception_cache_entry(ExceptionCache* new_entry) {
// next pointers always point at live ExceptionCaches, that are not removed due
// to concurrent ExceptionCache cleanup.
ExceptionCache* next = ec->next();
-if (Atomic::cmpxchg(next, &_exception_cache, ec) == ec) {
+if (Atomic::cmpxchg(&_exception_cache, ec, next) == ec) {
CodeCache::release_exception_cache(ec);
}
continue;

@@ -143,7 +143,7 @@ void CompiledMethod::add_exception_cache_entry(ExceptionCache* new_entry) {
new_entry->set_next(ec);
}
}
-if (Atomic::cmpxchg(new_entry, &_exception_cache, ec) == ec) {
+if (Atomic::cmpxchg(&_exception_cache, ec, new_entry) == ec) {
return;
}
}

@@ -176,7 +176,7 @@ void CompiledMethod::clean_exception_cache() {
// Try to clean head; this is contended by concurrent inserts, that
// both lazily clean the head, and insert entries at the head. If
// the CAS fails, the operation is restarted.
-if (Atomic::cmpxchg(next, &_exception_cache, curr) != curr) {
+if (Atomic::cmpxchg(&_exception_cache, curr, next) != curr) {
prev = NULL;
curr = exception_cache_acquire();
continue;
@@ -101,7 +101,7 @@ void DependencyContext::add_dependent_nmethod(nmethod* nm) {
for (;;) {
nmethodBucket* head = Atomic::load(_dependency_context_addr);
new_head->set_next(head);
-if (Atomic::cmpxchg(new_head, _dependency_context_addr, head) == head) {
+if (Atomic::cmpxchg(_dependency_context_addr, head, new_head) == head) {
break;
}
}

@@ -124,7 +124,7 @@ void DependencyContext::release(nmethodBucket* b) {
for (;;) {
nmethodBucket* purge_list_head = Atomic::load(&_purge_list);
b->set_purge_list_next(purge_list_head);
-if (Atomic::cmpxchg(b, &_purge_list, purge_list_head) == purge_list_head) {
+if (Atomic::cmpxchg(&_purge_list, purge_list_head, b) == purge_list_head) {
break;
}
}

@@ -272,7 +272,7 @@ bool DependencyContext::claim_cleanup() {
if (last_cleanup >= cleaning_epoch) {
return false;
}
-return Atomic::cmpxchg(cleaning_epoch, _last_cleanup_addr, last_cleanup) == last_cleanup;
+return Atomic::cmpxchg(_last_cleanup_addr, last_cleanup, cleaning_epoch) == last_cleanup;
}

// Retrieve the first nmethodBucket that has a dependent that does not correspond to

@@ -291,7 +291,7 @@ nmethodBucket* DependencyContext::dependencies_not_unloading() {
// Unstable load of head w.r.t. head->next
continue;
}
-if (Atomic::cmpxchg(head_next, _dependency_context_addr, head) == head) {
+if (Atomic::cmpxchg(_dependency_context_addr, head, head_next) == head) {
// Release is_unloading entries if unlinking was claimed
DependencyContext::release(head);
}

@@ -345,7 +345,7 @@ nmethodBucket* nmethodBucket::next_not_unloading() {
// Unstable load of next w.r.t. next->next
continue;
}
-if (Atomic::cmpxchg(next_next, &_next, next) == next) {
+if (Atomic::cmpxchg(&_next, next, next_next) == next) {
// Release is_unloading entries if unlinking was claimed
DependencyContext::release(next);
}

@@ -1150,7 +1150,7 @@ bool nmethod::try_transition(int new_state_int) {
// Ensure monotonicity of transitions.
return false;
}
-if (Atomic::cmpxchg(new_state, &_state, old_state) == old_state) {
+if (Atomic::cmpxchg(&_state, old_state, new_state) == old_state) {
return true;
}
}

@@ -1849,7 +1849,7 @@ bool nmethod::oops_do_try_claim_weak_request() {
assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");

if ((_oops_do_mark_link == NULL) &&
-(Atomic::replace_if_null(mark_link(this, claim_weak_request_tag), &_oops_do_mark_link))) {
+(Atomic::replace_if_null(&_oops_do_mark_link, mark_link(this, claim_weak_request_tag)))) {
oops_do_log_change("oops_do, mark weak request");
return true;
}

@@ -1863,7 +1863,7 @@ void nmethod::oops_do_set_strong_done(nmethod* old_head) {
nmethod::oops_do_mark_link* nmethod::oops_do_try_claim_strong_done() {
assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");

-oops_do_mark_link* old_next = Atomic::cmpxchg(mark_link(this, claim_strong_done_tag), &_oops_do_mark_link, mark_link(NULL, claim_weak_request_tag));
+oops_do_mark_link* old_next = Atomic::cmpxchg(&_oops_do_mark_link, mark_link(NULL, claim_weak_request_tag), mark_link(this, claim_strong_done_tag));
if (old_next == NULL) {
oops_do_log_change("oops_do, mark strong done");
}

@@ -1874,7 +1874,7 @@ nmethod::oops_do_mark_link* nmethod::oops_do_try_add_strong_request(nmethod::oop
assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");
assert(next == mark_link(this, claim_weak_request_tag), "Should be claimed as weak");

-oops_do_mark_link* old_next = Atomic::cmpxchg(mark_link(this, claim_strong_request_tag), &_oops_do_mark_link, next);
+oops_do_mark_link* old_next = Atomic::cmpxchg(&_oops_do_mark_link, next, mark_link(this, claim_strong_request_tag));
if (old_next == next) {
oops_do_log_change("oops_do, mark strong request");
}

@@ -1885,7 +1885,7 @@ bool nmethod::oops_do_try_claim_weak_done_as_strong_done(nmethod::oops_do_mark_l
assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");
assert(extract_state(next) == claim_weak_done_tag, "Should be claimed as weak done");

-oops_do_mark_link* old_next = Atomic::cmpxchg(mark_link(extract_nmethod(next), claim_strong_done_tag), &_oops_do_mark_link, next);
+oops_do_mark_link* old_next = Atomic::cmpxchg(&_oops_do_mark_link, next, mark_link(extract_nmethod(next), claim_strong_done_tag));
if (old_next == next) {
oops_do_log_change("oops_do, mark weak done -> mark strong done");
return true;

@@ -1906,7 +1906,7 @@ nmethod* nmethod::oops_do_try_add_to_list_as_weak_done() {
old_head = this;
}
// Try to install end of list and weak done tag.
-if (Atomic::cmpxchg(mark_link(old_head, claim_weak_done_tag), &_oops_do_mark_link, mark_link(this, claim_weak_request_tag)) == mark_link(this, claim_weak_request_tag)) {
+if (Atomic::cmpxchg(&_oops_do_mark_link, mark_link(this, claim_weak_request_tag), mark_link(old_head, claim_weak_done_tag)) == mark_link(this, claim_weak_request_tag)) {
oops_do_log_change("oops_do, mark weak done");
return NULL;
} else {
@@ -335,7 +335,7 @@ public:
static bool should_compile_new_jobs() { return UseCompiler && (_should_compile_new_jobs == run_compilation); }
static bool set_should_compile_new_jobs(jint new_state) {
// Return success if the current caller set it
-jint old = Atomic::cmpxchg(new_state, &_should_compile_new_jobs, 1-new_state);
+jint old = Atomic::cmpxchg(&_should_compile_new_jobs, 1-new_state, new_state);
bool success = (old == (1-new_state));
if (success) {
if (new_state == run_compilation) {

@@ -359,7 +359,7 @@ public:
static void handle_full_code_cache(int code_blob_type);
// Ensures that warning is only printed once.
static bool should_print_compiler_warning() {
-jint old = Atomic::cmpxchg(1, &_print_compilation_warning, 0);
+jint old = Atomic::cmpxchg(&_print_compilation_warning, 0, 1);
return old == 0;
}
// Return total compilation ticks

@@ -156,7 +156,7 @@ HeapWord* EpsilonHeap::allocate_work(size_t size) {
// Allocation successful, update counters
{
size_t last = _last_counter_update;
-if ((used - last >= _step_counter_update) && Atomic::cmpxchg(used, &_last_counter_update, last) == last) {
+if ((used - last >= _step_counter_update) && Atomic::cmpxchg(&_last_counter_update, last, used) == last) {
_monitoring_support->update_counters();
}
}

@@ -164,7 +164,7 @@ HeapWord* EpsilonHeap::allocate_work(size_t size) {
// ...and print the occupancy line, if needed
{
size_t last = _last_heap_print;
-if ((used - last >= _step_heap_print) && Atomic::cmpxchg(used, &_last_heap_print, last) == last) {
+if ((used - last >= _step_heap_print) && Atomic::cmpxchg(&_last_heap_print, last, used) == last) {
print_heap_info(used);
print_metaspace_info();
}
@@ -170,7 +170,7 @@ void G1CodeRootSet::allocate_small_table() {
void G1CodeRootSetTable::purge_list_append(G1CodeRootSetTable* table) {
for (;;) {
table->_purge_next = _purge_list;
-G1CodeRootSetTable* old = Atomic::cmpxchg(table, &_purge_list, table->_purge_next);
+G1CodeRootSetTable* old = Atomic::cmpxchg(&_purge_list, table->_purge_next, table);
if (old == table->_purge_next) {
break;
}

@@ -3377,7 +3377,7 @@ class G1RedirtyLoggedCardsTask : public AbstractGangTask {
BufferNode* next = Atomic::load(&_nodes);
while (next != NULL) {
BufferNode* node = next;
-next = Atomic::cmpxchg(node->next(), &_nodes, node);
+next = Atomic::cmpxchg(&_nodes, node, node->next());
if (next == node) {
cl->apply_to_buffer(node, buffer_size, worker_id);
next = node->next();

@@ -1906,7 +1906,7 @@ G1ConcurrentMark::claim_region(uint worker_id) {
HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;

// Is the gap between reading the finger and doing the CAS too long?
-HeapWord* res = Atomic::cmpxchg(end, &_finger, finger);
+HeapWord* res = Atomic::cmpxchg(&_finger, finger, end);
if (res == finger && curr_region != NULL) {
// we succeeded
HeapWord* bottom = curr_region->bottom();

@@ -79,7 +79,7 @@ uint G1FreeIdSet::claim_par_id() {
index = head_index(old_head);
assert(index < _size, "invariant");
uintx new_head = make_head(_next[index], old_head);
-new_head = Atomic::cmpxchg(new_head, &_head, old_head);
+new_head = Atomic::cmpxchg(&_head, old_head, new_head);
if (new_head == old_head) break;
old_head = new_head;
}

@@ -95,7 +95,7 @@ void G1FreeIdSet::release_par_id(uint id) {
while (true) {
_next[index] = head_index(old_head);
uintx new_head = make_head(index, old_head);
-new_head = Atomic::cmpxchg(new_head, &_head, old_head);
+new_head = Atomic::cmpxchg(&_head, old_head, new_head);
if (new_head == old_head) break;
old_head = new_head;
}

@@ -78,9 +78,9 @@ CardTable::CardValue* G1HotCardCache::insert(CardValue* card_ptr) {
// card_ptr in favor of the other option, which would be starting over. This
// should be OK since card_ptr will likely be the older card already when/if
// this ever happens.
-CardValue* previous_ptr = Atomic::cmpxchg(card_ptr,
-&_hot_cache[masked_index],
-current_ptr);
+CardValue* previous_ptr = Atomic::cmpxchg(&_hot_cache[masked_index],
+current_ptr,
+card_ptr);
return (previous_ptr == current_ptr) ? previous_ptr : card_ptr;
}
@@ -39,7 +39,7 @@ bool JVMCICleaningTask::claim_cleaning_task() {
return false;
}

-return Atomic::cmpxchg(1, &_cleaning_claimed, 0) == 0;
+return Atomic::cmpxchg(&_cleaning_claimed, 0, 1) == 0;
}

void JVMCICleaningTask::work(bool unloading_occurred) {

@@ -177,7 +177,7 @@ private:
return;
}

-bool marked_as_dirty = Atomic::cmpxchg(true, &_contains[region], false) == false;
+bool marked_as_dirty = Atomic::cmpxchg(&_contains[region], false, true) == false;
if (marked_as_dirty) {
uint allocated = Atomic::add(&_cur_idx, 1u) - 1;
_buffer[allocated] = region;

@@ -437,7 +437,7 @@ public:
if (_collection_set_iter_state[region]) {
return false;
}
-return !Atomic::cmpxchg(true, &_collection_set_iter_state[region], false);
+return !Atomic::cmpxchg(&_collection_set_iter_state[region], false, true);
}

bool has_cards_to_scan(uint region) {

@@ -1137,7 +1137,7 @@ public:
if (_initial_evacuation &&
p->fast_reclaim_humongous_candidates() > 0 &&
!_fast_reclaim_handled &&
-!Atomic::cmpxchg(true, &_fast_reclaim_handled, false)) {
+!Atomic::cmpxchg(&_fast_reclaim_handled, false, true)) {

G1GCParPhaseTimesTracker x(p, G1GCPhaseTimes::MergeER, worker_id);
@@ -61,7 +61,7 @@ inline HeapWord* HeapRegion::par_allocate_impl(size_t min_word_size,
size_t want_to_allocate = MIN2(available, desired_word_size);
if (want_to_allocate >= min_word_size) {
HeapWord* new_top = obj + want_to_allocate;
-HeapWord* result = Atomic::cmpxchg(new_top, &_top, obj);
+HeapWord* result = Atomic::cmpxchg(&_top, obj, new_top);
// result can be one of two:
// the old top value: the exchange succeeded
// otherwise: the new value of the top is returned.

@@ -609,6 +609,6 @@ bool HeapRegionClaimer::is_region_claimed(uint region_index) const {

bool HeapRegionClaimer::claim_region(uint region_index) {
assert(region_index < _n_regions, "Invalid index.");
-uint old_val = Atomic::cmpxchg(Claimed, &_claims[region_index], Unclaimed);
+uint old_val = Atomic::cmpxchg(&_claims[region_index], Unclaimed, Claimed);
return old_val == Unclaimed;
}

@@ -46,7 +46,7 @@ PerRegionTable* PerRegionTable::alloc(HeapRegion* hr) {
PerRegionTable* fl = _free_list;
while (fl != NULL) {
PerRegionTable* nxt = fl->next();
-PerRegionTable* res = Atomic::cmpxchg(nxt, &_free_list, fl);
+PerRegionTable* res = Atomic::cmpxchg(&_free_list, fl, nxt);
if (res == fl) {
fl->init(hr, true);
return fl;

@@ -229,7 +229,7 @@ public:
while (true) {
PerRegionTable* fl = _free_list;
last->set_next(fl);
-PerRegionTable* res = Atomic::cmpxchg(prt, &_free_list, fl);
+PerRegionTable* res = Atomic::cmpxchg(&_free_list, fl, prt);
if (res == fl) {
return;
}
@ -864,7 +864,7 @@ HeapWord* MutableNUMASpace::cas_allocate(size_t size) {
if (p != NULL) {
HeapWord* cur_top, *cur_chunk_top = p + size;
while ((cur_top = top()) < cur_chunk_top) { // Keep _top updated.
if (Atomic::cmpxchg(cur_chunk_top, top_addr(), cur_top) == cur_top) {
if (Atomic::cmpxchg(top_addr(), cur_top, cur_chunk_top) == cur_top) {
break;
}
}

@ -194,7 +194,7 @@ HeapWord* MutableSpace::cas_allocate(size_t size) {
HeapWord* obj = top();
if (pointer_delta(end(), obj) >= size) {
HeapWord* new_top = obj + size;
HeapWord* result = Atomic::cmpxchg(new_top, top_addr(), obj);
HeapWord* result = Atomic::cmpxchg(top_addr(), obj, new_top);
// result can be one of two:
// the old top value: the exchange succeeded
// otherwise: the new value of the top is returned.

@ -213,7 +213,7 @@ HeapWord* MutableSpace::cas_allocate(size_t size) {
// Try to deallocate previous allocation. Returns true upon success.
bool MutableSpace::cas_deallocate(HeapWord *obj, size_t size) {
HeapWord* expected_top = obj + size;
return Atomic::cmpxchg(obj, top_addr(), expected_top) == expected_top;
return Atomic::cmpxchg(top_addr(), expected_top, obj) == expected_top;
}

void MutableSpace::oop_iterate(OopIterateClosure* cl) {
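The allocation paths touched above all follow the same bump-the-pointer CAS loop. A rough sketch of that loop in plain C++ follows (simplified types, not the HotSpot classes); the comment marks where the reordered Atomic::cmpxchg call would sit.

#include <atomic>
#include <cstddef>

// Simplified parallel allocation: bump 'top' by 'size' bytes with a CAS,
// retrying if another thread wins the race.
struct Space {
  std::atomic<char*> top{nullptr};
  char* end = nullptr;

  char* par_allocate(size_t size) {
    while (true) {
      char* obj = top.load();
      if ((size_t)(end - obj) < size) {
        return nullptr;                        // not enough room left
      }
      char* new_top = obj + size;
      // HotSpot now spells this Atomic::cmpxchg(&_top, obj, new_top); the CAS
      // succeeds only if 'top' still holds the value read above.
      char* expected = obj;
      if (top.compare_exchange_strong(expected, new_top)) {
        return obj;                            // the exchange succeeded
      }
      // otherwise another thread moved top; loop and retry with the new value
    }
  }
};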
@ -584,7 +584,7 @@ inline void ParallelCompactData::RegionData::set_highest_ref(HeapWord* addr)
#ifdef ASSERT
HeapWord* tmp = _highest_ref;
while (addr > tmp) {
tmp = Atomic::cmpxchg(addr, &_highest_ref, tmp);
tmp = Atomic::cmpxchg(&_highest_ref, tmp, addr);
}
#endif // #ifdef ASSERT
}

@ -592,7 +592,7 @@ inline void ParallelCompactData::RegionData::set_highest_ref(HeapWord* addr)
inline bool ParallelCompactData::RegionData::claim()
{
const region_sz_t los = static_cast<region_sz_t>(live_obj_size());
const region_sz_t old = Atomic::cmpxchg(dc_claimed | los, &_dc_and_los, los);
const region_sz_t old = Atomic::cmpxchg(&_dc_and_los, los, dc_claimed | los);
return old == los;
}

@ -211,13 +211,13 @@ public:
}

template <typename T>
static T atomic_cmpxchg_in_heap(T new_value, T* addr, T compare_value) {
return Raw::atomic_cmpxchg(new_value, addr, compare_value);
static T atomic_cmpxchg_in_heap(T* addr, T compare_value, T new_value) {
return Raw::atomic_cmpxchg(addr, compare_value, new_value);
}

template <typename T>
static T atomic_cmpxchg_in_heap_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
return Raw::atomic_cmpxchg_at(new_value, base, offset, compare_value);
static T atomic_cmpxchg_in_heap_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
return Raw::atomic_cmpxchg_at(base, offset, compare_value, new_value);
}

template <typename T>

@ -261,12 +261,12 @@ public:
}

template <typename T>
static oop oop_atomic_cmpxchg_in_heap(oop new_value, T* addr, oop compare_value) {
return Raw::oop_atomic_cmpxchg(new_value, addr, compare_value);
static oop oop_atomic_cmpxchg_in_heap(T* addr, oop compare_value, oop new_value) {
return Raw::oop_atomic_cmpxchg(addr, compare_value, new_value);
}

static oop oop_atomic_cmpxchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset, oop compare_value) {
return Raw::oop_atomic_cmpxchg_at(new_value, base, offset, compare_value);
static oop oop_atomic_cmpxchg_in_heap_at(oop base, ptrdiff_t offset, oop compare_value, oop new_value) {
return Raw::oop_atomic_cmpxchg_at(base, offset, compare_value, new_value);
}

template <typename T>

@ -297,8 +297,8 @@ public:
}

template <typename T>
static oop oop_atomic_cmpxchg_not_in_heap(oop new_value, T* addr, oop compare_value) {
return Raw::oop_atomic_cmpxchg(new_value, addr, compare_value);
static oop oop_atomic_cmpxchg_not_in_heap(T* addr, oop compare_value, oop new_value) {
return Raw::oop_atomic_cmpxchg(addr, compare_value, new_value);
}

template <typename T>
@ -139,7 +139,7 @@ inline bool ClearNoncleanCardWrapper::clear_card_parallel(CardValue* entry) {
if (CardTableRS::card_is_dirty_wrt_gen_iter(entry_val)
|| _ct->is_prev_youngergen_card_val(entry_val)) {
CardValue res =
Atomic::cmpxchg(CardTableRS::clean_card_val(), entry, entry_val);
Atomic::cmpxchg(entry, entry_val, CardTableRS::clean_card_val());
if (res == entry_val) {
break;
} else {

@ -264,7 +264,7 @@ void CardTableRS::write_ref_field_gc_par(void* field, oop new_val) {
// Mark it as both cur and prev youngergen; card cleaning thread will
// eventually remove the previous stuff.
CardValue new_val = cur_youngergen_and_prev_nonclean_card;
CardValue res = Atomic::cmpxchg(new_val, entry, entry_val);
CardValue res = Atomic::cmpxchg(entry, entry_val, new_val);
// Did the CAS succeed?
if (res == entry_val) return;
// Otherwise, retry, to see the new value.

@ -79,7 +79,7 @@ public:
template <typename T>
static void oop_store_in_heap(T* addr, oop value);
template <typename T>
static oop oop_atomic_cmpxchg_in_heap(oop new_value, T* addr, oop compare_value);
static oop oop_atomic_cmpxchg_in_heap(T* addr, oop compare_value, oop new_value);
template <typename T>
static oop oop_atomic_xchg_in_heap(T* addr, oop new_value);

@ -98,8 +98,8 @@ public:
return oop_atomic_xchg_in_heap(AccessInternal::oop_field_addr<decorators>(base, offset), new_value);
}

static oop oop_atomic_cmpxchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset, oop compare_value) {
return oop_atomic_cmpxchg_in_heap(new_value, AccessInternal::oop_field_addr<decorators>(base, offset), compare_value);
static oop oop_atomic_cmpxchg_in_heap_at(oop base, ptrdiff_t offset, oop compare_value, oop new_value) {
return oop_atomic_cmpxchg_in_heap(AccessInternal::oop_field_addr<decorators>(base, offset), compare_value, new_value);
}
};
};

@ -67,10 +67,10 @@ oop_store_in_heap(T* addr, oop value) {
template <DecoratorSet decorators, typename BarrierSetT>
template <typename T>
inline oop ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT>::
oop_atomic_cmpxchg_in_heap(oop new_value, T* addr, oop compare_value) {
oop_atomic_cmpxchg_in_heap(T* addr, oop compare_value, oop new_value) {
BarrierSetT *bs = barrier_set_cast<BarrierSetT>(barrier_set());
bs->template write_ref_field_pre<decorators>(addr);
oop result = Raw::oop_atomic_cmpxchg(new_value, addr, compare_value);
oop result = Raw::oop_atomic_cmpxchg(addr, compare_value, new_value);
if (result == compare_value) {
bs->template write_ref_field_post<decorators>(addr, new_value);
}
@ -307,7 +307,7 @@ oop* OopStorage::Block::allocate() {
assert(!is_full_bitmask(allocated), "attempt to allocate from full block");
unsigned index = count_trailing_zeros(~allocated);
uintx new_value = allocated | bitmask_for_index(index);
uintx fetched = Atomic::cmpxchg(new_value, &_allocated_bitmask, allocated);
uintx fetched = Atomic::cmpxchg(&_allocated_bitmask, allocated, new_value);
if (fetched == allocated) {
return get_pointer(index); // CAS succeeded; return entry for index.
}

@ -595,7 +595,7 @@ void OopStorage::Block::release_entries(uintx releasing, OopStorage* owner) {
while (true) {
assert((releasing & ~old_allocated) == 0, "releasing unallocated entries");
uintx new_value = old_allocated ^ releasing;
uintx fetched = Atomic::cmpxchg(new_value, &_allocated_bitmask, old_allocated);
uintx fetched = Atomic::cmpxchg(&_allocated_bitmask, old_allocated, new_value);
if (fetched == old_allocated) break; // Successful update.
old_allocated = fetched; // Retry with updated bitmask.
}

@ -614,12 +614,12 @@ void OopStorage::Block::release_entries(uintx releasing, OopStorage* owner) {
// then someone else has made such a claim and the deferred update has not
// yet been processed and will include our change, so we don't need to do
// anything further.
if (Atomic::replace_if_null(this, &_deferred_updates_next)) {
if (Atomic::replace_if_null(&_deferred_updates_next, this)) {
// Successfully claimed. Push, with self-loop for end-of-list.
Block* head = owner->_deferred_updates;
while (true) {
_deferred_updates_next = (head == NULL) ? this : head;
Block* fetched = Atomic::cmpxchg(this, &owner->_deferred_updates, head);
Block* fetched = Atomic::cmpxchg(&owner->_deferred_updates, head, this);
if (fetched == head) break; // Successful update.
head = fetched; // Retry with updated head.
}

@ -651,7 +651,7 @@ bool OopStorage::reduce_deferred_updates() {
// Try atomic pop of block from list.
Block* tail = block->deferred_updates_next();
if (block == tail) tail = NULL; // Handle self-loop end marker.
Block* fetched = Atomic::cmpxchg(tail, &_deferred_updates, block);
Block* fetched = Atomic::cmpxchg(&_deferred_updates, block, tail);
if (fetched == block) break; // Update successful.
block = fetched; // Retry with updated block.
}

@ -825,7 +825,7 @@ bool OopStorage::has_cleanup_work_and_reset() {
// Set the request flag false and return its old value.
// Needs to be atomic to avoid dropping a concurrent request.
// Can't use Atomic::xchg, which may not support bool.
return Atomic::cmpxchg(false, &needs_cleanup_requested, true);
return Atomic::cmpxchg(&needs_cleanup_requested, true, false);
}

// Record that cleanup is needed, without notifying the Service thread.
@ -94,7 +94,7 @@ void CodeCacheUnloadingTask::claim_nmethods(CompiledMethod** claimed_nmethods, i
}
}

} while (Atomic::cmpxchg(last.method(), &_claimed_nmethod, first) != first);
} while (Atomic::cmpxchg(&_claimed_nmethod, first, last.method()) != first);
}

void CodeCacheUnloadingTask::work(uint worker_id) {

@ -130,7 +130,7 @@ bool KlassCleaningTask::claim_clean_klass_tree_task() {
return false;
}

return Atomic::cmpxchg(1, &_clean_klass_tree_claimed, 0) == 0;
return Atomic::cmpxchg(&_clean_klass_tree_claimed, 0, 1) == 0;
}

InstanceKlass* KlassCleaningTask::claim_next_klass() {

@ -197,7 +197,7 @@ void BufferNode::Allocator::release(BufferNode* node) {
bool BufferNode::Allocator::try_transfer_pending() {
// Attempt to claim the lock.
if (Atomic::load(&_transfer_lock) || // Skip CAS if likely to fail.
Atomic::cmpxchg(true, &_transfer_lock, false)) {
Atomic::cmpxchg(&_transfer_lock, false, true)) {
return false;
}
// Have the lock; perform the transfer.

@ -1031,7 +1031,7 @@ ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& refs_list,
// The last ref must have its discovered field pointing to itself.
oop next_discovered = (current_head != NULL) ? current_head : obj;

oop retest = HeapAccess<AS_NO_KEEPALIVE>::oop_atomic_cmpxchg(next_discovered, discovered_addr, oop(NULL));
oop retest = HeapAccess<AS_NO_KEEPALIVE>::oop_atomic_cmpxchg(discovered_addr, oop(NULL), next_discovered);

if (retest == NULL) {
// This thread just won the right to enqueue the object.

@ -136,7 +136,7 @@ static void increment_count(volatile size_t* cfptr, size_t threshold) {
value += 2;
assert(value > old, "overflow");
if (value > threshold) value |= 1;
value = Atomic::cmpxchg(value, cfptr, old);
value = Atomic::cmpxchg(cfptr, old, value);
} while (value != old);
}

@ -149,7 +149,7 @@ static void decrement_count(volatile size_t* cfptr) {
old = value;
value -= 2;
if (value <= 1) value = 0;
value = Atomic::cmpxchg(value, cfptr, old);
value = Atomic::cmpxchg(cfptr, old, value);
} while (value != old);
}
@ -554,7 +554,7 @@ inline HeapWord* ContiguousSpace::par_allocate_impl(size_t size) {
HeapWord* obj = top();
if (pointer_delta(end(), obj) >= size) {
HeapWord* new_top = obj + size;
HeapWord* result = Atomic::cmpxchg(new_top, top_addr(), obj);
HeapWord* result = Atomic::cmpxchg(top_addr(), obj, new_top);
// result can be one of two:
// the old top value: the exchange succeeded
// otherwise: the new value of the top is returned.

@ -244,7 +244,7 @@ bool ParallelTaskTerminator::complete_or_exit_termination() {
return true;
}
expected_value = current_offered;
} while ((current_offered = Atomic::cmpxchg(current_offered - 1, &_offered_termination, current_offered)) != expected_value);
} while ((current_offered = Atomic::cmpxchg(&_offered_termination, current_offered, current_offered - 1)) != expected_value);

assert(_offered_termination < _n_threads, "Invariant");
return false;

@ -321,7 +321,7 @@ GenericTaskQueueSet<T, F>::steal(uint queue_num, E& t) {

template <unsigned int N, MEMFLAGS F>
inline typename TaskQueueSuper<N, F>::Age TaskQueueSuper<N, F>::Age::cmpxchg(const Age new_age, const Age old_age) volatile {
return Atomic::cmpxchg(new_age._data, &_data, old_age._data);
return Atomic::cmpxchg(&_data, old_age._data, new_age._data);
}

template<class E, MEMFLAGS F, unsigned int N>

@ -426,7 +426,7 @@ bool SubTasksDone::try_claim_task(uint t) {
assert(t < _n_tasks, "bad task id.");
uint old = _tasks[t];
if (old == 0) {
old = Atomic::cmpxchg(1u, &_tasks[t], 0u);
old = Atomic::cmpxchg(&_tasks[t], 0u, 1u);
}
bool res = old == 0;
#ifdef ASSERT

@ -443,7 +443,7 @@ void SubTasksDone::all_tasks_completed(uint n_threads) {
uint old;
do {
old = observed;
observed = Atomic::cmpxchg(old+1, &_threads_completed, old);
observed = Atomic::cmpxchg(&_threads_completed, old, old+1);
} while (observed != old);
// If this was the last thread checking in, clear the tasks.
uint adjusted_thread_count = (n_threads == 0 ? 1 : n_threads);

@ -471,7 +471,7 @@ bool SequentialSubTasksDone::valid() {
bool SequentialSubTasksDone::try_claim_task(uint& t) {
t = _n_claimed;
while (t < _n_tasks) {
uint res = Atomic::cmpxchg(t+1, &_n_claimed, t);
uint res = Atomic::cmpxchg(&_n_claimed, t, t+1);
if (res == t) {
return true;
}

@ -483,7 +483,7 @@ bool SequentialSubTasksDone::try_claim_task(uint& t) {
bool SequentialSubTasksDone::all_tasks_completed() {
uint complete = _n_completed;
while (true) {
uint res = Atomic::cmpxchg(complete+1, &_n_completed, complete);
uint res = Atomic::cmpxchg(&_n_completed, complete, complete+1);
if (res == complete) {
break;
}
@ -142,7 +142,7 @@ public:
typedef BarrierSet::AccessBarrier<decorators, BarrierSetT> Raw;

template <typename T>
static oop oop_atomic_cmpxchg_in_heap_impl(oop new_value, T* addr, oop compare_value);
static oop oop_atomic_cmpxchg_in_heap_impl(T* addr, oop compare_value, oop new_value);

template <typename T>
static oop oop_atomic_xchg_in_heap_impl(T* addr, oop new_value);

@ -160,8 +160,8 @@ public:
static void oop_store_in_heap_at(oop base, ptrdiff_t offset, oop value);

template <typename T>
static oop oop_atomic_cmpxchg_in_heap(oop new_value, T* addr, oop compare_value);
static oop oop_atomic_cmpxchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset, oop compare_value);
static oop oop_atomic_cmpxchg_in_heap(T* addr, oop compare_value, oop new_value);
static oop oop_atomic_cmpxchg_in_heap_at(oop base, ptrdiff_t offset, oop compare_value, oop new_value);

template <typename T>
static oop oop_atomic_xchg_in_heap(T* addr, oop new_value);

@ -184,7 +184,7 @@ public:
static void oop_store_not_in_heap(T* addr, oop value);

template <typename T>
static oop oop_atomic_cmpxchg_not_in_heap(oop new_value, T* addr, oop compare_value);
static oop oop_atomic_cmpxchg_not_in_heap(T* addr, oop compare_value, oop new_value);

template <typename T>
static oop oop_atomic_xchg_not_in_heap(T* addr, oop new_value);

@ -99,12 +99,12 @@ inline void ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_st
template <DecoratorSet decorators, typename BarrierSetT>
template <typename T>
inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxchg_not_in_heap(oop new_value, T* addr, oop compare_value) {
inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxchg_not_in_heap(T* addr, oop compare_value, oop new_value) {
oop res;
oop expected = compare_value;
do {
compare_value = expected;
res = Raw::oop_atomic_cmpxchg(new_value, addr, compare_value);
res = Raw::oop_atomic_cmpxchg(addr, compare_value, new_value);
expected = res;
} while ((compare_value != expected) && (resolve_forwarded(compare_value) == resolve_forwarded(expected)));
if (res != NULL) {

@ -116,9 +116,9 @@ inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_ato
template <DecoratorSet decorators, typename BarrierSetT>
template <typename T>
inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxchg_in_heap_impl(oop new_value, T* addr, oop compare_value) {
inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxchg_in_heap_impl(T* addr, oop compare_value, oop new_value) {
ShenandoahBarrierSet::barrier_set()->storeval_barrier(new_value);
oop result = oop_atomic_cmpxchg_not_in_heap(new_value, addr, compare_value);
oop result = oop_atomic_cmpxchg_not_in_heap(addr, compare_value, new_value);
const bool keep_alive = (decorators & AS_NO_KEEPALIVE) == 0;
if (keep_alive && ShenandoahSATBBarrier && !CompressedOops::is_null(result) &&
(result == compare_value) &&

@ -130,15 +130,15 @@ inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_ato
template <DecoratorSet decorators, typename BarrierSetT>
template <typename T>
inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxchg_in_heap(oop new_value, T* addr, oop compare_value) {
oop result = oop_atomic_cmpxchg_in_heap_impl(new_value, addr, compare_value);
inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxchg_in_heap(T* addr, oop compare_value, oop new_value) {
oop result = oop_atomic_cmpxchg_in_heap_impl(addr, compare_value, new_value);
keep_alive_if_weak(decorators, result);
return result;
}

template <DecoratorSet decorators, typename BarrierSetT>
inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset, oop compare_value) {
oop result = oop_atomic_cmpxchg_in_heap_impl(new_value, AccessInternal::oop_field_addr<decorators>(base, offset), compare_value);
inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxchg_in_heap_at(oop base, ptrdiff_t offset, oop compare_value, oop new_value) {
oop result = oop_atomic_cmpxchg_in_heap_impl(AccessInternal::oop_field_addr<decorators>(base, offset), compare_value, new_value);
keep_alive_if_weak(AccessBarrierSupport::resolve_possibly_unknown_oop_ref_strength<decorators>(base, offset), result);
return result;
}
@ -147,7 +147,7 @@ void ShenandoahEvacUpdateOopStorageRootsClosure::do_oop(oop* p) {
resolved = _heap->evacuate_object(obj, _thread);
}

Atomic::cmpxchg(resolved, p, obj);
Atomic::cmpxchg(p, obj, resolved);
}
}
}

@ -81,7 +81,7 @@ void ShenandoahParallelCodeHeapIterator::parallel_blobs_do(CodeBlobClosure* f) {
int current = count++;
if ((current & stride_mask) == 0) {
process_block = (current >= _claimed_idx) &&
(Atomic::cmpxchg(current + stride, &_claimed_idx, current) == current);
(Atomic::cmpxchg(&_claimed_idx, current, current + stride) == current);
}
if (process_block) {
if (cb->is_alive()) {

@ -146,7 +146,7 @@ ShenandoahHeapRegion* ShenandoahCollectionSet::claim_next() {

while(index < num_regions) {
if (is_in(index)) {
jint cur = Atomic::cmpxchg((jint)(index + 1), &_current_index, saved_current);
jint cur = Atomic::cmpxchg(&_current_index, saved_current, (jint)(index + 1));
assert(cur >= (jint)saved_current, "Must move forward");
if (cur == saved_current) {
assert(is_in(index), "Invariant");

@ -59,7 +59,7 @@ void ShenandoahEvacOOMHandler::enter_evacuation() {
}

while (true) {
jint other = Atomic::cmpxchg(threads_in_evac + 1, &_threads_in_evac, threads_in_evac);
jint other = Atomic::cmpxchg(&_threads_in_evac, threads_in_evac, threads_in_evac + 1);
if (other == threads_in_evac) {
// Success: caller may safely enter evacuation
DEBUG_ONLY(ShenandoahThreadLocalData::set_evac_allowed(Thread::current(), true));

@ -98,8 +98,7 @@ void ShenandoahEvacOOMHandler::handle_out_of_memory_during_evacuation() {

jint threads_in_evac = Atomic::load_acquire(&_threads_in_evac);
while (true) {
jint other = Atomic::cmpxchg((threads_in_evac - 1) | OOM_MARKER_MASK,
&_threads_in_evac, threads_in_evac);
jint other = Atomic::cmpxchg(&_threads_in_evac, threads_in_evac, (threads_in_evac - 1) | OOM_MARKER_MASK);
if (other == threads_in_evac) {
// Success: wait for other threads to get out of the protocol and return.
wait_for_no_evac_threads();

@ -131,20 +131,20 @@ inline oop ShenandoahHeap::evac_update_with_forwarded(T* p) {

inline oop ShenandoahHeap::cas_oop(oop n, oop* addr, oop c) {
assert(is_aligned(addr, HeapWordSize), "Address should be aligned: " PTR_FORMAT, p2i(addr));
return (oop) Atomic::cmpxchg(n, addr, c);
return (oop) Atomic::cmpxchg(addr, c, n);
}

inline oop ShenandoahHeap::cas_oop(oop n, narrowOop* addr, narrowOop c) {
assert(is_aligned(addr, sizeof(narrowOop)), "Address should be aligned: " PTR_FORMAT, p2i(addr));
narrowOop val = CompressedOops::encode(n);
return CompressedOops::decode((narrowOop) Atomic::cmpxchg(val, addr, c));
return CompressedOops::decode((narrowOop) Atomic::cmpxchg(addr, c, val));
}

inline oop ShenandoahHeap::cas_oop(oop n, narrowOop* addr, oop c) {
assert(is_aligned(addr, sizeof(narrowOop)), "Address should be aligned: " PTR_FORMAT, p2i(addr));
narrowOop cmp = CompressedOops::encode(c);
narrowOop val = CompressedOops::encode(n);
return CompressedOops::decode((narrowOop) Atomic::cmpxchg(val, addr, cmp));
return CompressedOops::decode((narrowOop) Atomic::cmpxchg(addr, cmp, val));
}

template <class T>
@ -76,7 +76,7 @@ void ShenandoahHeapRegionCounters::update() {
jlong current = os::javaTimeMillis();
jlong last = _last_sample_millis;
if (current - last > ShenandoahRegionSamplingRate &&
Atomic::cmpxchg(current, &_last_sample_millis, last) == last) {
Atomic::cmpxchg(&_last_sample_millis, last, current) == last) {

ShenandoahHeap* heap = ShenandoahHeap::heap();
jlong status = 0;

@ -97,7 +97,7 @@ ShenandoahHeapRegion* ShenandoahHeapRegionSetIterator::claim_next() {

while(index < num_regions) {
if (_set->is_in(index)) {
jint cur = Atomic::cmpxchg((jint)(index + 1), &_current_index, saved_current);
jint cur = Atomic::cmpxchg(&_current_index, saved_current, (jint)(index + 1));
assert(cur >= (jint)saved_current, "Must move forward");
if (cur == saved_current) {
assert(_set->is_in(index), "Invariant");

@ -210,7 +210,7 @@ bool ShenandoahPacer::claim_for_alloc(size_t words, bool force) {
return false;
}
new_val = cur - tax;
} while (Atomic::cmpxchg(new_val, &_budget, cur) != cur);
} while (Atomic::cmpxchg(&_budget, cur, new_val) != cur);
return true;
}

@ -74,7 +74,7 @@ typedef struct ShenandoahSharedFlag {
if (is_set()) {
return false;
}
ShenandoahSharedValue old = Atomic::cmpxchg((ShenandoahSharedValue)SET, &value, (ShenandoahSharedValue)UNSET);
ShenandoahSharedValue old = Atomic::cmpxchg(&value, (ShenandoahSharedValue)UNSET, (ShenandoahSharedValue)SET);
return old == UNSET; // success
}

@ -82,7 +82,7 @@ typedef struct ShenandoahSharedFlag {
if (!is_set()) {
return false;
}
ShenandoahSharedValue old = Atomic::cmpxchg((ShenandoahSharedValue)UNSET, &value, (ShenandoahSharedValue)SET);
ShenandoahSharedValue old = Atomic::cmpxchg(&value, (ShenandoahSharedValue)SET, (ShenandoahSharedValue)UNSET);
return old == SET; // success
}

@ -125,7 +125,7 @@ typedef struct ShenandoahSharedBitmap {
}

ShenandoahSharedValue nv = ov | mask_val;
if (Atomic::cmpxchg(nv, &value, ov) == ov) {
if (Atomic::cmpxchg(&value, ov, nv) == ov) {
// successfully set
return;
}

@ -143,7 +143,7 @@ typedef struct ShenandoahSharedBitmap {
}

ShenandoahSharedValue nv = ov & ~mask_val;
if (Atomic::cmpxchg(nv, &value, ov) == ov) {
if (Atomic::cmpxchg(&value, ov, nv) == ov) {
// successfully unset
return;
}

@ -221,7 +221,7 @@ struct ShenandoahSharedEnumFlag {
T cmpxchg(T new_value, T expected) {
assert (new_value >= 0, "sanity");
assert (new_value < (sizeof(ShenandoahSharedValue) * CHAR_MAX), "sanity");
return (T)Atomic::cmpxchg((ShenandoahSharedValue)new_value, &value, (ShenandoahSharedValue)expected);
return (T)Atomic::cmpxchg(&value, (ShenandoahSharedValue)expected, (ShenandoahSharedValue)new_value);
}

volatile ShenandoahSharedValue* addr_of() {
@ -48,7 +48,7 @@ inline void ZBarrier::self_heal(volatile oop* p, uintptr_t addr, uintptr_t heal_
}

// Heal
const uintptr_t prev_addr = Atomic::cmpxchg(heal_addr, (volatile uintptr_t*)p, addr);
const uintptr_t prev_addr = Atomic::cmpxchg((volatile uintptr_t*)p, addr, heal_addr);
if (prev_addr == addr) {
// Success
return;

@ -70,8 +70,8 @@ public:
static oop oop_load_in_heap_at(oop base, ptrdiff_t offset);

template <typename T>
static oop oop_atomic_cmpxchg_in_heap(oop new_value, T* addr, oop compare_value);
static oop oop_atomic_cmpxchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset, oop compare_value);
static oop oop_atomic_cmpxchg_in_heap(T* addr, oop compare_value, oop new_value);
static oop oop_atomic_cmpxchg_in_heap_at(oop base, ptrdiff_t offset, oop compare_value, oop new_value);

template <typename T>
static oop oop_atomic_xchg_in_heap(T* addr, oop new_value);

@ -91,7 +91,7 @@ public:
static oop oop_load_not_in_heap(T* addr);

template <typename T>
static oop oop_atomic_cmpxchg_not_in_heap(oop new_value, T* addr, oop compare_value);
static oop oop_atomic_cmpxchg_not_in_heap(T* addr, oop compare_value, oop new_value);

template <typename T>
static oop oop_atomic_xchg_not_in_heap(T* addr, oop new_value);

@ -132,16 +132,16 @@ inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_load_in_heap

template <DecoratorSet decorators, typename BarrierSetT>
template <typename T>
inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxchg_in_heap(oop new_value, T* addr, oop compare_value) {
inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxchg_in_heap(T* addr, oop compare_value, oop new_value) {
verify_decorators_present<ON_STRONG_OOP_REF>();
verify_decorators_absent<AS_NO_KEEPALIVE>();

ZBarrier::load_barrier_on_oop_field(addr);
return Raw::oop_atomic_cmpxchg_in_heap(new_value, addr, compare_value);
return Raw::oop_atomic_cmpxchg_in_heap(addr, compare_value, new_value);
}

template <DecoratorSet decorators, typename BarrierSetT>
inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset, oop compare_value) {
inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxchg_in_heap_at(oop base, ptrdiff_t offset, oop compare_value, oop new_value) {
verify_decorators_present<ON_STRONG_OOP_REF | ON_UNKNOWN_OOP_REF>();
verify_decorators_absent<AS_NO_KEEPALIVE>();

@ -150,7 +150,7 @@ inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxc
// with the motivation that if you're doing Unsafe operations on a Reference.referent
// field, then you're on your own anyway.
ZBarrier::load_barrier_on_oop_field(field_addr(base, offset));
return Raw::oop_atomic_cmpxchg_in_heap_at(new_value, base, offset, compare_value);
return Raw::oop_atomic_cmpxchg_in_heap_at(base, offset, compare_value, new_value);
}

template <DecoratorSet decorators, typename BarrierSetT>

@ -222,11 +222,11 @@ inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_load_not_in_

template <DecoratorSet decorators, typename BarrierSetT>
template <typename T>
inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxchg_not_in_heap(oop new_value, T* addr, oop compare_value) {
inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxchg_not_in_heap(T* addr, oop compare_value, oop new_value) {
verify_decorators_present<ON_STRONG_OOP_REF>();
verify_decorators_absent<AS_NO_KEEPALIVE>();

return Raw::oop_atomic_cmpxchg_not_in_heap(new_value, addr, compare_value);
return Raw::oop_atomic_cmpxchg_not_in_heap(addr, compare_value, new_value);
}

template <DecoratorSet decorators, typename BarrierSetT>
@ -55,7 +55,7 @@ inline bool ZBitMap::par_set_bit_pair_strong(idx_t bit, bool& inc_live) {
inc_live = false;
return false;
}
const bm_word_t cur_val = Atomic::cmpxchg(new_val, addr, old_val);
const bm_word_t cur_val = Atomic::cmpxchg(addr, old_val, new_val);
if (cur_val == old_val) {
// Success
const bm_word_t marked_mask = bit_mask(bit);

@ -63,7 +63,7 @@ inline bool ZForwarding::inc_refcount() {
while (refcount > 0) {
const uint32_t old_refcount = refcount;
const uint32_t new_refcount = old_refcount + 1;
const uint32_t prev_refcount = Atomic::cmpxchg(new_refcount, &_refcount, old_refcount);
const uint32_t prev_refcount = Atomic::cmpxchg(&_refcount, old_refcount, new_refcount);
if (prev_refcount == old_refcount) {
return true;
}

@ -139,7 +139,7 @@ inline uintptr_t ZForwarding::insert(uintptr_t from_index, uintptr_t to_offset,
const ZForwardingEntry old_entry; // Empty

for (;;) {
const ZForwardingEntry prev_entry = Atomic::cmpxchg(new_entry, entries() + *cursor, old_entry);
const ZForwardingEntry prev_entry = Atomic::cmpxchg(entries() + *cursor, old_entry, new_entry);
if (!prev_entry.populated()) {
// Success
return to_offset;

@ -58,7 +58,7 @@ void ZLiveMap::reset(size_t index) {
seqnum != ZGlobalSeqNum;
seqnum = Atomic::load_acquire(&_seqnum)) {
if ((seqnum != seqnum_initializing) &&
(Atomic::cmpxchg(seqnum_initializing, &_seqnum, seqnum) == seqnum)) {
(Atomic::cmpxchg(&_seqnum, seqnum, seqnum_initializing) == seqnum)) {
// Reset marking information
_live_bytes = 0;
_live_objects = 0;

@ -121,7 +121,7 @@ inline void ZStackList<T>::push(T* stack) {
for (;;) {
decode_versioned_pointer(vstack, stack->next_addr(), &version);
T* const new_vstack = encode_versioned_pointer(stack, version + 1);
T* const prev_vstack = Atomic::cmpxchg(new_vstack, &_head, vstack);
T* const prev_vstack = Atomic::cmpxchg(&_head, vstack, new_vstack);
if (prev_vstack == vstack) {
// Success
break;

@ -145,7 +145,7 @@ inline T* ZStackList<T>::pop() {
}

T* const new_vstack = encode_versioned_pointer(stack->next(), version + 1);
T* const prev_vstack = Atomic::cmpxchg(new_vstack, &_head, vstack);
T* const prev_vstack = Atomic::cmpxchg(&_head, vstack, new_vstack);
if (prev_vstack == vstack) {
// Success
return stack;

@ -70,7 +70,7 @@ uintptr_t ZMarkStackSpace::alloc_space(size_t size) {
return 0;
}

const uintptr_t prev_top = Atomic::cmpxchg(new_top, &_top, top);
const uintptr_t prev_top = Atomic::cmpxchg(&_top, top, new_top);
if (prev_top == top) {
// Success
return top;

@ -49,7 +49,7 @@ inline bool ZMarkTerminate::try_exit_stage(volatile uint* nworking_stage) {
}

const uint new_nworking = nworking + 1;
const uint prev_nworking = Atomic::cmpxchg(new_nworking, nworking_stage, nworking);
const uint prev_nworking = Atomic::cmpxchg(nworking_stage, nworking, new_nworking);
if (prev_nworking == nworking) {
// Success
return true;
@ -97,7 +97,7 @@ uintptr_t ZObjectAllocator::alloc_object_in_shared_page(ZPage** shared_page,

retry:
// Install new page
ZPage* const prev_page = Atomic::cmpxchg(new_page, shared_page, page);
ZPage* const prev_page = Atomic::cmpxchg(shared_page, page, new_page);
if (prev_page != page) {
if (prev_page == NULL) {
// Previous page was retired, retry installing the new page

@ -96,7 +96,7 @@ inline void ZPhantomCleanOopClosure::do_oop(oop* p) {
// oop here again (the object would be strongly live and we would
// not consider clearing such oops), so therefore we don't have an
// ABA problem here.
Atomic::cmpxchg(oop(NULL), p, obj);
Atomic::cmpxchg(p, obj, oop(NULL));
}
}

@ -255,7 +255,7 @@ inline uintptr_t ZPage::alloc_object_atomic(size_t size) {
return 0;
}

const uintptr_t prev_top = Atomic::cmpxchg(new_top, &_top, addr);
const uintptr_t prev_top = Atomic::cmpxchg(&_top, addr, new_top);
if (prev_top == addr) {
// Success
return ZAddress::good(addr);

@ -299,7 +299,7 @@ inline bool ZPage::undo_alloc_object_atomic(uintptr_t addr, size_t size) {
return false;
}

const uintptr_t prev_top = Atomic::cmpxchg(new_top, &_top, old_top);
const uintptr_t prev_top = Atomic::cmpxchg(&_top, old_top, new_top);
if (prev_top == old_top) {
// Success
return true;

@ -91,7 +91,7 @@ ZSerialOopsDo<T, F>::ZSerialOopsDo(T* iter) :

template <typename T, void (T::*F)(ZRootsIteratorClosure*)>
void ZSerialOopsDo<T, F>::oops_do(ZRootsIteratorClosure* cl) {
if (!_claimed && Atomic::cmpxchg(true, &_claimed, false) == false) {
if (!_claimed && Atomic::cmpxchg(&_claimed, false, true) == false) {
(_iter->*F)(cl);
}
}

@ -118,7 +118,7 @@ ZSerialWeakOopsDo<T, F>::ZSerialWeakOopsDo(T* iter) :

template <typename T, void (T::*F)(BoolObjectClosure*, ZRootsIteratorClosure*)>
void ZSerialWeakOopsDo<T, F>::weak_oops_do(BoolObjectClosure* is_alive, ZRootsIteratorClosure* cl) {
if (!_claimed && Atomic::cmpxchg(true, &_claimed, false) == false) {
if (!_claimed && Atomic::cmpxchg(&_claimed, false, true) == false) {
(_iter->*F)(is_alive, cl);
}
}

@ -772,7 +772,7 @@ void ZStatSample(const ZStatSampler& sampler, uint64_t value) {
}

const uint64_t new_max = value;
const uint64_t prev_max = Atomic::cmpxchg(new_max, &cpu_data->_max, max);
const uint64_t prev_max = Atomic::cmpxchg(&cpu_data->_max, max, new_max);
if (prev_max == max) {
// Success
break;

@ -2163,7 +2163,7 @@ run:
HeapWord* compare_to = *Universe::heap()->top_addr();
HeapWord* new_top = compare_to + obj_size;
if (new_top <= *Universe::heap()->end_addr()) {
if (Atomic::cmpxchg(new_top, Universe::heap()->top_addr(), compare_to) != compare_to) {
if (Atomic::cmpxchg(Universe::heap()->top_addr(), compare_to, new_top) != compare_to) {
goto retry;
}
result = (oop) compare_to;
@ -452,7 +452,7 @@ OopMapCacheEntry* OopMapCache::entry_at(int i) const {
}

bool OopMapCache::put_at(int i, OopMapCacheEntry* entry, OopMapCacheEntry* old) {
return Atomic::cmpxchg(entry, &_array[i % _size], old) == old;
return Atomic::cmpxchg(&_array[i % _size], old, entry) == old;
}

void OopMapCache::flush() {

@ -564,7 +564,7 @@ void OopMapCache::enqueue_for_cleanup(OopMapCacheEntry* entry) {
do {
head = _old_entries;
entry->_next = head;
success = Atomic::cmpxchg(entry, &_old_entries, head) == head;
success = Atomic::cmpxchg(&_old_entries, head, entry) == head;
} while (!success);

if (log_is_enabled(Debug, interpreter, oopmap)) {

@ -93,7 +93,7 @@ static volatile int _lock = 0;

ObjectSampler* ObjectSampler::acquire() {
assert(is_created(), "invariant");
while (Atomic::cmpxchg(1, &_lock, 0) == 1) {}
while (Atomic::cmpxchg(&_lock, 0, 1) == 1) {}
return _instance;
}

@ -46,7 +46,7 @@ static traceid atomic_inc(traceid volatile* const dest) {
do {
compare_value = *dest;
exchange_value = compare_value + 1;
} while (Atomic::cmpxchg(exchange_value, dest, compare_value) != compare_value);
} while (Atomic::cmpxchg(dest, compare_value, exchange_value) != compare_value);
return exchange_value;
}
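The JFR helpers above implement increment and add as a classic read-modify-write retry loop. Here is a compact, self-contained version of the same idiom in standard C++ (traceid is assumed to be a 64-bit counter, mirroring the typedef used in the diff); the loop retries until the CAS observes exactly the value that was read.

#include <atomic>
#include <cstdint>

using traceid = uint64_t;

// Retry until the CAS sees the value we read, i.e. no other thread slipped an
// update in between; mirrors the atomic_inc pattern shown in the hunks above.
traceid atomic_inc(std::atomic<traceid>* dest) {
  traceid compare_value;
  traceid exchange_value;
  do {
    compare_value = dest->load();
    exchange_value = compare_value + 1;
    // In HotSpot terms: Atomic::cmpxchg(dest, compare_value, exchange_value).
  } while (!dest->compare_exchange_weak(compare_value, exchange_value));
  return exchange_value;
}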
@ -62,7 +62,7 @@ inline void set_bits_cas_form(jbyte bits, jbyte* const dest) {
do {
const jbyte current = *dest;
const jbyte new_value = op(current, bits);
if (Atomic::cmpxchg(new_value, dest, current) == current) {
if (Atomic::cmpxchg(dest, current, new_value) == current) {
return;
}
} while (true);

@ -418,7 +418,7 @@ static bool prepare_for_emergency_dump(Thread* thread) {
static volatile int jfr_shutdown_lock = 0;

static bool guard_reentrancy() {
return Atomic::cmpxchg(1, &jfr_shutdown_lock, 0) == 0;
return Atomic::cmpxchg(&jfr_shutdown_lock, 0, 1) == 0;
}

class JavaThreadInVM : public StackObj {

@ -87,7 +87,7 @@ void JfrPostBox::deposit(int new_messages) {
const int current_msgs = Atomic::load(&_messages);
// OR the new message
const int exchange_value = current_msgs | new_messages;
const int result = Atomic::cmpxchg(exchange_value, &_messages, current_msgs);
const int result = Atomic::cmpxchg(&_messages, current_msgs, exchange_value);
if (result == current_msgs) {
return;
}

@ -104,7 +104,7 @@ void JfrBuffer::set_top(const u1* new_top) {
const u1* JfrBuffer::concurrent_top() const {
do {
const u1* current_top = stable_top();
if (Atomic::cmpxchg(MUTEX_CLAIM, &_top, current_top) == current_top) {
if (Atomic::cmpxchg(&_top, current_top, MUTEX_CLAIM) == current_top) {
return current_top;
}
} while (true);

@ -128,13 +128,13 @@ void JfrBuffer::acquire(const void* id) {
const void* current_id;
do {
current_id = Atomic::load(&_identity);
} while (current_id != NULL || Atomic::cmpxchg(id, &_identity, current_id) != current_id);
} while (current_id != NULL || Atomic::cmpxchg(&_identity, current_id, id) != current_id);
}

bool JfrBuffer::try_acquire(const void* id) {
assert(id != NULL, "invariant");
const void* const current_id = Atomic::load(&_identity);
return current_id == NULL && Atomic::cmpxchg(id, &_identity, current_id) == current_id;
return current_id == NULL && Atomic::cmpxchg(&_identity, current_id, id) == current_id;
}

void JfrBuffer::release() {

@ -34,7 +34,7 @@ static jlong atomic_add(size_t value, size_t volatile* const dest) {
do {
compare_value = *dest;
exchange_value = compare_value + value;
} while (Atomic::cmpxchg(exchange_value, dest, compare_value) != compare_value);
} while (Atomic::cmpxchg(dest, compare_value, exchange_value) != compare_value);
return exchange_value;
}

@ -45,7 +45,7 @@ static jlong atomic_dec(size_t volatile* const dest) {
compare_value = *dest;
assert(compare_value >= 1, "invariant");
exchange_value = compare_value - 1;
} while (Atomic::cmpxchg(exchange_value, dest, compare_value) != compare_value);
} while (Atomic::cmpxchg(dest, compare_value, exchange_value) != compare_value);
return exchange_value;
}

@ -137,4 +137,3 @@ bool JfrStorageControl::should_scavenge() const {
void JfrStorageControl::set_scavenge_threshold(size_t number_of_dead_buffers) {
_scavenge_threshold = number_of_dead_buffers;
}

@ -41,7 +41,7 @@ static jlong atomic_add_jlong(jlong value, jlong volatile* const dest) {
do {
compare_value = *dest;
exchange_value = compare_value + value;
} while (Atomic::cmpxchg(exchange_value, dest, compare_value) != compare_value);
} while (Atomic::cmpxchg(dest, compare_value, exchange_value) != compare_value);
return exchange_value;
}

@ -36,7 +36,7 @@ class JfrTryLock {
bool _has_lock;

public:
JfrTryLock(volatile int* lock) : _lock(lock), _has_lock(Atomic::cmpxchg(1, lock, 0) == 0) {}
JfrTryLock(volatile int* lock) : _lock(lock), _has_lock(Atomic::cmpxchg(lock, 0, 1) == 0) {}

~JfrTryLock() {
if (_has_lock) {

@ -1586,7 +1586,7 @@ class AttachDetach : public StackObj {
jint res = main_vm.AttachCurrentThread((void**)&hotspotEnv, NULL);
_attached = res == JNI_OK;
static volatile int report_attach_error = 0;
if (res != JNI_OK && report_attach_error == 0 && Atomic::cmpxchg(1, &report_attach_error, 0) == 0) {
if (res != JNI_OK && report_attach_error == 0 && Atomic::cmpxchg(&report_attach_error, 0, 1) == 0) {
// Only report an attach error once
jio_printf("Warning: attaching current thread to VM failed with %d (future attach errors are suppressed)\n", res);
}

@ -1599,7 +1599,7 @@ class AttachDetach : public StackObj {
extern struct JavaVM_ main_vm;
jint res = main_vm.DetachCurrentThread();
static volatile int report_detach_error = 0;
if (res != JNI_OK && report_detach_error == 0 && Atomic::cmpxchg(1, &report_detach_error, 0) == 0) {
if (res != JNI_OK && report_detach_error == 0 && Atomic::cmpxchg(&report_detach_error, 0, 1) == 0) {
// Only report an attach error once
jio_printf("Warning: detaching current thread from VM failed with %d (future attach errors are suppressed)\n", res);
}