8234739: Harmonize parameter order in Atomic - xchg
Reviewed-by: rehn, dholmes
commit 3d426623bf (parent 213af1161a)
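The change is mechanical: every flavour of Atomic::xchg (and the Access/barrier entry points built on it) moves from the old (exchange_value, dest) parameter order to (dest, exchange_value), so the destination comes first, as it already does for Atomic::store, Atomic::add and Atomic::cmpxchg. A minimal stand-alone sketch of the new calling convention (the xchg stand-in, the GCC/Clang builtin, and the variable names below are illustrative only, not HotSpot code):

```cpp
#include <cassert>
#include <cstdint>

// Hypothetical stand-in mirroring the harmonized signature
// D Atomic::xchg(volatile D* dest, T exchange_value): destination first.
template <typename D, typename T>
D xchg(volatile D* dest, T exchange_value) {
  // __atomic_exchange_n is a GCC/Clang builtin; HotSpot instead dispatches
  // through Atomic::PlatformXchg<sizeof(D)>.
  return __atomic_exchange_n(dest, exchange_value, __ATOMIC_SEQ_CST);
}

int main() {
  volatile int32_t event = 0;
  int32_t prev = xchg(&event, 1);  // before JDK-8234739: Atomic::xchg(1, &event)
  assert(prev == 0 && event == 1);
  return 0;
}
```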
@@ -488,7 +488,8 @@ class StubGenerator: public StubCodeGenerator {
 return start;
 }

-// Support for jint Atomic::xchg(jint exchange_value, volatile jint *dest)
+// Implementation of jint atomic_xchg(jint exchange_value, volatile jint* dest)
+// used by Atomic::add(volatile jint* dest, jint exchange_value)
 //
 // Arguments :
 //

@@ -585,7 +585,8 @@ class StubGenerator: public StubCodeGenerator {
 return start;
 }

-// Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest).
+// Implementation of jint atomic_xchg(jint exchange_value, volatile jint* dest)
+// used by Atomic::xchg(volatile jint* dest, jint exchange_value)
 //
 // Arguments:
 //

@@ -430,7 +430,8 @@ class StubGenerator: public StubCodeGenerator {


 //----------------------------------------------------------------------------------------------------
-// Support for int32_t Atomic::xchg(int32_t exchange_value, volatile int32_t* dest)
+// Implementation of int32_t atomic_xchg(int32_t exchange_value, volatile int32_t* dest)
+// used by Atomic::xchg(volatile int32_t* dest, int32_t exchange_value)
 //
 // xchg exists as far back as 8086, lock needed for MP only
 // Stack layout immediately after call:

@@ -552,7 +552,8 @@ class StubGenerator: public StubCodeGenerator {
 return start;
 }

-// Support for jint atomic::xchg(jint exchange_value, volatile jint* dest)
+// Implementation of jint atomic_xchg(jint add_value, volatile jint* dest)
+// used by Atomic::xchg(volatile jint* dest, jint exchange_value)
 //
 // Arguments :
 // c_rarg0: exchange_value

@@ -571,7 +572,8 @@ class StubGenerator: public StubCodeGenerator {
 return start;
 }

-// Support for intptr_t atomic::xchg_long(jlong exchange_value, volatile jlong* dest)
+// Implementation of intptr_t atomic_xchg(jlong add_value, volatile jlong* dest)
+// used by Atomic::xchg(volatile jlong* dest, jlong exchange_value)
 //
 // Arguments :
 // c_rarg0: exchange_value
@@ -1998,7 +1998,7 @@ void os::PlatformEvent::unpark() {
 // but only in the correctly written condition checking loops of ObjectMonitor,
 // Mutex/Monitor, Thread::muxAcquire and JavaThread::sleep

-if (Atomic::xchg(1, &_event) >= 0) return;
+if (Atomic::xchg(&_event, 1) >= 0) return;

 int status = pthread_mutex_lock(_mutex);
 assert_status(status == 0, status, "mutex_lock");

@@ -2046,7 +2046,7 @@ void Parker::park(bool isAbsolute, jlong time) {
 // Return immediately if a permit is available.
 // We depend on Atomic::xchg() having full barrier semantics
 // since we are doing a lock-free update to _counter.
-if (Atomic::xchg(0, &_counter) > 0) return;
+if (Atomic::xchg(&_counter, 0) > 0) return;

 Thread* thread = Thread::current();
 assert(thread->is_Java_thread(), "Must be JavaThread");

@@ -4797,7 +4797,7 @@ void os::PlatformEvent::unpark() {
 // from the first park() call after an unpark() call which will help
 // shake out uses of park() and unpark() without condition variables.

-if (Atomic::xchg(1, &_Event) >= 0) return;
+if (Atomic::xchg(&_Event, 1) >= 0) return;

 // If the thread associated with the event was parked, wake it.
 // Wait for the thread assoc with the PlatformEvent to vacate.

@@ -4896,7 +4896,7 @@ void Parker::park(bool isAbsolute, jlong time) {
 // Return immediately if a permit is available.
 // We depend on Atomic::xchg() having full barrier semantics
 // since we are doing a lock-free update to _counter.
-if (Atomic::xchg(0, &_counter) > 0) return;
+if (Atomic::xchg(&_counter, 0) > 0) return;

 // Optional fast-exit: Check interrupt before trying to wait
 Thread* thread = Thread::current();

@@ -5236,7 +5236,7 @@ void os::PlatformEvent::unpark() {
 // from the first park() call after an unpark() call which will help
 // shake out uses of park() and unpark() without condition variables.

-if (Atomic::xchg(1, &_Event) >= 0) return;
+if (Atomic::xchg(&_Event, 1) >= 0) return;

 ::SetEvent(_ParkHandle);
 }
@@ -153,8 +153,8 @@ inline D Atomic::PlatformAdd<8>::add_and_fetch(D volatile* dest, I add_value,

 template<>
 template<typename T>
-inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
-T volatile* dest,
+inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
+T exchange_value,
 atomic_memory_order order) const {
 // Note that xchg doesn't necessarily do an acquire
 // (see synchronizer.cpp).

@@ -192,8 +192,8 @@ inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,

 template<>
 template<typename T>
-inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
-T volatile* dest,
+inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest,
+T exchange_value,
 atomic_memory_order order) const {
 STATIC_ASSERT(8 == sizeof(T));
 // Note that xchg doesn't necessarily do an acquire

@@ -51,8 +51,8 @@ inline D Atomic::PlatformAdd<4>::fetch_and_add(D volatile* dest, I add_value,

 template<>
 template<typename T>
-inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
-T volatile* dest,
+inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
+T exchange_value,
 atomic_memory_order /* order */) const {
 STATIC_ASSERT(4 == sizeof(T));
 __asm__ volatile ( "xchgl (%2),%0"

@@ -107,8 +107,8 @@ inline D Atomic::PlatformAdd<8>::fetch_and_add(D volatile* dest, I add_value,

 template<>
 template<typename T>
-inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
-T volatile* dest,
+inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest,
+T exchange_value,
 atomic_memory_order /* order */) const {
 STATIC_ASSERT(8 == sizeof(T));
 __asm__ __volatile__ ("xchgq (%2),%0"

@@ -197,15 +197,15 @@ inline D Atomic::PlatformAdd<8>::add_and_fetch(D volatile* dest, I add_value,

 template<>
 template<typename T>
-inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
-T volatile* dest,
+inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
+T exchange_value,
 atomic_memory_order order) const {
 STATIC_ASSERT(4 == sizeof(T));
 #ifdef ARM
-return xchg_using_helper<int>(arm_lock_test_and_set, exchange_value, dest);
+return xchg_using_helper<int>(arm_lock_test_and_set, dest, exchange_value);
 #else
 #ifdef M68K
-return xchg_using_helper<int>(m68k_lock_test_and_set, exchange_value, dest);
+return xchg_using_helper<int>(m68k_lock_test_and_set, dest, exchange_value);
 #else
 // __sync_lock_test_and_set is a bizarrely named atomic exchange
 // operation. Note that some platforms only support this with the

@@ -224,8 +224,8 @@ inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,

 template<>
 template<typename T>
-inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
-T volatile* dest,
+inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest,
+T exchange_value,
 atomic_memory_order order) const {
 STATIC_ASSERT(8 == sizeof(T));
 T result = __sync_lock_test_and_set (dest, exchange_value);
@@ -46,8 +46,8 @@ struct Atomic::PlatformAdd

 template<size_t byte_size>
 template<typename T>
-inline T Atomic::PlatformXchg<byte_size>::operator()(T exchange_value,
-T volatile* dest,
+inline T Atomic::PlatformXchg<byte_size>::operator()(T volatile* dest,
+T exchange_value,
 atomic_memory_order order) const {
 STATIC_ASSERT(byte_size == sizeof(T));
 T res = __atomic_exchange_n(dest, exchange_value, __ATOMIC_RELEASE);

@@ -86,11 +86,11 @@ inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value,

 template<>
 template<typename T>
-inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
-T volatile* dest,
+inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
+T exchange_value,
 atomic_memory_order order) const {
 STATIC_ASSERT(4 == sizeof(T));
-return xchg_using_helper<int32_t>(os::atomic_xchg_func, exchange_value, dest);
+return xchg_using_helper<int32_t>(os::atomic_xchg_func, dest, exchange_value);
 }

@@ -153,8 +153,8 @@ inline D Atomic::PlatformAdd<8>::add_and_fetch(D volatile* dest, I add_value,

 template<>
 template<typename T>
-inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
-T volatile* dest,
+inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
+T exchange_value,
 atomic_memory_order order) const {
 // Note that xchg doesn't necessarily do an acquire
 // (see synchronizer.cpp).

@@ -192,8 +192,8 @@ inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,

 template<>
 template<typename T>
-inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
-T volatile* dest,
+inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest,
+T exchange_value,
 atomic_memory_order order) const {
 STATIC_ASSERT(8 == sizeof(T));
 // Note that xchg doesn't necessarily do an acquire

@@ -208,8 +208,8 @@ inline D Atomic::PlatformAdd<8>::add_and_fetch(D volatile* dest, I inc,
 // replacement succeeded.
 template<>
 template<typename T>
-inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
-T volatile* dest,
+inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
+T exchange_value,
 atomic_memory_order unused) const {
 STATIC_ASSERT(4 == sizeof(T));
 T old;

@@ -232,8 +232,8 @@ inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,

 template<>
 template<typename T>
-inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
-T volatile* dest,
+inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest,
+T exchange_value,
 atomic_memory_order unused) const {
 STATIC_ASSERT(8 == sizeof(T));
 T old;
@@ -83,8 +83,8 @@ inline D Atomic::PlatformAdd<8>::add_and_fetch(D volatile* dest, I add_value,

 template<>
 template<typename T>
-inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
-T volatile* dest,
+inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
+T exchange_value,
 atomic_memory_order order) const {
 STATIC_ASSERT(4 == sizeof(T));
 T rv = exchange_value;

@@ -98,8 +98,8 @@ inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,

 template<>
 template<typename T>
-inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
-T volatile* dest,
+inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest,
+T exchange_value,
 atomic_memory_order order) const {
 STATIC_ASSERT(8 == sizeof(T));
 T rv = exchange_value;

@@ -51,8 +51,8 @@ inline D Atomic::PlatformAdd<4>::fetch_and_add(D volatile* dest, I add_value,

 template<>
 template<typename T>
-inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
-T volatile* dest,
+inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
+T exchange_value,
 atomic_memory_order order) const {
 STATIC_ASSERT(4 == sizeof(T));
 __asm__ volatile ( "xchgl (%2),%0"

@@ -108,7 +108,7 @@ inline D Atomic::PlatformAdd<8>::fetch_and_add(D volatile* dest, I add_value,

 template<>
 template<typename T>
-inline T Atomic::PlatformXchg<8>::operator()(T exchange_value, T volatile* dest,
+inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest, T exchange_value,
 atomic_memory_order order) const {
 STATIC_ASSERT(8 == sizeof(T));
 __asm__ __volatile__ ("xchgq (%2),%0"

@@ -59,8 +59,8 @@ inline D Atomic::PlatformAdd<8>::add_and_fetch(D volatile* dest, I add_value,

 template<>
 template<typename T>
-inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
-T volatile* dest,
+inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
+T exchange_value,
 atomic_memory_order order) const {
 STATIC_ASSERT(4 == sizeof(T));
 // __sync_lock_test_and_set is a bizarrely named atomic exchange

@@ -78,8 +78,8 @@ inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,

 template<>
 template<typename T>
-inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
-T volatile* dest,
+inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest,
+T exchange_value,
 atomic_memory_order order) const {
 STATIC_ASSERT(8 == sizeof(T));
 T result = __sync_lock_test_and_set (dest, exchange_value);

@@ -45,8 +45,8 @@ struct Atomic::PlatformAdd {

 template<>
 template<typename T>
-inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
-T volatile* dest,
+inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
+T exchange_value,
 atomic_memory_order order) const {
 STATIC_ASSERT(4 == sizeof(T));
 __asm__ volatile ( "swap [%2],%0"

@@ -58,8 +58,8 @@ inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,

 template<>
 template<typename T>
-inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
-T volatile* dest,
+inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest,
+T exchange_value,
 atomic_memory_order order) const {
 STATIC_ASSERT(8 == sizeof(T));
 T old_value = *dest;

@@ -74,8 +74,8 @@ inline D Atomic::PlatformAdd<8>::add_and_fetch(D volatile* dest, I add_value,

 template<>
 template<typename T>
-inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
-T volatile* dest,
+inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
+T exchange_value,
 atomic_memory_order order) const {
 STATIC_ASSERT(4 == sizeof(T));
 return PrimitiveConversions::cast<T>(

@@ -87,8 +87,8 @@ extern "C" int64_t _Atomic_xchg_long(int64_t exchange_value, volatile int64_t* d

 template<>
 template<typename T>
-inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
-T volatile* dest,
+inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest,
+T exchange_value,
 atomic_memory_order order) const {
 STATIC_ASSERT(8 == sizeof(T));
 return PrimitiveConversions::cast<T>(
@@ -67,13 +67,15 @@
 addq %rdi, %rax
 .end

-// Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest).
+// Implementation of jint _Atomic_xchg(jint exchange_value, volatile jint* dest)
+// used by Atomic::xchg(volatile jint* dest, jint exchange_value)
 .inline _Atomic_xchg,2
 xchgl (%rsi), %edi
 movl %edi, %eax
 .end

-// Support for jlong Atomic::xchg(jlong exchange_value, volatile jlong* dest).
+// Implementation of jlong _Atomic_xchg(jlong exchange_value, volatile jlong* dest)
+// used by Atomic::xchg(volatile jlong* dest, jlong exchange_value)
 .inline _Atomic_xchg_long,2
 xchgq (%rsi), %rdi
 movq %rdi, %rax

@@ -79,11 +79,11 @@ inline D Atomic::PlatformAdd<8>::add_and_fetch(D volatile* dest, I add_value,
 #define DEFINE_STUB_XCHG(ByteSize, StubType, StubName) \
 template<> \
 template<typename T> \
-inline T Atomic::PlatformXchg<ByteSize>::operator()(T exchange_value, \
-T volatile* dest, \
+inline T Atomic::PlatformXchg<ByteSize>::operator()(T volatile* dest, \
+T exchange_value, \
 atomic_memory_order order) const { \
 STATIC_ASSERT(ByteSize == sizeof(T)); \
-return xchg_using_helper<StubType>(StubName, exchange_value, dest); \
+return xchg_using_helper<StubType>(StubName, dest, exchange_value); \
 }

 DEFINE_STUB_XCHG(4, int32_t, os::atomic_xchg_func)

@@ -127,8 +127,8 @@ inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value,

 template<>
 template<typename T>
-inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
-T volatile* dest,
+inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
+T exchange_value,
 atomic_memory_order order) const {
 STATIC_ASSERT(4 == sizeof(T));
 // alternative for InterlockedExchange
@@ -1900,7 +1900,7 @@ nmethod* nmethod::oops_do_try_add_to_list_as_weak_done() {
 extract_state(_oops_do_mark_link) == claim_strong_request_tag,
 "must be but is nmethod " PTR_FORMAT " %u", p2i(extract_nmethod(_oops_do_mark_link)), extract_state(_oops_do_mark_link));

-nmethod* old_head = Atomic::xchg(this, &_oops_do_mark_nmethods);
+nmethod* old_head = Atomic::xchg(&_oops_do_mark_nmethods, this);
 // Self-loop if needed.
 if (old_head == NULL) {
 old_head = this;

@@ -1917,7 +1917,7 @@ nmethod* nmethod::oops_do_try_add_to_list_as_weak_done() {
 void nmethod::oops_do_add_to_list_as_strong_done() {
 assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");

-nmethod* old_head = Atomic::xchg(this, &_oops_do_mark_nmethods);
+nmethod* old_head = Atomic::xchg(&_oops_do_mark_nmethods, this);
 // Self-loop if needed.
 if (old_head == NULL) {
 old_head = this;

@@ -350,7 +350,7 @@ public:
 static void disable_compilation_forever() {
 UseCompiler = false;
 AlwaysCompileLoopMethods = false;
-Atomic::xchg(jint(shutdown_compilation), &_should_compile_new_jobs);
+Atomic::xchg(&_should_compile_new_jobs, jint(shutdown_compilation));
 }

 static bool is_compilation_disabled_forever() {
@@ -221,13 +221,13 @@ public:
 }

 template <typename T>
-static T atomic_xchg_in_heap(T new_value, T* addr) {
-return Raw::atomic_xchg(new_value, addr);
+static T atomic_xchg_in_heap(T* addr, T new_value) {
+return Raw::atomic_xchg(addr, new_value);
 }

 template <typename T>
-static T atomic_xchg_in_heap_at(T new_value, oop base, ptrdiff_t offset) {
-return Raw::atomic_xchg_at(new_value, base, offset);
+static T atomic_xchg_in_heap_at(oop base, ptrdiff_t offset, T new_value) {
+return Raw::atomic_xchg_at(base, offset, new_value);
 }

 template <typename T>

@@ -270,12 +270,12 @@ public:
 }

 template <typename T>
-static oop oop_atomic_xchg_in_heap(oop new_value, T* addr) {
-return Raw::oop_atomic_xchg(new_value, addr);
+static oop oop_atomic_xchg_in_heap(T* addr, oop new_value) {
+return Raw::oop_atomic_xchg(addr, new_value);
 }

-static oop oop_atomic_xchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset) {
-return Raw::oop_atomic_xchg_at(new_value, base, offset);
+static oop oop_atomic_xchg_in_heap_at(oop base, ptrdiff_t offset, oop new_value) {
+return Raw::oop_atomic_xchg_at(base, offset, new_value);
 }

 template <typename T>

@@ -302,8 +302,8 @@ public:
 }

 template <typename T>
-static oop oop_atomic_xchg_not_in_heap(oop new_value, T* addr) {
-return Raw::oop_atomic_xchg(new_value, addr);
+static oop oop_atomic_xchg_not_in_heap(T* addr, oop new_value) {
+return Raw::oop_atomic_xchg(addr, new_value);
 }

 // Clone barrier support

@@ -81,7 +81,7 @@ public:
 template <typename T>
 static oop oop_atomic_cmpxchg_in_heap(oop new_value, T* addr, oop compare_value);
 template <typename T>
-static oop oop_atomic_xchg_in_heap(oop new_value, T* addr);
+static oop oop_atomic_xchg_in_heap(T* addr, oop new_value);

 template <typename T>
 static bool oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,

@@ -94,8 +94,8 @@ public:
 oop_store_in_heap(AccessInternal::oop_field_addr<decorators>(base, offset), value);
 }

-static oop oop_atomic_xchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset) {
-return oop_atomic_xchg_in_heap(new_value, AccessInternal::oop_field_addr<decorators>(base, offset));
+static oop oop_atomic_xchg_in_heap_at(oop base, ptrdiff_t offset, oop new_value) {
+return oop_atomic_xchg_in_heap(AccessInternal::oop_field_addr<decorators>(base, offset), new_value);
 }

 static oop oop_atomic_cmpxchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset, oop compare_value) {

@@ -80,10 +80,10 @@ oop_atomic_cmpxchg_in_heap(oop new_value, T* addr, oop compare_value) {
 template <DecoratorSet decorators, typename BarrierSetT>
 template <typename T>
 inline oop ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT>::
-oop_atomic_xchg_in_heap(oop new_value, T* addr) {
+oop_atomic_xchg_in_heap(T* addr, oop new_value) {
 BarrierSetT *bs = barrier_set_cast<BarrierSetT>(barrier_set());
 bs->template write_ref_field_pre<decorators>(addr);
-oop result = Raw::oop_atomic_xchg(new_value, addr);
+oop result = Raw::oop_atomic_xchg(addr, new_value);
 bs->template write_ref_field_post<decorators>(addr, new_value);
 return result;
 }
@@ -145,7 +145,7 @@ public:
 static oop oop_atomic_cmpxchg_in_heap_impl(oop new_value, T* addr, oop compare_value);

 template <typename T>
-static oop oop_atomic_xchg_in_heap_impl(oop new_value, T* addr);
+static oop oop_atomic_xchg_in_heap_impl(T* addr, oop new_value);

 public:
 // Heap oop accesses. These accessors get resolved when

@@ -164,8 +164,8 @@ public:
 static oop oop_atomic_cmpxchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset, oop compare_value);

 template <typename T>
-static oop oop_atomic_xchg_in_heap(oop new_value, T* addr);
-static oop oop_atomic_xchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset);
+static oop oop_atomic_xchg_in_heap(T* addr, oop new_value);
+static oop oop_atomic_xchg_in_heap_at(oop base, ptrdiff_t offset, oop new_value);

 template <typename T>
 static bool oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,

@@ -187,7 +187,7 @@ public:
 static oop oop_atomic_cmpxchg_not_in_heap(oop new_value, T* addr, oop compare_value);

 template <typename T>
-static oop oop_atomic_xchg_not_in_heap(oop new_value, T* addr);
+static oop oop_atomic_xchg_not_in_heap(T* addr, oop new_value);

 };

@@ -145,8 +145,8 @@ inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_ato

 template <DecoratorSet decorators, typename BarrierSetT>
 template <typename T>
-inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_not_in_heap(oop new_value, T* addr) {
-oop previous = Raw::oop_atomic_xchg(new_value, addr);
+inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_not_in_heap(T* addr, oop new_value) {
+oop previous = Raw::oop_atomic_xchg(addr, new_value);
 if (previous != NULL) {
 return ShenandoahBarrierSet::barrier_set()->load_reference_barrier_not_null(previous);
 } else {

@@ -156,9 +156,9 @@ inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_ato

 template <DecoratorSet decorators, typename BarrierSetT>
 template <typename T>
-inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_in_heap_impl(oop new_value, T* addr) {
+inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_in_heap_impl(T* addr, oop new_value) {
 ShenandoahBarrierSet::barrier_set()->storeval_barrier(new_value);
-oop result = oop_atomic_xchg_not_in_heap(new_value, addr);
+oop result = oop_atomic_xchg_not_in_heap(addr, new_value);
 const bool keep_alive = (decorators & AS_NO_KEEPALIVE) == 0;
 if (keep_alive && ShenandoahSATBBarrier && !CompressedOops::is_null(result) &&
 ShenandoahHeap::heap()->is_concurrent_mark_in_progress()) {

@@ -169,15 +169,15 @@ inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_ato

 template <DecoratorSet decorators, typename BarrierSetT>
 template <typename T>
-inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_in_heap(oop new_value, T* addr) {
-oop result = oop_atomic_xchg_in_heap_impl(new_value, addr);
+inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_in_heap(T* addr, oop new_value) {
+oop result = oop_atomic_xchg_in_heap_impl(addr, new_value);
 keep_alive_if_weak(addr, result);
 return result;
 }

 template <DecoratorSet decorators, typename BarrierSetT>
-inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset) {
-oop result = oop_atomic_xchg_in_heap_impl(new_value, AccessInternal::oop_field_addr<decorators>(base, offset));
+inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_in_heap_at(oop base, ptrdiff_t offset, oop new_value) {
+oop result = oop_atomic_xchg_in_heap_impl(AccessInternal::oop_field_addr<decorators>(base, offset), new_value);
 keep_alive_if_weak(AccessBarrierSupport::resolve_possibly_unknown_oop_ref_strength<decorators>(base, offset), result);
 return result;
 }
@@ -92,7 +92,7 @@ void ShenandoahControlThread::run_service() {
 bool implicit_gc_requested = _gc_requested.is_set() && !is_explicit_gc(_requested_gc_cause);

 // This control loop iteration have seen this much allocations.
-size_t allocs_seen = Atomic::xchg<size_t>(0, &_allocs_seen);
+size_t allocs_seen = Atomic::xchg(&_allocs_seen, (size_t)0);

 // Choose which GC mode to run in. The block below should select a single mode.
 GCMode mode = none;

@@ -191,7 +191,7 @@ size_t ShenandoahPacer::update_and_get_progress_history() {
 void ShenandoahPacer::restart_with(size_t non_taxable_bytes, double tax_rate) {
 size_t initial = (size_t)(non_taxable_bytes * tax_rate) >> LogHeapWordSize;
 STATIC_ASSERT(sizeof(size_t) <= sizeof(intptr_t));
-Atomic::xchg((intptr_t)initial, &_budget);
+Atomic::xchg(&_budget, (intptr_t)initial);
 Atomic::store(&_tax_rate, tax_rate);
 Atomic::inc(&_epoch);
 }

@@ -74,8 +74,8 @@ public:
 static oop oop_atomic_cmpxchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset, oop compare_value);

 template <typename T>
-static oop oop_atomic_xchg_in_heap(oop new_value, T* addr);
-static oop oop_atomic_xchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset);
+static oop oop_atomic_xchg_in_heap(T* addr, oop new_value);
+static oop oop_atomic_xchg_in_heap_at(oop base, ptrdiff_t offset, oop new_value);

 template <typename T>
 static bool oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,

@@ -94,7 +94,7 @@ public:
 static oop oop_atomic_cmpxchg_not_in_heap(oop new_value, T* addr, oop compare_value);

 template <typename T>
-static oop oop_atomic_xchg_not_in_heap(oop new_value, T* addr);
+static oop oop_atomic_xchg_not_in_heap(T* addr, oop new_value);
 };
 };
@@ -155,20 +155,20 @@ inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxc

 template <DecoratorSet decorators, typename BarrierSetT>
 template <typename T>
-inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_in_heap(oop new_value, T* addr) {
+inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_in_heap(T* addr, oop new_value) {
 verify_decorators_present<ON_STRONG_OOP_REF>();
 verify_decorators_absent<AS_NO_KEEPALIVE>();

-const oop o = Raw::oop_atomic_xchg_in_heap(new_value, addr);
+const oop o = Raw::oop_atomic_xchg_in_heap(addr, new_value);
 return ZBarrier::load_barrier_on_oop(o);
 }

 template <DecoratorSet decorators, typename BarrierSetT>
-inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset) {
+inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_in_heap_at(oop base, ptrdiff_t offset, oop new_value) {
 verify_decorators_present<ON_STRONG_OOP_REF>();
 verify_decorators_absent<AS_NO_KEEPALIVE>();

-const oop o = Raw::oop_atomic_xchg_in_heap_at(new_value, base, offset);
+const oop o = Raw::oop_atomic_xchg_in_heap_at(base, offset, new_value);
 return ZBarrier::load_barrier_on_oop(o);
 }

@@ -231,11 +231,11 @@ inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxc

 template <DecoratorSet decorators, typename BarrierSetT>
 template <typename T>
-inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_not_in_heap(oop new_value, T* addr) {
+inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_not_in_heap(T* addr, oop new_value) {
 verify_decorators_present<ON_STRONG_OOP_REF>();
 verify_decorators_absent<AS_NO_KEEPALIVE>();

-return Raw::oop_atomic_xchg_not_in_heap(new_value, addr);
+return Raw::oop_atomic_xchg_not_in_heap(addr, new_value);
 }

 #endif // SHARE_GC_Z_ZBARRIERSET_INLINE_HPP

@@ -316,7 +316,7 @@ void ZReferenceProcessor::work() {

 // Prepend discovered references to internal pending list
 if (*list != NULL) {
-*p = Atomic::xchg(*list, _pending_list.addr());
+*p = Atomic::xchg(_pending_list.addr(), *list);
 if (*p == NULL) {
 // First to prepend to list, record tail
 _pending_list_tail = p;

@@ -424,9 +424,9 @@ ZStatSamplerData ZStatSampler::collect_and_reset() const {
 for (uint32_t i = 0; i < ncpus; i++) {
 ZStatSamplerData* const cpu_data = get_cpu_local<ZStatSamplerData>(i);
 if (cpu_data->_nsamples > 0) {
-const uint64_t nsamples = Atomic::xchg((uint64_t)0, &cpu_data->_nsamples);
-const uint64_t sum = Atomic::xchg((uint64_t)0, &cpu_data->_sum);
-const uint64_t max = Atomic::xchg((uint64_t)0, &cpu_data->_max);
+const uint64_t nsamples = Atomic::xchg(&cpu_data->_nsamples, (uint64_t)0);
+const uint64_t sum = Atomic::xchg(&cpu_data->_sum, (uint64_t)0);
+const uint64_t max = Atomic::xchg(&cpu_data->_max, (uint64_t)0);
 all._nsamples += nsamples;
 all._sum += sum;
 if (all._max < max) {

@@ -459,7 +459,7 @@ void ZStatCounter::sample_and_reset() const {
 const uint32_t ncpus = ZCPU::count();
 for (uint32_t i = 0; i < ncpus; i++) {
 ZStatCounterData* const cpu_data = get_cpu_local<ZStatCounterData>(i);
-counter += Atomic::xchg((uint64_t)0, &cpu_data->_counter);
+counter += Atomic::xchg(&cpu_data->_counter, (uint64_t)0);
 }

 ZStatSample(_sampler, counter);

@@ -481,7 +481,7 @@ ZStatCounterData ZStatUnsampledCounter::collect_and_reset() const {
 const uint32_t ncpus = ZCPU::count();
 for (uint32_t i = 0; i < ncpus; i++) {
 ZStatCounterData* const cpu_data = get_cpu_local<ZStatCounterData>(i);
-all._counter += Atomic::xchg((uint64_t)0, &cpu_data->_counter);
+all._counter += Atomic::xchg(&cpu_data->_counter, (uint64_t)0);
 }

 return all;
@@ -139,7 +139,7 @@ bool JfrPostBox::is_empty() const {

 int JfrPostBox::collect() {
 // get pending and reset to 0
-const int messages = Atomic::xchg(0, &_messages);
+const int messages = Atomic::xchg(&_messages, 0);
 if (check_waiters(messages)) {
 _has_waiters = true;
 assert(JfrMsg_lock->owned_by_self(), "incrementing _msg_read_serial is protected by JfrMsg_lock");

@@ -511,7 +511,7 @@ bool Universe::has_reference_pending_list() {

 oop Universe::swap_reference_pending_list(oop list) {
 assert_pll_locked(is_locked);
-return Atomic::xchg(list, &_reference_pending_list);
+return Atomic::xchg(&_reference_pending_list, list);
 }

 #undef assert_pll_locked
@@ -171,9 +171,9 @@ public:
 }

 template <typename T>
-static inline T atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
+static inline T atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
 verify_primitive_decorators<atomic_xchg_mo_decorators>();
-return AccessInternal::atomic_xchg_at<decorators>(new_value, base, offset);
+return AccessInternal::atomic_xchg_at<decorators>(base, offset, new_value);
 }

 // Oop heap accesses

@@ -200,11 +200,11 @@ public:
 }

 template <typename T>
-static inline T oop_atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
+static inline T oop_atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
 verify_heap_oop_decorators<atomic_xchg_mo_decorators>();
 typedef typename AccessInternal::OopOrNarrowOop<T>::type OopType;
 OopType new_oop_value = new_value;
-return AccessInternal::atomic_xchg_at<decorators | INTERNAL_VALUE_IS_OOP>(new_oop_value, base, offset);
+return AccessInternal::atomic_xchg_at<decorators | INTERNAL_VALUE_IS_OOP>(base, offset, new_oop_value);
 }

 // Clone an object from src to dst

@@ -233,9 +233,9 @@ public:
 }

 template <typename P, typename T>
-static inline T atomic_xchg(T new_value, P* addr) {
+static inline T atomic_xchg(P* addr, T new_value) {
 verify_primitive_decorators<atomic_xchg_mo_decorators>();
-return AccessInternal::atomic_xchg<decorators>(new_value, addr);
+return AccessInternal::atomic_xchg<decorators>(addr, new_value);
 }

 // Oop accesses

@@ -263,11 +263,11 @@ public:
 }

 template <typename P, typename T>
-static inline T oop_atomic_xchg(T new_value, P* addr) {
+static inline T oop_atomic_xchg(P* addr, T new_value) {
 verify_oop_decorators<atomic_xchg_mo_decorators>();
 typedef typename AccessInternal::OopOrNarrowOop<T>::type OopType;
 OopType new_oop_value = new_value;
-return AccessInternal::atomic_xchg<decorators | INTERNAL_VALUE_IS_OOP>(new_oop_value, addr);
+return AccessInternal::atomic_xchg<decorators | INTERNAL_VALUE_IS_OOP>(addr, new_oop_value);
 }

 static oop resolve(oop obj) {
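For HotSpot-internal callers, the Access API heap accessors above now also take the location first. As an illustration only (the surrounding function and field are hypothetical; the entry point and decorator are the ones declared above, and the fragment assumes HotSpot's access.hpp rather than compiling stand-alone):

```cpp
// Illustrative HotSpot-internal fragment: exchange the oop stored at
// (holder + field_offset), destination-first.
oop swap_field(oop holder, ptrdiff_t field_offset, oop new_value) {
  // before JDK-8234739: HeapAccess<MO_SEQ_CST>::oop_atomic_xchg_at(new_value, holder, field_offset)
  return HeapAccess<MO_SEQ_CST>::oop_atomic_xchg_at(holder, field_offset, new_value);
}
```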
|
@ -90,16 +90,16 @@ namespace AccessInternal {
|
||||
template <class GCBarrierType, DecoratorSet decorators>
|
||||
struct PostRuntimeDispatch<GCBarrierType, BARRIER_ATOMIC_XCHG, decorators>: public AllStatic {
|
||||
template <typename T>
|
||||
static T access_barrier(T new_value, void* addr) {
|
||||
return GCBarrierType::atomic_xchg_in_heap(new_value, reinterpret_cast<T*>(addr));
|
||||
static T access_barrier(void* addr, T new_value) {
|
||||
return GCBarrierType::atomic_xchg_in_heap(reinterpret_cast<T*>(addr), new_value);
|
||||
}
|
||||
|
||||
static oop oop_access_barrier(oop new_value, void* addr) {
|
||||
static oop oop_access_barrier(void* addr, oop new_value) {
|
||||
typedef typename HeapOopType<decorators>::type OopType;
|
||||
if (HasDecorator<decorators, IN_HEAP>::value) {
|
||||
return GCBarrierType::oop_atomic_xchg_in_heap(new_value, reinterpret_cast<OopType*>(addr));
|
||||
return GCBarrierType::oop_atomic_xchg_in_heap(reinterpret_cast<OopType*>(addr), new_value);
|
||||
} else {
|
||||
return GCBarrierType::oop_atomic_xchg_not_in_heap(new_value, reinterpret_cast<OopType*>(addr));
|
||||
return GCBarrierType::oop_atomic_xchg_not_in_heap(reinterpret_cast<OopType*>(addr), new_value);
|
||||
}
|
||||
}
|
||||
};
|
||||
@ -171,12 +171,12 @@ namespace AccessInternal {
|
||||
template <class GCBarrierType, DecoratorSet decorators>
|
||||
struct PostRuntimeDispatch<GCBarrierType, BARRIER_ATOMIC_XCHG_AT, decorators>: public AllStatic {
|
||||
template <typename T>
|
||||
static T access_barrier(T new_value, oop base, ptrdiff_t offset) {
|
||||
return GCBarrierType::atomic_xchg_in_heap_at(new_value, base, offset);
|
||||
static T access_barrier(oop base, ptrdiff_t offset, T new_value) {
|
||||
return GCBarrierType::atomic_xchg_in_heap_at(base, offset, new_value);
|
||||
}
|
||||
|
||||
static oop oop_access_barrier(oop new_value, oop base, ptrdiff_t offset) {
|
||||
return GCBarrierType::oop_atomic_xchg_in_heap_at(new_value, base, offset);
|
||||
static oop oop_access_barrier(oop base, ptrdiff_t offset, oop new_value) {
|
||||
return GCBarrierType::oop_atomic_xchg_in_heap_at(base, offset, new_value);
|
||||
}
|
||||
};
|
||||
|
||||
@ -323,17 +323,17 @@ namespace AccessInternal {
|
||||
}
|
||||
|
||||
template <DecoratorSet decorators, typename T>
|
||||
T RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::atomic_xchg_init(T new_value, void* addr) {
|
||||
T RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::atomic_xchg_init(void* addr, T new_value) {
|
||||
func_t function = BarrierResolver<decorators, func_t, BARRIER_ATOMIC_XCHG>::resolve_barrier();
|
||||
_atomic_xchg_func = function;
|
||||
return function(new_value, addr);
|
||||
return function(addr, new_value);
|
||||
}
|
||||
|
||||
template <DecoratorSet decorators, typename T>
|
||||
T RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::atomic_xchg_at_init(T new_value, oop base, ptrdiff_t offset) {
|
||||
T RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::atomic_xchg_at_init(oop base, ptrdiff_t offset, T new_value) {
|
||||
func_t function = BarrierResolver<decorators, func_t, BARRIER_ATOMIC_XCHG_AT>::resolve_barrier();
|
||||
_atomic_xchg_at_func = function;
|
||||
return function(new_value, base, offset);
|
||||
return function(base, offset, new_value);
|
||||
}
|
||||
|
||||
template <DecoratorSet decorators, typename T>
|
||||
|
@ -103,12 +103,12 @@ namespace AccessInternal {
|
||||
typedef T (*load_at_func_t)(oop base, ptrdiff_t offset);
|
||||
typedef void (*store_at_func_t)(oop base, ptrdiff_t offset, T value);
|
||||
typedef T (*atomic_cmpxchg_at_func_t)(T new_value, oop base, ptrdiff_t offset, T compare_value);
|
||||
typedef T (*atomic_xchg_at_func_t)(T new_value, oop base, ptrdiff_t offset);
|
||||
typedef T (*atomic_xchg_at_func_t)(oop base, ptrdiff_t offset, T new_value);
|
||||
|
||||
typedef T (*load_func_t)(void* addr);
|
||||
typedef void (*store_func_t)(void* addr, T value);
|
||||
typedef T (*atomic_cmpxchg_func_t)(T new_value, void* addr, T compare_value);
|
||||
typedef T (*atomic_xchg_func_t)(T new_value, void* addr);
|
||||
typedef T (*atomic_xchg_func_t)(void* addr, T new_value);
|
||||
|
||||
typedef bool (*arraycopy_func_t)(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
|
||||
arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
|
||||
@ -303,7 +303,7 @@ protected:
|
||||
template <DecoratorSet ds, typename T>
|
||||
static typename EnableIf<
|
||||
HasDecorator<ds, MO_SEQ_CST>::value, T>::type
|
||||
atomic_xchg_internal(T new_value, void* addr);
|
||||
atomic_xchg_internal(void* addr, T new_value);
|
||||
|
||||
// The following *_locked mechanisms serve the purpose of handling atomic operations
|
||||
// that are larger than a machine can handle, and then possibly opt for using
|
||||
@ -324,14 +324,14 @@ protected:
|
||||
template <DecoratorSet ds, typename T>
|
||||
static inline typename EnableIf<
|
||||
!AccessInternal::PossiblyLockedAccess<T>::value, T>::type
|
||||
atomic_xchg_maybe_locked(T new_value, void* addr) {
|
||||
return atomic_xchg_internal<ds>(new_value, addr);
|
||||
atomic_xchg_maybe_locked(void* addr, T new_value) {
|
||||
return atomic_xchg_internal<ds>(addr, new_value);
|
||||
}
|
||||
|
||||
template <DecoratorSet ds, typename T>
|
||||
static typename EnableIf<
|
||||
AccessInternal::PossiblyLockedAccess<T>::value, T>::type
|
||||
atomic_xchg_maybe_locked(T new_value, void* addr);
|
||||
atomic_xchg_maybe_locked(void* addr, T new_value);
|
||||
|
||||
public:
|
||||
template <typename T>
|
||||
@ -350,8 +350,8 @@ public:
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
static inline T atomic_xchg(T new_value, void* addr) {
|
||||
return atomic_xchg_maybe_locked<decorators>(new_value, addr);
|
||||
static inline T atomic_xchg(void* addr, T new_value) {
|
||||
return atomic_xchg_maybe_locked<decorators>(addr, new_value);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
@ -375,9 +375,9 @@ public:
|
||||
static T oop_atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value);
|
||||
|
||||
template <typename T>
|
||||
static T oop_atomic_xchg(T new_value, void* addr);
|
||||
static T oop_atomic_xchg(void* addr, T new_value);
|
||||
template <typename T>
|
||||
static T oop_atomic_xchg_at(T new_value, oop base, ptrdiff_t offset);
|
||||
static T oop_atomic_xchg_at(oop base, ptrdiff_t offset, T new_value);
|
||||
|
||||
template <typename T>
|
||||
static void store_at(oop base, ptrdiff_t offset, T value) {
|
||||
@ -395,8 +395,8 @@ public:
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
static T atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
|
||||
return atomic_xchg(new_value, field_addr(base, offset));
|
||||
static T atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
|
||||
return atomic_xchg(field_addr(base, offset), new_value);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
@ -539,10 +539,10 @@ namespace AccessInternal {
|
||||
typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG>::type func_t;
|
||||
static func_t _atomic_xchg_func;
|
||||
|
||||
static T atomic_xchg_init(T new_value, void* addr);
|
||||
static T atomic_xchg_init(void* addr, T new_value);
|
||||
|
||||
static inline T atomic_xchg(T new_value, void* addr) {
|
||||
return _atomic_xchg_func(new_value, addr);
|
||||
static inline T atomic_xchg(void* addr, T new_value) {
|
||||
return _atomic_xchg_func(addr, new_value);
|
||||
}
|
||||
};
|
||||
|
||||
@ -551,10 +551,10 @@ namespace AccessInternal {
|
||||
typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG_AT>::type func_t;
|
||||
static func_t _atomic_xchg_at_func;
|
||||
|
||||
static T atomic_xchg_at_init(T new_value, oop base, ptrdiff_t offset);
|
||||
static T atomic_xchg_at_init(oop base, ptrdiff_t offset, T new_value);
|
||||
|
||||
static inline T atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
|
||||
return _atomic_xchg_at_func(new_value, base, offset);
|
||||
static inline T atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
|
||||
return _atomic_xchg_at_func(base, offset, new_value);
|
||||
}
|
||||
};
|
||||
|
||||
@ -838,56 +838,56 @@ namespace AccessInternal {
|
||||
template <DecoratorSet decorators, typename T>
|
||||
inline static typename EnableIf<
|
||||
HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, T>::type
|
||||
atomic_xchg(T new_value, void* addr) {
|
||||
atomic_xchg(void* addr, T new_value) {
|
||||
typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
|
||||
if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
|
||||
return Raw::oop_atomic_xchg(new_value, addr);
|
||||
return Raw::oop_atomic_xchg(addr, new_value);
|
||||
} else {
|
||||
return Raw::atomic_xchg(new_value, addr);
|
||||
return Raw::atomic_xchg(addr, new_value);
|
||||
}
|
||||
}
|
||||
|
||||
template <DecoratorSet decorators, typename T>
|
||||
inline static typename EnableIf<
|
||||
HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, T>::type
|
||||
atomic_xchg(T new_value, void* addr) {
|
||||
atomic_xchg(void* addr, T new_value) {
|
||||
if (UseCompressedOops) {
|
||||
const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
|
||||
return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
|
||||
return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
|
||||
} else {
|
||||
const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
|
||||
return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
|
||||
return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
|
||||
}
|
||||
}
|
||||
|
||||
template <DecoratorSet decorators, typename T>
|
||||
inline static typename EnableIf<
|
||||
!HasDecorator<decorators, AS_RAW>::value, T>::type
|
||||
atomic_xchg(T new_value, void* addr) {
|
||||
atomic_xchg(void* addr, T new_value) {
|
||||
if (is_hardwired_primitive<decorators>()) {
|
||||
const DecoratorSet expanded_decorators = decorators | AS_RAW;
|
||||
return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
|
||||
return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
|
||||
} else {
|
||||
return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::atomic_xchg(new_value, addr);
|
||||
return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::atomic_xchg(addr, new_value);
|
||||
}
|
||||
}
|
||||
|
||||
template <DecoratorSet decorators, typename T>
|
||||
inline static typename EnableIf<
|
||||
HasDecorator<decorators, AS_RAW>::value, T>::type
|
||||
atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
|
||||
return atomic_xchg<decorators>(new_value, field_addr(base, offset));
|
||||
atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
|
||||
return atomic_xchg<decorators>(field_addr(base, offset), new_value);
|
||||
}
|
||||
|
||||
template <DecoratorSet decorators, typename T>
|
||||
inline static typename EnableIf<
|
||||
!HasDecorator<decorators, AS_RAW>::value, T>::type
|
||||
atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
|
||||
atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
|
||||
if (is_hardwired_primitive<decorators>()) {
|
||||
const DecoratorSet expanded_decorators = decorators | AS_RAW;
|
||||
return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, base, offset);
|
||||
return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(base, offset, new_value);
|
||||
} else {
|
||||
return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::atomic_xchg_at(new_value, base, offset);
|
||||
return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::atomic_xchg_at(base, offset, new_value);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1045,29 +1045,29 @@ namespace AccessInternal {
|
||||
}
|
||||
|
||||
template <DecoratorSet decorators, typename T>
|
||||
inline T atomic_xchg_reduce_types(T new_value, T* addr) {
|
||||
inline T atomic_xchg_reduce_types(T* addr, T new_value) {
|
||||
const DecoratorSet expanded_decorators = decorators;
|
||||
return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
|
||||
return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
|
||||
}
|
||||
|
||||
template <DecoratorSet decorators>
|
||||
inline oop atomic_xchg_reduce_types(oop new_value, narrowOop* addr) {
|
||||
inline oop atomic_xchg_reduce_types(narrowOop* addr, oop new_value) {
|
||||
const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
|
||||
INTERNAL_RT_USE_COMPRESSED_OOPS;
|
||||
return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
|
||||
return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
|
||||
}
|
||||
|
||||
template <DecoratorSet decorators>
|
||||
inline narrowOop atomic_xchg_reduce_types(narrowOop new_value, narrowOop* addr) {
|
||||
inline narrowOop atomic_xchg_reduce_types(narrowOop* addr, narrowOop new_value) {
|
||||
const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
|
||||
INTERNAL_RT_USE_COMPRESSED_OOPS;
|
||||
return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
|
||||
return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
|
||||
}
|
||||
|
||||
template <DecoratorSet decorators>
|
||||
inline oop atomic_xchg_reduce_types(oop new_value, HeapWord* addr) {
|
||||
inline oop atomic_xchg_reduce_types(HeapWord* addr, oop new_value) {
|
||||
const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
|
||||
return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
|
||||
return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
|
||||
}
|
||||
|
||||
template <DecoratorSet decorators, typename T>
|
||||
@ -1224,19 +1224,19 @@ namespace AccessInternal {
|
||||
}
|
||||
|
||||
template <DecoratorSet decorators, typename P, typename T>
|
||||
inline T atomic_xchg(T new_value, P* addr) {
|
||||
inline T atomic_xchg(P* addr, T new_value) {
|
||||
verify_types<decorators, T>();
|
||||
typedef typename Decay<P>::type DecayedP;
|
||||
typedef typename Decay<T>::type DecayedT;
|
||||
DecayedT new_decayed_value = new_value;
|
||||
// atomic_xchg is only available in SEQ_CST flavour.
|
||||
const DecoratorSet expanded_decorators = DecoratorFixup<decorators | MO_SEQ_CST>::value;
|
||||
return atomic_xchg_reduce_types<expanded_decorators>(new_decayed_value,
|
||||
const_cast<DecayedP*>(addr));
|
||||
return atomic_xchg_reduce_types<expanded_decorators>(const_cast<DecayedP*>(addr),
|
||||
new_decayed_value);
|
||||
}
|
||||
|
||||
template <DecoratorSet decorators, typename T>
|
||||
inline T atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
|
||||
inline T atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
|
||||
verify_types<decorators, T>();
|
||||
typedef typename Decay<T>::type DecayedT;
|
||||
DecayedT new_decayed_value = new_value;
|
||||
@ -1244,7 +1244,7 @@ namespace AccessInternal {
|
||||
const DecoratorSet expanded_decorators = DecoratorFixup<decorators | MO_SEQ_CST |
|
||||
(HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
|
||||
INTERNAL_CONVERT_COMPRESSED_OOP : DECORATORS_NONE)>::value;
|
||||
return PreRuntimeDispatch::atomic_xchg_at<expanded_decorators>(new_decayed_value, base, offset);
|
||||
return PreRuntimeDispatch::atomic_xchg_at<expanded_decorators>(base, offset, new_decayed_value);
|
||||
}
|
||||
|
||||
template <DecoratorSet decorators, typename T>
|
||||
|
@ -103,17 +103,17 @@ inline T RawAccessBarrier<decorators>::oop_atomic_cmpxchg_at(T new_value, oop ba
|
||||
|
||||
template <DecoratorSet decorators>
|
||||
template <typename T>
|
||||
inline T RawAccessBarrier<decorators>::oop_atomic_xchg(T new_value, void* addr) {
|
||||
inline T RawAccessBarrier<decorators>::oop_atomic_xchg(void* addr, T new_value) {
|
||||
typedef typename AccessInternal::EncodedType<decorators, T>::type Encoded;
|
||||
Encoded encoded_new = encode(new_value);
|
||||
Encoded encoded_result = atomic_xchg(encoded_new, reinterpret_cast<Encoded*>(addr));
|
||||
Encoded encoded_result = atomic_xchg(reinterpret_cast<Encoded*>(addr), encoded_new);
|
||||
return decode<T>(encoded_result);
|
||||
}
|
||||
|
||||
template <DecoratorSet decorators>
|
||||
template <typename T>
|
||||
inline T RawAccessBarrier<decorators>::oop_atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
|
||||
return oop_atomic_xchg(new_value, field_addr(base, offset));
|
||||
inline T RawAccessBarrier<decorators>::oop_atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
|
||||
return oop_atomic_xchg(field_addr(base, offset), new_value);
|
||||
}
|
||||
|
||||
template <DecoratorSet decorators>
|
||||
@ -203,9 +203,9 @@ template <DecoratorSet decorators>
|
||||
template <DecoratorSet ds, typename T>
|
||||
inline typename EnableIf<
|
||||
HasDecorator<ds, MO_SEQ_CST>::value, T>::type
|
||||
RawAccessBarrier<decorators>::atomic_xchg_internal(T new_value, void* addr) {
|
||||
return Atomic::xchg(new_value,
|
||||
reinterpret_cast<volatile T*>(addr));
|
||||
RawAccessBarrier<decorators>::atomic_xchg_internal(void* addr, T new_value) {
|
||||
return Atomic::xchg(reinterpret_cast<volatile T*>(addr),
|
||||
new_value);
|
||||
}
|
||||
|
||||
// For platforms that do not have native support for wide atomics,
|
||||
@ -216,9 +216,9 @@ template <DecoratorSet ds>
|
||||
template <DecoratorSet decorators, typename T>
|
||||
inline typename EnableIf<
|
||||
AccessInternal::PossiblyLockedAccess<T>::value, T>::type
|
||||
RawAccessBarrier<ds>::atomic_xchg_maybe_locked(T new_value, void* addr) {
|
||||
RawAccessBarrier<ds>::atomic_xchg_maybe_locked(void* addr, T new_value) {
|
||||
if (!AccessInternal::wide_atomic_needs_locking()) {
|
||||
return atomic_xchg_internal<ds>(new_value, addr);
|
||||
return atomic_xchg_internal<ds>(addr, new_value);
|
||||
} else {
|
||||
AccessInternal::AccessLocker access_lock;
|
||||
volatile T* p = reinterpret_cast<volatile T*>(addr);
|
||||
|
@ -3811,9 +3811,9 @@ static jint JNI_CreateJavaVM_inner(JavaVM **vm, void **penv, void *args) {
|
||||
#if defined(ZERO) && defined(ASSERT)
|
||||
{
|
||||
jint a = 0xcafebabe;
|
||||
jint b = Atomic::xchg((jint) 0xdeadbeef, &a);
|
||||
jint b = Atomic::xchg(&a, (jint) 0xdeadbeef);
|
||||
void *c = &a;
|
||||
void *d = Atomic::xchg(&b, &c);
|
||||
void *d = Atomic::xchg(&c, &b);
|
||||
assert(a == (jint) 0xdeadbeef && b == (jint) 0xcafebabe, "Atomic::xchg() works");
|
||||
assert(c == &b && d == &a, "Atomic::xchg() works");
|
||||
}
|
||||
@ -3829,10 +3829,10 @@ static jint JNI_CreateJavaVM_inner(JavaVM **vm, void **penv, void *args) {
|
||||
// We use Atomic::xchg rather than Atomic::add/dec since on some platforms
|
||||
// the add/dec implementations are dependent on whether we are running
|
||||
// on a multiprocessor Atomic::xchg does not have this problem.
|
||||
if (Atomic::xchg(1, &vm_created) == 1) {
|
||||
if (Atomic::xchg(&vm_created, 1) == 1) {
|
||||
return JNI_EEXIST; // already created, or create attempt in progress
|
||||
}
|
||||
if (Atomic::xchg(0, &safe_to_recreate_vm) == 0) {
|
||||
if (Atomic::xchg(&safe_to_recreate_vm, 0) == 0) {
|
||||
return JNI_ERR; // someone tried and failed and retry not allowed.
|
||||
}
|
||||
|
||||
|
@ -132,8 +132,8 @@ public:
|
||||
// The type T must be either a pointer type convertible to or equal
|
||||
// to D, an integral/enum type equal to D, or a type equal to D that
|
||||
// is primitive convertible using PrimitiveConversions.
|
||||
template<typename T, typename D>
|
||||
inline static D xchg(T exchange_value, volatile D* dest,
|
||||
template<typename D, typename T>
|
||||
inline static D xchg(volatile D* dest, T exchange_value,
|
||||
atomic_memory_order order = memory_order_conservative);
|
||||
|
||||
// Performs atomic compare of *dest and compare_value, and exchanges
|
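With the declaration above, the destination type D drives template deduction and the exchange value only has to be convertible to it. A pointer update such as a lock-free list-head swap now reads as below; this is an illustrative fragment that assumes HotSpot's runtime/atomic.hpp, and the Node type is made up for the example:

```cpp
// Illustrative only: detach an entire lock-free list by swapping in NULL.
struct Node { Node* next; };
static Node* volatile _head = NULL;

Node* detach_all() {
  // before JDK-8234739: (Node*)Atomic::xchg((Node*)NULL, &_head)
  return Atomic::xchg(&_head, (Node*)NULL);  // full fence, returns the old head
}
```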
||||
@ -341,7 +341,7 @@ private:
|
||||
// checking and limited conversions around calls to the
|
||||
// platform-specific implementation layer provided by
|
||||
// PlatformXchg.
|
||||
template<typename T, typename D, typename Enable = void>
|
||||
template<typename D, typename T, typename Enable = void>
|
||||
struct XchgImpl;
|
||||
|
||||
// Platform-specific implementation of xchg. Support for sizes
|
||||
@ -353,11 +353,11 @@ private:
|
||||
// - platform_xchg is an object of type PlatformXchg<sizeof(T)>.
|
||||
//
|
||||
// Then
|
||||
// platform_xchg(exchange_value, dest)
|
||||
// platform_xchg(dest, exchange_value)
|
||||
// must be a valid expression, returning a result convertible to T.
|
||||
//
|
||||
// A default definition is provided, which declares a function template
|
||||
// T operator()(T, T volatile*, T, atomic_memory_order) const
|
||||
// T operator()(T volatile*, T, atomic_memory_order) const
|
||||
//
|
||||
// For each required size, a platform must either provide an
|
||||
// appropriate definition of that function, or must entirely
|
||||
@ -373,8 +373,8 @@ private:
|
||||
// helper function.
|
||||
template<typename Type, typename Fn, typename T>
|
||||
static T xchg_using_helper(Fn fn,
|
||||
T exchange_value,
|
||||
T volatile* dest);
|
||||
T volatile* dest,
|
||||
T exchange_value);
|
||||
};
|
||||
|
||||
template<typename From, typename To>
|
||||
@ -593,8 +593,8 @@ struct Atomic::CmpxchgByteUsingInt {
|
||||
template<size_t byte_size>
|
||||
struct Atomic::PlatformXchg {
|
||||
template<typename T>
|
||||
T operator()(T exchange_value,
|
||||
T volatile* dest,
|
||||
T operator()(T volatile* dest,
|
||||
T exchange_value,
|
||||
atomic_memory_order order) const;
|
||||
};
|
||||
|
||||
@ -891,9 +891,9 @@ struct Atomic::XchgImpl<
|
||||
T, T,
|
||||
typename EnableIf<IsIntegral<T>::value || IsRegisteredEnum<T>::value>::type>
|
||||
{
|
||||
T operator()(T exchange_value, T volatile* dest, atomic_memory_order order) const {
|
||||
T operator()(T volatile* dest, T exchange_value, atomic_memory_order order) const {
|
||||
// Forward to the platform handler for the size of T.
|
||||
return PlatformXchg<sizeof(T)>()(exchange_value, dest, order);
|
||||
return PlatformXchg<sizeof(T)>()(dest, exchange_value, order);
|
||||
}
|
||||
};
|
||||
|
||||
@ -902,15 +902,15 @@ struct Atomic::XchgImpl<
|
||||
// The exchange_value must be implicitly convertible to the
|
||||
// destination's type; it must be type-correct to store the
|
||||
// exchange_value in the destination.
|
||||
template<typename T, typename D>
|
||||
template<typename D, typename T>
|
||||
struct Atomic::XchgImpl<
|
||||
T*, D*,
|
||||
D*, T*,
|
||||
typename EnableIf<Atomic::IsPointerConvertible<T*, D*>::value>::type>
|
||||
{
|
||||
D* operator()(T* exchange_value, D* volatile* dest, atomic_memory_order order) const {
|
||||
D* operator()(D* volatile* dest, T* exchange_value, atomic_memory_order order) const {
|
||||
// Allow derived to base conversion, and adding cv-qualifiers.
|
||||
D* new_value = exchange_value;
|
||||
return PlatformXchg<sizeof(D*)>()(new_value, dest, order);
|
||||
return PlatformXchg<sizeof(D*)>()(dest, new_value, order);
|
||||
}
|
||||
};
|
||||
|
||||
@ -926,30 +926,31 @@ struct Atomic::XchgImpl<
|
||||
T, T,
|
||||
typename EnableIf<PrimitiveConversions::Translate<T>::value>::type>
|
||||
{
|
||||
T operator()(T exchange_value, T volatile* dest, atomic_memory_order order) const {
|
||||
T operator()(T volatile* dest, T exchange_value, atomic_memory_order order) const {
|
||||
typedef PrimitiveConversions::Translate<T> Translator;
|
||||
typedef typename Translator::Decayed Decayed;
|
||||
STATIC_ASSERT(sizeof(T) == sizeof(Decayed));
|
||||
return Translator::recover(
|
||||
xchg(Translator::decay(exchange_value),
|
||||
reinterpret_cast<Decayed volatile*>(dest),
|
||||
xchg(reinterpret_cast<Decayed volatile*>(dest),
|
||||
Translator::decay(exchange_value),
|
||||
order));
|
||||
}
|
||||
};
|
||||
|
||||
template<typename Type, typename Fn, typename T>
|
||||
inline T Atomic::xchg_using_helper(Fn fn,
|
||||
T exchange_value,
|
||||
T volatile* dest) {
|
||||
T volatile* dest,
|
||||
T exchange_value) {
|
||||
STATIC_ASSERT(sizeof(Type) == sizeof(T));
|
||||
// Notice the swapped order of arguments. Change when/if stubs are rewritten.
|
||||
return PrimitiveConversions::cast<T>(
|
||||
fn(PrimitiveConversions::cast<Type>(exchange_value),
|
||||
reinterpret_cast<Type volatile*>(dest)));
|
||||
}
|
||||
|
||||
template<typename T, typename D>
|
||||
inline D Atomic::xchg(T exchange_value, volatile D* dest, atomic_memory_order order) {
|
||||
return XchgImpl<T, D>()(exchange_value, dest, order);
|
||||
template<typename D, typename T>
|
||||
inline D Atomic::xchg(volatile D* dest, T exchange_value, atomic_memory_order order) {
|
||||
return XchgImpl<D, T>()(dest, exchange_value, order);
|
||||
}
|
||||
|
||||
#endif // SHARE_RUNTIME_ATOMIC_HPP
|
||||
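The helper keeps the mismatch in one place: the public Atomic::xchg API is now destination-first, but the per-platform assembler stubs (os::atomic_xchg_func and friends) still take the value first, so xchg_using_helper reverses the arguments when forwarding, which is what the new "Notice the swapped order of arguments" comment records. A simplified sketch of that adapter pattern, with the stub type and names invented for illustration:

```cpp
#include <cstdint>

// Legacy stub signature, as the generated stubs still expect it:
// exchange value first, destination second.
typedef int32_t (*xchg_stub_t)(int32_t exchange_value, volatile int32_t* dest);

// Destination-first wrapper in the style of Atomic::xchg_using_helper:
// swap the arguments when calling through the legacy stub.
inline int32_t xchg_via_stub(xchg_stub_t fn, volatile int32_t* dest, int32_t exchange_value) {
  return fn(exchange_value, dest);
}
```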
|
@ -998,7 +998,7 @@ static void InduceScavenge(Thread* self, const char * Whence) {
|
||||
// of active monitors passes the specified threshold.
|
||||
// TODO: assert thread state is reasonable
|
||||
|
||||
if (ForceMonitorScavenge == 0 && Atomic::xchg (1, &ForceMonitorScavenge) == 0) {
|
||||
if (ForceMonitorScavenge == 0 && Atomic::xchg(&ForceMonitorScavenge, 1) == 0) {
|
||||
// Induce a 'null' safepoint to scavenge monitors
|
||||
// Must VM_Operation instance be heap allocated as the op will be enqueue and posted
|
||||
// to the VMthread and have a lifespan longer than that of this activation record.
|
||||
|
@ -170,7 +170,7 @@ inline void ThreadsSMRSupport::update_java_thread_list_max(uint new_value) {
|
||||
}
|
||||
|
||||
inline ThreadsList* ThreadsSMRSupport::xchg_java_thread_list(ThreadsList* new_list) {
|
||||
return (ThreadsList*)Atomic::xchg(new_list, &_java_thread_list);
|
||||
return (ThreadsList*)Atomic::xchg(&_java_thread_list, new_list);
|
||||
}
|
||||
|
||||
// Hash table of pointers found by a scan. Used for collecting hazard
|
||||
|
@ -103,7 +103,7 @@ public:
|
||||
// list of elements. Acts as a full memory barrier.
|
||||
// postcondition: empty()
|
||||
T* pop_all() {
|
||||
return Atomic::xchg((T*)NULL, &_top);
|
||||
return Atomic::xchg(&_top, (T*)NULL);
|
||||
}
|
||||
|
||||
// Atomically adds value to the top of this stack. Acts as a full
|
||||
|