Index: src/base/atomicops_internals_ppc_gcc.h
diff --git a/src/base/atomicops_internals_portable.h b/src/base/atomicops_internals_ppc_gcc.h
similarity index 55%
copy from src/base/atomicops_internals_portable.h
copy to src/base/atomicops_internals_ppc_gcc.h
index a3a6e74c72e53bdb6c126be44b5e7622837e25a1..daa27b4693d665d01c7e47583c5a86d91487f989 100644
--- a/src/base/atomicops_internals_portable.h
+++ b/src/base/atomicops_internals_ppc_gcc.h
@@ -3,136 +3,166 @@
|
 // found in the LICENSE file.
 
 // This file is an internal atomic implementation, use atomicops.h instead.
+//
 
|
-#ifndef V8_BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
-#define V8_BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
+#ifndef V8_BASE_ATOMICOPS_INTERNALS_PPC_GCC_H_
+#define V8_BASE_ATOMICOPS_INTERNALS_PPC_GCC_H_
|
 
 namespace v8 {
 namespace base {
 
-inline void MemoryBarrier() { __sync_synchronize(); }
-
 inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                          Atomic32 old_value,
                                          Atomic32 new_value) {
-  return __sync_val_compare_and_swap(ptr, old_value, new_value);
+  return (__sync_val_compare_and_swap(ptr, old_value, new_value));
 }
 
 inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                          Atomic32 new_value) {
-  return __sync_lock_test_and_set(ptr, new_value);
+  Atomic32 old_value;
+  do {
+    old_value = *ptr;
+  } while (__sync_bool_compare_and_swap(ptr, old_value, new_value) == false);
+  return old_value;
 }
 
 inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                           Atomic32 increment) {
-  return __sync_add_and_fetch(ptr, increment);
+  return Barrier_AtomicIncrement(ptr, increment);
 }
 
 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                         Atomic32 increment) {
-  return __sync_add_and_fetch(ptr, increment);
|
+  for (;;) {
+    Atomic32 old_value = *ptr;
+    Atomic32 new_value = old_value + increment;
+    if (__sync_bool_compare_and_swap(ptr, old_value, new_value)) {
+      // The exchange took place as expected.
+      return new_value;
+    }
+    // Otherwise, *ptr changed mid-loop and we need to retry.
+  }
|
 }
 
 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                        Atomic32 old_value, Atomic32 new_value) {
-  return __sync_val_compare_and_swap(ptr, old_value, new_value);
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
 }
 
 inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                        Atomic32 old_value, Atomic32 new_value) {
-  return __sync_val_compare_and_swap(ptr, old_value, new_value);
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
 }
 
 inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
-  __sync_lock_test_and_set(ptr, value);
+  *ptr = value;
 }
 
 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
-  __sync_lock_test_and_set(ptr, value);
+  *ptr = value;
 }
 
|
+inline void MemoryBarrier() {
+  __asm__ __volatile__("sync" : : : "memory");
+}
+
|
 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
-  __sync_lock_test_and_set(ptr, value);
+  *ptr = value;
+  MemoryBarrier();
 }
 
 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
-  __sync_lock_test_and_set(ptr, value);
+  MemoryBarrier();
+  *ptr = value;
 }
 
-inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
-  return __sync_add_and_fetch(ptr, 0);
-}
+inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) { return *ptr; }
 
-inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
-  return __sync_add_and_fetch(ptr, 0);
-}
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { return *ptr; }
 
 inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
-  return __sync_add_and_fetch(ptr, 0);
+  Atomic32 value = *ptr;
+  MemoryBarrier();
+  return value;
 }
 
 inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
-  return __sync_add_and_fetch(ptr, 0);
+  MemoryBarrier();
+  return *ptr;
 }
 
-// 64-bit versions of the operations.
-// See the 32-bit versions for comments.
-
+#ifdef V8_TARGET_ARCH_PPC64
 inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                          Atomic64 old_value,
                                          Atomic64 new_value) {
-  return __sync_val_compare_and_swap(ptr, old_value, new_value);
+  return (__sync_val_compare_and_swap(ptr, old_value, new_value));
 }
 
 inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                          Atomic64 new_value) {
-  return __sync_lock_test_and_set(ptr, new_value);
+  Atomic64 old_value;
+  do {
+    old_value = *ptr;
+  } while (__sync_bool_compare_and_swap(ptr, old_value, new_value) == false);
+  return old_value;
 }
 
 inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                           Atomic64 increment) {
-  return __sync_add_and_fetch(ptr, increment);
+  return Barrier_AtomicIncrement(ptr, increment);
 }
 
 inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                         Atomic64 increment) {
-  return __sync_add_and_fetch(ptr, increment);
|
+  for (;;) {
+    Atomic64 old_value = *ptr;
+    Atomic64 new_value = old_value + increment;
+    if (__sync_bool_compare_and_swap(ptr, old_value, new_value)) {
+      // The exchange took place as expected.
+      return new_value;
+    }
+    // Otherwise, *ptr changed mid-loop and we need to retry.
+  }
|
 }
 
 inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                        Atomic64 old_value, Atomic64 new_value) {
-  return __sync_val_compare_and_swap(ptr, old_value, new_value);
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
 }
 
 inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                        Atomic64 old_value, Atomic64 new_value) {
-  return __sync_val_compare_and_swap(ptr, old_value, new_value);
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
 }
 
 inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
-  __sync_lock_test_and_set(ptr, value);
+  *ptr = value;
 }
 
 inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
-  __sync_lock_test_and_set(ptr, value);
+  *ptr = value;
+  MemoryBarrier();
 }
 
 inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
-  __sync_lock_test_and_set(ptr, value);
+  MemoryBarrier();
+  *ptr = value;
 }
 
-inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
-  return __sync_add_and_fetch(ptr, 0);
-}
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { return *ptr; }
 
 inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
-  return __sync_add_and_fetch(ptr, 0);
+  Atomic64 value = *ptr;
+  MemoryBarrier();
+  return value;
 }
 
 inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
-  return __sync_add_and_fetch(ptr, 0);
+  MemoryBarrier();
+  return *ptr;
 }
|
+
+#endif  // V8_TARGET_ARCH_PPC64
|
 }
 }  // namespace v8::base
 
-#endif  // V8_BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
+#endif  // V8_BASE_ATOMICOPS_INTERNALS_PPC_GCC_H_
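
A note on the barrier choice: every ordered operation in this port funnels
through MemoryBarrier(), i.e. a full PPC `sync` instruction. GCC's __atomic
builtins express the same acquire/release contract while letting the compiler
emit the lighter lwsync-based sequences where POWER permits. The sketch below
is illustrative only: it assumes GCC 4.7 or newer, mirrors the Atomic32
typedef from src/base/atomicops.h, and is not part of the patch. Whether the
cheaper barriers are measurably faster here would need benchmarking.

#include <stdint.h>

typedef int32_t Atomic32;  // mirrors the typedef in src/base/atomicops.h

inline Atomic32 Acquire_Load_Sketch(volatile const Atomic32* ptr) {
  // Acquire: later loads/stores may not be reordered before this load.
  // The compiler is free to use lwsync/isync instead of a full sync.
  return __atomic_load_n(ptr, __ATOMIC_ACQUIRE);
}

inline void Release_Store_Sketch(volatile Atomic32* ptr, Atomic32 value) {
  // Release: earlier loads/stores may not be reordered after this store.
  __atomic_store_n(ptr, value, __ATOMIC_RELEASE);
}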
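For context, a minimal usage sketch of the Release_Store/Acquire_Load pair
that the port implements. The Producer/Consumer functions and the `payload`
and `ready` names are hypothetical; the v8::base entry points are the ones
declared in src/base/atomicops.h.

#include "src/base/atomicops.h"

static int payload = 0;               // plain data, published via the flag
static v8::base::Atomic32 ready = 0;  // guard flag

void Producer() {
  payload = 42;
  // Release_Store on PPC is MemoryBarrier(); *ptr = value, so the payload
  // write above cannot be reordered past the flag store.
  v8::base::Release_Store(&ready, 1);
}

void Consumer() {
  // Acquire_Load is *ptr; MemoryBarrier(), so the payload read below cannot
  // be reordered ahead of the flag read.
  while (v8::base::Acquire_Load(&ready) == 0) {
    // Spin until the producer publishes.
  }
  int value = payload;  // guaranteed to observe 42 once ready == 1
  static_cast<void>(value);
}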