Index: src/atomicops_internals_x86_macosx.h
===================================================================
--- src/atomicops_internals_x86_macosx.h	(revision 10404)
+++ src/atomicops_internals_x86_macosx.h	(working copy)
@@ -35,7 +35,7 @@
 namespace v8 {
 namespace internal {
 
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr,
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                          Atomic32 old_value,
                                          Atomic32 new_value) {
   Atomic32 prev_value;
@@ -49,7 +49,7 @@
   return prev_value;
 }
 
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr,
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                          Atomic32 new_value) {
   Atomic32 old_value;
   do {
@@ -59,12 +59,12 @@
   return old_value;
 }
 
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32 *ptr,
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                           Atomic32 increment) {
   return OSAtomicAdd32(increment, const_cast<Atomic32*>(ptr));
 }
 
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32 *ptr,
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                         Atomic32 increment) {
   return OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr));
 }
@@ -73,7 +73,7 @@
   OSMemoryBarrier();
 }
 
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr,
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                        Atomic32 old_value,
                                        Atomic32 new_value) {
   Atomic32 prev_value;
@@ -87,7 +87,7 @@
   return prev_value;
 }
 
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr,
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                        Atomic32 old_value,
                                        Atomic32 new_value) {
   return Acquire_CompareAndSwap(ptr, old_value, new_value);
@@ -97,12 +97,12 @@
   *ptr = value;
 }
 
-inline void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) {
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
   *ptr = value;
   MemoryBarrier();
 }
 
-inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) {
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
   MemoryBarrier();
   *ptr = value;
 }
@@ -111,13 +111,13 @@
   return *ptr;
 }
 
-inline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) {
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
   Atomic32 value = *ptr;
   MemoryBarrier();
   return value;
 }
 
-inline Atomic32 Release_Load(volatile const Atomic32 *ptr) {
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
   MemoryBarrier();
   return *ptr;
 }
@@ -126,7 +126,7 @@
 
 // 64-bit implementation on 64-bit platform
 
-inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr,
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                          Atomic64 old_value,
                                          Atomic64 new_value) {
   Atomic64 prev_value;
@@ -140,7 +140,7 @@
   return prev_value;
 }
 
-inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr,
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                          Atomic64 new_value) {
   Atomic64 old_value;
   do {
@@ -150,17 +150,17 @@
   return old_value;
 }
 
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64 *ptr,
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                           Atomic64 increment) {
   return OSAtomicAdd64(increment, const_cast<Atomic64*>(ptr));
 }
 
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64 *ptr,
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                         Atomic64 increment) {
   return OSAtomicAdd64Barrier(increment, const_cast<Atomic64*>(ptr));
 }
 
-inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr,
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                        Atomic64 old_value,
                                        Atomic64 new_value) {
   Atomic64 prev_value;
@@ -174,7 +174,7 @@
   return prev_value;
 }
 
-inline Atomic64 Release_CompareAndSwap(volatile Atomic64 *ptr,
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                        Atomic64 old_value,
                                        Atomic64 new_value) {
   // The lib kern interface does not distinguish between
@@ -186,12 +186,12 @@
   *ptr = value;
 }
 
-inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
   *ptr = value;
   MemoryBarrier();
 }
 
-inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
   MemoryBarrier();
   *ptr = value;
 }
@@ -200,13 +200,13 @@
   return *ptr;
 }
 
-inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
   Atomic64 value = *ptr;
   MemoryBarrier();
   return value;
 }
 
-inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
   MemoryBarrier();
   return *ptr;
 }
@@ -264,7 +264,7 @@
       old_value, new_value);
 }
 
-inline void NoBarrier_Store(volatile AtomicWord *ptr, AtomicWord value) {
+inline void NoBarrier_Store(volatile AtomicWord* ptr, AtomicWord value) {
   NoBarrier_Store(
       reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
 }
@@ -279,7 +279,7 @@
       reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
 }
 
-inline AtomicWord NoBarrier_Load(volatile const AtomicWord *ptr) {
+inline AtomicWord NoBarrier_Load(volatile const AtomicWord* ptr) {
   return NoBarrier_Load(
       reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
 }
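Note (outside the patch): the change is purely mechanical. Each declaration moves the asterisk next to the pointed-to type (Atomic32* ptr rather than Atomic32 *ptr), matching the style already used inside the function bodies (e.g. const_cast<Atomic32*>(ptr)); no behavior changes. As a usage illustration of the Acquire/Release pair touched above, here is a minimal sketch; the include path, g_ready, g_payload, Writer, and Reader are assumptions made for the example, not part of this patch:

#include "src/atomicops.h"  // assumed aggregator header that selects this file

namespace {

v8::internal::Atomic32 g_ready = 0;  // publication flag
int g_payload = 0;                   // ordinary data guarded by the flag

void Writer() {
  g_payload = 42;
  // Release_Store (patched above): MemoryBarrier() executes before the
  // store, so the payload write is visible before g_ready becomes 1.
  v8::internal::Release_Store(&g_ready, 1);
}

int Reader() {
  // Acquire_Load (patched above): the load executes before MemoryBarrier(),
  // so once the flag reads 1, the writes preceding Release_Store are visible.
  while (v8::internal::Acquire_Load(&g_ready) == 0) {
    // spin until the writer publishes
  }
  return g_payload;  // observes 42
}

}  // namespace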