Index: src/atomicops_internals_sh4_gcc.h
diff --git a/src/atomicops_internals_arm_gcc.h b/src/atomicops_internals_sh4_gcc.h
similarity index 63%
copy from src/atomicops_internals_arm_gcc.h
copy to src/atomicops_internals_sh4_gcc.h
index 6c30256d93dea601b5d3592250c303dbae0dadeb..60b68b4080fe31110d3b62c3f0c1312cd1b618a1 100644
--- a/src/atomicops_internals_arm_gcc.h
+++ b/src/atomicops_internals_sh4_gcc.h
@@ -27,52 +27,22 @@
 
 // This file is an internal atomic implementation, use atomicops.h instead.
 //
-// LinuxKernelCmpxchg and Barrier_AtomicIncrement are from Google Gears.
 
-#ifndef V8_ATOMICOPS_INTERNALS_ARM_GCC_H_
-#define V8_ATOMICOPS_INTERNALS_ARM_GCC_H_
+#ifndef V8_ATOMICOPS_INTERNALS_SH4_GCC_H_
+#define V8_ATOMICOPS_INTERNALS_SH4_GCC_H_
 
 namespace v8 {
 namespace internal {
 
-// 0xffff0fc0 is the hard coded address of a function provided by
-// the kernel which implements an atomic compare-exchange. On older
-// ARM architecture revisions (pre-v6) this may be implemented using
-// a syscall. This address is stable, and in active use (hard coded)
-// by at least glibc-2.7 and the Android C library.
-typedef Atomic32 (*LinuxKernelCmpxchgFunc)(Atomic32 old_value,
-                                           Atomic32 new_value,
-                                           volatile Atomic32* ptr);
-LinuxKernelCmpxchgFunc pLinuxKernelCmpxchg __attribute__((weak)) =
-    (LinuxKernelCmpxchgFunc) 0xffff0fc0;
-
-typedef void (*LinuxKernelMemoryBarrierFunc)(void);
-LinuxKernelMemoryBarrierFunc pLinuxKernelMemoryBarrier __attribute__((weak)) =
-    (LinuxKernelMemoryBarrierFunc) 0xffff0fa0;
-
-
 inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                          Atomic32 old_value,
                                          Atomic32 new_value) {
-  Atomic32 prev_value = *ptr;
-  do {
-    if (!pLinuxKernelCmpxchg(old_value, new_value,
-                             const_cast<Atomic32*>(ptr))) {
-      return old_value;
-    }
-    prev_value = *ptr;
-  } while (prev_value == old_value);
-  return prev_value;
+  return __sync_val_compare_and_swap(ptr, old_value, new_value);
 }
 
 inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                          Atomic32 new_value) {
-  Atomic32 old_value;
-  do {
-    old_value = *ptr;
-  } while (pLinuxKernelCmpxchg(old_value, new_value,
-                               const_cast<Atomic32*>(ptr)));
-  return old_value;
+  return __sync_lock_test_and_set(ptr, new_value);
 }
 
 inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
@@ -82,64 +52,65 @@ inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
 
 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                         Atomic32 increment) {
-  for (;;) {
-    // Atomic exchange the old value with an incremented one.
-    Atomic32 old_value = *ptr;
-    Atomic32 new_value = old_value + increment;
-    if (pLinuxKernelCmpxchg(old_value, new_value,
-                            const_cast<Atomic32*>(ptr)) == 0) {
-      // The exchange took place as expected.
-      return new_value;
-    }
-    // Otherwise, *ptr changed mid-loop and we need to retry.
-  }
+  return __sync_add_and_fetch(const_cast<Atomic32*>(ptr), increment);
 }
 
 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                        Atomic32 old_value,
                                        Atomic32 new_value) {
-  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  Atomic32 value = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  MemoryBarrier();
+  return value;
 }
 
 inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                        Atomic32 old_value,
                                        Atomic32 new_value) {
+  MemoryBarrier();
   return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
 }
 
+
 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
   *ptr = value;
 }
 
+
 inline void MemoryBarrier() {
-  pLinuxKernelMemoryBarrier();
+  __sync_synchronize();
 }
 
+
 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
   *ptr = value;
   MemoryBarrier();
 }
 
+
 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
   MemoryBarrier();
   *ptr = value;
 }
 
+
 inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
   return *ptr;
 }
 
+
 inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
   Atomic32 value = *ptr;
   MemoryBarrier();
   return value;
 }
 
+
 inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
   MemoryBarrier();
   return *ptr;
 }
 
+
 } }  // namespace v8::internal
 
-#endif  // V8_ATOMICOPS_INTERNALS_ARM_GCC_H_
+#endif  // V8_ATOMICOPS_INTERNALS_SH4_GCC_H_
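
A few notes on the __sync builtins this port relies on, with illustrative
sketches; none of the code below is part of the patch itself.

GCC documents __sync_lock_test_and_set as an atomic exchange with acquire
semantics only, not a full barrier. That is sufficient for
NoBarrier_AtomicExchange, which promises no ordering at all, but it is
worth keeping in mind if a full-barrier exchange is ever needed. One
conservative alternative is to build the exchange from the CAS builtin,
which GCC does document as a full barrier. A minimal sketch, using the
hypothetical helper name ExchangeFullBarrier:

// Illustrative only: a full-barrier exchange built on the CAS builtin.
inline Atomic32 ExchangeFullBarrier(volatile Atomic32* ptr,
                                    Atomic32 new_value) {
  Atomic32 old_value;
  do {
    // Snapshot the current value, then try to swap it out; retry if
    // another thread changed *ptr between the snapshot and the CAS.
    old_value = *ptr;
  } while (__sync_val_compare_and_swap(ptr, old_value, new_value)
           != old_value);
  return old_value;
}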
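
Because __sync_val_compare_and_swap already acts as a full barrier, the
explicit MemoryBarrier() calls added in Acquire_CompareAndSwap and
Release_CompareAndSwap appear conservative rather than strictly required;
they do keep the SH4 file structurally parallel to the other atomicops
ports. The acquire/release pair is what a simple lock would lean on. A
minimal sketch over the primitives defined above, with the hypothetical
names Lock and Unlock:

// Illustrative only: a spinlock over the primitives defined above.
inline void Lock(volatile Atomic32* lock) {
  // Acquire semantics: the barrier after the CAS keeps the critical
  // section's accesses from moving above the point the lock was taken.
  while (Acquire_CompareAndSwap(lock, 0, 1) != 0) {
    // Spin until the lock word is observed free and claimed.
  }
}

inline void Unlock(volatile Atomic32* lock) {
  // Release semantics: the barrier before the store makes the critical
  // section's effects visible before the lock is observed free.
  Release_Store(lock, 0);
}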
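
Likewise, Release_Store and Acquire_Load are meant to be used as a pair
for publishing data from one thread to another. A sketch of that pattern,
with the hypothetical variables g_payload and g_ready:

// Illustrative only: the message-passing pattern the paired
// Release_Store/Acquire_Load calls support.
Atomic32 g_payload = 0;
Atomic32 g_ready = 0;

void Producer() {
  g_payload = 42;              // Plain store of the data.
  Release_Store(&g_ready, 1);  // Barrier first, then publish the flag.
}

void Consumer() {
  while (Acquire_Load(&g_ready) == 0) {
    // Load the flag, then barrier: once the flag reads 1, the barrier
    // keeps the payload read below from being satisfied early.
  }
  // g_payload now reads 42.
}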