Index: base/atomicops_internals_arm_gcc.h
===================================================================
--- base/atomicops_internals_arm_gcc.h (revision 0)
+++ base/atomicops_internals_arm_gcc.h (revision 0)
@@ -0,0 +1,144 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is an internal atomic implementation; use base/atomicops.h instead.
+//
+// LinuxKernelCmpxchg and Barrier_AtomicIncrement are from Google Gears.
+
+#ifndef BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_
+#define BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_
+
+namespace base {
+namespace subtle {
+
+// 0xffff0fc0 is the hard coded address of a function provided by
+// the kernel which implements an atomic compare-exchange. On older
+// ARM architecture revisions (pre-v6) this may be implemented using
+// a syscall. This address is stable, and in active use (hard coded)
+// by at least glibc-2.7 and the Android C library.
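+// The helper returns zero when the exchange succeeds and nonzero when it
+// fails; the retry loops below depend on that convention.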
+typedef Atomic32 (*LinuxKernelCmpxchgFunc)(Atomic32 old_value,
+                                           Atomic32 new_value,
+                                           volatile Atomic32* ptr);
+LinuxKernelCmpxchgFunc pLinuxKernelCmpxchg __attribute__((weak)) =
+    (LinuxKernelCmpxchgFunc) 0xffff0fc0;
+
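+// 0xffff0fa0 is the hard coded address of the kernel's memory-barrier
+// helper, published at a fixed location in the same way as the
+// compare-exchange helper above.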
+typedef void (*LinuxKernelMemoryBarrierFunc)(void);
+LinuxKernelMemoryBarrierFunc pLinuxKernelMemoryBarrier __attribute__((weak)) =
+    (LinuxKernelMemoryBarrierFunc) 0xffff0fa0;
+
+
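+// Atomically compare *ptr against old_value and, if they match, store
+// new_value. Returns old_value on success. The kernel helper does not
+// report the previous value, so on failure the loop re-reads *ptr until it
+// observes something other than old_value and returns that instead.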
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+                                         Atomic32 old_value,
+                                         Atomic32 new_value) {
+  Atomic32 prev_value = *ptr;
+  do {
+    if (!pLinuxKernelCmpxchg(old_value, new_value,
+                             const_cast<Atomic32*>(ptr))) {
+      return old_value;
+    }
+    prev_value = *ptr;
+  } while (prev_value == old_value);
+  return prev_value;
+}
+
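+// Atomically store new_value into *ptr, retrying until the compare-exchange
+// succeeds against a freshly loaded old value; returns the value replaced.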
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+                                         Atomic32 new_value) {
+  Atomic32 old_value;
+  do {
+    old_value = *ptr;
+  } while (pLinuxKernelCmpxchg(old_value, new_value,
+                               const_cast<Atomic32*>(ptr)));
+  return old_value;
+}
+
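+// The kernel compare-exchange helper implies a full memory barrier, so the
+// barrier-free increment simply delegates to the barrier version.
+// Barrier_AtomicIncrement is declared in base/atomicops.h, which includes
+// this header after declaring every operation, so the forward use resolves.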
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+                                          Atomic32 increment) {
+  return Barrier_AtomicIncrement(ptr, increment);
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+                                        Atomic32 increment) {
+  for (;;) {
+    // Atomic exchange the old value with an incremented one.
+    Atomic32 old_value = *ptr;
+    Atomic32 new_value = old_value + increment;
+    if (pLinuxKernelCmpxchg(old_value, new_value,
+                            const_cast<Atomic32*>(ptr)) == 0) {
+      // The exchange took place as expected.
+      return new_value;
+    }
+    // Otherwise, *ptr changed mid-loop and we need to retry.
+  }
+}
+
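+// The kernel helper already acts as a full barrier, so the acquire and
+// release compare-and-swap variants can reuse the barrier-free version.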
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+}
+
+inline void MemoryBarrier() {
+  pLinuxKernelMemoryBarrier();
+}
+
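+// Release_Store orders all earlier accesses before the store; Acquire_Store
+// orders the store before everything that follows it.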
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+  MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+  MemoryBarrier();
+  *ptr = value;
+}
+
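+// Acquire_Load keeps all later accesses after the load; Release_Load keeps
+// the load after everything that precedes it.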
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+  return *ptr;
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+  Atomic32 value = *ptr;
+  MemoryBarrier();
+  return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+
+}  // namespace subtle
+}  // namespace base
+
+#endif  // BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_