Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2009 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
|
Lei Zhang
2012/08/21 18:44:47
Apparently we don't do this anymore. See relevant
bbudge
2012/08/21 21:49:02
Done.
| |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 // This file is an internal atomic implementation, use base/atomicops.h instead. | 5 // This file is an internal atomic implementation, use base/atomicops.h instead. |
|
Lei Zhang
2012/08/21 18:44:47
Can you add a comment here to say what platforms t
bbudge
2012/08/21 21:49:02
Done.
| |
| 6 // | 6 // |
| 7 // LinuxKernelCmpxchg and Barrier_AtomicIncrement are from Google Gears. | 7 // LinuxKernelCmpxchg and Barrier_AtomicIncrement are from Google Gears. |
| 8 | 8 |
| 9 #ifndef BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_ | 9 #ifndef BASE_ATOMICOPS_INTERNALS_GCC_H_ |
| 10 #define BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_ | 10 #define BASE_ATOMICOPS_INTERNALS_GCC_H_ |
| 11 | 11 |
| 12 namespace base { | 12 namespace base { |
| 13 namespace subtle { | 13 namespace subtle { |
| 14 | 14 |
| 15 #if defined(OS_NACL) | |
| 16 // For Native Client, define atomic ops in terms of GCC intrinsics. | |
| 17 #define compare_and_swap(ptr, old_value, new_value) \ | |
| 18 __sync_val_compare_and_swap(const_cast<Atomic32*>(ptr), \ | |
|
nfullagar
2012/08/21 18:23:20
The “val” version of __sync_val_compare_and_swap r
bbudge
2012/08/21 19:33:57
It does appear that way, but the old function sign
| |
| 19 old_value, new_value) | |
| 20 #define memory_barrier __sync_synchronize | |
| 21 #else // !defined(OS_NACL) | |
|
DaleCurtis
2012/08/21 17:52:01
Per http://gcc.gnu.org/git/?p=gcc.git;a=blob;f=lib
bbudge
2012/08/21 19:33:57
I'm going to run a trial on the normal Chrome ARM
| |
| 15 // 0xffff0fc0 is the hard coded address of a function provided by | 22 // 0xffff0fc0 is the hard coded address of a function provided by |
| 16 // the kernel which implements an atomic compare-exchange. On older | 23 // the kernel which implements an atomic compare-exchange. On older |
| 17 // ARM architecture revisions (pre-v6) this may be implemented using | 24 // ARM architecture revisions (pre-v6) this may be implemented using |
| 18 // a syscall. This address is stable, and in active use (hard coded) | 25 // a syscall. This address is stable, and in active use (hard coded) |
| 19 // by at least glibc-2.7 and the Android C library. | 26 // by at least glibc-2.7 and the Android C library. |
| 20 typedef Atomic32 (*LinuxKernelCmpxchgFunc)(Atomic32 old_value, | 27 typedef Atomic32 (*LinuxKernelCmpxchgFunc)(Atomic32 old_value, |
| 21 Atomic32 new_value, | 28 Atomic32 new_value, |
| 22 volatile Atomic32* ptr); | 29 volatile Atomic32* ptr); |
| 23 LinuxKernelCmpxchgFunc pLinuxKernelCmpxchg __attribute__((weak)) = | 30 LinuxKernelCmpxchgFunc pLinuxKernelCmpxchg __attribute__((weak)) = |
| 24 (LinuxKernelCmpxchgFunc) 0xffff0fc0; | 31 (LinuxKernelCmpxchgFunc) 0xffff0fc0; |
| 25 | 32 |
| 26 typedef void (*LinuxKernelMemoryBarrierFunc)(void); | 33 typedef void (*LinuxKernelMemoryBarrierFunc)(void); |
| 27 LinuxKernelMemoryBarrierFunc pLinuxKernelMemoryBarrier __attribute__((weak)) = | 34 LinuxKernelMemoryBarrierFunc pLinuxKernelMemoryBarrier __attribute__((weak)) = |
| 28 (LinuxKernelMemoryBarrierFunc) 0xffff0fa0; | 35 (LinuxKernelMemoryBarrierFunc) 0xffff0fa0; |
| 29 | 36 |
| 37 #define compare_and_swap(ptr, old_value, new_value) \ | |
| 38 pLinuxKernelCmpxchg(old_value, new_value, const_cast<Atomic32*>(ptr)) | |
| 39 #define memory_barrier pLinuxKernelMemoryBarrier | |
| 40 #endif // !defined(OS_NACL) | |
| 30 | 41 |
| 31 inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, | 42 inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, |
| 32 Atomic32 old_value, | 43 Atomic32 old_value, |
| 33 Atomic32 new_value) { | 44 Atomic32 new_value) { |
| 34 Atomic32 prev_value = *ptr; | 45 Atomic32 prev_value = *ptr; |
| 35 do { | 46 do { |
| 36 if (!pLinuxKernelCmpxchg(old_value, new_value, | 47 if (!compare_and_swap(ptr, old_value, new_value)) { |
| 37 const_cast<Atomic32*>(ptr))) { | |
| 38 return old_value; | 48 return old_value; |
| 39 } | 49 } |
| 40 prev_value = *ptr; | 50 prev_value = *ptr; |
| 41 } while (prev_value == old_value); | 51 } while (prev_value == old_value); |
| 42 return prev_value; | 52 return prev_value; |
| 43 } | 53 } |
| 44 | 54 |
| 45 inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, | 55 inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, |
| 46 Atomic32 new_value) { | 56 Atomic32 new_value) { |
| 47 Atomic32 old_value; | 57 Atomic32 old_value; |
| 48 do { | 58 do { |
| 49 old_value = *ptr; | 59 old_value = *ptr; |
| 50 } while (pLinuxKernelCmpxchg(old_value, new_value, | 60 } while (compare_and_swap(ptr, old_value, new_value)); |
| 51 const_cast<Atomic32*>(ptr))); | |
| 52 return old_value; | 61 return old_value; |
| 53 } | 62 } |
| 54 | 63 |
| 55 inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, | 64 inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, |
| 56 Atomic32 increment) { | 65 Atomic32 increment) { |
| 57 return Barrier_AtomicIncrement(ptr, increment); | 66 return Barrier_AtomicIncrement(ptr, increment); |
| 58 } | 67 } |
| 59 | 68 |
| 60 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, | 69 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, |
| 61 Atomic32 increment) { | 70 Atomic32 increment) { |
| 62 for (;;) { | 71 for (;;) { |
| 63 // Atomic exchange the old value with an incremented one. | 72 // Atomic exchange the old value with an incremented one. |
| 64 Atomic32 old_value = *ptr; | 73 Atomic32 old_value = *ptr; |
| 65 Atomic32 new_value = old_value + increment; | 74 Atomic32 new_value = old_value + increment; |
| 66 if (pLinuxKernelCmpxchg(old_value, new_value, | 75 if (compare_and_swap(ptr, old_value, new_value) == 0) { |
| 67 const_cast<Atomic32*>(ptr)) == 0) { | |
| 68 // The exchange took place as expected. | 76 // The exchange took place as expected. |
| 69 return new_value; | 77 return new_value; |
| 70 } | 78 } |
| 71 // Otherwise, *ptr changed mid-loop and we need to retry. | 79 // Otherwise, *ptr changed mid-loop and we need to retry. |
| 72 } | 80 } |
| 73 | |
|
Lei Zhang
2012/08/21 18:44:47
nit: extra new line?
bbudge
2012/08/21 21:49:02
Done.
| |
| 74 } | 81 } |
| 75 | 82 |
| 76 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, | 83 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, |
| 77 Atomic32 old_value, | 84 Atomic32 old_value, |
| 78 Atomic32 new_value) { | 85 Atomic32 new_value) { |
| 79 return NoBarrier_CompareAndSwap(ptr, old_value, new_value); | 86 return NoBarrier_CompareAndSwap(ptr, old_value, new_value); |
| 80 } | 87 } |
| 81 | 88 |
| 82 inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, | 89 inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, |
| 83 Atomic32 old_value, | 90 Atomic32 old_value, |
| 84 Atomic32 new_value) { | 91 Atomic32 new_value) { |
| 85 return NoBarrier_CompareAndSwap(ptr, old_value, new_value); | 92 return NoBarrier_CompareAndSwap(ptr, old_value, new_value); |
| 86 } | 93 } |
| 87 | 94 |
| 88 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { | 95 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { |
| 89 *ptr = value; | 96 *ptr = value; |
| 90 } | 97 } |
| 91 | 98 |
| 92 inline void MemoryBarrier() { | 99 inline void MemoryBarrier() { |
| 93 pLinuxKernelMemoryBarrier(); | 100 memory_barrier(); |
| 94 } | 101 } |
| 95 | 102 |
| 96 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { | 103 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { |
| 97 *ptr = value; | 104 *ptr = value; |
| 98 MemoryBarrier(); | 105 MemoryBarrier(); |
| 99 } | 106 } |
| 100 | 107 |
| 101 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { | 108 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { |
| 102 MemoryBarrier(); | 109 MemoryBarrier(); |
| 103 *ptr = value; | 110 *ptr = value; |
| 104 } | 111 } |
| 105 | 112 |
| 106 inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { | 113 inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { |
| 107 return *ptr; | 114 return *ptr; |
| 108 } | 115 } |
| 109 | 116 |
| 110 inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { | 117 inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { |
| 111 Atomic32 value = *ptr; | 118 Atomic32 value = *ptr; |
| 112 MemoryBarrier(); | 119 MemoryBarrier(); |
| 113 return value; | 120 return value; |
| 114 } | 121 } |
| 115 | 122 |
| 116 inline Atomic32 Release_Load(volatile const Atomic32* ptr) { | 123 inline Atomic32 Release_Load(volatile const Atomic32* ptr) { |
| 117 MemoryBarrier(); | 124 MemoryBarrier(); |
| 118 return *ptr; | 125 return *ptr; |
| 119 } | 126 } |
| 120 | 127 |
| 121 } // namespace base::subtle | 128 } // namespace base::subtle |
| 122 } // namespace base | 129 } // namespace base |
| 123 | 130 |
| 124 #endif // BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_ | 131 #endif // BASE_ATOMICOPS_INTERNALS_GCC_H_ |
| 132 | |
| OLD | NEW |