OLD | NEW |
---|---|
1 // Copyright (c) 2009 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2009 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 // This file is an internal atomic implementation, use base/atomicops.h instead. | 5 // This file is an internal atomic implementation, include base/atomicops.h |
6 // | 6 // instead. This file is for platforms that use GCC intrinsics rather than |
7 // LinuxKernelCmpxchg and Barrier_AtomicIncrement are from Google Gears. | 7 // platform-specific assembly code for atomic operations. |
8 | 8 |
9 #ifndef BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_ | 9 #ifndef BASE_ATOMICOPS_INTERNALS_GCC_H_ |
10 #define BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_ | 10 #define BASE_ATOMICOPS_INTERNALS_GCC_H_ |
11 | 11 |
12 namespace base { | 12 namespace base { |
13 namespace subtle { | 13 namespace subtle { |
14 | 14 |
15 // 0xffff0fc0 is the hard coded address of a function provided by | |
16 // the kernel which implements an atomic compare-exchange. On older | |
17 // ARM architecture revisions (pre-v6) this may be implemented using | |
18 // a syscall. This address is stable, and in active use (hard coded) | |
19 // by at least glibc-2.7 and the Android C library. | |
20 typedef Atomic32 (*LinuxKernelCmpxchgFunc)(Atomic32 old_value, | |
21 Atomic32 new_value, | |
22 volatile Atomic32* ptr); | |
23 LinuxKernelCmpxchgFunc pLinuxKernelCmpxchg __attribute__((weak)) = | |
24 (LinuxKernelCmpxchgFunc) 0xffff0fc0; | |
25 | |
26 typedef void (*LinuxKernelMemoryBarrierFunc)(void); | |
27 LinuxKernelMemoryBarrierFunc pLinuxKernelMemoryBarrier __attribute__((weak)) = | |
28 (LinuxKernelMemoryBarrierFunc) 0xffff0fa0; | |
29 | |
30 | |
31 inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, | 15 inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, |
32 Atomic32 old_value, | 16 Atomic32 old_value, |
33 Atomic32 new_value) { | 17 Atomic32 new_value) { |
34 Atomic32 prev_value = *ptr; | 18 Atomic32 prev_value = *ptr; |
35 do { | 19 do { |
36 if (!pLinuxKernelCmpxchg(old_value, new_value, | 20 if (__sync_bool_compare_and_swap(ptr, old_value, new_value)) { |
Roland McGrath
2012/08/21 22:14:15
Why not just use __sync_val_compare_and_swap here?
bbudge
2012/08/21 23:23:40
Done.
| |
37 const_cast<Atomic32*>(ptr))) { | |
38 return old_value; | 21 return old_value; |
39 } | 22 } |
40 prev_value = *ptr; | 23 prev_value = *ptr; |
41 } while (prev_value == old_value); | 24 } while (prev_value == old_value); |
42 return prev_value; | 25 return prev_value; |
43 } | 26 } |
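Picking up the reviewer's question above: `__sync_val_compare_and_swap` returns the value `*ptr` held before the operation, so the compare-and-swap needs no retry loop at all. A minimal sketch of the simplified function (inferred from the "Done" reply; the later patch set itself is not shown in this diff):

```cpp
// Sketch only: __sync_val_compare_and_swap reports the prior value of *ptr,
// which equals old_value exactly when the swap succeeded, so the whole
// function collapses to a single builtin call.
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  return __sync_val_compare_and_swap(ptr, old_value, new_value);
}
```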
44 | 27 |
45 inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, | 28 inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, |
46 Atomic32 new_value) { | 29 Atomic32 new_value) { |
47 Atomic32 old_value; | 30 Atomic32 old_value; |
48 do { | 31 do { |
49 old_value = *ptr; | 32 old_value = *ptr; |
50 } while (pLinuxKernelCmpxchg(old_value, new_value, | 33 } while (!__sync_bool_compare_and_swap(ptr, old_value, new_value)); |
51 const_cast<Atomic32*>(ptr))); | |
52 return old_value; | 34 return old_value; |
53 } | 35 } |
54 | 36 |
55 inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, | 37 inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, |
56 Atomic32 increment) { | 38 Atomic32 increment) { |
57 return Barrier_AtomicIncrement(ptr, increment); | 39 return Barrier_AtomicIncrement(ptr, increment); |
58 } | 40 } |
59 | 41 |
60 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, | 42 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, |
61 Atomic32 increment) { | 43 Atomic32 increment) { |
62 for (;;) { | 44 for (;;) { |
63 // Atomically exchange the old value with an incremented one. | 45 // Atomically exchange the old value with an incremented one. |
64 Atomic32 old_value = *ptr; | 46 Atomic32 old_value = *ptr; |
65 Atomic32 new_value = old_value + increment; | 47 Atomic32 new_value = old_value + increment; |
66 if (pLinuxKernelCmpxchg(old_value, new_value, | 48 if (!__sync_bool_compare_and_swap(ptr, old_value, new_value) == 0) { |
Roland McGrath
2012/08/21 22:14:15
(!... == 0) should be just (...).
bbudge
2012/08/21 23:23:40
Done.
| |
67 const_cast<Atomic32*>(ptr)) == 0) { | |
68 // The exchange took place as expected. | 49 // The exchange took place as expected. |
69 return new_value; | 50 return new_value; |
70 } | 51 } |
71 // Otherwise, *ptr changed mid-loop and we need to retry. | 52 // Otherwise, *ptr changed mid-loop and we need to retry. |
72 } | 53 } |
73 | |
74 } | 54 } |
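On the condition flagged above: `!` binds tighter than `==`, so `!__sync_bool_compare_and_swap(...) == 0` parses as `(!result) == 0`, a double negation that happens to behave correctly but hides the intent. A sketch of the test the reviewer asks for:

```cpp
// Sketch only: the builtin returns true when the swap happened, so it can
// be used directly as the condition, with no comparison against 0.
if (__sync_bool_compare_and_swap(ptr, old_value, new_value)) {
  // The exchange took place as expected.
  return new_value;
}
```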
75 | 55 |
76 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, | 56 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, |
77 Atomic32 old_value, | 57 Atomic32 old_value, |
78 Atomic32 new_value) { | 58 Atomic32 new_value) { |
59 // Since NoBarrier_CompareAndSwap uses __sync_bool_compare_and_swap, which | |
60 // is a full memory barrier, none is needed here or below in Release. | |
79 return NoBarrier_CompareAndSwap(ptr, old_value, new_value); | 61 return NoBarrier_CompareAndSwap(ptr, old_value, new_value); |
80 } | 62 } |
81 | 63 |
82 inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, | 64 inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, |
83 Atomic32 old_value, | 65 Atomic32 old_value, |
84 Atomic32 new_value) { | 66 Atomic32 new_value) { |
85 return NoBarrier_CompareAndSwap(ptr, old_value, new_value); | 67 return NoBarrier_CompareAndSwap(ptr, old_value, new_value); |
86 } | 68 } |
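The comment in `Acquire_CompareAndSwap` leans on the fact that the `__sync` builtins act as full barriers. A hedged illustration in C++11 terms (which postdates this CL and is no part of it): the same full ordering corresponds to `memory_order_seq_cst`, which is why the acquire and release variants can forward to the no-barrier CAS unchanged.

```cpp
#include <atomic>

// Sketch only: a C++11 analogue of __sync_val_compare_and_swap on a
// 32-bit value. compare_exchange_strong with memory_order_seq_cst carries
// the same full-barrier semantics the __sync builtins guarantee.
inline int SeqCstCompareAndSwap(std::atomic<int>* ptr, int old_value,
                                int new_value) {
  // On failure, compare_exchange_strong stores the observed value into
  // old_value, so old_value always ends up holding *ptr's prior value.
  ptr->compare_exchange_strong(old_value, new_value,
                               std::memory_order_seq_cst);
  return old_value;
}
```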
87 | 69 |
88 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { | 70 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { |
89 *ptr = value; | 71 *ptr = value; |
90 } | 72 } |
91 | 73 |
92 inline void MemoryBarrier() { | 74 inline void MemoryBarrier() { |
93 pLinuxKernelMemoryBarrier(); | 75 __sync_synchronize(); |
94 } | 76 } |
95 | 77 |
96 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { | 78 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { |
97 *ptr = value; | 79 *ptr = value; |
98 MemoryBarrier(); | 80 MemoryBarrier(); |
99 } | 81 } |
100 | 82 |
101 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { | 83 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { |
102 MemoryBarrier(); | 84 MemoryBarrier(); |
103 *ptr = value; | 85 *ptr = value; |
104 } | 86 } |
105 | 87 |
106 inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { | 88 inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { |
107 return *ptr; | 89 return *ptr; |
108 } | 90 } |
109 | 91 |
110 inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { | 92 inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { |
111 Atomic32 value = *ptr; | 93 Atomic32 value = *ptr; |
112 MemoryBarrier(); | 94 MemoryBarrier(); |
113 return value; | 95 return value; |
114 } | 96 } |
115 | 97 |
116 inline Atomic32 Release_Load(volatile const Atomic32* ptr) { | 98 inline Atomic32 Release_Load(volatile const Atomic32* ptr) { |
117 MemoryBarrier(); | 99 MemoryBarrier(); |
118 return *ptr; | 100 return *ptr; |
119 } | 101 } |
120 | 102 |
121 } // namespace base::subtle | 103 } // namespace base::subtle |
122 } // namespace base | 104 } // namespace base |
123 | 105 |
124 #endif // BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_ | 106 #endif // BASE_ATOMICOPS_INTERNALS_GCC_H_ |
107 | |
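For readers new to this header, a minimal usage sketch (not part of the CL; everything except the atomicops API is invented for illustration) showing why `Release_Store` places its barrier before the store and `Acquire_Load` places its barrier after the load: the pair publishes a payload safely across threads.

```cpp
#include "base/atomicops.h"

base::subtle::Atomic32 g_ready = 0;
int g_payload = 0;  // Hypothetical data guarded by the flag.

void Producer() {
  g_payload = 42;                            // Write the payload first...
  base::subtle::Release_Store(&g_ready, 1);  // ...then publish the flag; the
                                             // barrier preserves this order.
}

bool Consumer(int* out) {
  if (base::subtle::Acquire_Load(&g_ready)) {  // Barrier after the load keeps
    *out = g_payload;                          // this read from moving up.
    return true;
  }
  return false;
}
```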