OLD | NEW |
---|---|
1 // Copyright 2010 the V8 project authors. All rights reserved. | 1 // Copyright 2010 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 14 matching lines...) | |
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
27 | 27 |
28 // This file is an internal atomic implementation, use atomicops.h instead. | 28 // This file is an internal atomic implementation, use atomicops.h instead. |
29 // | 29 // |
30 // LinuxKernelCmpxchg and Barrier_AtomicIncrement are from Google Gears. | 30 // LinuxKernelCmpxchg and Barrier_AtomicIncrement are from Google Gears. |
31 | 31 |
32 #ifndef V8_ATOMICOPS_INTERNALS_ARM_GCC_H_ | 32 #ifndef V8_ATOMICOPS_INTERNALS_ARM_GCC_H_ |
33 #define V8_ATOMICOPS_INTERNALS_ARM_GCC_H_ | 33 #define V8_ATOMICOPS_INTERNALS_ARM_GCC_H_ |
34 | 34 |
35 #if V8_OS_QNX | |
Benedikt Meurer, 2013/11/15 11:49:59: The atomicops_*.h files were copied from Chromium's…
c.truta, 2013/11/18 13:36:32: Yes. We can keep this change for now in our private…
Benedikt Meurer, 2013/11/19 07:01:27: That's great! I think we'll just sync all atomicops…
36 #include <arm/cpuinline.h> | |
37 #include <arm/smpxchg.h> | |
38 #endif | |
39 | |
35 namespace v8 { | 40 namespace v8 { |
36 namespace internal { | 41 namespace internal { |
37 | 42 |
43 #if V8_OS_QNX | |
44 inline void MemoryBarrier() { | |
45 __cpu_membarrier(); | |
46 } | |
47 | |
48 inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, | |
49 Atomic32 old_value, | |
50 Atomic32 new_value) { | |
51 return _smp_cmpxchg(reinterpret_cast<volatile unsigned*>(ptr), | |
52 old_value, new_value); | |
53 } | |
54 | |
55 inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, | |
56 Atomic32 new_value) { | |
57 return _smp_xchg(reinterpret_cast<volatile unsigned*>(ptr), new_value); | |
58 } | |
59 | |
60 inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, | |
61 Atomic32 increment) { | |
62 return Barrier_AtomicIncrement(ptr, increment); | |
63 } | |
64 | |
65 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, | |
66 Atomic32 increment) { | |
67 for (;;) { | |
68 // Atomic exchange the old value with an incremented one. | |
69 Atomic32 old_value = *ptr; | |
70 Atomic32 new_value = old_value + increment; | |
71 if (_smp_cmpxchg(reinterpret_cast<volatile unsigned*>(ptr), | |
72 old_value, new_value) == | |
73 static_cast<unsigned>(old_value)) { | |
74 // The exchange took place as expected. | |
75 return new_value; | |
76 } | |
77 // Otherwise, *ptr changed mid-loop and we need to retry. | |
78 } | |
79 } | |
80 #else | |
38 // 0xffff0fc0 is the hard coded address of a function provided by | 81 // 0xffff0fc0 is the hard coded address of a function provided by |
39 // the kernel which implements an atomic compare-exchange. On older | 82 // the kernel which implements an atomic compare-exchange. On older |
40 // ARM architecture revisions (pre-v6) this may be implemented using | 83 // ARM architecture revisions (pre-v6) this may be implemented using |
41 // a syscall. This address is stable, and in active use (hard coded) | 84 // a syscall. This address is stable, and in active use (hard coded) |
42 // by at least glibc-2.7 and the Android C library. | 85 // by at least glibc-2.7 and the Android C library. |
43 typedef Atomic32 (*LinuxKernelCmpxchgFunc)(Atomic32 old_value, | 86 typedef Atomic32 (*LinuxKernelCmpxchgFunc)(Atomic32 old_value, |
44 Atomic32 new_value, | 87 Atomic32 new_value, |
45 volatile Atomic32* ptr); | 88 volatile Atomic32* ptr); |
46 LinuxKernelCmpxchgFunc pLinuxKernelCmpxchg __attribute__((weak)) = | 89 LinuxKernelCmpxchgFunc pLinuxKernelCmpxchg __attribute__((weak)) = |
47 (LinuxKernelCmpxchgFunc) 0xffff0fc0; | 90 (LinuxKernelCmpxchgFunc) 0xffff0fc0; |
48 | 91 |
49 typedef void (*LinuxKernelMemoryBarrierFunc)(void); | 92 typedef void (*LinuxKernelMemoryBarrierFunc)(void); |
50 LinuxKernelMemoryBarrierFunc pLinuxKernelMemoryBarrier __attribute__((weak)) = | 93 LinuxKernelMemoryBarrierFunc pLinuxKernelMemoryBarrier __attribute__((weak)) = |
51 (LinuxKernelMemoryBarrierFunc) 0xffff0fa0; | 94 (LinuxKernelMemoryBarrierFunc) 0xffff0fa0; |
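The helper at 0xffff0fc0 is the kernel's cmpxchg user helper (known in the Linux tree as __kernel_cmpxchg); it returns 0 if the swap happened and nonzero otherwise, and implies a memory barrier. A minimal sketch of calling it through the weak pointer declared above, assuming an ARM Linux process where the kernel maps the helper page (illustrative only, not part of the patch):

```cpp
// Try to flip a flag from 0 to 1 via the kernel helper.
// Contract: store new_value iff *ptr == old_value; return 0 on success.
Atomic32 flag = 0;
if (pLinuxKernelCmpxchg(0, 1, &flag) == 0) {
  // The swap happened: flag is now 1.
}
```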
52 | 95 |
96 inline void MemoryBarrier() { | |
97 pLinuxKernelMemoryBarrier(); | |
98 } | |
53 | 99 |
54 inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, | 100 inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, |
55 Atomic32 old_value, | 101 Atomic32 old_value, |
56 Atomic32 new_value) { | 102 Atomic32 new_value) { |
57 Atomic32 prev_value = *ptr; | 103 Atomic32 prev_value = *ptr; |
58 do { | 104 do { |
59 if (!pLinuxKernelCmpxchg(old_value, new_value, | 105 if (!pLinuxKernelCmpxchg(old_value, new_value, |
60 const_cast<Atomic32*>(ptr))) { | 106 const_cast<Atomic32*>(ptr))) { |
61 return old_value; | 107 return old_value; |
62 } | 108 } |
(...skipping 24 matching lines...) | |
87 Atomic32 old_value = *ptr; | 133 Atomic32 old_value = *ptr; |
88 Atomic32 new_value = old_value + increment; | 134 Atomic32 new_value = old_value + increment; |
89 if (pLinuxKernelCmpxchg(old_value, new_value, | 135 if (pLinuxKernelCmpxchg(old_value, new_value, |
90 const_cast<Atomic32*>(ptr)) == 0) { | 136 const_cast<Atomic32*>(ptr)) == 0) { |
91 // The exchange took place as expected. | 137 // The exchange took place as expected. |
92 return new_value; | 138 return new_value; |
93 } | 139 } |
94 // Otherwise, *ptr changed mid-loop and we need to retry. | 140 // Otherwise, *ptr changed mid-loop and we need to retry. |
95 } | 141 } |
96 } | 142 } |
143 #endif // V8_OS_QNX | |
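Note that the success tests differ between the two branches because the primitives follow different conventions: QNX's _smp_cmpxchg() returns the previous value of *ptr, so the caller compares it against the expected old value, while the Linux kernel helper returns 0 on success. A minimal sketch of the same two conventions using GCC's portable __sync builtins, shown only to illustrate the distinction (not part of this file):

```cpp
#include <cstdio>

int main() {
  unsigned value = 10;

  // Value-returning convention (like _smp_cmpxchg): the swap succeeded
  // iff the returned previous value equals the expected one.
  unsigned prev = __sync_val_compare_and_swap(&value, 10u, 11u);
  std::printf("%s, value = %u\n", prev == 10u ? "swapped" : "unchanged", value);

  // Flag-returning convention: success is reported directly. (The kernel
  // helper inverts the sense, returning 0 on success.)
  bool ok = __sync_bool_compare_and_swap(&value, 11u, 12u);
  std::printf("%s, value = %u\n", ok ? "swapped" : "unchanged", value);
  return 0;
}
```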
97 | 144 |
98 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, | 145 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, |
99 Atomic32 old_value, | 146 Atomic32 old_value, |
100 Atomic32 new_value) { | 147 Atomic32 new_value) { |
101 return NoBarrier_CompareAndSwap(ptr, old_value, new_value); | 148 return NoBarrier_CompareAndSwap(ptr, old_value, new_value); |
102 } | 149 } |
103 | 150 |
104 inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, | 151 inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, |
105 Atomic32 old_value, | 152 Atomic32 old_value, |
106 Atomic32 new_value) { | 153 Atomic32 new_value) { |
107 return NoBarrier_CompareAndSwap(ptr, old_value, new_value); | 154 return NoBarrier_CompareAndSwap(ptr, old_value, new_value); |
108 } | 155 } |
109 | 156 |
110 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { | 157 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { |
111 *ptr = value; | 158 *ptr = value; |
112 } | 159 } |
113 | 160 |
114 inline void MemoryBarrier() { | |
115 pLinuxKernelMemoryBarrier(); | |
116 } | |
117 | |
118 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { | 161 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { |
119 *ptr = value; | 162 *ptr = value; |
120 MemoryBarrier(); | 163 MemoryBarrier(); |
121 } | 164 } |
122 | 165 |
123 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { | 166 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { |
124 MemoryBarrier(); | 167 MemoryBarrier(); |
125 *ptr = value; | 168 *ptr = value; |
126 } | 169 } |
127 | 170 |
128 inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { | 171 inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { |
129 return *ptr; | 172 return *ptr; |
130 } | 173 } |
131 | 174 |
132 inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { | 175 inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { |
133 Atomic32 value = *ptr; | 176 Atomic32 value = *ptr; |
134 MemoryBarrier(); | 177 MemoryBarrier(); |
135 return value; | 178 return value; |
136 } | 179 } |
137 | 180 |
138 inline Atomic32 Release_Load(volatile const Atomic32* ptr) { | 181 inline Atomic32 Release_Load(volatile const Atomic32* ptr) { |
139 MemoryBarrier(); | 182 MemoryBarrier(); |
140 return *ptr; | 183 return *ptr; |
141 } | 184 } |
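These acquire/release variants are intended to be paired across threads: data published before a Release_Store can be read safely after a matching Acquire_Load. A minimal sketch of that pairing, assuming the header is reached through atomicops.h as its opening comment instructs; payload, ready, Producer() and Consumer() are illustrative names, not part of the file:

```cpp
#include "atomicops.h"  // assumed include path

using v8::internal::Atomic32;

Atomic32 payload = 0;
Atomic32 ready = 0;

void Producer() {
  v8::internal::NoBarrier_Store(&payload, 42);
  // Release_Store issues MemoryBarrier() before the store, so the
  // payload write is visible before the flag flips.
  v8::internal::Release_Store(&ready, 1);
}

void Consumer() {
  // Acquire_Load issues MemoryBarrier() after the load, so the payload
  // read cannot be reordered before the flag check.
  if (v8::internal::Acquire_Load(&ready) == 1) {
    Atomic32 v = v8::internal::NoBarrier_Load(&payload);  // reads 42
    (void)v;
  }
}
```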
142 | 185 |
143 } } // namespace v8::internal | 186 } } // namespace v8::internal |
144 | 187 |
145 #endif // V8_ATOMICOPS_INTERNALS_ARM_GCC_H_ | 188 #endif // V8_ATOMICOPS_INTERNALS_ARM_GCC_H_ |