OLD | NEW |
1 // Copyright 2006-2008 The RE2 Authors. All Rights Reserved. | 1 // Copyright 2006-2008 The RE2 Authors. All Rights Reserved. |
2 // Use of this source code is governed by a BSD-style | 2 // Use of this source code is governed by a BSD-style |
3 // license that can be found in the LICENSE file. | 3 // license that can be found in the LICENSE file. |
4 | 4 |
5 #ifndef RE2_UTIL_ATOMICOPS_H__ | 5 #ifndef RE2_UTIL_ATOMICOPS_H__ |
6 #define RE2_UTIL_ATOMICOPS_H__ | 6 #define RE2_UTIL_ATOMICOPS_H__ |
7 | 7 |
8 // The memory ordering constraints resemble the ones in C11. | |
9 // RELAXED - no memory ordering, just an atomic operation. | |
10 // CONSUME - data-dependent ordering. | |
11 // ACQUIRE - prevents memory accesses from hoisting above the operation. | |
12 // RELEASE - prevents memory accesses from sinking below the operation. | |
13 | |
14 #ifndef __has_builtin | |
15 #define __has_builtin(x) 0 | |
16 #endif | |
17 | |
18 #if !defined(OS_NACL) && (__has_builtin(__atomic_load_n) || (__GNUC__*10000 + __GNUC_MINOR__*100 + __GNUC_PATCHLEVEL__ >= 40801)) | |
19 | |
20 #define ATOMIC_LOAD_RELAXED(x, p) do { (x) = __atomic_load_n((p), __ATOMIC_RELAXED); } while (0) | |
21 #define ATOMIC_LOAD_CONSUME(x, p) do { (x) = __atomic_load_n((p), __ATOMIC_CONSUME); } while (0) | |
22 #define ATOMIC_LOAD_ACQUIRE(x, p) do { (x) = __atomic_load_n((p), __ATOMIC_ACQUIRE); } while (0) | |
23 #define ATOMIC_STORE_RELAXED(p, v) __atomic_store_n((p), (v), __ATOMIC_RELAXED) | |
24 #define ATOMIC_STORE_RELEASE(p, v) __atomic_store_n((p), (v), __ATOMIC_RELEASE) | |
25 | |
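// A minimal usage sketch of the macros above (hypothetical names, not from
// the RE2 sources): release-store a fully initialized object, then read it
// back with a consume load so the payload is ordered after the pointer.
//
//   struct Thing { int payload; };
//   static struct Thing* shared_thing;        // written once, read by many
//
//   void Publish(struct Thing* t) {
//     t->payload = 42;                         // fully initialize first...
//     ATOMIC_STORE_RELEASE(&shared_thing, t);  // ...then publish the pointer
//   }
//
//   struct Thing* Consume(void) {
//     struct Thing* t;
//     ATOMIC_LOAD_CONSUME(t, &shared_thing);   // data-dependent ordering
//     return t;  // if t != NULL, t->payload is visible as 42
//   }
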
26 #else // old compiler | |
27 | |
28 #define ATOMIC_LOAD_RELAXED(x, p) do { (x) = *(p); } while (0) | |
29 #define ATOMIC_LOAD_CONSUME(x, p) do { (x) = *(p); MaybeReadMemoryBarrier(); } while (0) | |
30 #define ATOMIC_LOAD_ACQUIRE(x, p) do { (x) = *(p); ReadMemoryBarrier(); } while (0) | |
31 #define ATOMIC_STORE_RELAXED(p, v) do { *(p) = (v); } while (0) | |
32 #define ATOMIC_STORE_RELEASE(p, v) do { WriteMemoryBarrier(); *(p) = (v); } while (0) | |
33 | |
34 // WriteMemoryBarrier(), ReadMemoryBarrier() and MaybeReadMemoryBarrier() | |
35 // are an implementation detail and must not be used in the rest of the code. | |
36 | |
37 #if defined(__i386__) | 8 #if defined(__i386__) |
38 | 9 |
39 static inline void WriteMemoryBarrier() { | 10 static inline void WriteMemoryBarrier() { |
40 int x; | 11 int x; |
41 __asm__ __volatile__("xchgl (%0),%0" // The lock prefix is implicit for xchg. | 12 __asm__ __volatile__("xchgl (%0),%0" // The lock prefix is implicit for xchg. |
42 :: "r" (&x)); | 13 :: "r" (&x)); |
43 } | 14 } |
44 | 15 |
45 #elif defined(__x86_64__) | 16 #elif defined(__x86_64__) |
46 | 17 |
47 // 64-bit implementations of memory barrier can be simpler, because | 18 // 64-bit implementations of memory barrier can be simpler, because |
48 // "sfence" is guaranteed to exist. | 19 // "sfence" is guaranteed to exist. |
49 static inline void WriteMemoryBarrier() { | 20 static inline void WriteMemoryBarrier() { |
50 __asm__ __volatile__("sfence" : : : "memory"); | 21 __asm__ __volatile__("sfence" : : : "memory"); |
51 } | 22 } |
52 | 23 |
53 #elif defined(__ppc__) || defined(__powerpc64__) | 24 #elif defined(__ppc__) |
54 | 25 |
55 static inline void WriteMemoryBarrier() { | 26 static inline void WriteMemoryBarrier() { |
56 __asm__ __volatile__("lwsync" : : : "memory"); | 27 __asm__ __volatile__("eieio" : : : "memory"); |
57 } | |
58 | |
59 #elif defined(__aarch64__) | |
60 | |
61 static inline void WriteMemoryBarrier() { | |
62 __asm__ __volatile__("dmb st" : : : "memory"); | |
63 } | 28 } |
64 | 29 |
65 #elif defined(__alpha__) | 30 #elif defined(__alpha__) |
66 | 31 |
67 static inline void WriteMemoryBarrier() { | 32 static inline void WriteMemoryBarrier() { |
68 __asm__ __volatile__("wmb" : : : "memory"); | 33 __asm__ __volatile__("wmb" : : : "memory"); |
69 } | 34 } |
70 | 35 |
71 #elif defined(__arm__) && defined(__linux__) | |
72 | |
73 // Linux on ARM puts a suitable memory barrier at a magic address for us to call. | |
74 static inline void WriteMemoryBarrier() { | |
75 ((void(*)(void))0xffff0fa0)(); | |
76 } | |
77 | |
78 #elif defined(__windows__) || defined(_WIN32) | |
79 | |
80 #include <intrin.h> | |
81 #include <windows.h> | |
82 | |
83 static inline void WriteMemoryBarrier() { | |
84 #if defined(_M_IX86) || defined(_M_X64) | |
85 // x86 and x64 CPUs have a strong memory model that prohibits most types of | |
86 // reordering, so a non-instruction intrinsic to suppress compiler reordering | |
87 // is sufficient. _WriteBarrier is deprecated, but is still appropriate for | |
88 // the "old compiler" path (pre C++11). | |
89 _WriteBarrier(); | |
90 #else | 36 #else |
91 LONG x; | |
92 ::InterlockedExchange(&x, 0); | |
93 #endif | |
94 } | |
95 | |
96 #elif defined(OS_NACL) | |
97 | |
98 static inline void WriteMemoryBarrier() { | |
99 __sync_synchronize(); | |
100 } | |
101 | |
102 #elif defined(__mips__) | |
103 | |
104 static inline void WriteMemoryBarrier() { | |
105 __asm__ __volatile__("sync" : : : "memory"); | |
106 } | |
107 | |
108 #else | |
109 | 37 |
110 #include "util/mutex.h" | 38 #include "util/mutex.h" |
111 | 39 |
112 static inline void WriteMemoryBarrier() { | 40 static inline void WriteMemoryBarrier() { |
113 // Slight overkill, but good enough: | 41 // Slight overkill, but good enough: |
114 // any mutex implementation must have | 42 // any mutex implementation must have |
115 // a read barrier after the lock operation and | 43 // a read barrier after the lock operation and |
116 // a write barrier before the unlock operation. | 44 // a write barrier before the unlock operation. |
117 // | 45 // |
118 // It may be worthwhile to write architecture-specific | 46 // It may be worthwhile to write architecture-specific |
119 // barriers for the common platforms, as above, but | 47 // barriers for the common platforms, as above, but |
120 // this is a correct fallback. | 48 // this is a correct fallback. |
121 re2::Mutex mu; | 49 re2::Mutex mu; |
122 re2::MutexLock l(&mu); | 50 re2::MutexLock l(&mu); |
123 } | 51 } |
124 | 52 |
| 53 /* |
| 54 #error Need WriteMemoryBarrier for architecture. |
| 55 |
| 56 // Windows |
| 57 inline void WriteMemoryBarrier() { |
| 58 LONG x; |
| 59 ::InterlockedExchange(&x, 0); |
| 60 } |
| 61 */ |
| 62 |
125 #endif | 63 #endif |
126 | 64 |
127 // Alpha has very weak memory ordering. If relying on WriteBarriers, one must | 65 // Alpha has very weak memory ordering. If relying on WriteBarriers, one must |
128 // use read barriers for the readers too. | 66 // use read barriers for the readers too. |
129 #if defined(__alpha__) | 67 #if defined(__alpha__) |
130 | 68 |
131 static inline void MaybeReadMemoryBarrier() { | 69 static inline void MaybeReadMemoryBarrier() { |
132 __asm__ __volatile__("mb" : : : "memory"); | 70 __asm__ __volatile__("mb" : : : "memory"); |
133 } | 71 } |
134 | 72 |
135 #else | 73 #else |
136 | 74 |
137 static inline void MaybeReadMemoryBarrier() {} | 75 static inline void MaybeReadMemoryBarrier() {} |
138 | 76 |
139 #endif // __alpha__ | 77 #endif // __alpha__ |
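
// A sketch of why readers need their own barrier on Alpha (hypothetical
// names; this is the pattern the ATOMIC_LOAD_CONSUME fallback implements):
// Alpha may satisfy even a data-dependent load from a stale cache line, so
// the plain load of a published pointer is followed by MaybeReadMemoryBarrier().
//
//   struct Node { int value; };
//   static struct Node* published;   // NULL until the writer publishes
//
//   void Writer(struct Node* n) {
//     n->value = 42;
//     WriteMemoryBarrier();          // value visible before the pointer
//     published = n;
//   }
//
//   int Reader(void) {
//     struct Node* n = published;
//     MaybeReadMemoryBarrier();      // no-op everywhere except Alpha
//     return n ? n->value : -1;      // once n != NULL, value reads as 42
//   }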
140 | |
141 // Read barrier for various targets. | |
142 | |
143 #if defined(__ppc__) || defined(__powerpc64__) | |
144 | |
145 static inline void ReadMemoryBarrier() { | |
146 __asm__ __volatile__("lwsync" : : : "memory"); | |
147 } | |
148 | |
149 #elif defined(__aarch64__) | |
150 | |
151 static inline void ReadMemoryBarrier() { | |
152 __asm__ __volatile__("dmb ld" : : : "memory"); | |
153 } | |
154 | |
155 #elif defined(__alpha__) | |
156 | |
157 static inline void ReadMemoryBarrier() { | |
158 __asm__ __volatile__("mb" : : : "memory"); | |
159 } | |
160 | |
161 #elif defined(__mips__) | |
162 | |
163 static inline void ReadMemoryBarrier() { | |
164 __asm__ __volatile__("sync" : : : "memory"); | |
165 } | |
166 | |
167 #else | |
168 | |
169 static inline void ReadMemoryBarrier() {} | |
170 | |
171 #endif | |
172 | |
173 #endif // old compiler | |
174 | |
175 #ifndef NO_THREAD_SAFETY_ANALYSIS | |
176 #define NO_THREAD_SAFETY_ANALYSIS | |
177 #endif | |
178 | 78 |
179 #endif // RE2_UTIL_ATOMICOPS_H__ | 79 #endif // RE2_UTIL_ATOMICOPS_H__ |