Chromium Code Reviews

Unified Diff: third_party/protobuf/src/google/protobuf/stubs/atomicops_internals_arm64_gcc.h

Issue 2599263002: third_party/protobuf: Update to HEAD (f52e188fe4) (Closed)
Patch Set: "Address comments" (created 3 years, 12 months ago)
 // Protocol Buffers - Google's data interchange format
 // Copyright 2012 Google Inc.  All rights reserved.
 // https://developers.google.com/protocol-buffers/
 //
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 // notice, this list of conditions and the following disclaimer.
(...skipping 19 matching lines...)

 // This file is an internal atomic implementation, use atomicops.h instead.

 #ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_ARM64_GCC_H_
 #define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_ARM64_GCC_H_

 namespace google {
 namespace protobuf {
 namespace internal {

-inline void MemoryBarrier() {
+inline void MemoryBarrierInternal() {
   __asm__ __volatile__ ("dmb ish" ::: "memory");  // NOLINT
 }

 // NoBarrier versions of the operation include "memory" in the clobber list.
 // This is not required for direct usage of the NoBarrier versions of the
 // operations. However this is required for correctness when they are used as
 // part of the Acquire or Release versions, to ensure that nothing from outside
 // the call is reordered between the operation and the memory barrier. This does
 // not change the code generated, so has no or minimal impact on the
 // NoBarrier operations.
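
The comment block above is the crux of this header's design: the Acquire/Release operations are built by composing a NoBarrier primitive with MemoryBarrierInternal(), and the "memory" clobber on the primitive acts as a pure compiler barrier, so the compiler cannot move unrelated memory accesses between the primitive and the hardware barrier. A minimal sketch of that composition for AArch64 GCC; everything except the "dmb ish" idiom itself is illustrative and not part of this file:

#include <cstdint>

using Atomic32 = int32_t;  // stand-in for protobuf's Atomic32 typedef

// Compiler-only barrier: the "memory" clobber stops the compiler from
// reordering memory accesses across this point; it emits no instructions.
inline void CompilerBarrier() {
  __asm__ __volatile__("" ::: "memory");
}

// Hardware barrier, as in the file under review: "dmb ish" orders memory
// accesses with respect to all cores in the inner shareable domain.
inline void HardwareBarrier() {
  __asm__ __volatile__("dmb ish" ::: "memory");
}

// An acquire-flavored load built by composition: plain load, then barrier.
// Without a compiler barrier attached to the primitive, the compiler could
// hoist a later read of guarded data above the flag load, breaking acquire.
inline Atomic32 AcquireLoadComposed(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;  // the "NoBarrier" primitive
  HardwareBarrier();      // later accesses cannot be observed before this
  return value;
}
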
(...skipping 59 matching lines...)
       [ptr]"+Q" (*ptr)
     : [increment]"IJr" (increment)
     : "memory"
   );  // NOLINT

   return result;
 }

 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                         Atomic32 increment) {
-  MemoryBarrier();
+  MemoryBarrierInternal();
   Atomic32 result = NoBarrier_AtomicIncrement(ptr, increment);
-  MemoryBarrier();
+  MemoryBarrierInternal();

   return result;
 }

 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                        Atomic32 old_value,
                                        Atomic32 new_value) {
   Atomic32 prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-  MemoryBarrier();
+  MemoryBarrierInternal();

   return prev;
 }

 inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                        Atomic32 old_value,
                                        Atomic32 new_value) {
-  MemoryBarrier();
+  MemoryBarrierInternal();
   Atomic32 prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);

   return prev;
 }
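
Note how barrier placement encodes the ordering: Acquire_CompareAndSwap issues the barrier after the primitive, so later accesses cannot move before the CAS, while Release_CompareAndSwap issues it before, so earlier accesses cannot move after it. For comparison, a sketch of the acquire variant using GCC's __atomic built-ins (a modern equivalent, not what this file uses):

#include <cstdint>

using Atomic32 = int32_t;  // stand-in for protobuf's Atomic32 typedef

// Acquire CAS: returns the previously observed value, matching the
// protobuf-style API above.
inline Atomic32 AcquireCAS(volatile Atomic32* ptr,
                           Atomic32 old_value, Atomic32 new_value) {
  Atomic32 expected = old_value;
  __atomic_compare_exchange_n(ptr, &expected, new_value,
                              /*weak=*/false,
                              __ATOMIC_ACQUIRE,   // ordering on success
                              __ATOMIC_ACQUIRE);  // ordering on failure
  return expected;  // holds the value observed at *ptr
}
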
 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
   *ptr = value;
 }

 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
   *ptr = value;
-  MemoryBarrier();
+  MemoryBarrierInternal();
 }

 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
   __asm__ __volatile__ (  // NOLINT
     "stlr %w[value], %[ptr]  \n\t"
     : [ptr]"=Q" (*ptr)
     : [value]"r" (value)
     : "memory"
   );  // NOLINT
 }

 inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
   return *ptr;
 }

 inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
   Atomic32 value;

   __asm__ __volatile__ (  // NOLINT
     "ldar %w[value], %[ptr]  \n\t"
     : [value]"=r" (value)
     : [ptr]"Q" (*ptr)
     : "memory"
   );  // NOLINT

   return value;
 }

 inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
-  MemoryBarrier();
+  MemoryBarrierInternal();
   return *ptr;
 }
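
The stores and loads above are the distinctive part of this port: instead of pairing a plain access with dmb, Release_Store and Acquire_Load use ARMv8's stlr and ldar instructions, which carry release and acquire semantics directly. Compilers targeting AArch64 typically lower std::atomic release stores and acquire loads to the same instructions; a rough equivalence sketch (the names below are illustrative, not from the patch):

#include <atomic>
#include <cstdint>

std::atomic<int32_t> g_flag{0};
int32_t g_payload = 0;

// Producer: write the payload, then release-store the flag.
// On AArch64 the release store is typically lowered to "stlr".
void Publish(int32_t v) {
  g_payload = v;
  g_flag.store(1, std::memory_order_release);
}

// Consumer: acquire-load the flag; once it reads 1, the payload write
// is guaranteed visible. Typically lowered to "ldar" on AArch64.
bool TryConsume(int32_t* out) {
  if (g_flag.load(std::memory_order_acquire) != 1) return false;
  *out = g_payload;
  return true;
}
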
 // 64-bit versions of the operations.
 // See the 32-bit versions for comments.

 inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                          Atomic64 old_value,
                                          Atomic64 new_value) {
   Atomic64 prev;
(...skipping 54 matching lines...)
       [ptr]"+Q" (*ptr)
     : [increment]"IJr" (increment)
     : "memory"
   );  // NOLINT

   return result;
 }

 inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                         Atomic64 increment) {
-  MemoryBarrier();
+  MemoryBarrierInternal();
   Atomic64 result = NoBarrier_AtomicIncrement(ptr, increment);
-  MemoryBarrier();
+  MemoryBarrierInternal();

   return result;
 }

 inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                        Atomic64 old_value,
                                        Atomic64 new_value) {
   Atomic64 prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-  MemoryBarrier();
+  MemoryBarrierInternal();

   return prev;
 }

 inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                        Atomic64 old_value,
                                        Atomic64 new_value) {
-  MemoryBarrier();
+  MemoryBarrierInternal();
   Atomic64 prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);

   return prev;
 }

 inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
   *ptr = value;
 }

 inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
   *ptr = value;
-  MemoryBarrier();
+  MemoryBarrierInternal();
 }

 inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
   __asm__ __volatile__ (  // NOLINT
     "stlr %x[value], %[ptr]  \n\t"
     : [ptr]"=Q" (*ptr)
     : [value]"r" (value)
     : "memory"
   );  // NOLINT
 }

 inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
   return *ptr;
 }

 inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
   Atomic64 value;

   __asm__ __volatile__ (  // NOLINT
     "ldar %x[value], %[ptr]  \n\t"
     : [value]"=r" (value)
     : [ptr]"Q" (*ptr)
     : "memory"
   );  // NOLINT

   return value;
 }

 inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
-  MemoryBarrier();
+  MemoryBarrierInternal();
   return *ptr;
 }

 }  // namespace internal
 }  // namespace protobuf
 }  // namespace google

 #endif  // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_ARM64_GCC_H_
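
As the header's own comment says, callers should include atomicops.h rather than this file. A minimal end-to-end sketch of the publish/consume pattern these primitives support, assuming the atomicops.h API; the globals and function names are illustrative only:

#include <google/protobuf/stubs/atomicops.h>

using google::protobuf::internal::Atomic32;
using google::protobuf::internal::Acquire_Load;
using google::protobuf::internal::NoBarrier_Load;
using google::protobuf::internal::NoBarrier_Store;
using google::protobuf::internal::Release_Store;

Atomic32 g_ready = 0;
Atomic32 g_value = 0;

// Writer thread: store the data plainly, then publish with release
// semantics (compiled to "stlr" by this header on ARM64).
void Writer() {
  NoBarrier_Store(&g_value, 42);
  Release_Store(&g_ready, 1);
}

// Reader thread: spin with acquire loads ("ldar" on ARM64); after
// observing 1, the data store is visible, so a plain load is safe.
Atomic32 Reader() {
  while (Acquire_Load(&g_ready) == 0) {
  }
  return NoBarrier_Load(&g_value);
}
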
