| Index: third_party/re2/util/atomicops.h
|
| diff --git a/third_party/re2/util/atomicops.h b/third_party/re2/util/atomicops.h
|
| index 11c11963d1bb449f33cb987adc33796460026d42..dc944e751f41530863daa67213435a357a4f11f3 100644
|
| --- a/third_party/re2/util/atomicops.h
|
| +++ b/third_party/re2/util/atomicops.h
|
| @@ -5,6 +5,35 @@
|
| #ifndef RE2_UTIL_ATOMICOPS_H__
|
| #define RE2_UTIL_ATOMICOPS_H__
|
|
|
| +// The memory ordering constraints resemble the ones in C11.
|
| +// RELAXED - no memory ordering, just an atomic operation.
|
| +// CONSUME - data-dependent ordering.
|
| +// ACQUIRE - prevents memory accesses from hoisting above the operation.
|
| +// RELEASE - prevents memory accesses from sinking below the operation.
|
| +
|
| +#ifndef __has_builtin
|
| +#define __has_builtin(x) 0
|
| +#endif
|
| +
|
| +#if !defined(OS_NACL) && (__has_builtin(__atomic_load_n) || (__GNUC__*10000 + __GNUC_MINOR__*100 + __GNUC_PATCHLEVEL__ >= 40801))
|
| +
|
| +#define ATOMIC_LOAD_RELAXED(x, p) do { (x) = __atomic_load_n((p), __ATOMIC_RELAXED); } while (0)
|
| +#define ATOMIC_LOAD_CONSUME(x, p) do { (x) = __atomic_load_n((p), __ATOMIC_CONSUME); } while (0)
|
| +#define ATOMIC_LOAD_ACQUIRE(x, p) do { (x) = __atomic_load_n((p), __ATOMIC_ACQUIRE); } while (0)
|
| +#define ATOMIC_STORE_RELAXED(p, v) __atomic_store_n((p), (v), __ATOMIC_RELAXED)
|
| +#define ATOMIC_STORE_RELEASE(p, v) __atomic_store_n((p), (v), __ATOMIC_RELEASE)
|
| +
|
| +#else // old compiler
|
| +
|
| +#define ATOMIC_LOAD_RELAXED(x, p) do { (x) = *(p); } while (0)
|
| +#define ATOMIC_LOAD_CONSUME(x, p) do { (x) = *(p); MaybeReadMemoryBarrier(); } while (0)
|
| +#define ATOMIC_LOAD_ACQUIRE(x, p) do { (x) = *(p); ReadMemoryBarrier(); } while (0)
|
| +#define ATOMIC_STORE_RELAXED(p, v) do { *(p) = (v); } while (0)
|
| +#define ATOMIC_STORE_RELEASE(p, v) do { WriteMemoryBarrier(); *(p) = (v); } while (0)
|
| +
|
| +// WriteMemoryBarrier(), ReadMemoryBarrier() and MaybeReadMemoryBarrier()
|
| +// are implementation details and must not be used in the rest of the code.
|
| +
|
| #if defined(__i386__)
|
|
|
| static inline void WriteMemoryBarrier() {
|
| @@ -21,10 +50,16 @@ static inline void WriteMemoryBarrier() {
|
| __asm__ __volatile__("sfence" : : : "memory");
|
| }
|
|
|
| -#elif defined(__ppc__)
|
| +#elif defined(__ppc__) || defined(__powerpc64__)
|
|
|
| static inline void WriteMemoryBarrier() {
|
| - __asm__ __volatile__("eieio" : : : "memory");
|
| + __asm__ __volatile__("lwsync" : : : "memory");
|
| +}
|
| +
|
| +#elif defined(__aarch64__)
|
| +
|
| +static inline void WriteMemoryBarrier() {
|
| + __asm__ __volatile__("dmb st" : : : "memory");
|
| }
|
|
|
| #elif defined(__alpha__)
|
| @@ -33,6 +68,43 @@ static inline void WriteMemoryBarrier() {
|
| __asm__ __volatile__("wmb" : : : "memory");
|
| }
|
|
|
| +#elif defined(__arm__) && defined(__linux__)
|
| +
|
| +// Linux on ARM provides a kernel memory-barrier routine (kuser_helper) at a fixed address.
|
| +static inline void WriteMemoryBarrier() {
|
| + ((void(*)(void))0xffff0fa0)();
|
| +}
|
| +
|
| +#elif defined(__windows__) || defined(_WIN32)
|
| +
|
| +#include <intrin.h>
|
| +#include <windows.h>
|
| +
|
| +static inline void WriteMemoryBarrier() {
|
| +#if defined(_M_IX86) || defined(_M_X64)
|
| + // x86 and x64 CPUs have a strong memory model that prohibits most types of
|
| + // reordering, so a non-instruction intrinsic to suppress compiler reordering
|
| + // is sufficient. _WriteBarrier is deprecated, but is still appropriate for
|
| + // the "old compiler" path (pre C++11).
|
| + _WriteBarrier();
|
| +#else
|
| + LONG x;
|
| + ::InterlockedExchange(&x, 0);
|
| +#endif
|
| +}
|
| +
|
| +#elif defined(OS_NACL)
|
| +
|
| +static inline void WriteMemoryBarrier() {
|
| + __sync_synchronize();
|
| +}
|
| +
|
| +#elif defined(__mips__)
|
| +
|
| +static inline void WriteMemoryBarrier() {
|
| + __asm__ __volatile__("sync" : : : "memory");
|
| +}
|
| +
|
| #else
|
|
|
| #include "util/mutex.h"
|
| @@ -50,19 +122,9 @@ static inline void WriteMemoryBarrier() {
|
| re2::MutexLock l(&mu);
|
| }
|
|
|
| -/*
|
| -#error Need WriteMemoryBarrier for architecture.
|
| -
|
| -// Windows
|
| -inline void WriteMemoryBarrier() {
|
| - LONG x;
|
| - ::InterlockedExchange(&x, 0);
|
| -}
|
| -*/
|
| -
|
| #endif
|
|
|
| -// Alpha has very weak memory ordering. If relying on WriteBarriers, must one
|
| +// Alpha has very weak memory ordering. If relying on WriteBarriers, one must
|
| // use read barriers for the readers too.
|
| #if defined(__alpha__)
|
|
|
| @@ -74,6 +136,44 @@ static inline void MaybeReadMemoryBarrier() {
|
|
|
| static inline void MaybeReadMemoryBarrier() {}
|
|
|
| -#endif // __alpha__
|
| +#endif // __alpha__
|
| +
|
| +// Read barrier for various targets.
|
| +
|
| +#if defined(__ppc__) || defined(__powerpc64__)
|
| +
|
| +static inline void ReadMemoryBarrier() {
|
| + __asm__ __volatile__("lwsync" : : : "memory");
|
| +}
|
| +
|
| +#elif defined(__aarch64__)
|
| +
|
| +static inline void ReadMemoryBarrier() {
|
| + __asm__ __volatile__("dmb ld" : : : "memory");
|
| +}
|
| +
|
| +#elif defined(__alpha__)
|
| +
|
| +static inline void ReadMemoryBarrier() {
|
| + __asm__ __volatile__("mb" : : : "memory");
|
| +}
|
| +
|
| +#elif defined(__mips__)
|
| +
|
| +static inline void ReadMemoryBarrier() {
|
| + __asm__ __volatile__("sync" : : : "memory");
|
| +}
|
| +
|
| +#else
|
| +
|
| +static inline void ReadMemoryBarrier() {}
|
| +
|
| +#endif
|
| +
|
| +#endif // old compiler
|
| +
|
| +#ifndef NO_THREAD_SAFETY_ANALYSIS
|
| +#define NO_THREAD_SAFETY_ANALYSIS
|
| +#endif
|
|
|
| #endif // RE2_UTIL_ATOMICOPS_H__
|
|
|