Index: third_party/protobuf/src/google/protobuf/stubs/atomicops_internals_solaris.h
diff --git a/third_party/protobuf/src/google/protobuf/stubs/atomicops_internals_macosx.h b/third_party/protobuf/src/google/protobuf/stubs/atomicops_internals_solaris.h
similarity index 60%
copy from third_party/protobuf/src/google/protobuf/stubs/atomicops_internals_macosx.h
copy to third_party/protobuf/src/google/protobuf/stubs/atomicops_internals_solaris.h
index f9b7581ad568216deb3c92acedbaa28ca0269acd..d8057ecdeabf77e2e63449dd52d99ced9c463dc5 100644
--- a/third_party/protobuf/src/google/protobuf/stubs/atomicops_internals_macosx.h
+++ b/third_party/protobuf/src/google/protobuf/stubs/atomicops_internals_solaris.h
@@ -1,6 +1,5 @@
-// Protocol Buffers - Google's data interchange format
-// Copyright 2012 Google Inc. All rights reserved.
-// http://code.google.com/p/protobuf/
+// Copyright 2014 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
 //
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
@@ -30,10 +29,10 @@
 // This file is an internal atomic implementation, use atomicops.h instead.
-#ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_MACOSX_H_
-#define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_MACOSX_H_
+#ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_SPARC_GCC_H_
+#define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_SPARC_GCC_H_
-#include <libkern/OSAtomic.h>
+#include <atomic.h>
 namespace google {
 namespace protobuf {
@@ -42,59 +41,47 @@ namespace internal {
 inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                          Atomic32 old_value,
                                          Atomic32 new_value) {
-  Atomic32 prev_value;
-  do {
-    if (OSAtomicCompareAndSwap32(old_value, new_value,
-                                 const_cast<Atomic32*>(ptr))) {
-      return old_value;
-    }
-    prev_value = *ptr;
-  } while (prev_value == old_value);
-  return prev_value;
+  return (Atomic32)atomic_cas_32((volatile uint32_t*)ptr, (uint32_t)old_value, (uint32_t)new_value);
 }
 inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                          Atomic32 new_value) {
-  Atomic32 old_value;
-  do {
-    old_value = *ptr;
-  } while (!OSAtomicCompareAndSwap32(old_value, new_value,
-                                     const_cast<Atomic32*>(ptr)));
-  return old_value;
+  return (Atomic32)atomic_swap_32((volatile uint32_t*)ptr, (uint32_t)new_value);
 }
 inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                           Atomic32 increment) {
-  return OSAtomicAdd32(increment, const_cast<Atomic32*>(ptr));
+  return (Atomic32)atomic_add_32_nv((volatile uint32_t*)ptr, (uint32_t)increment);
 }
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
-                                        Atomic32 increment) {
-  return OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr));
+inline void MemoryBarrier(void) {
+  membar_producer();
+  membar_consumer();
 }
-inline void MemoryBarrier() {
-  OSMemoryBarrier();
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+                                        Atomic32 increment) {
+  MemoryBarrier();
+  Atomic32 ret = NoBarrier_AtomicIncrement(ptr, increment);
+  MemoryBarrier();
+
+  return ret;
 }
 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                        Atomic32 old_value,
                                        Atomic32 new_value) {
-  Atomic32 prev_value;
-  do {
-    if (OSAtomicCompareAndSwap32Barrier(old_value, new_value,
-                                        const_cast<Atomic32*>(ptr))) {
-      return old_value;
-    }
-    prev_value = *ptr;
-  } while (prev_value == old_value);
-  return prev_value;
+  Atomic32 ret = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  MemoryBarrier();
+
+  return ret;
 }
 inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                        Atomic32 old_value,
                                        Atomic32 new_value) {
-  return Acquire_CompareAndSwap(ptr, old_value, new_value);
+  MemoryBarrier();
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
 }
 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
@@ -103,11 +90,11 @@ inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
   *ptr = value;
-  MemoryBarrier();
+  membar_producer();
 }
 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
-  MemoryBarrier();
+  membar_consumer();
   *ptr = value;
 }
@@ -116,75 +103,51 @@ inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
 }
 inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
-  Atomic32 value = *ptr;
-  MemoryBarrier();
-  return value;
+  Atomic32 val = *ptr;
+  membar_consumer();
+  return val;
 }
 inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
-  MemoryBarrier();
+  membar_producer();
   return *ptr;
 }
-#ifdef __LP64__
-
-// 64-bit implementation on 64-bit platform
-
+#ifdef GOOGLE_PROTOBUF_ARCH_64_BIT
 inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                          Atomic64 old_value,
                                          Atomic64 new_value) {
-  Atomic64 prev_value;
-  do {
-    if (OSAtomicCompareAndSwap64(old_value, new_value,
-                                 reinterpret_cast<volatile int64_t*>(ptr))) {
-      return old_value;
-    }
-    prev_value = *ptr;
-  } while (prev_value == old_value);
-  return prev_value;
-}
-
-inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
-                                         Atomic64 new_value) {
-  Atomic64 old_value;
-  do {
-    old_value = *ptr;
-  } while (!OSAtomicCompareAndSwap64(old_value, new_value,
-                                     reinterpret_cast<volatile int64_t*>(ptr)));
-  return old_value;
+  return atomic_cas_64((volatile uint64_t*)ptr, (uint64_t)old_value, (uint64_t)new_value);
 }
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
-                                          Atomic64 increment) {
-  return OSAtomicAdd64(increment, reinterpret_cast<volatile int64_t*>(ptr));
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value) {
+  return atomic_swap_64((volatile uint64_t*)ptr, (uint64_t)new_value);
 }
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
-                                        Atomic64 increment) {
-  return OSAtomicAdd64Barrier(increment,
-                              reinterpret_cast<volatile int64_t*>(ptr));
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment) {
+  return atomic_add_64_nv((volatile uint64_t*)ptr, increment);
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment) {
+  MemoryBarrier();
+  Atomic64 ret = atomic_add_64_nv((volatile uint64_t*)ptr, increment);
+  MemoryBarrier();
+  return ret;
 }
 inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                        Atomic64 old_value,
                                        Atomic64 new_value) {
-  Atomic64 prev_value;
-  do {
-    if (OSAtomicCompareAndSwap64Barrier(
-        old_value, new_value, reinterpret_cast<volatile int64_t*>(ptr))) {
-      return old_value;
-    }
-    prev_value = *ptr;
-  } while (prev_value == old_value);
-  return prev_value;
+  Atomic64 ret = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  MemoryBarrier();
+  return ret;
 }
 inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                        Atomic64 old_value,
                                        Atomic64 new_value) {
-  // The lib kern interface does not distinguish between
-  // Acquire and Release memory barriers; they are equivalent.
-  return Acquire_CompareAndSwap(ptr, old_value, new_value);
+  MemoryBarrier();
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
 }
 inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
@@ -193,11 +156,11 @@ inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
 inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
   *ptr = value;
-  MemoryBarrier();
+  membar_producer();
 }
 inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
-  MemoryBarrier();
+  membar_consumer();
   *ptr = value;
 }
@@ -206,20 +169,20 @@ inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
 }
 inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
-  Atomic64 value = *ptr;
-  MemoryBarrier();
-  return value;
+  Atomic64 ret = *ptr;
+  membar_consumer();
+  return ret;
 }
 inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
-  MemoryBarrier();
+  membar_producer();
   return *ptr;
 }
-
-#endif  // defined(__LP64__)
+#endif
 }  // namespace internal
 }  // namespace protobuf
 }  // namespace google
-#endif  // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_MACOSX_H_
+#endif  // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_SPARC_GCC_H_
+
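
For readers comparing against the Mac OS X file: the sketch below is illustrative only (the file name and test program are hypothetical, not part of the patch, and assume a Solaris/illumos toolchain). It exercises the documented behaviour of atomic_cas(3C), atomic_swap(3C), atomic_add(3C) and membar_ops(3C) that the new implementations rely on.

// check_solaris_atomics.cc -- illustrative only; build on Solaris/illumos,
// e.g.: g++ check_solaris_atomics.cc && ./a.out
#include <atomic.h>
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main() {
  volatile uint32_t v = 5;

  // atomic_cas_32 returns whatever was in *target before the call, so
  // NoBarrier_CompareAndSwap can return it directly: old_value on a
  // successful swap, the conflicting value on a failed one.
  uint32_t prev = atomic_cas_32(&v, 5, 7);   // succeeds
  assert(prev == 5 && v == 7);
  prev = atomic_cas_32(&v, 5, 9);            // fails, v unchanged
  assert(prev == 7 && v == 7);

  // atomic_swap_32 returns the previous value, matching
  // NoBarrier_AtomicExchange.
  prev = atomic_swap_32(&v, 11);
  assert(prev == 7 && v == 11);

  // atomic_add_32_nv ("_nv" = new value) returns the post-increment value,
  // which is what the AtomicIncrement wrappers must return.
  uint32_t now = atomic_add_32_nv(&v, 3);
  assert(now == 14 && v == 14);

  // membar_producer() orders earlier stores before later stores (issued
  // after the store in Acquire_Store); membar_consumer() orders earlier
  // loads before later loads (issued after the load in Acquire_Load).
  // The patch's MemoryBarrier() issues both.
  membar_producer();
  membar_consumer();

  printf("solaris atomics behave as the port expects\n");
  return 0;
}

In particular, atomic_add_32_nv/atomic_add_64_nv returning the updated value is what keeps the AtomicIncrement contract identical to the OSAtomicAdd32Barrier/OSAtomicAdd64Barrier versions being replaced, which also return the new value.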