| OLD | NEW |
| 1 // Copyright 2013 Red Hat Inc. All rights reserved. | 1 // Copyright 2013 Red Hat Inc. All rights reserved. |
| 2 // | 2 // |
| 3 // Redistribution and use in source and binary forms, with or without | 3 // Redistribution and use in source and binary forms, with or without |
| 4 // modification, are permitted provided that the following conditions are | 4 // modification, are permitted provided that the following conditions are |
| 5 // met: | 5 // met: |
| 6 // | 6 // |
| 7 // * Redistributions of source code must retain the above copyright | 7 // * Redistributions of source code must retain the above copyright |
| 8 // notice, this list of conditions and the following disclaimer. | 8 // notice, this list of conditions and the following disclaimer. |
| 9 // * Redistributions in binary form must reproduce the above | 9 // * Redistributions in binary form must reproduce the above |
| 10 // copyright notice, this list of conditions and the following disclaimer | 10 // copyright notice, this list of conditions and the following disclaimer |
| (...skipping 20 matching lines...) |
| 31 #ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_GENERIC_GCC_H_ | 31 #ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_GENERIC_GCC_H_ |
| 32 #define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_GENERIC_GCC_H_ | 32 #define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_GENERIC_GCC_H_ |
| 33 | 33 |
| 34 namespace google { | 34 namespace google { |
| 35 namespace protobuf { | 35 namespace protobuf { |
| 36 namespace internal { | 36 namespace internal { |
| 37 | 37 |
| 38 inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, | 38 inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, |
| 39 Atomic32 old_value, | 39 Atomic32 old_value, |
| 40 Atomic32 new_value) { | 40 Atomic32 new_value) { |
| 41 __atomic_compare_exchange_n(ptr, &old_value, new_value, true, | 41 __atomic_compare_exchange_n(ptr, &old_value, new_value, false, |
| 42 __ATOMIC_RELAXED, __ATOMIC_RELAXED); | 42 __ATOMIC_RELAXED, __ATOMIC_RELAXED); |
| 43 return old_value; | 43 return old_value; |
| 44 } | 44 } |
| 45 | 45 |
| 46 inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, | 46 inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, |
| 47 Atomic32 new_value) { | 47 Atomic32 new_value) { |
| 48 return __atomic_exchange_n(ptr, new_value, __ATOMIC_RELAXED); | 48 return __atomic_exchange_n(ptr, new_value, __ATOMIC_RELAXED); |
| 49 } | 49 } |
| 50 | 50 |
| 51 inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, | 51 inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, |
| 52 Atomic32 increment) { | 52 Atomic32 increment) { |
| 53 return __atomic_add_fetch(ptr, increment, __ATOMIC_RELAXED); | 53 return __atomic_add_fetch(ptr, increment, __ATOMIC_RELAXED); |
| 54 } | 54 } |
| 55 | 55 |
| 56 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, | 56 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, |
| 57 Atomic32 increment) { | 57 Atomic32 increment) { |
| 58 return __atomic_add_fetch(ptr, increment, __ATOMIC_SEQ_CST); | 58 return __atomic_add_fetch(ptr, increment, __ATOMIC_SEQ_CST); |
| 59 } | 59 } |
| 60 | 60 |
| 61 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, | 61 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, |
| 62 Atomic32 old_value, | 62 Atomic32 old_value, |
| 63 Atomic32 new_value) { | 63 Atomic32 new_value) { |
| 64 __atomic_compare_exchange_n(ptr, &old_value, new_value, true, | 64 __atomic_compare_exchange_n(ptr, &old_value, new_value, false, |
| 65 __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE); | 65 __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE); |
| 66 return old_value; | 66 return old_value; |
| 67 } | 67 } |
| 68 | 68 |
| 69 inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, | 69 inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, |
| 70 Atomic32 old_value, | 70 Atomic32 old_value, |
| 71 Atomic32 new_value) { | 71 Atomic32 new_value) { |
| 72 __atomic_compare_exchange_n(ptr, &old_value, new_value, true, | 72 __atomic_compare_exchange_n(ptr, &old_value, new_value, false, |
| 73 __ATOMIC_RELEASE, __ATOMIC_ACQUIRE); | 73 __ATOMIC_RELEASE, __ATOMIC_ACQUIRE); |
| 74 return old_value; | 74 return old_value; |
| 75 } | 75 } |
| 76 | 76 |
| 77 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { | 77 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { |
| 78 __atomic_store_n(ptr, value, __ATOMIC_RELAXED); | 78 __atomic_store_n(ptr, value, __ATOMIC_RELAXED); |
| 79 } | 79 } |
| 80 | 80 |
| 81 inline void MemoryBarrier() { | 81 inline void MemoryBarrierInternal() { |
| 82 __sync_synchronize(); | 82 __sync_synchronize(); |
| 83 } | 83 } |
| 84 | 84 |
| 85 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { | 85 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { |
| 86 __atomic_store_n(ptr, value, __ATOMIC_SEQ_CST); | 86 __atomic_store_n(ptr, value, __ATOMIC_SEQ_CST); |
| 87 } | 87 } |
| 88 | 88 |
| 89 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { | 89 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { |
| 90 __atomic_store_n(ptr, value, __ATOMIC_RELEASE); | 90 __atomic_store_n(ptr, value, __ATOMIC_RELEASE); |
| 91 } | 91 } |
| (...skipping 16 matching lines...) |
| 108 __atomic_store_n(ptr, value, __ATOMIC_RELEASE); | 108 __atomic_store_n(ptr, value, __ATOMIC_RELEASE); |
| 109 } | 109 } |
| 110 | 110 |
| 111 inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { | 111 inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { |
| 112 return __atomic_load_n(ptr, __ATOMIC_ACQUIRE); | 112 return __atomic_load_n(ptr, __ATOMIC_ACQUIRE); |
| 113 } | 113 } |
| 114 | 114 |
| 115 inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, | 115 inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, |
| 116 Atomic64 old_value, | 116 Atomic64 old_value, |
| 117 Atomic64 new_value) { | 117 Atomic64 new_value) { |
| 118 __atomic_compare_exchange_n(ptr, &old_value, new_value, true, | 118 __atomic_compare_exchange_n(ptr, &old_value, new_value, false, |
| 119 __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE); | 119 __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE); |
| 120 return old_value; | 120 return old_value; |
| 121 } | 121 } |
| 122 | 122 |
| 123 inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, | 123 inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, |
| 124 Atomic64 old_value, | 124 Atomic64 old_value, |
| 125 Atomic64 new_value) { | 125 Atomic64 new_value) { |
| 126 __atomic_compare_exchange_n(ptr, &old_value, new_value, true, | 126 __atomic_compare_exchange_n(ptr, &old_value, new_value, false, |
| 127 __ATOMIC_RELAXED, __ATOMIC_RELAXED); | 127 __ATOMIC_RELAXED, __ATOMIC_RELAXED); |
| 128 return old_value; | 128 return old_value; |
| 129 } | 129 } |
| 130 | 130 |
| 131 inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, |
| 132 Atomic64 increment) { |
| 133 return __atomic_add_fetch(ptr, increment, __ATOMIC_RELAXED); |
| 134 } |
| 135 |
| 136 inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { |
| 137 __atomic_store_n(ptr, value, __ATOMIC_RELAXED); |
| 138 } |
| 139 |
| 140 inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, |
| 141 Atomic64 new_value) { |
| 142 return __atomic_exchange_n(ptr, new_value, __ATOMIC_RELAXED); |
| 143 } |
| 144 |
| 145 inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { |
| 146 return __atomic_load_n(ptr, __ATOMIC_RELAXED); |
| 147 } |
| 148 |
| 131 #endif // defined(__LP64__) | 149 #endif // defined(__LP64__) |
| 132 | 150 |
| 133 } // namespace internal | 151 } // namespace internal |
| 134 } // namespace protobuf | 152 } // namespace protobuf |
| 135 } // namespace google | 153 } // namespace google |
| 136 | 154 |
| 137 #endif // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_GENERIC_GCC_H_ | 155 #endif // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_GENERIC_GCC_H_ |
| OLD | NEW |