Chromium Code Reviews

Unified Diff: third_party/protobuf/src/google/protobuf/stubs/atomicops_internals_x86_gcc.h

Issue 2885223002: Protobuf: Remove protobuf globals patch (Closed)
Patch Set: typo (created 3 years, 7 months ago)
 // Protocol Buffers - Google's data interchange format
 // Copyright 2012 Google Inc. All rights reserved.
 // https://developers.google.com/protocol-buffers/
 //
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 // * Redistributions of source code must retain the above copyright
 // notice, this list of conditions and the following disclaimer.
(...skipping 28 matching lines...)
 
 // This struct is not part of the public API of this module; clients may not
 // use it.
 // Features of this x86. Values may not be correct before main() is run,
 // but are set conservatively.
 struct AtomicOps_x86CPUFeatureStruct {
   bool has_amd_lock_mb_bug;  // Processor has AMD memory-barrier bug; do lfence
                              // after acquire compare-and-swap.
   bool has_sse2;             // Processor has SSE2.
 };
-extern struct AtomicOps_x86CPUFeatureStruct cr_AtomicOps_Internalx86CPUFeatures;
-
-void AtomicOps_Internalx86CPUFeaturesInit();
+extern struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures;
 
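For context on the two declarations changed above: the struct's fields are normally filled in once, before any threads contend on the atomics, by probing CPUID. The sketch below is illustrative only and not part of this change; the FeatureSketch type, the InitFeaturesSketch helper, and the simplified family check are assumptions, and the real initialization lives in the matching atomicops .cc file.

#include <cpuid.h>  // GCC/Clang wrapper for the CPUID instruction.

struct FeatureSketch {
  bool has_amd_lock_mb_bug;
  bool has_sse2;
};

// Hypothetical feature probe, roughly what a CPUID-based initializer does.
inline FeatureSketch InitFeaturesSketch() {
  FeatureSketch f = {false, false};
  unsigned eax, ebx, ecx, edx;
  // Leaf 0: the vendor string sits in EBX/EDX/ECX ("AuthenticAMD" on AMD).
  if (!__get_cpuid(0, &eax, &ebx, &ecx, &edx)) return f;
  bool is_amd = ebx == 0x68747541 && edx == 0x69746e65 && ecx == 0x444d4163;
  // Leaf 1: EDX bit 26 advertises SSE2; EAX encodes family/model/stepping.
  if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx)) return f;
  f.has_sse2 = (edx & (1u << 26)) != 0;
  unsigned family = (eax >> 8) & 0xf;
  // Simplified assumption: flag the lfence workaround for family-15 AMD
  // parts; the real check is narrower (specific Opteron models only).
  f.has_amd_lock_mb_bug = is_amd && (family == 0xf);
  return f;
}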
 #define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
 
 // 32-bit low-level operations on any platform.
 
 inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                          Atomic32 old_value,
                                          Atomic32 new_value) {
   Atomic32 prev;
   __asm__ __volatile__("lock; cmpxchgl %1,%2"
(...skipping 22 matching lines...)
   return temp + increment;
 }
 
 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                         Atomic32 increment) {
   Atomic32 temp = increment;
   __asm__ __volatile__("lock; xaddl %0,%1"
                        : "+r" (temp), "+m" (*ptr)
                        : : "memory");
   // temp now holds the old value of *ptr
-  if (cr_AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
+  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
     __asm__ __volatile__("lfence" : : : "memory");
   }
   return temp + increment;
 }
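Callers of Barrier_AtomicIncrement rely on it returning the incremented value with full-barrier semantics, which is what reference-counting patterns need so the final decrementer sees all prior writes. A hedged usage sketch follows; SharedBlob and Unref are made-up names, and the include path assumes the usual protobuf stubs dispatch header.

#include <google/protobuf/stubs/atomicops.h>

using google::protobuf::internal::Atomic32;
using google::protobuf::internal::Barrier_AtomicIncrement;

struct SharedBlob {
  Atomic32 refcount;  // Starts at 1 for the creating thread.
  // ... payload ...
};

// Returns true if the caller dropped the last reference and must free blob.
inline bool Unref(SharedBlob* blob) {
  // Barrier_AtomicIncrement returns the *new* value of the counter.
  return Barrier_AtomicIncrement(&blob->refcount, -1) == 0;
}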
 
 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                        Atomic32 old_value,
                                        Atomic32 new_value) {
   Atomic32 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-  if (cr_AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
+  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
     __asm__ __volatile__("lfence" : : : "memory");
   }
   return x;
 }
 
 inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                        Atomic32 old_value,
                                        Atomic32 new_value) {
   return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
 }
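Acquire_CompareAndSwap, together with the Release_Store declared further down in this header, is the usual ingredient list for a small spinlock: the acquire CAS keeps the critical section from floating above the lock, and the release store publishes everything written under it. The sketch below is an illustrative assumption, not code from this CL; SpinLockSketch is a made-up class.

#include <google/protobuf/stubs/atomicops.h>

namespace {

class SpinLockSketch {
 public:
  SpinLockSketch() : state_(0) {}

  void Lock() {
    // Acquire semantics: work inside the critical section cannot be
    // reordered before the successful compare-and-swap.
    while (google::protobuf::internal::Acquire_CompareAndSwap(
               &state_, /*old_value=*/0, /*new_value=*/1) != 0) {
      // Busy-wait; a real lock would back off or yield here.
    }
  }

  void Unlock() {
    // Release semantics: writes made under the lock are visible before the
    // lock word returns to 0.
    google::protobuf::internal::Release_Store(&state_, 0);
  }

 private:
  volatile google::protobuf::internal::Atomic32 state_;
};

}  // namespace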
(...skipping 11 matching lines...)
 }
 
 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
   *ptr = value;
   MemoryBarrier();
 }
 
 #else
 
 inline void MemoryBarrier() {
-  if (cr_AtomicOps_Internalx86CPUFeatures.has_sse2) {
+  if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
     __asm__ __volatile__("mfence" : : : "memory");
   } else {  // mfence is faster but not present on PIII
     Atomic32 x = 0;
     NoBarrier_AtomicExchange(&x, 0);  // acts as a barrier on PIII
   }
 }
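The else branch of MemoryBarrier() above leans on the fact that a locked read-modify-write is itself a full fence on x86. NoBarrier_AtomicExchange is elided from this diff, so the snippet below is only a hedged reconstruction of the typical xchg-based form (AtomicExchangeSketch and Atomic32Sketch are stand-in names), included to make the fallback concrete.

// Illustrative only: roughly how an xchg-based exchange looks in GCC inline
// asm. On x86, xchg with a memory operand is implicitly locked, so it acts
// as a full memory barrier even without an mfence instruction.
typedef int Atomic32Sketch;

inline Atomic32Sketch AtomicExchangeSketch(volatile Atomic32Sketch* ptr,
                                           Atomic32Sketch new_value) {
  __asm__ __volatile__("xchgl %1,%0"
                       : "=r" (new_value)
                       : "m" (*ptr), "0" (new_value)
                       : "memory");
  return new_value;  // Holds the previous value of *ptr after the exchange.
}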
 
 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
-  if (cr_AtomicOps_Internalx86CPUFeatures.has_sse2) {
+  if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
     *ptr = value;
     __asm__ __volatile__("mfence" : : : "memory");
   } else {
     NoBarrier_AtomicExchange(ptr, value);
     // acts as a barrier on PIII
   }
 }
 #endif
 
 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
(...skipping 52 matching lines...)
   return temp + increment;
 }
 
 inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                         Atomic64 increment) {
   Atomic64 temp = increment;
   __asm__ __volatile__("lock; xaddq %0,%1"
                        : "+r" (temp), "+m" (*ptr)
                        : : "memory");
   // temp now contains the previous value of *ptr
-  if (cr_AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
+  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
     __asm__ __volatile__("lfence" : : : "memory");
   }
   return temp + increment;
 }
 
 inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
   *ptr = value;
 }
 
 inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
(...skipping 36 matching lines...)
 
 inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
   MemoryBarrier();
   return *ptr;
 }
 
 inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                        Atomic64 old_value,
                                        Atomic64 new_value) {
   Atomic64 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-  if (cr_AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
+  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
     __asm__ __volatile__("lfence" : : : "memory");
   }
   return x;
 }
 
 inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                        Atomic64 old_value,
                                        Atomic64 new_value) {
   return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
 }
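On 64-bit builds, the relaxed 64-bit CompareAndSwap above is what retry loops are built from when only atomicity matters, not ordering. A hedged sketch, assuming a 64-bit target where Atomic64 is available; UpdateMaxSketch is a hypothetical helper, not part of protobuf.

// Hypothetical retry loop: record the largest value ever observed, using only
// the relaxed (NoBarrier) compare-and-swap declared above.
#include <google/protobuf/stubs/atomicops.h>

using google::protobuf::internal::Atomic64;
using google::protobuf::internal::NoBarrier_CompareAndSwap;

inline void UpdateMaxSketch(volatile Atomic64* max_so_far, Atomic64 candidate) {
  Atomic64 observed = *max_so_far;
  while (candidate > observed) {
    // If *max_so_far still equals `observed`, install `candidate`; otherwise
    // the CAS returns the newer value and the loop re-evaluates against it.
    Atomic64 prev = NoBarrier_CompareAndSwap(max_so_far, observed, candidate);
    if (prev == observed) return;  // We won the race.
    observed = prev;               // Someone else updated first; retry.
  }
}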
 
 #endif  // defined(__x86_64__)
 
 }  // namespace internal
 }  // namespace protobuf
 }  // namespace google
 
 #undef ATOMICOPS_COMPILER_BARRIER
 
 #endif  // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_X86_GCC_H_