Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(4)

Side by Side Diff: third_party/tcmalloc/chromium/src/base/atomicops-internals-x86.h

Issue 636783002: Use C++11 atomics (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 6 years, 2 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 /* Copyright (c) 2006, Google Inc. 1 /* Copyright (c) 2006, Google Inc.
2 * All rights reserved. 2 * All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are 5 * modification, are permitted provided that the following conditions are
6 * met: 6 * met:
7 * 7 *
8 * * Redistributions of source code must retain the above copyright 8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer. 9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above 10 * * Redistributions in binary form must reproduce the above
(...skipping 28 matching lines...) Expand all
39 #define BASE_ATOMICOPS_INTERNALS_X86_H_ 39 #define BASE_ATOMICOPS_INTERNALS_X86_H_
40 40
41 typedef int32_t Atomic32; 41 typedef int32_t Atomic32;
42 #define BASE_HAS_ATOMIC64 1 // Use only in tests and base/atomic* 42 #define BASE_HAS_ATOMIC64 1 // Use only in tests and base/atomic*
43 43
44 44
45 // NOTE(vchen): x86 does not need to define AtomicWordCastType, because it 45 // NOTE(vchen): x86 does not need to define AtomicWordCastType, because it
46 // already matches Atomic32 or Atomic64, depending on the platform. 46 // already matches Atomic32 or Atomic64, depending on the platform.
47 47
48 48
49 // This struct is not part of the public API of this module; clients may not
50 // use it.
51 // Features of this x86. Values may not be correct before main() is run,
52 // but are set conservatively.
53 struct AtomicOps_x86CPUFeatureStruct {
54 bool has_amd_lock_mb_bug; // Processor has AMD memory-barrier bug; do lfence
55 // after acquire compare-and-swap.
56 bool has_sse2; // Processor has SSE2.
57 bool has_cmpxchg16b; // Processor supports cmpxchg16b instruction.
58 };
59 extern struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures;
60
61
62 #define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory") 49 #define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
63 50
64 51
65 namespace base { 52 namespace base {
66 namespace subtle { 53 namespace subtle {
67 54
68 typedef int64_t Atomic64; 55 typedef int64_t Atomic64;
69 56
70 // 32-bit low-level operations on any platform. 57 // 32-bit low-level operations on any platform.
71 58
(...skipping 27 matching lines...) Expand all
99 return temp + increment; 86 return temp + increment;
100 } 87 }
101 88
102 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, 89 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
103 Atomic32 increment) { 90 Atomic32 increment) {
104 Atomic32 temp = increment; 91 Atomic32 temp = increment;
105 __asm__ __volatile__("lock; xaddl %0,%1" 92 __asm__ __volatile__("lock; xaddl %0,%1"
106 : "+r" (temp), "+m" (*ptr) 93 : "+r" (temp), "+m" (*ptr)
107 : : "memory"); 94 : : "memory");
108 // temp now holds the old value of *ptr 95 // temp now holds the old value of *ptr
109 if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
110 __asm__ __volatile__("lfence" : : : "memory");
111 }
112 return temp + increment; 96 return temp + increment;
113 } 97 }
114 98
115 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, 99 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
116 Atomic32 old_value, 100 Atomic32 old_value,
117 Atomic32 new_value) { 101 Atomic32 new_value) {
118 Atomic32 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value); 102 Atomic32 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
119 if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
120 __asm__ __volatile__("lfence" : : : "memory");
121 }
122 return x; 103 return x;
123 } 104 }
124 105
125 inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, 106 inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
126 Atomic32 old_value, 107 Atomic32 old_value,
127 Atomic32 new_value) { 108 Atomic32 new_value) {
128 return NoBarrier_CompareAndSwap(ptr, old_value, new_value); 109 return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
129 } 110 }
130 111
131 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { 112 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
132 *ptr = value; 113 *ptr = value;
133 } 114 }
134 115
135 #if defined(__x86_64__) 116 #if defined(__x86_64__)
136 117
// 64-bit implementations of memory barrier can be simpler, because
// "mfence" is guaranteed to exist on all x86-64 processors.
inline void MemoryBarrier() {
  __asm__ __volatile__("mfence" : : : "memory");
}
142 123
143 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { 124 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
144 *ptr = value; 125 *ptr = value;
145 MemoryBarrier(); 126 MemoryBarrier();
146 } 127 }
147 128
148 #else 129 #else
149 130
// Full hardware memory barrier. "mfence" (SSE2) is assumed to be available
// even in 32-bit builds.
inline void MemoryBarrier() {
  __asm__ __volatile__("mfence" : : : "memory");
}
158 134
159 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { 135 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
160 if (AtomicOps_Internalx86CPUFeatures.has_sse2) { 136 *ptr = value;
161 *ptr = value; 137 __asm__ __volatile__("mfence" : : : "memory");
162 __asm__ __volatile__("mfence" : : : "memory");
163 } else {
164 NoBarrier_AtomicExchange(ptr, value);
165 // acts as a barrier on PIII
166 }
167 } 138 }
168 #endif 139 #endif
169 140
170 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { 141 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
171 ATOMICOPS_COMPILER_BARRIER(); 142 ATOMICOPS_COMPILER_BARRIER();
172 *ptr = value; // An x86 store acts as a release barrier. 143 *ptr = value; // An x86 store acts as a release barrier.
173 // See comments in Atomic64 version of Release_Store(), below. 144 // See comments in Atomic64 version of Release_Store(), below.
174 } 145 }
175 146
176 inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { 147 inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
(...skipping 46 matching lines...) Expand 10 before | Expand all | Expand 10 after
223 return temp + increment; 194 return temp + increment;
224 } 195 }
225 196
226 inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, 197 inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
227 Atomic64 increment) { 198 Atomic64 increment) {
228 Atomic64 temp = increment; 199 Atomic64 temp = increment;
229 __asm__ __volatile__("lock; xaddq %0,%1" 200 __asm__ __volatile__("lock; xaddq %0,%1"
230 : "+r" (temp), "+m" (*ptr) 201 : "+r" (temp), "+m" (*ptr)
231 : : "memory"); 202 : : "memory");
232 // temp now contains the previous value of *ptr 203 // temp now contains the previous value of *ptr
233 if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
234 __asm__ __volatile__("lfence" : : : "memory");
235 }
236 return temp + increment; 204 return temp + increment;
237 } 205 }
238 206
239 inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { 207 inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
240 *ptr = value; 208 *ptr = value;
241 } 209 }
242 210
243 inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) { 211 inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
244 *ptr = value; 212 *ptr = value;
245 MemoryBarrier(); 213 MemoryBarrier();
(...skipping 96 matching lines...) Expand 10 before | Expand all | Expand 10 after
342 old_val = *ptr; 310 old_val = *ptr;
343 new_val = old_val + increment; 311 new_val = old_val + increment;
344 } while (__sync_val_compare_and_swap(ptr, old_val, new_val) != old_val); 312 } while (__sync_val_compare_and_swap(ptr, old_val, new_val) != old_val);
345 313
346 return old_val + increment; 314 return old_val + increment;
347 } 315 }
348 316
349 inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, 317 inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
350 Atomic64 increment) { 318 Atomic64 increment) {
351 Atomic64 new_val = NoBarrier_AtomicIncrement(ptr, increment); 319 Atomic64 new_val = NoBarrier_AtomicIncrement(ptr, increment);
352 if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
353 __asm__ __volatile__("lfence" : : : "memory");
354 }
355 return new_val; 320 return new_val;
356 } 321 }
357 322
358 inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { 323 inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
359 __asm__ __volatile__("movq %1, %%mm0\n\t" // Use mmx reg for 64-bit atomic 324 __asm__ __volatile__("movq %1, %%mm0\n\t" // Use mmx reg for 64-bit atomic
360 "movq %%mm0, %0\n\t" // moves (ptr could be read-only) 325 "movq %%mm0, %0\n\t" // moves (ptr could be read-only)
361 "emms\n\t" // Empty mmx state/Reset FP regs 326 "emms\n\t" // Empty mmx state/Reset FP regs
362 : "=m" (*ptr) 327 : "=m" (*ptr)
363 : "m" (value) 328 : "m" (value)
364 : // mark the FP stack and mmx registers as clobbered 329 : // mark the FP stack and mmx registers as clobbered
(...skipping 36 matching lines...) Expand 10 before | Expand all | Expand 10 after
401 MemoryBarrier(); 366 MemoryBarrier();
402 return NoBarrier_Load(ptr); 367 return NoBarrier_Load(ptr);
403 } 368 }
404 369
405 #endif // defined(__x86_64__) 370 #endif // defined(__x86_64__)
406 371
407 inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, 372 inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
408 Atomic64 old_value, 373 Atomic64 old_value,
409 Atomic64 new_value) { 374 Atomic64 new_value) {
410 Atomic64 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value); 375 Atomic64 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
411 if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
412 __asm__ __volatile__("lfence" : : : "memory");
413 }
414 return x; 376 return x;
415 } 377 }
416 378
417 inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, 379 inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
418 Atomic64 old_value, 380 Atomic64 old_value,
419 Atomic64 new_value) { 381 Atomic64 new_value) {
420 return NoBarrier_CompareAndSwap(ptr, old_value, new_value); 382 return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
421 } 383 }
422 384
423 } // namespace base::subtle 385 } // namespace base::subtle
424 } // namespace base 386 } // namespace base
425 387
426 #undef ATOMICOPS_COMPILER_BARRIER 388 #undef ATOMICOPS_COMPILER_BARRIER
427 389
428 #endif // BASE_ATOMICOPS_INTERNALS_X86_H_ 390 #endif // BASE_ATOMICOPS_INTERNALS_X86_H_
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698