Chromium Code Reviews

Side by Side Diff: third_party/WebKit/Source/wtf/Atomics.h

Issue 2741343017: Move files in wtf/ to platform/wtf/ (Part 4). (Closed)
Patch Set: Rebase. Created 3 years, 9 months ago
NEW (right column): third_party/WebKit/Source/wtf/Atomics.h after this CL, reduced to a forwarding header.

1 // Copyright 2017 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "platform/wtf/Atomics.h"
6
7 // The contents of this header was moved to platform/wtf as part of
8 // WTF migration project. See the following post for details:
9 // https://groups.google.com/a/chromium.org/d/msg/blink-dev/tLdAZCTlcAA/bYXVT8gYCAAJ
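For context, a small illustrative sketch (not part of this CL): because the old header now forwards to the new location, existing includes of wtf/Atomics.h should keep compiling unchanged, and callers can presumably move to the platform/wtf path at their own pace.

    // Illustrative only: both spellings end up pulling in the same contents.
    #include "wtf/Atomics.h"  // old path; after this CL it forwards to platform/wtf/Atomics.h

    int volatile gCounter = 0;                    // hypothetical variable for this sketch
    void bump() { atomicIncrement(&gCounter); }   // helpers are exported via using-declarations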
OLD (left column): the previous contents of third_party/WebKit/Source/wtf/Atomics.h, i.e. the code that moved to platform/wtf/Atomics.h.

1 /*
2 * Copyright (C) 2007, 2008, 2010, 2012 Apple Inc. All rights reserved.
3 * Copyright (C) 2007 Justin Haygood (jhaygood@reaktix.com)
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
15 * its contributors may be used to endorse or promote products derived
16 * from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
19 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
20 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
22 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
23 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
24 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30 #ifndef Atomics_h
31 #define Atomics_h
32
33 #include "wtf/AddressSanitizer.h"
34 #include "wtf/Assertions.h"
35 #include "wtf/CPU.h"
36
37 #include <stdint.h>
38
39 #if COMPILER(MSVC)
40 #include <windows.h>
41 #endif
42
43 #if defined(THREAD_SANITIZER)
44 #include <sanitizer/tsan_interface_atomic.h>
45 #endif
46
47 #if defined(ADDRESS_SANITIZER)
48 #include <sanitizer/asan_interface.h>
49 #endif
50
51 namespace WTF {
52
53 #if COMPILER(MSVC)
54
55 // atomicAdd returns the result of the addition.
56 ALWAYS_INLINE int atomicAdd(int volatile* addend, int increment) {
57 return InterlockedExchangeAdd(reinterpret_cast<long volatile*>(addend),
58 static_cast<long>(increment)) +
59 increment;
60 }
61 ALWAYS_INLINE unsigned atomicAdd(unsigned volatile* addend,
62 unsigned increment) {
63 return InterlockedExchangeAdd(reinterpret_cast<long volatile*>(addend),
64 static_cast<long>(increment)) +
65 increment;
66 }
67 #if defined(_WIN64)
68 ALWAYS_INLINE unsigned long long atomicAdd(unsigned long long volatile* addend,
69 unsigned long long increment) {
70 return InterlockedExchangeAdd64(reinterpret_cast<long long volatile*>(addend),
71 static_cast<long long>(increment)) +
72 increment;
73 }
74 #endif
75
76 // atomicSubtract returns the result of the subtraction.
77 ALWAYS_INLINE int atomicSubtract(int volatile* addend, int decrement) {
78 return InterlockedExchangeAdd(reinterpret_cast<long volatile*>(addend),
79 static_cast<long>(-decrement)) -
80 decrement;
81 }
82 ALWAYS_INLINE unsigned atomicSubtract(unsigned volatile* addend,
83 unsigned decrement) {
84 return InterlockedExchangeAdd(reinterpret_cast<long volatile*>(addend),
85 -static_cast<long>(decrement)) -
86 decrement;
87 }
88 #if defined(_WIN64)
89 ALWAYS_INLINE unsigned long long atomicSubtract(
90 unsigned long long volatile* addend,
91 unsigned long long decrement) {
92 return InterlockedExchangeAdd64(reinterpret_cast<long long volatile*>(addend),
93 -static_cast<long long>(decrement)) -
94 decrement;
95 }
96 #endif
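For reviewers reading the MSVC branch above: InterlockedExchangeAdd returns the value the addend held before the addition, so the trailing "+ increment" / "- decrement" converts that into the post-operation result that atomicAdd/atomicSubtract promise, matching the __sync_add_and_fetch / __sync_sub_and_fetch branch further down. A minimal sketch of the equivalence (illustrative, not part of the file; exampleAtomicAdd is a hypothetical name):

    // Illustrative sketch only. InterlockedExchangeAdd returns the value
    // *before* the addition; atomicAdd returns the value *after* it.
    #include <windows.h>

    inline long exampleAtomicAdd(long volatile* addend, long increment) {
      return InterlockedExchangeAdd(addend, increment) + increment;
    }
    // The non-MSVC branch below gets the post-add value directly:
    //   return __sync_add_and_fetch(addend, increment);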
97
98 ALWAYS_INLINE int atomicIncrement(int volatile* addend) {
99 return InterlockedIncrement(reinterpret_cast<long volatile*>(addend));
100 }
101 ALWAYS_INLINE int atomicDecrement(int volatile* addend) {
102 return InterlockedDecrement(reinterpret_cast<long volatile*>(addend));
103 }
104
105 ALWAYS_INLINE int64_t atomicIncrement(int64_t volatile* addend) {
106 return InterlockedIncrement64(reinterpret_cast<long long volatile*>(addend));
107 }
108 ALWAYS_INLINE int64_t atomicDecrement(int64_t volatile* addend) {
109 return InterlockedDecrement64(reinterpret_cast<long long volatile*>(addend));
110 }
111
112 ALWAYS_INLINE int atomicTestAndSetToOne(int volatile* ptr) {
113 int ret = InterlockedExchange(reinterpret_cast<long volatile*>(ptr), 1);
114 DCHECK(!ret || ret == 1);
115 return ret;
116 }
117
118 ALWAYS_INLINE void atomicSetOneToZero(int volatile* ptr) {
119 DCHECK_EQ(*ptr, 1);
120 InterlockedExchange(reinterpret_cast<long volatile*>(ptr), 0);
121 }
122
123 #else
124
125 // atomicAdd returns the result of the addition.
126 ALWAYS_INLINE int atomicAdd(int volatile* addend, int increment) {
127 return __sync_add_and_fetch(addend, increment);
128 }
129 ALWAYS_INLINE unsigned atomicAdd(unsigned volatile* addend,
130 unsigned increment) {
131 return __sync_add_and_fetch(addend, increment);
132 }
133 ALWAYS_INLINE unsigned long atomicAdd(unsigned long volatile* addend,
134 unsigned long increment) {
135 return __sync_add_and_fetch(addend, increment);
136 }
137 // atomicSubtract returns the result of the subtraction.
138 ALWAYS_INLINE int atomicSubtract(int volatile* addend, int decrement) {
139 return __sync_sub_and_fetch(addend, decrement);
140 }
141 ALWAYS_INLINE unsigned atomicSubtract(unsigned volatile* addend,
142 unsigned decrement) {
143 return __sync_sub_and_fetch(addend, decrement);
144 }
145 ALWAYS_INLINE unsigned long atomicSubtract(unsigned long volatile* addend,
146 unsigned long decrement) {
147 return __sync_sub_and_fetch(addend, decrement);
148 }
149
150 ALWAYS_INLINE int atomicIncrement(int volatile* addend) {
151 return atomicAdd(addend, 1);
152 }
153 ALWAYS_INLINE int atomicDecrement(int volatile* addend) {
154 return atomicSubtract(addend, 1);
155 }
156
157 ALWAYS_INLINE int64_t atomicIncrement(int64_t volatile* addend) {
158 return __sync_add_and_fetch(addend, 1);
159 }
160 ALWAYS_INLINE int64_t atomicDecrement(int64_t volatile* addend) {
161 return __sync_sub_and_fetch(addend, 1);
162 }
163
164 ALWAYS_INLINE int atomicTestAndSetToOne(int volatile* ptr) {
165 int ret = __sync_lock_test_and_set(ptr, 1);
166 DCHECK(!ret || ret == 1);
167 return ret;
168 }
169
170 ALWAYS_INLINE void atomicSetOneToZero(int volatile* ptr) {
171 DCHECK_EQ(*ptr, 1);
172 __sync_lock_release(ptr);
173 }
174 #endif
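The test-and-set pair above is the building block for simple one-bit flags: atomicTestAndSetToOne returns the previous value (0 means this caller flipped it), and atomicSetOneToZero releases the flag, DCHECKing that it was held. A minimal usage sketch (illustrative, not part of the file; gFlag and tryDoGuardedWork are hypothetical):

    static int volatile gFlag = 0;  // hypothetical flag for this sketch

    void tryDoGuardedWork() {
      if (!WTF::atomicTestAndSetToOne(&gFlag)) {
        // Only the caller that flipped 0 -> 1 gets here.
        // ... do the guarded work ...
        WTF::atomicSetOneToZero(&gFlag);  // release; DCHECKs that the flag was 1
      }
    }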
175
176 #if defined(THREAD_SANITIZER)
177 // The definitions below assume an LP64 data model. This is fine because
178 // TSan is only supported on x86_64 Linux.
179 #if CPU(64BIT) && OS(LINUX)
180 ALWAYS_INLINE void releaseStore(volatile int* ptr, int value) {
181 __tsan_atomic32_store(ptr, value, __tsan_memory_order_release);
182 }
183 ALWAYS_INLINE void releaseStore(volatile unsigned* ptr, unsigned value) {
184 __tsan_atomic32_store(reinterpret_cast<volatile int*>(ptr),
185 static_cast<int>(value), __tsan_memory_order_release);
186 }
187 ALWAYS_INLINE void releaseStore(volatile long* ptr, long value) {
188 __tsan_atomic64_store(reinterpret_cast<volatile __tsan_atomic64*>(ptr),
189 static_cast<__tsan_atomic64>(value),
190 __tsan_memory_order_release);
191 }
192 ALWAYS_INLINE void releaseStore(volatile unsigned long* ptr,
193 unsigned long value) {
194 __tsan_atomic64_store(reinterpret_cast<volatile __tsan_atomic64*>(ptr),
195 static_cast<__tsan_atomic64>(value),
196 __tsan_memory_order_release);
197 }
198 ALWAYS_INLINE void releaseStore(volatile unsigned long long* ptr,
199 unsigned long long value) {
200 __tsan_atomic64_store(reinterpret_cast<volatile __tsan_atomic64*>(ptr),
201 static_cast<__tsan_atomic64>(value),
202 __tsan_memory_order_release);
203 }
204 ALWAYS_INLINE void releaseStore(void* volatile* ptr, void* value) {
205 __tsan_atomic64_store(reinterpret_cast<volatile __tsan_atomic64*>(ptr),
206 reinterpret_cast<__tsan_atomic64>(value),
207 __tsan_memory_order_release);
208 }
209 ALWAYS_INLINE int acquireLoad(volatile const int* ptr) {
210 return __tsan_atomic32_load(ptr, __tsan_memory_order_acquire);
211 }
212 ALWAYS_INLINE unsigned acquireLoad(volatile const unsigned* ptr) {
213 return static_cast<unsigned>(__tsan_atomic32_load(
214 reinterpret_cast<volatile const int*>(ptr), __tsan_memory_order_acquire));
215 }
216 ALWAYS_INLINE long acquireLoad(volatile const long* ptr) {
217 return static_cast<long>(__tsan_atomic64_load(
218 reinterpret_cast<volatile const __tsan_atomic64*>(ptr),
219 __tsan_memory_order_acquire));
220 }
221 ALWAYS_INLINE unsigned long acquireLoad(volatile const unsigned long* ptr) {
222 return static_cast<unsigned long>(__tsan_atomic64_load(
223 reinterpret_cast<volatile const __tsan_atomic64*>(ptr),
224 __tsan_memory_order_acquire));
225 }
226 ALWAYS_INLINE void* acquireLoad(void* volatile const* ptr) {
227 return reinterpret_cast<void*>(__tsan_atomic64_load(
228 reinterpret_cast<volatile const __tsan_atomic64*>(ptr),
229 __tsan_memory_order_acquire));
230 }
231
232 // Do not use noBarrierStore/noBarrierLoad for synchronization.
233 ALWAYS_INLINE void noBarrierStore(volatile float* ptr, float value) {
234 static_assert(sizeof(int) == sizeof(float),
235 "int and float are different sizes");
236 union {
237 int ivalue;
238 float fvalue;
239 } u;
240 u.fvalue = value;
241 __tsan_atomic32_store(reinterpret_cast<volatile __tsan_atomic32*>(ptr),
242 u.ivalue, __tsan_memory_order_relaxed);
243 }
244
245 ALWAYS_INLINE float noBarrierLoad(volatile const float* ptr) {
246 static_assert(sizeof(int) == sizeof(float),
247 "int and float are different sizes");
248 union {
249 int ivalue;
250 float fvalue;
251 } u;
252 u.ivalue = __tsan_atomic32_load(reinterpret_cast<volatile const int*>(ptr),
253 __tsan_memory_order_relaxed);
254 return u.fvalue;
255 }
256 #endif
257
258 #else // defined(THREAD_SANITIZER)
259
260 #if CPU(X86) || CPU(X86_64)
261 // Only compiler barrier is needed.
262 #if COMPILER(MSVC)
263 // Starting from Visual Studio 2005 compiler guarantees acquire and release
264 // semantics for operations on volatile variables. See MSDN entry for
265 // MemoryBarrier macro.
266 #define MEMORY_BARRIER()
267 #else
268 #define MEMORY_BARRIER() __asm__ __volatile__("" : : : "memory")
269 #endif
270 #else
271 // Fallback to the compiler intrinsic on all other platforms.
272 #define MEMORY_BARRIER() __sync_synchronize()
273 #endif
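MEMORY_BARRIER is only a compiler fence on x86 because the hardware already gives ordinary loads acquire ordering and ordinary stores release ordering; on other CPUs __sync_synchronize emits a real fence. In C++11 terms, the helpers built from it below correspond roughly to release stores paired with acquire loads. A sketch of that correspondence (illustrative, not part of the file; gReady is hypothetical):

    // Rough modern equivalent of the releaseStore/acquireLoad pair defined below.
    #include <atomic>

    std::atomic<int> gReady{0};  // hypothetical variable for this sketch

    void releaseStoreEquivalent(int value) {
      gReady.store(value, std::memory_order_release);  // ~ releaseStore(ptr, value)
    }
    int acquireLoadEquivalent() {
      return gReady.load(std::memory_order_acquire);   // ~ acquireLoad(ptr)
    }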
274
275 ALWAYS_INLINE void releaseStore(volatile int* ptr, int value) {
276 MEMORY_BARRIER();
277 *ptr = value;
278 }
279 ALWAYS_INLINE void releaseStore(volatile unsigned* ptr, unsigned value) {
280 MEMORY_BARRIER();
281 *ptr = value;
282 }
283 ALWAYS_INLINE void releaseStore(volatile long* ptr, long value) {
284 MEMORY_BARRIER();
285 *ptr = value;
286 }
287 ALWAYS_INLINE void releaseStore(volatile unsigned long* ptr,
288 unsigned long value) {
289 MEMORY_BARRIER();
290 *ptr = value;
291 }
292 #if CPU(64BIT)
293 ALWAYS_INLINE void releaseStore(volatile unsigned long long* ptr,
294 unsigned long long value) {
295 MEMORY_BARRIER();
296 *ptr = value;
297 }
298 #endif
299 ALWAYS_INLINE void releaseStore(void* volatile* ptr, void* value) {
300 MEMORY_BARRIER();
301 *ptr = value;
302 }
303
304 ALWAYS_INLINE int acquireLoad(volatile const int* ptr) {
305 int value = *ptr;
306 MEMORY_BARRIER();
307 return value;
308 }
309 ALWAYS_INLINE unsigned acquireLoad(volatile const unsigned* ptr) {
310 unsigned value = *ptr;
311 MEMORY_BARRIER();
312 return value;
313 }
314 ALWAYS_INLINE long acquireLoad(volatile const long* ptr) {
315 long value = *ptr;
316 MEMORY_BARRIER();
317 return value;
318 }
319 ALWAYS_INLINE unsigned long acquireLoad(volatile const unsigned long* ptr) {
320 unsigned long value = *ptr;
321 MEMORY_BARRIER();
322 return value;
323 }
324 #if CPU(64BIT)
325 ALWAYS_INLINE unsigned long long acquireLoad(
326 volatile const unsigned long long* ptr) {
327 unsigned long long value = *ptr;
328 MEMORY_BARRIER();
329 return value;
330 }
331 #endif
332 ALWAYS_INLINE void* acquireLoad(void* volatile const* ptr) {
333 void* value = *ptr;
334 MEMORY_BARRIER();
335 return value;
336 }
337
338 // Do not use noBarrierStore/noBarrierLoad for synchronization.
339 ALWAYS_INLINE void noBarrierStore(volatile float* ptr, float value) {
340 *ptr = value;
341 }
342
343 ALWAYS_INLINE float noBarrierLoad(volatile const float* ptr) {
344 float value = *ptr;
345 return value;
346 }
347
348 #if defined(ADDRESS_SANITIZER)
349
350 NO_SANITIZE_ADDRESS ALWAYS_INLINE void asanUnsafeReleaseStore(
351 volatile unsigned* ptr,
352 unsigned value) {
353 MEMORY_BARRIER();
354 *ptr = value;
355 }
356
357 NO_SANITIZE_ADDRESS ALWAYS_INLINE unsigned asanUnsafeAcquireLoad(
358 volatile const unsigned* ptr) {
359 unsigned value = *ptr;
360 MEMORY_BARRIER();
361 return value;
362 }
363
364 #endif // defined(ADDRESS_SANITIZER)
365
366 #undef MEMORY_BARRIER
367
368 #endif
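Both the TSan and non-TSan definitions above implement the same contract: a releaseStore publishes everything written before it to any thread whose acquireLoad observes the stored value. A typical pairing looks like this (illustrative, not part of the file; gPayload and gReady are hypothetical):

    static int gPayload = 0;         // ordinary data, published by the flag below
    static int volatile gReady = 0;  // hypothetical flag for this sketch

    void producer() {
      gPayload = 42;
      WTF::releaseStore(&gReady, 1);
    }

    int consumer() {
      if (WTF::acquireLoad(&gReady))
        return gPayload;  // guaranteed to read 42 once the flag is observed as 1
      return -1;          // not published yet
    }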
369
370 #if !defined(ADDRESS_SANITIZER)
371
372 ALWAYS_INLINE void asanUnsafeReleaseStore(volatile unsigned* ptr,
373 unsigned value) {
374 releaseStore(ptr, value);
375 }
376
377 ALWAYS_INLINE unsigned asanUnsafeAcquireLoad(volatile const unsigned* ptr) {
378 return acquireLoad(ptr);
379 }
380
381 #endif
382
383 } // namespace WTF
384
385 using WTF::atomicAdd;
386 using WTF::atomicSubtract;
387 using WTF::atomicDecrement;
388 using WTF::atomicIncrement;
389 using WTF::atomicTestAndSetToOne;
390 using WTF::atomicSetOneToZero;
391 using WTF::acquireLoad;
392 using WTF::releaseStore;
393 using WTF::noBarrierLoad;
394 using WTF::noBarrierStore;
395
396 // These methods allow loading from and storing to poisoned memory. Only
397 // use these methods if you know what you are doing since they will
398 // silence use-after-poison errors from ASan.
399 using WTF::asanUnsafeAcquireLoad;
400 using WTF::asanUnsafeReleaseStore;
401
402 #endif // Atomics_h
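A closing usage sketch for the exported names (illustrative, not part of the file; gState and example are hypothetical): the using-declarations above let callers use the helpers unqualified, and the asanUnsafe variants behave like releaseStore/acquireLoad except that under ASan they will not report a use-after-poison for intentionally poisoned memory.

    #include "wtf/Atomics.h"

    static unsigned volatile gState = 0;  // hypothetical variable for this sketch

    void example() {
      releaseStore(&gState, 1u);
      unsigned observed = acquireLoad(&gState);
      // Only for memory that may be deliberately poisoned under ASan:
      unsigned raw = asanUnsafeAcquireLoad(&gState);
      (void)observed;
      (void)raw;
    }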
