OLD | NEW |
1 /* | 1 /* |
2 * Copyright (C) 2007, 2008, 2010, 2012 Apple Inc. All rights reserved. | 2 * Copyright (C) 2007, 2008, 2010, 2012 Apple Inc. All rights reserved. |
3 * Copyright (C) 2007 Justin Haygood (jhaygood@reaktix.com) | 3 * Copyright (C) 2007 Justin Haygood (jhaygood@reaktix.com) |
4 * | 4 * |
5 * Redistribution and use in source and binary forms, with or without | 5 * Redistribution and use in source and binary forms, with or without |
6 * modification, are permitted provided that the following conditions | 6 * modification, are permitted provided that the following conditions |
7 * are met: | 7 * are met: |
8 * | 8 * |
9 * 1. Redistributions of source code must retain the above copyright | 9 * 1. Redistributions of source code must retain the above copyright |
10 * notice, this list of conditions and the following disclaimer. | 10 * notice, this list of conditions and the following disclaimer. |
(...skipping 93 matching lines...)
104 | 104 |
105 ALWAYS_INLINE int64_t atomicIncrement(int64_t volatile* addend) { | 105 ALWAYS_INLINE int64_t atomicIncrement(int64_t volatile* addend) { |
106 return InterlockedIncrement64(reinterpret_cast<long long volatile*>(addend)); | 106 return InterlockedIncrement64(reinterpret_cast<long long volatile*>(addend)); |
107 } | 107 } |
108 ALWAYS_INLINE int64_t atomicDecrement(int64_t volatile* addend) { | 108 ALWAYS_INLINE int64_t atomicDecrement(int64_t volatile* addend) { |
109 return InterlockedDecrement64(reinterpret_cast<long long volatile*>(addend)); | 109 return InterlockedDecrement64(reinterpret_cast<long long volatile*>(addend)); |
110 } | 110 } |
111 | 111 |
112 ALWAYS_INLINE int atomicTestAndSetToOne(int volatile* ptr) { | 112 ALWAYS_INLINE int atomicTestAndSetToOne(int volatile* ptr) { |
113 int ret = InterlockedExchange(reinterpret_cast<long volatile*>(ptr), 1); | 113 int ret = InterlockedExchange(reinterpret_cast<long volatile*>(ptr), 1); |
114 ASSERT(!ret || ret == 1); | 114 DCHECK(!ret || ret == 1); |
115 return ret; | 115 return ret; |
116 } | 116 } |
117 | 117 |
118 ALWAYS_INLINE void atomicSetOneToZero(int volatile* ptr) { | 118 ALWAYS_INLINE void atomicSetOneToZero(int volatile* ptr) { |
119 ASSERT(*ptr == 1); | 119 DCHECK_EQ(*ptr, 1); |
120 InterlockedExchange(reinterpret_cast<long volatile*>(ptr), 0); | 120 InterlockedExchange(reinterpret_cast<long volatile*>(ptr), 0); |
121 } | 121 } |
122 | 122 |
123 #else | 123 #else |
124 | 124 |
125 // atomicAdd returns the result of the addition. | 125 // atomicAdd returns the result of the addition. |
126 ALWAYS_INLINE int atomicAdd(int volatile* addend, int increment) { | 126 ALWAYS_INLINE int atomicAdd(int volatile* addend, int increment) { |
127 return __sync_add_and_fetch(addend, increment); | 127 return __sync_add_and_fetch(addend, increment); |
128 } | 128 } |
129 ALWAYS_INLINE unsigned atomicAdd(unsigned volatile* addend, | 129 ALWAYS_INLINE unsigned atomicAdd(unsigned volatile* addend, |
(...skipping 26 matching lines...)
156 | 156 |
157 ALWAYS_INLINE int64_t atomicIncrement(int64_t volatile* addend) { | 157 ALWAYS_INLINE int64_t atomicIncrement(int64_t volatile* addend) { |
158 return __sync_add_and_fetch(addend, 1); | 158 return __sync_add_and_fetch(addend, 1); |
159 } | 159 } |
160 ALWAYS_INLINE int64_t atomicDecrement(int64_t volatile* addend) { | 160 ALWAYS_INLINE int64_t atomicDecrement(int64_t volatile* addend) { |
161 return __sync_sub_and_fetch(addend, 1); | 161 return __sync_sub_and_fetch(addend, 1); |
162 } | 162 } |
163 | 163 |
164 ALWAYS_INLINE int atomicTestAndSetToOne(int volatile* ptr) { | 164 ALWAYS_INLINE int atomicTestAndSetToOne(int volatile* ptr) { |
165 int ret = __sync_lock_test_and_set(ptr, 1); | 165 int ret = __sync_lock_test_and_set(ptr, 1); |
166 ASSERT(!ret || ret == 1); | 166 DCHECK(!ret || ret == 1); |
167 return ret; | 167 return ret; |
168 } | 168 } |
169 | 169 |
170 ALWAYS_INLINE void atomicSetOneToZero(int volatile* ptr) { | 170 ALWAYS_INLINE void atomicSetOneToZero(int volatile* ptr) { |
171 ASSERT(*ptr == 1); | 171 DCHECK_EQ(*ptr, 1); |
172 __sync_lock_release(ptr); | 172 __sync_lock_release(ptr); |
173 } | 173 } |
174 #endif | 174 #endif |
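
A caller-side sketch (not part of this patch) of how the primitives above compose on either platform branch: atomicTestAndSetToOne returns the previous value of the flag, so a zero return means this thread won the race, and atomicSetOneToZero releases it (with the DCHECK verifying the flag really was held). The flag, counter, doProtectedWork(), and the "wtf/Atomics.h" include path are illustrative assumptions.

    #include <stdint.h>
    #include "wtf/Atomics.h"               // assumed include path for this header

    static int volatile s_flag = 0;        // 0 = free, 1 = held
    static int64_t volatile s_callCount = 0;

    void doProtectedWork();                // hypothetical

    void runProtected()
    {
        // Previous value 0 means we acquired the flag; 1 means another
        // thread still holds it, so keep retrying (a real caller would
        // yield or back off instead of busy-spinning).
        while (atomicTestAndSetToOne(&s_flag)) { }
        atomicIncrement(&s_callCount);     // returns the incremented value
        doProtectedWork();
        atomicSetOneToZero(&s_flag);       // release; DCHECKs the flag was 1
    }
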
175 | 175 |
176 #if defined(THREAD_SANITIZER) | 176 #if defined(THREAD_SANITIZER) |
177 // The definitions below assume an LP64 data model. This is fine because | 177 // The definitions below assume an LP64 data model. This is fine because |
178 // TSan is only supported on x86_64 Linux. | 178 // TSan is only supported on x86_64 Linux. |
179 #if CPU(64BIT) && OS(LINUX) | 179 #if CPU(64BIT) && OS(LINUX) |
180 ALWAYS_INLINE void releaseStore(volatile int* ptr, int value) { | 180 ALWAYS_INLINE void releaseStore(volatile int* ptr, int value) { |
181 __tsan_atomic32_store(ptr, value, __tsan_memory_order_release); | 181 __tsan_atomic32_store(ptr, value, __tsan_memory_order_release); |
(...skipping 221 matching lines...)
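
The TSan-aware definitions above exist to keep the usual release/acquire publication pattern visible to the race detector. A minimal sketch of that pattern (not part of this patch; g_data and g_ready are hypothetical, and it assumes the plain acquireLoad counterpart defined in the elided portion of this header):

    static int g_data = 0;
    static int volatile g_ready = 0;

    void producer()
    {
        g_data = 42;                       // plain store...
        WTF::releaseStore(&g_ready, 1);    // ...published by the release store
    }

    void consumer()
    {
        if (WTF::acquireLoad(&g_ready)) {
            // The acquire load pairs with the release store above, so the
            // write to g_data is guaranteed to be visible here.
        }
    }
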
403 using WTF::noBarrierLoad; | 403 using WTF::noBarrierLoad; |
404 using WTF::noBarrierStore; | 404 using WTF::noBarrierStore; |
405 | 405 |
406 // These methods allow loading from and storing to poisoned memory. Only | 406 // These methods allow loading from and storing to poisoned memory. Only |
407 // use these methods if you know what you are doing since they will | 407 // use these methods if you know what you are doing since they will |
408 // silence use-after-poison errors from ASan. | 408 // silence use-after-poison errors from ASan. |
409 using WTF::asanUnsafeAcquireLoad; | 409 using WTF::asanUnsafeAcquireLoad; |
410 using WTF::asanUnsafeReleaseStore; | 410 using WTF::asanUnsafeReleaseStore; |
411 | 411 |
412 #endif // Atomics_h | 412 #endif // Atomics_h |
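
For the asanUnsafe pair re-exported just above, a hedged sketch of the intended use (not part of this patch): code that knowingly reads memory ASan may have poisoned, such as a verifier inspecting a header inside a poisoned region. The struct, field, and the unsigned overload are assumptions for illustration.

    struct PoisonableHeader {              // hypothetical
        unsigned state;                    // 1 while the object is live
    };

    bool isStillLive(volatile PoisonableHeader* header)
    {
        // A regular acquireLoad would trip ASan's use-after-poison check if
        // this region has been poisoned; the asanUnsafe variant performs the
        // same acquire load but silences that report.
        return WTF::asanUnsafeAcquireLoad(&header->state) == 1;
    }
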