// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// This module wraps compiler-specific synchronization-related intrinsics.

#ifndef V8_ATOMIC_H_
#define V8_ATOMIC_H_

// Avoid warnings when compiled with /Wp64: __w64 is an MSVC-only annotation
// understood by the /Wp64 portability analysis, so define it away on other
// compilers.
#ifndef _MSC_VER
#define __w64
#endif
typedef __w64 int32_t Atomic32;
#ifdef V8_TARGET_ARCH_X64
// We need to be able to go between Atomic64 and AtomicWord implicitly. This
// means Atomic64 and AtomicWord should be the same type on 64-bit.
typedef intptr_t Atomic64;
#endif

// Use AtomicWord for a machine-sized pointer. It will use the Atomic32 or
// Atomic64 routines below, depending on your architecture.
typedef intptr_t AtomicWord;

inline void AtomicAdd(volatile Atomic32* ptr, Atomic32 value);
inline void AtomicOr(volatile Atomic32* ptr, Atomic32 value);
inline void AtomicAnd(volatile Atomic32* ptr, Atomic32 value);
inline bool AtomicCompareAndSwap(volatile Atomic32* ptr,
                                 Atomic32 old_value,
                                 Atomic32 new_value);

#if defined(V8_TARGET_ARCH_X64)
inline bool AtomicCompareAndSwap(volatile Atomic64* ptr,
                                 Atomic64 old_value,
                                 Atomic64 new_value);
#endif

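// A minimal illustrative sketch, not part of this header's API: the 32-bit
// compare-and-swap above is enough to build a test-and-set spin lock, with
// 0 meaning unlocked and 1 meaning locked. SpinLock/SpinUnlock below are
// hypothetical names, not V8 functions.
//
//   void SpinLock(volatile Atomic32* lock) {
//     // Retry until this thread is the one that flips the lock 0 -> 1.
//     while (!AtomicCompareAndSwap(lock, 0, 1)) { /* spin */ }
//   }
//
//   void SpinUnlock(volatile Atomic32* lock) {
//     // Release by swapping the lock back 1 -> 0.
//     AtomicCompareAndSwap(lock, 1, 0);
//   }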

#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X64)

// Microsoft Visual C++ specific stuff.
#ifdef _MSC_VER
#if (_MSC_VER >= 1500)
#include <intrin.h>
#else
// For older versions we have to provide the intrinsic signatures ourselves.
long _InterlockedExchangeAdd(long volatile* Addend, long Value);
long _InterlockedOr(long volatile* Value, long Mask);
long _InterlockedAnd(long volatile* Value, long Mask);
long _InterlockedCompareExchange(long volatile* Destination,
                                 long Exchange,
                                 long Comparand);

#pragma intrinsic(_InterlockedExchangeAdd)
#pragma intrinsic(_InterlockedOr)
#pragma intrinsic(_InterlockedAnd)
#pragma intrinsic(_InterlockedCompareExchange)
#endif

inline void AtomicAdd(volatile Atomic32* ptr, Atomic32 value) {
  _InterlockedExchangeAdd(reinterpret_cast<long volatile*>(ptr),
                          static_cast<long>(value));
}

inline void AtomicOr(volatile Atomic32* ptr, Atomic32 value) {
  _InterlockedOr(reinterpret_cast<long volatile*>(ptr),
                 static_cast<long>(value));
}

inline void AtomicAnd(volatile Atomic32* ptr, Atomic32 value) {
  _InterlockedAnd(reinterpret_cast<long volatile*>(ptr),
                  static_cast<long>(value));
}

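// _InterlockedCompareExchange returns the value *Destination held before the
// operation, so comparing the result with old_value tells us whether the
// swap was actually performed.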
inline bool AtomicCompareAndSwap(volatile Atomic32* ptr,
                                 Atomic32 old_value,
                                 Atomic32 new_value) {
  long result = _InterlockedCompareExchange(
      reinterpret_cast<long volatile*>(ptr),
      static_cast<long>(new_value),
      static_cast<long>(old_value));
  return result == static_cast<long>(old_value);
}

#if defined(V8_TARGET_ARCH_X64)
inline bool AtomicCompareAndSwap(volatile Atomic64* ptr,
                                 Atomic64 old_value,
                                 Atomic64 new_value) {
  __int64 result = _InterlockedCompareExchange64(
      reinterpret_cast<__int64 volatile*>(ptr),
      static_cast<__int64>(new_value),
      static_cast<__int64>(old_value));
  return result == static_cast<__int64>(old_value);
}
#endif

#define ATOMIC_SUPPORTED 1

#endif  // _MSC_VER

// GCC specific stuff.
#ifdef __GNUC__
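// Note: GCC documents the __sync_* builtins used below as (in most cases)
// full memory barriers, so no additional fencing is issued here.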
inline void AtomicAdd(volatile Atomic32* ptr, Atomic32 value) {
  __sync_fetch_and_add(ptr, value);
}

inline void AtomicOr(volatile Atomic32* ptr, Atomic32 value) {
  __sync_fetch_and_or(ptr, value);
}

inline void AtomicAnd(volatile Atomic32* ptr, Atomic32 value) {
  __sync_fetch_and_and(ptr, value);
}

inline bool AtomicCompareAndSwap(volatile Atomic32* ptr,
                                 Atomic32 old_value,
                                 Atomic32 new_value) {
  return __sync_bool_compare_and_swap(ptr, old_value, new_value);
}

#if defined(V8_TARGET_ARCH_X64)
inline bool AtomicCompareAndSwap(volatile Atomic64* ptr,
                                 Atomic64 old_value,
                                 Atomic64 new_value) {
  return __sync_bool_compare_and_swap(ptr, old_value, new_value);
}
#endif

#define ATOMIC_SUPPORTED 1
#endif  // __GNUC__

#endif  // defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X64)

#ifndef ATOMIC_SUPPORTED
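// Fallback for platforms where neither compiler path above applies: these
// are plain, non-atomic operations, and are only safe if the program never
// touches the same word from multiple threads concurrently.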
inline void AtomicAdd(volatile Atomic32* ptr, Atomic32 value) {
  *ptr += value;
}

inline void AtomicOr(volatile Atomic32* ptr, Atomic32 value) {
  *ptr |= value;
}

inline void AtomicAnd(volatile Atomic32* ptr, Atomic32 value) {
  *ptr &= value;
}

inline bool AtomicCompareAndSwap(volatile Atomic32* ptr,
                                 Atomic32 old_value,
                                 Atomic32 new_value) {
  if (*ptr == old_value) {
    *ptr = new_value;
    return true;
  }
  return false;
}

#if defined(V8_TARGET_ARCH_X64)
inline bool AtomicCompareAndSwap(volatile Atomic64* ptr,
                                 Atomic64 old_value,
                                 Atomic64 new_value) {
  if (*ptr == old_value) {
    *ptr = new_value;
    return true;
  }
  return false;
}
#endif

#define ATOMIC_SUPPORTED 0
#endif


#endif  // V8_ATOMIC_H_