OLD | NEW |
1 // Copyright 2015 the V8 project authors. All rights reserved. | 1 // Copyright 2015 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/v8.h" | 5 #include "src/v8.h" |
6 | 6 |
7 #include "src/arguments.h" | 7 #include "src/arguments.h" |
8 #include "src/base/macros.h" | 8 #include "src/base/macros.h" |
9 #include "src/base/platform/mutex.h" | 9 #include "src/base/platform/mutex.h" |
10 #include "src/conversions.h" | 10 #include "src/conversions.h" |
(...skipping 85 matching lines...) |
96 | 96 |
97 inline void StoreSeqCst(uint64_t* p, uint64_t value) { | 97 inline void StoreSeqCst(uint64_t* p, uint64_t value) { |
98 base::LockGuard<base::Mutex> lock_guard(atomic_mutex.Pointer()); | 98 base::LockGuard<base::Mutex> lock_guard(atomic_mutex.Pointer()); |
99 *p = value; | 99 *p = value; |
100 } | 100 } |
101 | 101 |
102 #endif // ATOMICS_REQUIRE_LOCK_64_BIT | 102 #endif // ATOMICS_REQUIRE_LOCK_64_BIT |
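
On targets where ATOMICS_REQUIRE_LOCK_64_BIT is set there is no native 64-bit atomic, so every 64-bit load and store is serialized through one lazily-initialized, process-wide mutex. A minimal standalone sketch of the same pattern, using std::mutex in place of V8's base::LazyMutex (the names here are illustrative, not V8's):

    #include <cstdint>
    #include <mutex>

    // Hypothetical standalone analogue of the lock-based 64-bit fallback.
    static std::mutex atomic_mutex;  // one lock guards every 64-bit access

    inline uint64_t LoadSeqCst(uint64_t* p) {
      std::lock_guard<std::mutex> lock_guard(atomic_mutex);
      return *p;
    }

    inline void StoreSeqCst(uint64_t* p, uint64_t value) {
      std::lock_guard<std::mutex> lock_guard(atomic_mutex);
      *p = value;
    }

Because every 64-bit access takes the same lock, a store can never tear against a concurrent load, which matches the sequentially consistent behavior the native intrinsics provide on the other platform paths.
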
103 | 103 |
104 #elif V8_CC_MSVC | 104 #elif V8_CC_MSVC |
105 | 105 |
106 #define _InterlockedCompareExchange32 _InterlockedCompareExchange | 106 #define InterlockedCompareExchange32 _InterlockedCompareExchange |
107 #define _InterlockedExchange32 _InterlockedExchange | 107 #define InterlockedExchange32 _InterlockedExchange |
108 #define _InterlockedExchangeAdd32 _InterlockedExchangeAdd | 108 #define InterlockedExchangeAdd32 _InterlockedExchangeAdd |
109 #define _InterlockedAnd32 _InterlockedAnd | 109 #define InterlockedAnd32 _InterlockedAnd |
110 #define _InterlockedOr32 _InterlockedOr | 110 #define InterlockedOr32 _InterlockedOr |
111 #define _InterlockedXor32 _InterlockedXor | 111 #define InterlockedXor32 _InterlockedXor |
| 112 #define InterlockedExchangeAdd16 _InterlockedExchangeAdd16 |
| 113 #define InterlockedCompareExchange8 _InterlockedCompareExchange8 |
| 114 #define InterlockedExchangeAdd8 _InterlockedExchangeAdd8 |
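
The new side renames the aliases from _Interlocked*32 to Interlocked*32. Identifiers that begin with an underscore followed by an uppercase letter are reserved to the implementation, so defining macros with those spellings risks colliding with MSVC's own intrinsic declarations; the unprefixed Interlocked<Op><Width> spellings are safe to define and give the ATOMIC_OPS macro below one uniform name shape to paste ##suffix onto. The three added 16- and 8-bit aliases presumably cover widths for which no Interlocked* name is otherwise available; the remaining widths resolve to existing Interlocked* functions or macros. A sketch of how a pasted name resolves (MSVC-only; assumes the aliases above are in scope):

    #include <intrin.h>

    inline long BumpCounter(long* counter) {
      // InterlockedExchangeAdd##suffix  (pasted inside ATOMIC_OPS)
      //   -> InterlockedExchangeAdd32   (the unreserved alias above)
      //   -> _InterlockedExchangeAdd    (MSVC's 32-bit intrinsic)
      // Returns the value *before* the addition.
      return InterlockedExchangeAdd32(counter, 1);
    }
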
112 | 115 |
113 #define INTEGER_TYPES(V) \ | 116 #define INTEGER_TYPES(V) \ |
114 V(int8_t, 8, char) \ | 117 V(int8_t, 8, char) \ |
115 V(uint8_t, 8, char) \ | 118 V(uint8_t, 8, char) \ |
116 V(int16_t, 16, short) /* NOLINT(runtime/int) */ \ | 119 V(int16_t, 16, short) /* NOLINT(runtime/int) */ \ |
117 V(uint16_t, 16, short) /* NOLINT(runtime/int) */ \ | 120 V(uint16_t, 16, short) /* NOLINT(runtime/int) */ \ |
118 V(int32_t, 32, long) /* NOLINT(runtime/int) */ \ | 121 V(int32_t, 32, long) /* NOLINT(runtime/int) */ \ |
119 V(uint32_t, 32, long) /* NOLINT(runtime/int) */ \ | 122 V(uint32_t, 32, long) /* NOLINT(runtime/int) */ \ |
120 V(int64_t, 64, LONGLONG) \ | 123 V(int64_t, 64, LONGLONG) \ |
121 V(uint64_t, 64, LONGLONG) | 124 V(uint64_t, 64, LONGLONG) |
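
The third column of each row is the parameter type the corresponding Interlocked API takes. Windows is an LLP64 platform, so long is 32 bits wide and is the natural vctype for the int32_t/uint32_t rows, while the 64-bit rows use the Windows LONGLONG typedef; the bit_cast between each V8 type and its vctype is only sound because the widths match exactly. A sanity check of that width assumption (holds under MSVC, the only compiler that reaches this branch):

    #include <cstdint>

    // LLP64 width assumptions behind the INTEGER_TYPES table.
    static_assert(sizeof(long) == sizeof(int32_t), "long must be 32 bits");
    static_assert(sizeof(short) == sizeof(int16_t), "short must be 16 bits");
    static_assert(sizeof(char) == sizeof(int8_t), "char must be 8 bits");
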
122 | 125 |
123 #define ATOMIC_OPS(type, suffix, vctype) \ | 126 #define ATOMIC_OPS(type, suffix, vctype) \ |
124 inline type CompareExchangeSeqCst(volatile type* p, type oldval, \ | 127 inline type CompareExchangeSeqCst(type* p, type oldval, type newval) { \ |
125 type newval) { \ | 128 return InterlockedCompareExchange##suffix(reinterpret_cast<vctype*>(p), \ |
126 return _InterlockedCompareExchange##suffix( \ | 129 bit_cast<vctype>(newval), \ |
127 reinterpret_cast<volatile vctype*>(p), bit_cast<vctype>(newval), \ | 130 bit_cast<vctype>(oldval)); \ |
128 bit_cast<vctype>(oldval)); \ | 131 } \ |
129 } \ | 132 inline type LoadSeqCst(type* p) { return *p; } \ |
130 inline type LoadSeqCst(volatile type* p) { return *p; } \ | 133 inline void StoreSeqCst(type* p, type value) { \ |
131 inline void StoreSeqCst(volatile type* p, type value) { \ | 134 InterlockedExchange##suffix(reinterpret_cast<vctype*>(p), \ |
132 _InterlockedExchange##suffix(reinterpret_cast<volatile vctype*>(p), \ | 135 bit_cast<vctype>(value)); \ |
133 bit_cast<vctype>(value)); \ | 136 } \ |
134 } \ | 137 inline type AddSeqCst(type* p, type value) { \ |
135 inline type AddSeqCst(volatile type* p, type value) { \ | 138 return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p), \ |
136 return _InterlockedExchangeAdd##suffix( \ | 139 bit_cast<vctype>(value)); \ |
137 reinterpret_cast<volatile vctype*>(p), bit_cast<vctype>(value)); \ | 140 } \ |
138 } \ | 141 inline type SubSeqCst(type* p, type value) { \ |
139 inline type SubSeqCst(volatile type* p, type value) { \ | 142 return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p), \ |
140 return _InterlockedExchangeAdd##suffix( \ | 143 -bit_cast<vctype>(value)); \ |
141 reinterpret_cast<volatile vctype*>(p), -bit_cast<vctype>(value)); \ | 144 } \ |
142 } \ | 145 inline type AndSeqCst(type* p, type value) { \ |
143 inline type AndSeqCst(volatile type* p, type value) { \ | 146 return InterlockedAnd##suffix(reinterpret_cast<vctype*>(p), \ |
144 return _InterlockedAnd##suffix(reinterpret_cast<volatile vctype*>(p), \ | 147 bit_cast<vctype>(value)); \ |
145 bit_cast<vctype>(value)); \ | 148 } \ |
146 } \ | 149 inline type OrSeqCst(type* p, type value) { \ |
147 inline type OrSeqCst(volatile type* p, type value) { \ | 150 return InterlockedOr##suffix(reinterpret_cast<vctype*>(p), \ |
148 return _InterlockedOr##suffix(reinterpret_cast<volatile vctype*>(p), \ | 151 bit_cast<vctype>(value)); \ |
149 bit_cast<vctype>(value)); \ | 152 } \ |
150 } \ | 153 inline type XorSeqCst(type* p, type value) { \ |
151 inline type XorSeqCst(volatile type* p, type value) { \ | 154 return InterlockedXor##suffix(reinterpret_cast<vctype*>(p), \ |
152 return _InterlockedXor##suffix(reinterpret_cast<volatile vctype*>(p), \ | 155 bit_cast<vctype>(value)); \ |
153 bit_cast<vctype>(value)); \ | 156 } \ |
154 } \ | 157 inline type ExchangeSeqCst(type* p, type value) { \ |
155 inline type ExchangeSeqCst(volatile type* p, type value) { \ | 158 return InterlockedExchange##suffix(reinterpret_cast<vctype*>(p), \ |
156 return _InterlockedExchange##suffix(reinterpret_cast<volatile vctype*>(p), \ | 159 bit_cast<vctype>(value)); \ |
157 bit_cast<vctype>(value)); \ | |
158 } | 160 } |
159 INTEGER_TYPES(ATOMIC_OPS) | 161 INTEGER_TYPES(ATOMIC_OPS) |
160 #undef ATOMIC_OPS | 162 #undef ATOMIC_OPS |
161 | 163 |
162 #undef INTEGER_TYPES | 164 #undef INTEGER_TYPES |
163 #undef _InterlockedCompareExchange32 | 165 #undef InterlockedCompareExchange32 |
164 #undef _InterlockedExchange32 | 166 #undef InterlockedExchange32 |
165 #undef _InterlockedExchangeAdd32 | 167 #undef InterlockedExchangeAdd32 |
166 #undef _InterlockedAnd32 | 168 #undef InterlockedAnd32 |
167 #undef _InterlockedOr32 | 169 #undef InterlockedOr32 |
168 #undef _InterlockedXor32 | 170 #undef InterlockedXor32 |
| 171 #undef InterlockedExchangeAdd16 |
| 172 #undef InterlockedCompareExchange8 |
| 173 #undef InterlockedExchangeAdd8 |
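
INTEGER_TYPES(ATOMIC_OPS) is an X-macro instantiation: each V(type, suffix, vctype) row re-invokes ATOMIC_OPS, stamping out the whole SeqCst overload family once per integer type. Two details are easy to miss. First, SubSeqCst is an exchange-add of the negated value; in two's-complement arithmetic adding -v is the same as subtracting v, so one intrinsic serves both operations (and, like Add, it returns the value before the update). Second, the new side drops volatile from the pointer parameters; the qualifier is reacquired implicitly when the cast pointer binds to the intrinsic's volatile parameter. Roughly, the int32_t row expands to (illustrative, after both macro layers and the alias substitution):

    inline int32_t AddSeqCst(int32_t* p, int32_t value) {
      // InterlockedExchangeAdd32 aliases the _InterlockedExchangeAdd intrinsic.
      return InterlockedExchangeAdd32(reinterpret_cast<long*>(p),
                                      bit_cast<long>(value));
    }
    inline int32_t SubSeqCst(int32_t* p, int32_t value) {
      // Subtract by adding the two's-complement negation of value.
      return InterlockedExchangeAdd32(reinterpret_cast<long*>(p),
                                      -bit_cast<long>(value));
    }
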
169 | 174 |
170 #else | 175 #else |
171 | 176 |
172 #error Unsupported platform! | 177 #error Unsupported platform! |
173 | 178 |
174 #endif | 179 #endif |
175 | 180 |
176 template <typename T> | 181 template <typename T> |
177 T FromObject(Handle<Object> number); | 182 T FromObject(Handle<Object> number); |
178 | 183 |
(...skipping 636 matching lines...) |
815 HandleScope scope(isolate); | 820 HandleScope scope(isolate); |
816 DCHECK(args.length() == 1); | 821 DCHECK(args.length() == 1); |
817 CONVERT_NUMBER_ARG_HANDLE_CHECKED(size, 0); | 822 CONVERT_NUMBER_ARG_HANDLE_CHECKED(size, 0); |
818 uint32_t usize = NumberToUint32(*size); | 823 uint32_t usize = NumberToUint32(*size); |
819 | 824 |
820 return Runtime::AtomicIsLockFree(usize) ? isolate->heap()->true_value() | 825 return Runtime::AtomicIsLockFree(usize) ? isolate->heap()->true_value() |
821 : isolate->heap()->false_value(); | 826 : isolate->heap()->false_value(); |
822 } | 827 } |
823 } | 828 } |
824 } // namespace v8::internal | 829 } // namespace v8::internal |
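
The final runtime entry shown here (presumably backing Atomics.isLockFree(size)) coerces its argument with NumberToUint32 and delegates the decision to Runtime::AtomicIsLockFree, which lives outside the lines shown and judges purely by byte size. A hypothetical sketch of that predicate, assuming the common case that only 1-, 2-, and 4-byte accesses are reported lock-free (the 8-byte answer would depend on the ATOMICS_REQUIRE_LOCK_64_BIT path above):

    // Hypothetical predicate; the real Runtime::AtomicIsLockFree is not
    // part of this diff hunk.
    inline bool AtomicIsLockFree(uint32_t size) {
      return size == 1 || size == 2 || size == 4;
    }
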