| OLD | NEW |
| 1 // Copyright 2015 the V8 project authors. All rights reserved. | 1 // Copyright 2015 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/runtime/runtime-utils.h" | 5 #include "src/runtime/runtime-utils.h" |
| 6 | 6 |
| 7 #include "src/arguments.h" | 7 #include "src/arguments.h" |
| 8 #include "src/base/macros.h" | 8 #include "src/base/macros.h" |
| 9 #include "src/base/platform/mutex.h" | 9 #include "src/base/platform/mutex.h" |
| 10 #include "src/conversions-inl.h" | 10 #include "src/conversions-inl.h" |
| 11 #include "src/factory.h" | 11 #include "src/factory.h" |
| 12 | 12 |
| 13 // Implement Atomic accesses to SharedArrayBuffers as defined in the | 13 // Implement Atomic accesses to SharedArrayBuffers as defined in the |
| 14 // SharedArrayBuffer draft spec, found here | 14 // SharedArrayBuffer draft spec, found here |
| 15 // https://docs.google.com/document/d/1NDGA_gZJ7M7w1Bh8S0AoDyEqwDdRh4uSoTPSNn77PFk | 15 // https://github.com/lars-t-hansen/ecmascript_sharedmem |
| 16 | 16 |
| 17 namespace v8 { | 17 namespace v8 { |
| 18 namespace internal { | 18 namespace internal { |
| 19 | 19 |
| 20 namespace { | 20 namespace { |
| 21 | 21 |
| 22 // Assume that 32-bit architectures don't have 64-bit atomic ops. | |
| 23 // TODO(binji): can we do better here? | |
| 24 #if V8_TARGET_ARCH_64_BIT && V8_HOST_ARCH_64_BIT | |
| 25 | |
| 26 #define ATOMICS_REQUIRE_LOCK_64_BIT 0 | |
| 27 | |
| 28 inline bool AtomicIsLockFree(uint32_t size) { | |
| 29 return size == 1 || size == 2 || size == 4 || size == 8; | |
| 30 } | |
| 31 | |
| 32 #else | |
| 33 | |
| 34 #define ATOMICS_REQUIRE_LOCK_64_BIT 1 | |
| 35 | |
| 36 inline bool AtomicIsLockFree(uint32_t size) { | 22 inline bool AtomicIsLockFree(uint32_t size) { |
| 37 return size == 1 || size == 2 || size == 4; | 23 return size == 1 || size == 2 || size == 4; |
| 38 } | 24 } |
| 39 | 25 |
| 40 #endif | |
| 41 | |
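Editor's note: the deleted block above hard-codes the assumption that only 64-bit targets have lock-free 64-bit atomics. As one possible answer to the TODO, the standard library can report lock-freeness directly; a minimal sketch (my illustration, not part of this CL), assuming C++11 `<atomic>` is acceptable here:

```cpp
#include <atomic>
#include <cstdint>

// Ask the standard library whether an atomic of the given byte width is
// lock-free on this target, instead of keying off V8_TARGET_ARCH_64_BIT.
inline bool AtomicIsLockFreePortable(uint32_t size) {
  switch (size) {
    case 1: return std::atomic<uint8_t>().is_lock_free();
    case 2: return std::atomic<uint16_t>().is_lock_free();
    case 4: return std::atomic<uint32_t>().is_lock_free();
    case 8: return std::atomic<uint64_t>().is_lock_free();
    default: return false;
  }
}
```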
| 42 #if V8_CC_GNU | 26 #if V8_CC_GNU |
| 43 | 27 |
| 44 template <typename T> | 28 template <typename T> |
| 45 inline T CompareExchangeSeqCst(T* p, T oldval, T newval) { | 29 inline T CompareExchangeSeqCst(T* p, T oldval, T newval) { |
| 46 (void)__atomic_compare_exchange_n(p, &oldval, newval, 0, __ATOMIC_SEQ_CST, | 30 (void)__atomic_compare_exchange_n(p, &oldval, newval, 0, __ATOMIC_SEQ_CST, |
| 47 __ATOMIC_SEQ_CST); | 31 __ATOMIC_SEQ_CST); |
| 48 return oldval; | 32 return oldval; |
| 49 } | 33 } |
| 50 | 34 |
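`__atomic_compare_exchange_n` writes the value it actually observed back into `oldval` on failure, so returning `oldval` covers both outcomes: on success it still holds the expected value, on failure it holds what was found in memory. A small usage sketch with made-up values:

```cpp
void CompareExchangeExample() {
  uint32_t cell = 5;
  // Succeeds: the cell holds the expected 5, so it becomes 6; the previous
  // value (5) is returned.
  uint32_t r1 = CompareExchangeSeqCst(&cell, 5u, 6u);  // r1 == 5, cell == 6
  // Fails: the cell now holds 6, not 7, so it is left untouched and the
  // observed value (6) is returned.
  uint32_t r2 = CompareExchangeSeqCst(&cell, 7u, 8u);  // r2 == 6, cell == 6
  (void)r1;
  (void)r2;
}
```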
| 51 template <typename T> | 35 template <typename T> |
| (...skipping 31 matching lines...) |
| 83 template <typename T> | 67 template <typename T> |
| 84 inline T XorSeqCst(T* p, T value) { | 68 inline T XorSeqCst(T* p, T value) { |
| 85 return __atomic_fetch_xor(p, value, __ATOMIC_SEQ_CST); | 69 return __atomic_fetch_xor(p, value, __ATOMIC_SEQ_CST); |
| 86 } | 70 } |
| 87 | 71 |
| 88 template <typename T> | 72 template <typename T> |
| 89 inline T ExchangeSeqCst(T* p, T value) { | 73 inline T ExchangeSeqCst(T* p, T value) { |
| 90 return __atomic_exchange_n(p, value, __ATOMIC_SEQ_CST); | 74 return __atomic_exchange_n(p, value, __ATOMIC_SEQ_CST); |
| 91 } | 75 } |
| 92 | 76 |
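All of the `__atomic_fetch_*` wrappers above return the value the location held *before* the update, which is what the JS `Atomics` operations hand back to the caller. For instance (hypothetical values):

```cpp
void FetchXorExample() {
  uint8_t flags = 0x06;
  // fetch-xor returns the pre-update value; the cell becomes 0x06 ^ 0x03.
  uint8_t before = XorSeqCst(&flags, static_cast<uint8_t>(0x03));
  // before == 0x06, flags == 0x05
  (void)before;
}
```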
| 93 #if ATOMICS_REQUIRE_LOCK_64_BIT | |
| 94 | |
| 95 // We only need to implement the following functions, because the rest of the | |
| 96 // atomic operations only work on integer types, and the only 64-bit type is | |
| 97 // float64. Similarly, because the values are being bit_cast from double -> | |
| 98 // uint64_t, we don't need to implement these functions for int64_t either. | |
| 99 | |
| 100 static base::LazyMutex atomic_mutex = LAZY_MUTEX_INITIALIZER; | |
| 101 | |
| 102 inline uint64_t CompareExchangeSeqCst(uint64_t* p, uint64_t oldval, | |
| 103 uint64_t newval) { | |
| 104 base::LockGuard<base::Mutex> lock_guard(atomic_mutex.Pointer()); | |
| 105 uint64_t result = *p; | |
| 106 if (result == oldval) *p = newval; | |
| 107 return result; | |
| 108 } | |
| 109 | |
| 110 | |
| 111 inline uint64_t LoadSeqCst(uint64_t* p) { | |
| 112 base::LockGuard<base::Mutex> lock_guard(atomic_mutex.Pointer()); | |
| 113 return *p; | |
| 114 } | |
| 115 | |
| 116 | |
| 117 inline void StoreSeqCst(uint64_t* p, uint64_t value) { | |
| 118 base::LockGuard<base::Mutex> lock_guard(atomic_mutex.Pointer()); | |
| 119 *p = value; | |
| 120 } | |
| 121 | |
| 122 #endif // ATOMICS_REQUIRE_LOCK_64_BIT | |
| 123 | |
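The mutex-guarded block above exists only for float64 values, which travel through `bit_cast` as raw `uint64_t` bit patterns (per the comment at the top of the block). The round trip is lossless; a sketch using V8's `bit_cast` from `src/base/macros.h`, which this file already includes:

```cpp
void BitCastRoundTrip() {
  double d = 1.5;
  uint64_t bits = bit_cast<uint64_t>(d);  // reinterpret the 8 bytes as an int
  double back = bit_cast<double>(bits);   // back == 1.5, bit-for-bit
  (void)back;
}
```

Note that operations serialized by one global mutex are atomic with respect to each other but not lock-free, which is consistent with the 32-bit `AtomicIsLockFree` returning false for size 8.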
| 124 #elif V8_CC_MSVC | 77 #elif V8_CC_MSVC |
| 125 | 78 |
| 126 #define InterlockedCompareExchange32 _InterlockedCompareExchange | 79 #define InterlockedCompareExchange32 _InterlockedCompareExchange |
| 127 #define InterlockedExchange32 _InterlockedExchange | 80 #define InterlockedExchange32 _InterlockedExchange |
| 128 #define InterlockedExchangeAdd32 _InterlockedExchangeAdd | 81 #define InterlockedExchangeAdd32 _InterlockedExchangeAdd |
| 129 #define InterlockedAnd32 _InterlockedAnd | 82 #define InterlockedAnd32 _InterlockedAnd |
| 130 #define InterlockedOr32 _InterlockedOr | 83 #define InterlockedOr32 _InterlockedOr |
| 131 #define InterlockedXor32 _InterlockedXor | 84 #define InterlockedXor32 _InterlockedXor |
| 132 #define InterlockedExchangeAdd16 _InterlockedExchangeAdd16 | 85 #define InterlockedExchangeAdd16 _InterlockedExchangeAdd16 |
| 133 #define InterlockedCompareExchange8 _InterlockedCompareExchange8 | 86 #define InterlockedCompareExchange8 _InterlockedCompareExchange8 |
| 134 #define InterlockedExchangeAdd8 _InterlockedExchangeAdd8 | 87 #define InterlockedExchangeAdd8 _InterlockedExchangeAdd8 |
| 135 | 88 |
| 136 #define ATOMIC_OPS_INTEGER(type, suffix, vctype) \ | 89 #define ATOMIC_OPS(type, suffix, vctype) \ |
| 137 inline type AddSeqCst(type* p, type value) { \ | 90 inline type AddSeqCst(type* p, type value) { \ |
| 138 return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p), \ | 91 return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p), \ |
| 139 bit_cast<vctype>(value)); \ | 92 bit_cast<vctype>(value)); \ |
| 140 } \ | 93 } \ |
| 141 inline type SubSeqCst(type* p, type value) { \ | 94 inline type SubSeqCst(type* p, type value) { \ |
| 142 return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p), \ | 95 return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p), \ |
| 143 -bit_cast<vctype>(value)); \ | 96 -bit_cast<vctype>(value)); \ |
| 144 } \ | 97 } \ |
| 145 inline type AndSeqCst(type* p, type value) { \ | 98 inline type AndSeqCst(type* p, type value) { \ |
| 146 return InterlockedAnd##suffix(reinterpret_cast<vctype*>(p), \ | 99 return InterlockedAnd##suffix(reinterpret_cast<vctype*>(p), \ |
| 147 bit_cast<vctype>(value)); \ | 100 bit_cast<vctype>(value)); \ |
| 148 } \ | 101 } \ |
| 149 inline type OrSeqCst(type* p, type value) { \ | 102 inline type OrSeqCst(type* p, type value) { \ |
| 150 return InterlockedOr##suffix(reinterpret_cast<vctype*>(p), \ | 103 return InterlockedOr##suffix(reinterpret_cast<vctype*>(p), \ |
| 151 bit_cast<vctype>(value)); \ | 104 bit_cast<vctype>(value)); \ |
| 152 } \ | 105 } \ |
| 153 inline type XorSeqCst(type* p, type value) { \ | 106 inline type XorSeqCst(type* p, type value) { \ |
| 154 return InterlockedXor##suffix(reinterpret_cast<vctype*>(p), \ | 107 return InterlockedXor##suffix(reinterpret_cast<vctype*>(p), \ |
| 155 bit_cast<vctype>(value)); \ | 108 bit_cast<vctype>(value)); \ |
| 156 } \ | 109 } \ |
| 157 inline type ExchangeSeqCst(type* p, type value) { \ | 110 inline type ExchangeSeqCst(type* p, type value) { \ |
| 158 return InterlockedExchange##suffix(reinterpret_cast<vctype*>(p), \ | 111 return InterlockedExchange##suffix(reinterpret_cast<vctype*>(p), \ |
| 159 bit_cast<vctype>(value)); \ | 112 bit_cast<vctype>(value)); \ |
| 160 } | 113 } \ |
| 161 | 114 \ |
| 162 #define ATOMIC_OPS_FLOAT(type, suffix, vctype) \ | |
| 163 inline type CompareExchangeSeqCst(type* p, type oldval, type newval) { \ | 115 inline type CompareExchangeSeqCst(type* p, type oldval, type newval) { \ |
| 164 return InterlockedCompareExchange##suffix(reinterpret_cast<vctype*>(p), \ | 116 return InterlockedCompareExchange##suffix(reinterpret_cast<vctype*>(p), \ |
| 165 bit_cast<vctype>(newval), \ | 117 bit_cast<vctype>(newval), \ |
| 166 bit_cast<vctype>(oldval)); \ | 118 bit_cast<vctype>(oldval)); \ |
| 167 } \ | 119 } \ |
| 168 inline type LoadSeqCst(type* p) { return *p; } \ | 120 inline type LoadSeqCst(type* p) { return *p; } \ |
| 169 inline void StoreSeqCst(type* p, type value) { \ | 121 inline void StoreSeqCst(type* p, type value) { \ |
| 170 InterlockedExchange##suffix(reinterpret_cast<vctype*>(p), \ | 122 InterlockedExchange##suffix(reinterpret_cast<vctype*>(p), \ |
| 171 bit_cast<vctype>(value)); \ | 123 bit_cast<vctype>(value)); \ |
| 172 } | 124 } |
| 173 | 125 |
| 174 #define ATOMIC_OPS(type, suffix, vctype) \ | |
| 175 ATOMIC_OPS_INTEGER(type, suffix, vctype) \ | |
| 176 ATOMIC_OPS_FLOAT(type, suffix, vctype) | |
| 177 | |
| 178 ATOMIC_OPS(int8_t, 8, char) | 126 ATOMIC_OPS(int8_t, 8, char) |
| 179 ATOMIC_OPS(uint8_t, 8, char) | 127 ATOMIC_OPS(uint8_t, 8, char) |
| 180 ATOMIC_OPS(int16_t, 16, short) /* NOLINT(runtime/int) */ | 128 ATOMIC_OPS(int16_t, 16, short) /* NOLINT(runtime/int) */ |
| 181 ATOMIC_OPS(uint16_t, 16, short) /* NOLINT(runtime/int) */ | 129 ATOMIC_OPS(uint16_t, 16, short) /* NOLINT(runtime/int) */ |
| 182 ATOMIC_OPS(int32_t, 32, long) /* NOLINT(runtime/int) */ | 130 ATOMIC_OPS(int32_t, 32, long) /* NOLINT(runtime/int) */ |
| 183 ATOMIC_OPS(uint32_t, 32, long) /* NOLINT(runtime/int) */ | 131 ATOMIC_OPS(uint32_t, 32, long) /* NOLINT(runtime/int) */ |
| 184 ATOMIC_OPS_FLOAT(uint64_t, 64, LONGLONG) | |
| 185 | 132 |
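On MSVC each `ATOMIC_OPS` line stamps out the full set of wrappers for one element type. As an illustration, the first instantiation, `ATOMIC_OPS(int8_t, 8, char)`, produces among others (one wrapper shown, after the `Interlocked*` aliases are substituted; whitespace mine):

```cpp
inline int8_t AddSeqCst(int8_t* p, int8_t value) {
  return _InterlockedExchangeAdd8(reinterpret_cast<char*>(p),
                                  bit_cast<char>(value));
}
```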
| 186 #undef ATOMIC_OPS_INTEGER | |
| 187 #undef ATOMIC_OPS_FLOAT | |
| 188 #undef ATOMIC_OPS | 134 #undef ATOMIC_OPS |
| 189 | 135 |
| 190 #undef InterlockedCompareExchange32 | 136 #undef InterlockedCompareExchange32 |
| 191 #undef InterlockedExchange32 | 137 #undef InterlockedExchange32 |
| 192 #undef InterlockedExchangeAdd32 | 138 #undef InterlockedExchangeAdd32 |
| 193 #undef InterlockedAnd32 | 139 #undef InterlockedAnd32 |
| 194 #undef InterlockedOr32 | 140 #undef InterlockedOr32 |
| 195 #undef InterlockedXor32 | 141 #undef InterlockedXor32 |
| 196 #undef InterlockedExchangeAdd16 | 142 #undef InterlockedExchangeAdd16 |
| 197 #undef InterlockedCompareExchange8 | 143 #undef InterlockedCompareExchange8 |
| (...skipping 11 matching lines...) |
| 209 template <> | 155 template <> |
| 210 inline uint32_t FromObject<uint32_t>(Handle<Object> number) { | 156 inline uint32_t FromObject<uint32_t>(Handle<Object> number) { |
| 211 return NumberToUint32(*number); | 157 return NumberToUint32(*number); |
| 212 } | 158 } |
| 213 | 159 |
| 214 template <> | 160 template <> |
| 215 inline int32_t FromObject<int32_t>(Handle<Object> number) { | 161 inline int32_t FromObject<int32_t>(Handle<Object> number) { |
| 216 return NumberToInt32(*number); | 162 return NumberToInt32(*number); |
| 217 } | 163 } |
| 218 | 164 |
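`NumberToUint32` and `NumberToInt32` apply the ECMAScript ToUint32/ToInt32 conversions, which wrap the incoming number modulo 2^32 rather than clamping it. A rough stand-alone stand-in for the unsigned case (my sketch, not V8's implementation; NaN/Infinity handling elided):

```cpp
#include <cmath>
#include <cstdint>

// Wrap a finite double into [0, 2^32) the way JS ToUint32 does: truncate
// toward zero, then reduce modulo 2^32.
inline uint32_t ToUint32Sketch(double d) {
  double m = std::fmod(std::trunc(d), 4294967296.0);  // |m| < 2^32
  return static_cast<uint32_t>(static_cast<int64_t>(m));
}
// ToUint32Sketch(-1.0) == 0xFFFFFFFF, ToUint32Sketch(4294967296.0) == 0
```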
| 219 template <> | |
| 220 inline float FromObject<float>(Handle<Object> number) { | |
| 221 return static_cast<float>(number->Number()); | |
| 222 } | |
| 223 | |
| 224 template <> | |
| 225 inline double FromObject<double>(Handle<Object> number) { | |
| 226 return number->Number(); | |
| 227 } | |
| 228 | |
| 229 template <typename T, typename F> | 165 template <typename T, typename F> |
| 230 inline T ToAtomic(F from) { | 166 inline T ToAtomic(F from) { |
| 231 return static_cast<T>(from); | 167 return static_cast<T>(from); |
| 232 } | 168 } |
| 233 | 169 |
| 234 template <> | |
| 235 inline uint32_t ToAtomic<uint32_t, float>(float from) { | |
| 236 return bit_cast<uint32_t, float>(from); | |
| 237 } | |
| 238 | |
| 239 template <> | |
| 240 inline uint64_t ToAtomic<uint64_t, double>(double from) { | |
| 241 return bit_cast<uint64_t, double>(from); | |
| 242 } | |
| 243 | |
| 244 template <typename T, typename F> | 170 template <typename T, typename F> |
| 245 inline T FromAtomic(F from) { | 171 inline T FromAtomic(F from) { |
| 246 return static_cast<T>(from); | 172 return static_cast<T>(from); |
| 247 } | 173 } |
| 248 | 174 |
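For the narrow integer element types, `ToAtomic` and `FromAtomic` are plain `static_cast`s: stores truncate the converted 32-bit value to the element width, and loads widen it back, matching ordinary typed-array semantics. A sketch (assumes the usual two's-complement narrowing behavior):

```cpp
void NarrowingExample() {
  int32_t v = 0x1234;
  int8_t stored = ToAtomic<int8_t>(v);           // truncates to 0x34
  int32_t loaded = FromAtomic<int32_t>(stored);  // sign-extends back: 0x34
  (void)loaded;
}
```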
| 249 template <> | |
| 250 inline float FromAtomic<float, uint32_t>(uint32_t from) { | |
| 251 return bit_cast<float, uint32_t>(from); | |
| 252 } | |
| 253 | |
| 254 template <> | |
| 255 inline double FromAtomic<double, uint64_t>(uint64_t from) { | |
| 256 return bit_cast<double, uint64_t>(from); | |
| 257 } | |
| 258 | |
| 259 template <typename T> | 175 template <typename T> |
| 260 inline Object* ToObject(Isolate* isolate, T t); | 176 inline Object* ToObject(Isolate* isolate, T t); |
| 261 | 177 |
| 262 template <> | 178 template <> |
| 263 inline Object* ToObject<int8_t>(Isolate* isolate, int8_t t) { | 179 inline Object* ToObject<int8_t>(Isolate* isolate, int8_t t) { |
| 264 return Smi::FromInt(t); | 180 return Smi::FromInt(t); |
| 265 } | 181 } |
| 266 | 182 |
| 267 template <> | 183 template <> |
| 268 inline Object* ToObject<uint8_t>(Isolate* isolate, uint8_t t) { | 184 inline Object* ToObject<uint8_t>(Isolate* isolate, uint8_t t) { |
| (...skipping 13 matching lines...) |
| 282 template <> | 198 template <> |
| 283 inline Object* ToObject<int32_t>(Isolate* isolate, int32_t t) { | 199 inline Object* ToObject<int32_t>(Isolate* isolate, int32_t t) { |
| 284 return *isolate->factory()->NewNumber(t); | 200 return *isolate->factory()->NewNumber(t); |
| 285 } | 201 } |
| 286 | 202 |
| 287 template <> | 203 template <> |
| 288 inline Object* ToObject<uint32_t>(Isolate* isolate, uint32_t t) { | 204 inline Object* ToObject<uint32_t>(Isolate* isolate, uint32_t t) { |
| 289 return *isolate->factory()->NewNumber(t); | 205 return *isolate->factory()->NewNumber(t); |
| 290 } | 206 } |
| 291 | 207 |
| 292 template <> | |
| 293 inline Object* ToObject<float>(Isolate* isolate, float t) { | |
| 294 return *isolate->factory()->NewNumber(t); | |
| 295 } | |
| 296 | |
| 297 template <> | |
| 298 inline Object* ToObject<double>(Isolate* isolate, double t) { | |
| 299 return *isolate->factory()->NewNumber(t); | |
| 300 } | |
| 301 | |
| 302 template <typename T> | 208 template <typename T> |
| 303 struct FromObjectTraits {}; | 209 struct FromObjectTraits {}; |
| 304 | 210 |
| 305 template <> | 211 template <> |
| 306 struct FromObjectTraits<int8_t> { | 212 struct FromObjectTraits<int8_t> { |
| 307 typedef int32_t convert_type; | 213 typedef int32_t convert_type; |
| 308 typedef int8_t atomic_type; | 214 typedef int8_t atomic_type; |
| 309 }; | 215 }; |
| 310 | 216 |
| 311 template <> | 217 template <> |
| (...skipping 19 matching lines...) |
| 331 typedef int32_t convert_type; | 237 typedef int32_t convert_type; |
| 332 typedef int32_t atomic_type; | 238 typedef int32_t atomic_type; |
| 333 }; | 239 }; |
| 334 | 240 |
| 335 template <> | 241 template <> |
| 336 struct FromObjectTraits<uint32_t> { | 242 struct FromObjectTraits<uint32_t> { |
| 337 typedef uint32_t convert_type; | 243 typedef uint32_t convert_type; |
| 338 typedef uint32_t atomic_type; | 244 typedef uint32_t atomic_type; |
| 339 }; | 245 }; |
| 340 | 246 |
| 341 template <> | |
| 342 struct FromObjectTraits<float> { | |
| 343 typedef float convert_type; | |
| 344 typedef uint32_t atomic_type; | |
| 345 }; | |
| 346 | |
| 347 template <> | |
| 348 struct FromObjectTraits<double> { | |
| 349 typedef double convert_type; | |
| 350 typedef uint64_t atomic_type; | |
| 351 }; | |
| 352 | |
| 353 | 247 |
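The float/double traits being deleted above mapped floating-point elements onto integer `atomic_type`s, so `compareExchange` compared *bit patterns* rather than numeric values: `+0.0` and `-0.0` are `==` as floats but never match each other under a bitwise CAS, and a NaN pattern can match itself. A sketch of that distinction, using a memcpy-based stand-in for `bit_cast`:

```cpp
#include <cstdint>
#include <cstring>

template <typename To, typename From>
To BitCastSketch(From from) {
  static_assert(sizeof(To) == sizeof(From), "sizes must match");
  To to;
  std::memcpy(&to, &from, sizeof(to));
  return to;
}

void SignedZeroExample() {
  uint32_t pos = BitCastSketch<uint32_t>(0.0f);   // 0x00000000
  uint32_t neg = BitCastSketch<uint32_t>(-0.0f);  // 0x80000000
  // 0.0f == -0.0f numerically, yet pos != neg, so a bit-pattern CAS
  // expecting +0.0 will not fire on a cell holding -0.0.
  (void)pos;
  (void)neg;
}
```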
| 354 template <typename T> | 248 template <typename T> |
| 355 inline Object* DoCompareExchange(Isolate* isolate, void* buffer, size_t index, | 249 inline Object* DoCompareExchange(Isolate* isolate, void* buffer, size_t index, |
| 356 Handle<Object> oldobj, Handle<Object> newobj) { | 250 Handle<Object> oldobj, Handle<Object> newobj) { |
| 357 typedef typename FromObjectTraits<T>::atomic_type atomic_type; | 251 typedef typename FromObjectTraits<T>::atomic_type atomic_type; |
| 358 typedef typename FromObjectTraits<T>::convert_type convert_type; | 252 typedef typename FromObjectTraits<T>::convert_type convert_type; |
| 359 atomic_type oldval = ToAtomic<atomic_type>(FromObject<convert_type>(oldobj)); | 253 atomic_type oldval = ToAtomic<atomic_type>(FromObject<convert_type>(oldobj)); |
| 360 atomic_type newval = ToAtomic<atomic_type>(FromObject<convert_type>(newobj)); | 254 atomic_type newval = ToAtomic<atomic_type>(FromObject<convert_type>(newobj)); |
| 361 atomic_type result = CompareExchangeSeqCst( | 255 atomic_type result = CompareExchangeSeqCst( |
| 362 static_cast<atomic_type*>(buffer) + index, oldval, newval); | 256 static_cast<atomic_type*>(buffer) + index, oldval, newval); |
| (...skipping 190 matching lines...) |
| 553 void* buffer = sta->GetBuffer()->backing_store(); | 447 void* buffer = sta->GetBuffer()->backing_store(); |
| 554 | 448 |
| 555 switch (sta->type()) { | 449 switch (sta->type()) { |
| 556 #define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \ | 450 #define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \ |
| 557 case kExternal##Type##Array: \ | 451 case kExternal##Type##Array: \ |
| 558 return DoCompareExchange<ctype>(isolate, buffer, index, oldobj, newobj); | 452 return DoCompareExchange<ctype>(isolate, buffer, index, oldobj, newobj); |
| 559 | 453 |
| 560 INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE) | 454 INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE) |
| 561 #undef TYPED_ARRAY_CASE | 455 #undef TYPED_ARRAY_CASE |
| 562 | 456 |
| 563 case kExternalFloat32Array: | |
| 564 return DoCompareExchange<float>(isolate, buffer, index, oldobj, newobj); | |
| 565 | |
| 566 case kExternalFloat64Array: | |
| 567 return DoCompareExchange<double>(isolate, buffer, index, oldobj, newobj); | |
| 568 | |
| 569 case kExternalUint8ClampedArray: | 457 case kExternalUint8ClampedArray: |
| 570 return DoCompareExchangeUint8Clamped(isolate, buffer, index, oldobj, | 458 return DoCompareExchangeUint8Clamped(isolate, buffer, index, oldobj, |
| 571 newobj); | 459 newobj); |
| 572 | 460 |
| 573 default: | 461 default: |
| 574 break; | 462 break; |
| 575 } | 463 } |
| 576 | 464 |
| 577 UNREACHABLE(); | 465 UNREACHABLE(); |
| 578 return isolate->heap()->undefined_value(); | 466 return isolate->heap()->undefined_value(); |
| 579 } | 467 } |
| 580 | 468 |
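Each runtime entry point dispatches on the array's element type via the `TYPED_ARRAY_CASE` macro, instantiated once per entry of `INTEGER_TYPED_ARRAYS`. For compare-exchange, two of the generated cases look like this (expansion sketch):

```cpp
case kExternalInt8Array:
  return DoCompareExchange<int8_t>(isolate, buffer, index, oldobj, newobj);
case kExternalUint32Array:
  return DoCompareExchange<uint32_t>(isolate, buffer, index, oldobj, newobj);
```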
| 581 | 469 |
| 582 RUNTIME_FUNCTION(Runtime_AtomicsLoad) { | 470 RUNTIME_FUNCTION(Runtime_AtomicsLoad) { |
| 583 HandleScope scope(isolate); | 471 HandleScope scope(isolate); |
| 584 DCHECK(args.length() == 2); | 472 DCHECK(args.length() == 2); |
| 585 CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0); | 473 CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0); |
| 586 CONVERT_SIZE_ARG_CHECKED(index, 1); | 474 CONVERT_SIZE_ARG_CHECKED(index, 1); |
| 587 RUNTIME_ASSERT(sta->GetBuffer()->is_shared()); | 475 RUNTIME_ASSERT(sta->GetBuffer()->is_shared()); |
| 588 RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length())); | 476 RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length())); |
| 589 | 477 |
| 590 void* buffer = sta->GetBuffer()->backing_store(); | 478 void* buffer = sta->GetBuffer()->backing_store(); |
| 591 | 479 |
| 592 switch (sta->type()) { | 480 switch (sta->type()) { |
| 593 #define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \ | 481 #define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \ |
| 594 case kExternal##Type##Array: \ | 482 case kExternal##Type##Array: \ |
| 595 return DoLoad<ctype>(isolate, buffer, index); | 483 return DoLoad<ctype>(isolate, buffer, index); |
| 596 | 484 |
| 597 TYPED_ARRAYS(TYPED_ARRAY_CASE) | 485 INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE) |
| 598 #undef TYPED_ARRAY_CASE | 486 #undef TYPED_ARRAY_CASE |
| 599 | 487 |
| 488 case kExternalUint8ClampedArray: |
| 489 return DoLoad<uint8_t>(isolate, buffer, index); |
| 490 |
| 600 default: | 491 default: |
| 601 break; | 492 break; |
| 602 } | 493 } |
| 603 | 494 |
| 604 UNREACHABLE(); | 495 UNREACHABLE(); |
| 605 return isolate->heap()->undefined_value(); | 496 return isolate->heap()->undefined_value(); |
| 606 } | 497 } |
| 607 | 498 |
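The new `kExternalUint8ClampedArray` case can reuse `DoLoad<uint8_t>` because clamping only affects *stores*; in memory a clamped element is an ordinary unsigned byte. For contrast, a hypothetical sketch of the store-side conversion (the real logic presumably lives in `DoStoreUint8Clamped`, elided from this diff):

```cpp
#include <cmath>
#include <cstdint>

// Clamp a JS number to [0, 255] with round-to-nearest (ties to even under
// the default rounding mode), the Uint8Clamped store conversion. Sketch only.
inline uint8_t ClampToUint8(double value) {
  if (!(value > 0)) return 0;    // negatives and NaN clamp to 0
  if (value >= 255) return 255;
  return static_cast<uint8_t>(std::nearbyint(value));
}
```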
| 608 | 499 |
| 609 RUNTIME_FUNCTION(Runtime_AtomicsStore) { | 500 RUNTIME_FUNCTION(Runtime_AtomicsStore) { |
| 610 HandleScope scope(isolate); | 501 HandleScope scope(isolate); |
| 611 DCHECK(args.length() == 3); | 502 DCHECK(args.length() == 3); |
| 612 CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0); | 503 CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0); |
| 613 CONVERT_SIZE_ARG_CHECKED(index, 1); | 504 CONVERT_SIZE_ARG_CHECKED(index, 1); |
| 614 CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2); | 505 CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2); |
| 615 RUNTIME_ASSERT(sta->GetBuffer()->is_shared()); | 506 RUNTIME_ASSERT(sta->GetBuffer()->is_shared()); |
| 616 RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length())); | 507 RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length())); |
| 617 | 508 |
| 618 void* buffer = sta->GetBuffer()->backing_store(); | 509 void* buffer = sta->GetBuffer()->backing_store(); |
| 619 | 510 |
| 620 switch (sta->type()) { | 511 switch (sta->type()) { |
| 621 #define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \ | 512 #define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \ |
| 622 case kExternal##Type##Array: \ | 513 case kExternal##Type##Array: \ |
| 623 return DoStore<ctype>(isolate, buffer, index, value); | 514 return DoStore<ctype>(isolate, buffer, index, value); |
| 624 | 515 |
| 625 INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE) | 516 INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE) |
| 626 #undef TYPED_ARRAY_CASE | 517 #undef TYPED_ARRAY_CASE |
| 627 | 518 |
| 628 case kExternalFloat32Array: | |
| 629 return DoStore<float>(isolate, buffer, index, value); | |
| 630 | |
| 631 case kExternalFloat64Array: | |
| 632 return DoStore<double>(isolate, buffer, index, value); | |
| 633 | |
| 634 case kExternalUint8ClampedArray: | 519 case kExternalUint8ClampedArray: |
| 635 return DoStoreUint8Clamped(isolate, buffer, index, value); | 520 return DoStoreUint8Clamped(isolate, buffer, index, value); |
| 636 | 521 |
| 637 default: | 522 default: |
| 638 break; | 523 break; |
| 639 } | 524 } |
| 640 | 525 |
| 641 UNREACHABLE(); | 526 UNREACHABLE(); |
| 642 return isolate->heap()->undefined_value(); | 527 return isolate->heap()->undefined_value(); |
| 643 } | 528 } |
| (...skipping 14 matching lines...) |
| 658 #define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \ | 543 #define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \ |
| 659 case kExternal##Type##Array: \ | 544 case kExternal##Type##Array: \ |
| 660 return DoAdd<ctype>(isolate, buffer, index, value); | 545 return DoAdd<ctype>(isolate, buffer, index, value); |
| 661 | 546 |
| 662 INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE) | 547 INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE) |
| 663 #undef TYPED_ARRAY_CASE | 548 #undef TYPED_ARRAY_CASE |
| 664 | 549 |
| 665 case kExternalUint8ClampedArray: | 550 case kExternalUint8ClampedArray: |
| 666 return DoAddUint8Clamped(isolate, buffer, index, value); | 551 return DoAddUint8Clamped(isolate, buffer, index, value); |
| 667 | 552 |
| 668 case kExternalFloat32Array: | |
| 669 case kExternalFloat64Array: | |
| 670 default: | 553 default: |
| 671 break; | 554 break; |
| 672 } | 555 } |
| 673 | 556 |
| 674 UNREACHABLE(); | 557 UNREACHABLE(); |
| 675 return isolate->heap()->undefined_value(); | 558 return isolate->heap()->undefined_value(); |
| 676 } | 559 } |
| 677 | 560 |
| 678 | 561 |
| 679 RUNTIME_FUNCTION(Runtime_AtomicsSub) { | 562 RUNTIME_FUNCTION(Runtime_AtomicsSub) { |
| (...skipping 11 matching lines...) |
| 691 #define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \ | 574 #define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \ |
| 692 case kExternal##Type##Array: \ | 575 case kExternal##Type##Array: \ |
| 693 return DoSub<ctype>(isolate, buffer, index, value); | 576 return DoSub<ctype>(isolate, buffer, index, value); |
| 694 | 577 |
| 695 INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE) | 578 INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE) |
| 696 #undef TYPED_ARRAY_CASE | 579 #undef TYPED_ARRAY_CASE |
| 697 | 580 |
| 698 case kExternalUint8ClampedArray: | 581 case kExternalUint8ClampedArray: |
| 699 return DoSubUint8Clamped(isolate, buffer, index, value); | 582 return DoSubUint8Clamped(isolate, buffer, index, value); |
| 700 | 583 |
| 701 case kExternalFloat32Array: | |
| 702 case kExternalFloat64Array: | |
| 703 default: | 584 default: |
| 704 break; | 585 break; |
| 705 } | 586 } |
| 706 | 587 |
| 707 UNREACHABLE(); | 588 UNREACHABLE(); |
| 708 return isolate->heap()->undefined_value(); | 589 return isolate->heap()->undefined_value(); |
| 709 } | 590 } |
| 710 | 591 |
| 711 | 592 |
| 712 RUNTIME_FUNCTION(Runtime_AtomicsAnd) { | 593 RUNTIME_FUNCTION(Runtime_AtomicsAnd) { |
| (...skipping 11 matching lines...) |
| 724 #define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \ | 605 #define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \ |
| 725 case kExternal##Type##Array: \ | 606 case kExternal##Type##Array: \ |
| 726 return DoAnd<ctype>(isolate, buffer, index, value); | 607 return DoAnd<ctype>(isolate, buffer, index, value); |
| 727 | 608 |
| 728 INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE) | 609 INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE) |
| 729 #undef TYPED_ARRAY_CASE | 610 #undef TYPED_ARRAY_CASE |
| 730 | 611 |
| 731 case kExternalUint8ClampedArray: | 612 case kExternalUint8ClampedArray: |
| 732 return DoAndUint8Clamped(isolate, buffer, index, value); | 613 return DoAndUint8Clamped(isolate, buffer, index, value); |
| 733 | 614 |
| 734 case kExternalFloat32Array: | |
| 735 case kExternalFloat64Array: | |
| 736 default: | 615 default: |
| 737 break; | 616 break; |
| 738 } | 617 } |
| 739 | 618 |
| 740 UNREACHABLE(); | 619 UNREACHABLE(); |
| 741 return isolate->heap()->undefined_value(); | 620 return isolate->heap()->undefined_value(); |
| 742 } | 621 } |
| 743 | 622 |
| 744 | 623 |
| 745 RUNTIME_FUNCTION(Runtime_AtomicsOr) { | 624 RUNTIME_FUNCTION(Runtime_AtomicsOr) { |
| (...skipping 11 matching lines...) |
| 757 #define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \ | 636 #define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \ |
| 758 case kExternal##Type##Array: \ | 637 case kExternal##Type##Array: \ |
| 759 return DoOr<ctype>(isolate, buffer, index, value); | 638 return DoOr<ctype>(isolate, buffer, index, value); |
| 760 | 639 |
| 761 INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE) | 640 INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE) |
| 762 #undef TYPED_ARRAY_CASE | 641 #undef TYPED_ARRAY_CASE |
| 763 | 642 |
| 764 case kExternalUint8ClampedArray: | 643 case kExternalUint8ClampedArray: |
| 765 return DoOrUint8Clamped(isolate, buffer, index, value); | 644 return DoOrUint8Clamped(isolate, buffer, index, value); |
| 766 | 645 |
| 767 case kExternalFloat32Array: | |
| 768 case kExternalFloat64Array: | |
| 769 default: | 646 default: |
| 770 break; | 647 break; |
| 771 } | 648 } |
| 772 | 649 |
| 773 UNREACHABLE(); | 650 UNREACHABLE(); |
| 774 return isolate->heap()->undefined_value(); | 651 return isolate->heap()->undefined_value(); |
| 775 } | 652 } |
| 776 | 653 |
| 777 | 654 |
| 778 RUNTIME_FUNCTION(Runtime_AtomicsXor) { | 655 RUNTIME_FUNCTION(Runtime_AtomicsXor) { |
| (...skipping 11 matching lines...) |
| 790 #define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \ | 667 #define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \ |
| 791 case kExternal##Type##Array: \ | 668 case kExternal##Type##Array: \ |
| 792 return DoXor<ctype>(isolate, buffer, index, value); | 669 return DoXor<ctype>(isolate, buffer, index, value); |
| 793 | 670 |
| 794 INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE) | 671 INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE) |
| 795 #undef TYPED_ARRAY_CASE | 672 #undef TYPED_ARRAY_CASE |
| 796 | 673 |
| 797 case kExternalUint8ClampedArray: | 674 case kExternalUint8ClampedArray: |
| 798 return DoXorUint8Clamped(isolate, buffer, index, value); | 675 return DoXorUint8Clamped(isolate, buffer, index, value); |
| 799 | 676 |
| 800 case kExternalFloat32Array: | |
| 801 case kExternalFloat64Array: | |
| 802 default: | 677 default: |
| 803 break; | 678 break; |
| 804 } | 679 } |
| 805 | 680 |
| 806 UNREACHABLE(); | 681 UNREACHABLE(); |
| 807 return isolate->heap()->undefined_value(); | 682 return isolate->heap()->undefined_value(); |
| 808 } | 683 } |
| 809 | 684 |
| 810 | 685 |
| 811 RUNTIME_FUNCTION(Runtime_AtomicsExchange) { | 686 RUNTIME_FUNCTION(Runtime_AtomicsExchange) { |
| (...skipping 11 matching lines...) |
| 823 #define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \ | 698 #define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \ |
| 824 case kExternal##Type##Array: \ | 699 case kExternal##Type##Array: \ |
| 825 return DoExchange<ctype>(isolate, buffer, index, value); | 700 return DoExchange<ctype>(isolate, buffer, index, value); |
| 826 | 701 |
| 827 INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE) | 702 INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE) |
| 828 #undef TYPED_ARRAY_CASE | 703 #undef TYPED_ARRAY_CASE |
| 829 | 704 |
| 830 case kExternalUint8ClampedArray: | 705 case kExternalUint8ClampedArray: |
| 831 return DoExchangeUint8Clamped(isolate, buffer, index, value); | 706 return DoExchangeUint8Clamped(isolate, buffer, index, value); |
| 832 | 707 |
| 833 case kExternalFloat32Array: | |
| 834 case kExternalFloat64Array: | |
| 835 default: | 708 default: |
| 836 break; | 709 break; |
| 837 } | 710 } |
| 838 | 711 |
| 839 UNREACHABLE(); | 712 UNREACHABLE(); |
| 840 return isolate->heap()->undefined_value(); | 713 return isolate->heap()->undefined_value(); |
| 841 } | 714 } |
| 842 | 715 |
| 843 | 716 |
| 844 RUNTIME_FUNCTION(Runtime_AtomicsIsLockFree) { | 717 RUNTIME_FUNCTION(Runtime_AtomicsIsLockFree) { |
| 845 HandleScope scope(isolate); | 718 HandleScope scope(isolate); |
| 846 DCHECK(args.length() == 1); | 719 DCHECK(args.length() == 1); |
| 847 CONVERT_NUMBER_ARG_HANDLE_CHECKED(size, 0); | 720 CONVERT_NUMBER_ARG_HANDLE_CHECKED(size, 0); |
| 848 uint32_t usize = NumberToUint32(*size); | 721 uint32_t usize = NumberToUint32(*size); |
| 849 return isolate->heap()->ToBoolean(AtomicIsLockFree(usize)); | 722 return isolate->heap()->ToBoolean(AtomicIsLockFree(usize)); |
| 850 } | 723 } |
| 851 } | 724 } |
| 852 } // namespace v8::internal | 725 } // namespace v8::internal |