| OLD | NEW |
| 1 // Copyright 2015 the V8 project authors. All rights reserved. | 1 // Copyright 2015 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/runtime/runtime-utils.h" | 5 #include "src/runtime/runtime-utils.h" |
| 6 | 6 |
| 7 #include "src/arguments.h" | 7 #include "src/arguments.h" |
| 8 #include "src/base/macros.h" | 8 #include "src/base/macros.h" |
| 9 #include "src/base/platform/mutex.h" | 9 #include "src/base/platform/mutex.h" |
| 10 #include "src/conversions-inl.h" | 10 #include "src/conversions-inl.h" |
| (...skipping 15 matching lines...) |
| 26 #if V8_CC_GNU | 26 #if V8_CC_GNU |
| 27 | 27 |
| 28 template <typename T> | 28 template <typename T> |
| 29 inline T CompareExchangeSeqCst(T* p, T oldval, T newval) { | 29 inline T CompareExchangeSeqCst(T* p, T oldval, T newval) { |
| 30 (void)__atomic_compare_exchange_n(p, &oldval, newval, 0, __ATOMIC_SEQ_CST, | 30 (void)__atomic_compare_exchange_n(p, &oldval, newval, 0, __ATOMIC_SEQ_CST, |
| 31 __ATOMIC_SEQ_CST); | 31 __ATOMIC_SEQ_CST); |
| 32 return oldval; | 32 return oldval; |
| 33 } | 33 } |
| 34 | 34 |
| 35 template <typename T> | 35 template <typename T> |
| 36 inline T LoadSeqCst(T* p) { | |
| 37 T result; | |
| 38 __atomic_load(p, &result, __ATOMIC_SEQ_CST); | |
| 39 return result; | |
| 40 } | |
| 41 | |
| 42 template <typename T> | |
| 43 inline void StoreSeqCst(T* p, T value) { | 36 inline void StoreSeqCst(T* p, T value) { |
| 44 __atomic_store_n(p, value, __ATOMIC_SEQ_CST); | 37 __atomic_store_n(p, value, __ATOMIC_SEQ_CST); |
| 45 } | 38 } |
| 46 | 39 |
| 47 template <typename T> | 40 template <typename T> |
| 48 inline T AddSeqCst(T* p, T value) { | 41 inline T AddSeqCst(T* p, T value) { |
| 49 return __atomic_fetch_add(p, value, __ATOMIC_SEQ_CST); | 42 return __atomic_fetch_add(p, value, __ATOMIC_SEQ_CST); |
| 50 } | 43 } |
| 51 | 44 |
| 52 template <typename T> | 45 template <typename T> |
| (...skipping 57 matching lines...) |
| 110 inline type ExchangeSeqCst(type* p, type value) { \ | 103 inline type ExchangeSeqCst(type* p, type value) { \ |
| 111 return InterlockedExchange##suffix(reinterpret_cast<vctype*>(p), \ | 104 return InterlockedExchange##suffix(reinterpret_cast<vctype*>(p), \ |
| 112 bit_cast<vctype>(value)); \ | 105 bit_cast<vctype>(value)); \ |
| 113 } \ | 106 } \ |
| 114 \ | 107 \ |
| 115 inline type CompareExchangeSeqCst(type* p, type oldval, type newval) { \ | 108 inline type CompareExchangeSeqCst(type* p, type oldval, type newval) { \ |
| 116 return InterlockedCompareExchange##suffix(reinterpret_cast<vctype*>(p), \ | 109 return InterlockedCompareExchange##suffix(reinterpret_cast<vctype*>(p), \ |
| 117 bit_cast<vctype>(newval), \ | 110 bit_cast<vctype>(newval), \ |
| 118 bit_cast<vctype>(oldval)); \ | 111 bit_cast<vctype>(oldval)); \ |
| 119 } \ | 112 } \ |
| 120 inline type LoadSeqCst(type* p) { return *p; } \ | |
| 121 inline void StoreSeqCst(type* p, type value) { \ | 113 inline void StoreSeqCst(type* p, type value) { \ |
| 122 InterlockedExchange##suffix(reinterpret_cast<vctype*>(p), \ | 114 InterlockedExchange##suffix(reinterpret_cast<vctype*>(p), \ |
| 123 bit_cast<vctype>(value)); \ | 115 bit_cast<vctype>(value)); \ |
| 124 } | 116 } |
| 125 | 117 |
| 126 ATOMIC_OPS(int8_t, 8, char) | 118 ATOMIC_OPS(int8_t, 8, char) |
| 127 ATOMIC_OPS(uint8_t, 8, char) | 119 ATOMIC_OPS(uint8_t, 8, char) |
| 128 ATOMIC_OPS(int16_t, 16, short) /* NOLINT(runtime/int) */ | 120 ATOMIC_OPS(int16_t, 16, short) /* NOLINT(runtime/int) */ |
| 129 ATOMIC_OPS(uint16_t, 16, short) /* NOLINT(runtime/int) */ | 121 ATOMIC_OPS(uint16_t, 16, short) /* NOLINT(runtime/int) */ |
| 130 ATOMIC_OPS(int32_t, 32, long) /* NOLINT(runtime/int) */ | 122 ATOMIC_OPS(int32_t, 32, long) /* NOLINT(runtime/int) */ |
| (...skipping 78 matching lines...) |
| 209 Handle<Object> oldobj, Handle<Object> newobj) { | 201 Handle<Object> oldobj, Handle<Object> newobj) { |
| 210 T oldval = FromObject<T>(oldobj); | 202 T oldval = FromObject<T>(oldobj); |
| 211 T newval = FromObject<T>(newobj); | 203 T newval = FromObject<T>(newobj); |
| 212 T result = | 204 T result = |
| 213 CompareExchangeSeqCst(static_cast<T*>(buffer) + index, oldval, newval); | 205 CompareExchangeSeqCst(static_cast<T*>(buffer) + index, oldval, newval); |
| 214 return ToObject(isolate, result); | 206 return ToObject(isolate, result); |
| 215 } | 207 } |
| 216 | 208 |
| 217 | 209 |
| 218 template <typename T> | 210 template <typename T> |
| 219 inline Object* DoLoad(Isolate* isolate, void* buffer, size_t index) { | |
| 220 T result = LoadSeqCst(static_cast<T*>(buffer) + index); | |
| 221 return ToObject(isolate, result); | |
| 222 } | |
| 223 | |
| 224 | |
| 225 template <typename T> | |
| 226 inline Object* DoStore(Isolate* isolate, void* buffer, size_t index, | 211 inline Object* DoStore(Isolate* isolate, void* buffer, size_t index, |
| 227 Handle<Object> obj) { | 212 Handle<Object> obj) { |
| 228 T value = FromObject<T>(obj); | 213 T value = FromObject<T>(obj); |
| 229 StoreSeqCst(static_cast<T*>(buffer) + index, value); | 214 StoreSeqCst(static_cast<T*>(buffer) + index, value); |
| 230 return *obj; | 215 return *obj; |
| 231 } | 216 } |
| 232 | 217 |
| 233 | 218 |
| 234 template <typename T> | 219 template <typename T> |
| 235 inline Object* DoAdd(Isolate* isolate, void* buffer, size_t index, | 220 inline Object* DoAdd(Isolate* isolate, void* buffer, size_t index, |
| (...skipping 122 matching lines...) |
| 358 // Duplicated from objects.h | 343 // Duplicated from objects.h |
| 359 // V has parameters (Type, type, TYPE, C type, element_size) | 344 // V has parameters (Type, type, TYPE, C type, element_size) |
| 360 #define INTEGER_TYPED_ARRAYS(V) \ | 345 #define INTEGER_TYPED_ARRAYS(V) \ |
| 361 V(Uint8, uint8, UINT8, uint8_t, 1) \ | 346 V(Uint8, uint8, UINT8, uint8_t, 1) \ |
| 362 V(Int8, int8, INT8, int8_t, 1) \ | 347 V(Int8, int8, INT8, int8_t, 1) \ |
| 363 V(Uint16, uint16, UINT16, uint16_t, 2) \ | 348 V(Uint16, uint16, UINT16, uint16_t, 2) \ |
| 364 V(Int16, int16, INT16, int16_t, 2) \ | 349 V(Int16, int16, INT16, int16_t, 2) \ |
| 365 V(Uint32, uint32, UINT32, uint32_t, 4) \ | 350 V(Uint32, uint32, UINT32, uint32_t, 4) \ |
| 366 V(Int32, int32, INT32, int32_t, 4) | 351 V(Int32, int32, INT32, int32_t, 4) |
| 367 | 352 |
| 353 RUNTIME_FUNCTION(Runtime_ThrowNotIntegerSharedTypedArrayError) { |
| 354 HandleScope scope(isolate); |
| 355 DCHECK_EQ(1, args.length()); |
| 356 CONVERT_ARG_HANDLE_CHECKED(Object, value, 0); |
| 357 THROW_NEW_ERROR_RETURN_FAILURE( |
| 358 isolate, |
| 359 NewTypeError(MessageTemplate::kNotIntegerSharedTypedArray, value)); |
| 360 } |
| 361 |
| 362 RUNTIME_FUNCTION(Runtime_ThrowNotInt32SharedTypedArrayError) { |
| 363 HandleScope scope(isolate); |
| 364 DCHECK_EQ(1, args.length()); |
| 365 CONVERT_ARG_HANDLE_CHECKED(Object, value, 0); |
| 366 THROW_NEW_ERROR_RETURN_FAILURE( |
| 367 isolate, NewTypeError(MessageTemplate::kNotInt32SharedTypedArray, value)); |
| 368 } |
| 369 |
| 370 RUNTIME_FUNCTION(Runtime_ThrowInvalidAtomicAccessIndexError) { |
| 371 HandleScope scope(isolate); |
| 372 DCHECK_EQ(0, args.length()); |
| 373 THROW_NEW_ERROR_RETURN_FAILURE( |
| 374 isolate, NewRangeError(MessageTemplate::kInvalidAtomicAccessIndex)); |
| 375 } |
| 368 | 376 |
| 369 RUNTIME_FUNCTION(Runtime_AtomicsCompareExchange) { | 377 RUNTIME_FUNCTION(Runtime_AtomicsCompareExchange) { |
| 370 HandleScope scope(isolate); | 378 HandleScope scope(isolate); |
| 371 DCHECK(args.length() == 4); | 379 DCHECK(args.length() == 4); |
| 372 CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0); | 380 CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0); |
| 373 CONVERT_SIZE_ARG_CHECKED(index, 1); | 381 CONVERT_SIZE_ARG_CHECKED(index, 1); |
| 374 CONVERT_NUMBER_ARG_HANDLE_CHECKED(oldobj, 2); | 382 CONVERT_NUMBER_ARG_HANDLE_CHECKED(oldobj, 2); |
| 375 CONVERT_NUMBER_ARG_HANDLE_CHECKED(newobj, 3); | 383 CONVERT_NUMBER_ARG_HANDLE_CHECKED(newobj, 3); |
| 376 RUNTIME_ASSERT(sta->GetBuffer()->is_shared()); | 384 RUNTIME_ASSERT(sta->GetBuffer()->is_shared()); |
| 377 RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length())); | 385 RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length())); |
| (...skipping 15 matching lines...) |
| 393 | 401 |
| 394 default: | 402 default: |
| 395 break; | 403 break; |
| 396 } | 404 } |
| 397 | 405 |
| 398 UNREACHABLE(); | 406 UNREACHABLE(); |
| 399 return isolate->heap()->undefined_value(); | 407 return isolate->heap()->undefined_value(); |
| 400 } | 408 } |
| 401 | 409 |
| 402 | 410 |
| 403 RUNTIME_FUNCTION(Runtime_AtomicsLoad) { | |
| 404 HandleScope scope(isolate); | |
| 405 DCHECK(args.length() == 2); | |
| 406 CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0); | |
| 407 CONVERT_SIZE_ARG_CHECKED(index, 1); | |
| 408 RUNTIME_ASSERT(sta->GetBuffer()->is_shared()); | |
| 409 RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length())); | |
| 410 | |
| 411 uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) + | |
| 412 NumberToSize(isolate, sta->byte_offset()); | |
| 413 | |
| 414 switch (sta->type()) { | |
| 415 #define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \ | |
| 416 case kExternal##Type##Array: \ | |
| 417 return DoLoad<ctype>(isolate, source, index); | |
| 418 | |
| 419 INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE) | |
| 420 #undef TYPED_ARRAY_CASE | |
| 421 | |
| 422 case kExternalUint8ClampedArray: | |
| 423 return DoLoad<uint8_t>(isolate, source, index); | |
| 424 | |
| 425 default: | |
| 426 break; | |
| 427 } | |
| 428 | |
| 429 UNREACHABLE(); | |
| 430 return isolate->heap()->undefined_value(); | |
| 431 } | |
| 432 | |
| 433 | |
| 434 RUNTIME_FUNCTION(Runtime_AtomicsStore) { | 411 RUNTIME_FUNCTION(Runtime_AtomicsStore) { |
| 435 HandleScope scope(isolate); | 412 HandleScope scope(isolate); |
| 436 DCHECK(args.length() == 3); | 413 DCHECK(args.length() == 3); |
| 437 CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0); | 414 CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0); |
| 438 CONVERT_SIZE_ARG_CHECKED(index, 1); | 415 CONVERT_SIZE_ARG_CHECKED(index, 1); |
| 439 CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2); | 416 CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2); |
| 440 RUNTIME_ASSERT(sta->GetBuffer()->is_shared()); | 417 RUNTIME_ASSERT(sta->GetBuffer()->is_shared()); |
| 441 RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length())); | 418 RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length())); |
| 442 | 419 |
| 443 uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) + | 420 uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) + |
| (...skipping 213 matching lines...) |
| 657 | 634 |
| 658 RUNTIME_FUNCTION(Runtime_AtomicsIsLockFree) { | 635 RUNTIME_FUNCTION(Runtime_AtomicsIsLockFree) { |
| 659 HandleScope scope(isolate); | 636 HandleScope scope(isolate); |
| 660 DCHECK(args.length() == 1); | 637 DCHECK(args.length() == 1); |
| 661 CONVERT_NUMBER_ARG_HANDLE_CHECKED(size, 0); | 638 CONVERT_NUMBER_ARG_HANDLE_CHECKED(size, 0); |
| 662 uint32_t usize = NumberToUint32(*size); | 639 uint32_t usize = NumberToUint32(*size); |
| 663 return isolate->heap()->ToBoolean(AtomicIsLockFree(usize)); | 640 return isolate->heap()->ToBoolean(AtomicIsLockFree(usize)); |
| 664 } | 641 } |
| 665 } // namespace internal | 642 } // namespace internal |
| 666 } // namespace v8 | 643 } // namespace v8 |
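For context on the GCC-builtin path (`#if V8_CC_GNU`) that this change keeps, the sketch below is a minimal standalone illustration, assuming GCC or Clang, of how `__atomic_compare_exchange_n` and `__atomic_fetch_add` behave with sequentially consistent ordering. The two helpers mirror `CompareExchangeSeqCst` and `AddSeqCst` above; the `main` driver and its values are illustrative only and are not part of the V8 sources.

```cpp
// Standalone sketch (GCC/Clang builtins); not V8 code, for illustration only.
#include <cstdint>
#include <cstdio>

template <typename T>
inline T CompareExchangeSeqCst(T* p, T oldval, T newval) {
  // On failure, __atomic_compare_exchange_n writes the current value of *p
  // back into oldval, so this wrapper always returns the value it observed.
  (void)__atomic_compare_exchange_n(p, &oldval, newval, 0, __ATOMIC_SEQ_CST,
                                    __ATOMIC_SEQ_CST);
  return oldval;
}

template <typename T>
inline T AddSeqCst(T* p, T value) {
  // Returns the value stored at *p before the addition.
  return __atomic_fetch_add(p, value, __ATOMIC_SEQ_CST);
}

int main() {
  int32_t cell = 5;
  // CAS succeeds: expected 5, stores 7, returns the previous value 5.
  std::printf("%d\n", static_cast<int>(CompareExchangeSeqCst<int32_t>(&cell, 5, 7)));
  // CAS fails: expected 5 but cell is 7; cell is unchanged, returns 7.
  std::printf("%d\n", static_cast<int>(CompareExchangeSeqCst<int32_t>(&cell, 5, 9)));
  // Fetch-add returns the old value 7; cell is now 8.
  std::printf("%d\n", static_cast<int>(AddSeqCst<int32_t>(&cell, 1)));
  return 0;
}
```

The same observed-value convention is what lets the runtime's `DoCompareExchange` hand the previous element value back to JavaScript as the result of `Atomics.compareExchange`.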