OLD | NEW |
1 // Copyright 2015 the V8 project authors. All rights reserved. | 1 // Copyright 2015 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/runtime/runtime-utils.h" | 5 #include "src/runtime/runtime-utils.h" |
6 | 6 |
7 #include "src/arguments.h" | 7 #include "src/arguments.h" |
8 #include "src/base/macros.h" | 8 #include "src/base/macros.h" |
9 #include "src/base/platform/mutex.h" | 9 #include "src/base/platform/mutex.h" |
10 #include "src/conversions-inl.h" | 10 #include "src/conversions-inl.h" |
(...skipping 39 matching lines...)
50 template <typename T> | 50 template <typename T> |
51 inline T OrSeqCst(T* p, T value) { | 51 inline T OrSeqCst(T* p, T value) { |
52 return __atomic_fetch_or(p, value, __ATOMIC_SEQ_CST); | 52 return __atomic_fetch_or(p, value, __ATOMIC_SEQ_CST); |
53 } | 53 } |
54 | 54 |
55 template <typename T> | 55 template <typename T> |
56 inline T XorSeqCst(T* p, T value) { | 56 inline T XorSeqCst(T* p, T value) { |
57 return __atomic_fetch_xor(p, value, __ATOMIC_SEQ_CST); | 57 return __atomic_fetch_xor(p, value, __ATOMIC_SEQ_CST); |
58 } | 58 } |
59 | 59 |
60 template <typename T> | |
61 inline T ExchangeSeqCst(T* p, T value) { | |
62 return __atomic_exchange_n(p, value, __ATOMIC_SEQ_CST); | |
63 } | |
64 | |
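Note: the __atomic_* builtins in the GCC/Clang branch above return the value the location held before the operation, which is the contract every SeqCst helper in this file follows. A minimal standalone sketch of that contract (assumes a GCC/Clang toolchain; not part of this CL):

  #include <cassert>
  #include <cstdint>

  int main() {
    uint8_t cell = 0x05;
    // Fetch-or: stores old | arg with seq-cst ordering, returns the OLD value.
    uint8_t prev =
        __atomic_fetch_or(&cell, static_cast<uint8_t>(0x03), __ATOMIC_SEQ_CST);
    assert(prev == 0x05);  // previous contents
    assert(cell == 0x07);  // updated atomically
    return 0;
  }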
65 #elif V8_CC_MSVC | 60 #elif V8_CC_MSVC |
66 | 61 |
67 #define InterlockedCompareExchange32 _InterlockedCompareExchange | 62 #define InterlockedCompareExchange32 _InterlockedCompareExchange |
68 #define InterlockedExchange32 _InterlockedExchange | |
69 #define InterlockedExchangeAdd32 _InterlockedExchangeAdd | 63 #define InterlockedExchangeAdd32 _InterlockedExchangeAdd |
70 #define InterlockedAnd32 _InterlockedAnd | 64 #define InterlockedAnd32 _InterlockedAnd |
71 #define InterlockedOr32 _InterlockedOr | 65 #define InterlockedOr32 _InterlockedOr |
72 #define InterlockedXor32 _InterlockedXor | 66 #define InterlockedXor32 _InterlockedXor |
73 #define InterlockedExchangeAdd16 _InterlockedExchangeAdd16 | 67 #define InterlockedExchangeAdd16 _InterlockedExchangeAdd16 |
74 #define InterlockedCompareExchange8 _InterlockedCompareExchange8 | 68 #define InterlockedCompareExchange8 _InterlockedCompareExchange8 |
75 #define InterlockedExchangeAdd8 _InterlockedExchangeAdd8 | 69 #define InterlockedExchangeAdd8 _InterlockedExchangeAdd8 |
76 | 70 |
77 #define ATOMIC_OPS(type, suffix, vctype) \ | 71 #define ATOMIC_OPS(type, suffix, vctype) \ |
78 inline type AddSeqCst(type* p, type value) { \ | 72 inline type AddSeqCst(type* p, type value) { \ |
79 return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p), \ | 73 return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p), \ |
80 bit_cast<vctype>(value)); \ | 74 bit_cast<vctype>(value)); \ |
81 } \ | 75 } \ |
82 inline type SubSeqCst(type* p, type value) { \ | 76 inline type SubSeqCst(type* p, type value) { \ |
83 return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p), \ | 77 return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p), \ |
84 -bit_cast<vctype>(value)); \ | 78 -bit_cast<vctype>(value)); \ |
85 } \ | 79 } \ |
86 inline type AndSeqCst(type* p, type value) { \ | 80 inline type AndSeqCst(type* p, type value) { \ |
87 return InterlockedAnd##suffix(reinterpret_cast<vctype*>(p), \ | 81 return InterlockedAnd##suffix(reinterpret_cast<vctype*>(p), \ |
88 bit_cast<vctype>(value)); \ | 82 bit_cast<vctype>(value)); \ |
89 } \ | 83 } \ |
90 inline type OrSeqCst(type* p, type value) { \ | 84 inline type OrSeqCst(type* p, type value) { \ |
91 return InterlockedOr##suffix(reinterpret_cast<vctype*>(p), \ | 85 return InterlockedOr##suffix(reinterpret_cast<vctype*>(p), \ |
92 bit_cast<vctype>(value)); \ | 86 bit_cast<vctype>(value)); \ |
93 } \ | 87 } \ |
94 inline type XorSeqCst(type* p, type value) { \ | 88 inline type XorSeqCst(type* p, type value) { \ |
95 return InterlockedXor##suffix(reinterpret_cast<vctype*>(p), \ | 89 return InterlockedXor##suffix(reinterpret_cast<vctype*>(p), \ |
96 bit_cast<vctype>(value)); \ | 90 bit_cast<vctype>(value)); \ |
97 } \ | 91 } \ |
98 inline type ExchangeSeqCst(type* p, type value) { \ | |
99 return InterlockedExchange##suffix(reinterpret_cast<vctype*>(p), \ | |
100 bit_cast<vctype>(value)); \ | |
101 } \ | |
102 \ | 92 \ |
103 inline type CompareExchangeSeqCst(type* p, type oldval, type newval) { \ | 93 inline type CompareExchangeSeqCst(type* p, type oldval, type newval) { \ |
104 return InterlockedCompareExchange##suffix(reinterpret_cast<vctype*>(p), \ | 94 return InterlockedCompareExchange##suffix(reinterpret_cast<vctype*>(p), \ |
105 bit_cast<vctype>(newval), \ | 95 bit_cast<vctype>(newval), \ |
106 bit_cast<vctype>(oldval)); \ | 96 bit_cast<vctype>(oldval)); \ |
107 } | 97 } |
108 | 98 |
109 ATOMIC_OPS(int8_t, 8, char) | 99 ATOMIC_OPS(int8_t, 8, char) |
110 ATOMIC_OPS(uint8_t, 8, char) | 100 ATOMIC_OPS(uint8_t, 8, char) |
111 ATOMIC_OPS(int16_t, 16, short) /* NOLINT(runtime/int) */ | 101 ATOMIC_OPS(int16_t, 16, short) /* NOLINT(runtime/int) */ |
112 ATOMIC_OPS(uint16_t, 16, short) /* NOLINT(runtime/int) */ | 102 ATOMIC_OPS(uint16_t, 16, short) /* NOLINT(runtime/int) */ |
113 ATOMIC_OPS(int32_t, 32, long) /* NOLINT(runtime/int) */ | 103 ATOMIC_OPS(int32_t, 32, long) /* NOLINT(runtime/int) */ |
114 ATOMIC_OPS(uint32_t, 32, long) /* NOLINT(runtime/int) */ | 104 ATOMIC_OPS(uint32_t, 32, long) /* NOLINT(runtime/int) */ |
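Note: the vctype column mirrors the parameter types of the Interlocked intrinsics; `long` is the 32-bit case because Windows uses the LLP64 model on both x86 and x64, so `long` stays 4 bytes there. A one-line sanity check (assumption: compiling for a Windows target):

  static_assert(sizeof(long) == 4,
                "Interlocked*32 operands are 32-bit on Windows (LLP64)");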
115 | 105 |
116 #undef ATOMIC_OPS_INTEGER | 106 #undef ATOMIC_OPS_INTEGER |
117 #undef ATOMIC_OPS | 107 #undef ATOMIC_OPS |
118 | 108 |
119 #undef InterlockedCompareExchange32 | 109 #undef InterlockedCompareExchange32 |
120 #undef InterlockedExchange32 | |
121 #undef InterlockedExchangeAdd32 | 110 #undef InterlockedExchangeAdd32 |
122 #undef InterlockedAnd32 | 111 #undef InterlockedAnd32 |
123 #undef InterlockedOr32 | 112 #undef InterlockedOr32 |
124 #undef InterlockedXor32 | 113 #undef InterlockedXor32 |
125 #undef InterlockedExchangeAdd16 | 114 #undef InterlockedExchangeAdd16 |
126 #undef InterlockedCompareExchange8 | 115 #undef InterlockedCompareExchange8 |
127 #undef InterlockedExchangeAdd8 | 116 #undef InterlockedExchangeAdd8 |
128 | 117 |
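Note: the Interlocked family has no subtract form, which is why SubSeqCst above passes the negated operand to InterlockedExchangeAdd; under two's-complement wraparound, fetch-add(-v) is exactly fetch-sub(v). A minimal MSVC-only sketch of that identity (illustration, not part of this CL):

  #include <cassert>
  #include <intrin.h>

  int main() {
    long cell = 10;
    // Atomic subtract of 3, phrased as fetch-add of -3; the intrinsic
    // returns the previous value, matching the SeqCst helpers' contract.
    long prev = _InterlockedExchangeAdd(&cell, -3);
    assert(prev == 10 && cell == 7);
    return 0;
  }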
129 #else | 118 #else |
130 | 119 |
(...skipping 105 matching lines...)
236 | 225 |
237 template <typename T> | 226 template <typename T> |
238 inline Object* DoXor(Isolate* isolate, void* buffer, size_t index, | 227 inline Object* DoXor(Isolate* isolate, void* buffer, size_t index, |
239 Handle<Object> obj) { | 228 Handle<Object> obj) { |
240 T value = FromObject<T>(obj); | 229 T value = FromObject<T>(obj); |
241 T result = XorSeqCst(static_cast<T*>(buffer) + index, value); | 230 T result = XorSeqCst(static_cast<T*>(buffer) + index, value); |
242 return ToObject(isolate, result); | 231 return ToObject(isolate, result); |
243 } | 232 } |
244 | 233 |
245 | 234 |
246 template <typename T> | |
247 inline Object* DoExchange(Isolate* isolate, void* buffer, size_t index, | |
248 Handle<Object> obj) { | |
249 T value = FromObject<T>(obj); | |
250 T result = ExchangeSeqCst(static_cast<T*>(buffer) + index, value); | |
251 return ToObject(isolate, result); | |
252 } | |
253 | |
254 | |
255 // Uint8Clamped functions | 235 // Uint8Clamped functions |
256 | 236 |
257 uint8_t ClampToUint8(int32_t value) { | 237 uint8_t ClampToUint8(int32_t value) { |
258 if (value < 0) return 0; | 238 if (value < 0) return 0; |
259 if (value > 255) return 255; | 239 if (value > 255) return 255; |
260 return value; | 240 return value; |
261 } | 241 } |
262 | 242 |
263 | 243 |
264 inline Object* DoCompareExchangeUint8Clamped(Isolate* isolate, void* buffer, | 244 inline Object* DoCompareExchangeUint8Clamped(Isolate* isolate, void* buffer, |
(...skipping 26 matching lines...)
291 | 271 |
292 DO_UINT8_CLAMPED_OP(Add, +) | 272 DO_UINT8_CLAMPED_OP(Add, +) |
293 DO_UINT8_CLAMPED_OP(Sub, -) | 273 DO_UINT8_CLAMPED_OP(Sub, -) |
294 DO_UINT8_CLAMPED_OP(And, &) | 274 DO_UINT8_CLAMPED_OP(And, &) |
295 DO_UINT8_CLAMPED_OP(Or, | ) | 275 DO_UINT8_CLAMPED_OP(Or, | ) |
296 DO_UINT8_CLAMPED_OP(Xor, ^) | 276 DO_UINT8_CLAMPED_OP(Xor, ^) |
297 | 277 |
298 #undef DO_UINT8_CLAMPED_OP | 278 #undef DO_UINT8_CLAMPED_OP |
299 | 279 |
300 | 280 |
301 inline Object* DoExchangeUint8Clamped(Isolate* isolate, void* buffer, | |
302 size_t index, Handle<Object> obj) { | |
303 typedef int32_t convert_type; | |
304 uint8_t* p = static_cast<uint8_t*>(buffer) + index; | |
305 uint8_t result = ClampToUint8(FromObject<convert_type>(obj)); | |
306 uint8_t expected; | |
307 do { | |
308 expected = *p; | |
309 } while (CompareExchangeSeqCst(p, expected, result) != expected); | |
310 return ToObject(isolate, expected); | |
311 } | |
312 | |
313 | |
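Note: the Uint8Clamped ops cannot map onto a single hardware RMW, so each one runs the compare-exchange retry loop seen above: read the current byte, compute the clamped result, and retry until the CAS observes an unchanged cell. A standalone sketch with std::atomic (illustrative only):

  #include <atomic>
  #include <cstdint>

  uint8_t ClampedAddSketch(std::atomic<uint8_t>& cell, int32_t value) {
    uint8_t expected = cell.load(std::memory_order_seq_cst);
    uint8_t desired;
    do {
      int32_t sum = static_cast<int32_t>(expected) + value;
      desired = sum < 0 ? 0 : sum > 255 ? 255 : static_cast<uint8_t>(sum);
      // On failure, compare_exchange_strong reloads `expected` for the retry.
    } while (!cell.compare_exchange_strong(expected, desired,
                                           std::memory_order_seq_cst));
    return expected;  // previous value, as the runtime functions return
  }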
314 } // anonymous namespace | 281 } // anonymous namespace |
315 | 282 |
316 // Duplicated from objects.h | 283 // Duplicated from objects.h |
317 // V has parameters (Type, type, TYPE, C type, element_size) | 284 // V has parameters (Type, type, TYPE, C type, element_size) |
318 #define INTEGER_TYPED_ARRAYS(V) \ | 285 #define INTEGER_TYPED_ARRAYS(V) \ |
319 V(Uint8, uint8, UINT8, uint8_t, 1) \ | 286 V(Uint8, uint8, UINT8, uint8_t, 1) \ |
320 V(Int8, int8, INT8, int8_t, 1) \ | 287 V(Int8, int8, INT8, int8_t, 1) \ |
321 V(Uint16, uint16, UINT16, uint16_t, 2) \ | 288 V(Uint16, uint16, UINT16, uint16_t, 2) \ |
322 V(Int16, int16, INT16, int16_t, 2) \ | 289 V(Int16, int16, INT16, int16_t, 2) \ |
323 V(Uint32, uint32, UINT32, uint32_t, 4) \ | 290 V(Uint32, uint32, UINT32, uint32_t, 4) \ |
(...skipping 210 matching lines...)
534 | 501 |
535 default: | 502 default: |
536 break; | 503 break; |
537 } | 504 } |
538 | 505 |
539 UNREACHABLE(); | 506 UNREACHABLE(); |
540 return isolate->heap()->undefined_value(); | 507 return isolate->heap()->undefined_value(); |
541 } | 508 } |
542 | 509 |
543 | 510 |
544 RUNTIME_FUNCTION(Runtime_AtomicsExchange) { | |
545 HandleScope scope(isolate); | |
546 DCHECK_EQ(3, args.length()); | |
547 CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0); | |
548 CONVERT_SIZE_ARG_CHECKED(index, 1); | |
549 CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2); | |
550 CHECK(sta->GetBuffer()->is_shared()); | |
551 CHECK_LT(index, NumberToSize(sta->length())); | |
552 | |
553 uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) + | |
554 NumberToSize(sta->byte_offset()); | |
555 | |
556 switch (sta->type()) { | |
557 #define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \ | |
558 case kExternal##Type##Array: \ | |
559 return DoExchange<ctype>(isolate, source, index, value); | |
560 | |
561 INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE) | |
562 #undef TYPED_ARRAY_CASE | |
563 | |
564 case kExternalUint8ClampedArray: | |
565 return DoExchangeUint8Clamped(isolate, source, index, value); | |
566 | |
567 default: | |
568 break; | |
569 } | |
570 | |
571 UNREACHABLE(); | |
572 return isolate->heap()->undefined_value(); | |
573 } | |
574 | |
575 | |
576 RUNTIME_FUNCTION(Runtime_AtomicsIsLockFree) { | 511 RUNTIME_FUNCTION(Runtime_AtomicsIsLockFree) { |
577 HandleScope scope(isolate); | 512 HandleScope scope(isolate); |
578 DCHECK_EQ(1, args.length()); | 513 DCHECK_EQ(1, args.length()); |
579 CONVERT_NUMBER_ARG_HANDLE_CHECKED(size, 0); | 514 CONVERT_NUMBER_ARG_HANDLE_CHECKED(size, 0); |
580 uint32_t usize = NumberToUint32(*size); | 515 uint32_t usize = NumberToUint32(*size); |
581 return isolate->heap()->ToBoolean(AtomicIsLockFree(usize)); | 516 return isolate->heap()->ToBoolean(AtomicIsLockFree(usize)); |
582 } | 517 } |
583 } // namespace internal | 518 } // namespace internal |
584 } // namespace v8 | 519 } // namespace v8 |
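Note: Runtime_AtomicsIsLockFree backs Atomics.isLockFree(size) from the shared-memory proposal. AtomicIsLockFree itself is defined in an elided portion of this file; a plausible shape (an assumption, not quoted from this CL) is a whitelist of the 1-, 2-, and 4-byte access sizes the intrinsics above cover:

  #include <cstdint>

  // Hypothetical sketch of the predicate; the real definition is elided above.
  inline bool AtomicIsLockFreeSketch(uint32_t size) {
    return size == 1 || size == 2 || size == 4;
  }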