// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#include "src/arguments.h"
#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
#include "src/conversions.h"
#include "src/runtime/runtime-utils.h"


namespace v8 {
namespace internal {

// Assume that 32-bit architectures don't have 64-bit atomic ops.
// TODO(binji): can we do better here?
#if V8_TARGET_ARCH_64_BIT

#define REQUIRE_LOCK_64_BIT 0
Jarin 2015/06/01 17:50:10:
  This should go into some header file because it ha…
binji 2015/06/02 21:32:55:
  Done.

bool IsLockFree(uint32_t size) {
Jarin 2015/06/01 17:50:10:
  Channeling bmeurer@: The file-local methods should…
binji 2015/06/02 21:32:55:
  Done.
  return size == 1 || size == 2 || size == 4 || size == 8;
}

#elif V8_TARGET_ARCH_32_BIT

#define REQUIRE_LOCK_64_BIT 1

bool IsLockFree(uint32_t size) { return size == 1 || size == 2 || size == 4; }

#else

#error Unknown bit size!

#endif

#if V8_CC_GNU
Jarin 2015/06/01 17:50:10:
  This platform-specific bifurcation is a bit ugly,…
binji 2015/06/02 21:32:55:
  Well, if we end up switching to stubs anyway, it d…
Jarin 2015/06/03 13:30:15:
  Yeah, that is a bit nasty. Maybe it is best to lea…

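// Note: on failure, __atomic_compare_exchange_n writes the value it actually
// observed at *p back into |oldval|, so returning |oldval| below always
// yields the memory's previous contents, which is what
// Atomics.compareExchange must return.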
template <typename T>
inline T CompareExchangeSeqCst(T* p, T oldval, T newval) {
  (void)__atomic_compare_exchange_n(p, &oldval, newval, 0, __ATOMIC_SEQ_CST,
                                    __ATOMIC_SEQ_CST);
  return oldval;
}

template <typename T>
inline T LoadSeqCst(T* p) {
  T result;
  __atomic_load(p, &result, __ATOMIC_SEQ_CST);
  return result;
}

template <typename T>
inline void StoreSeqCst(T* p, T value) {
  __atomic_store_n(p, value, __ATOMIC_SEQ_CST);
}

template <typename T>
inline T AddSeqCst(T* p, T value) {
  return __atomic_fetch_add(p, value, __ATOMIC_SEQ_CST);
}

template <typename T>
inline T SubSeqCst(T* p, T value) {
  return __atomic_fetch_sub(p, value, __ATOMIC_SEQ_CST);
}

template <typename T>
inline T AndSeqCst(T* p, T value) {
  return __atomic_fetch_and(p, value, __ATOMIC_SEQ_CST);
}

template <typename T>
inline T OrSeqCst(T* p, T value) {
  return __atomic_fetch_or(p, value, __ATOMIC_SEQ_CST);
}

template <typename T>
inline T XorSeqCst(T* p, T value) {
  return __atomic_fetch_xor(p, value, __ATOMIC_SEQ_CST);
}

template <typename T>
inline T ExchangeSeqCst(T* p, T value) {
  return __atomic_exchange_n(p, value, __ATOMIC_SEQ_CST);
}


#if REQUIRE_LOCK_64_BIT

// We only need to implement the following functions, because the rest of the
// atomic operations only work on integer types, and the only 64-bit type is
// float64. Similarly, because the values are being bit_cast from double ->
// uint64_t, we don't need to implement these functions for int64_t either.

static base::LazyMutex atomic_mutex = LAZY_MUTEX_INITIALIZER;

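// All 64-bit accesses below funnel through this one mutex, so the three
// operations are atomic with respect to each other, though not lock-free;
// IsLockFree(8) correspondingly returns false on 32-bit targets.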
inline uint64_t CompareExchangeSeqCst(uint64_t* p, uint64_t oldval,
                                      uint64_t newval) {
  base::LockGuard<base::Mutex> lock_guard(atomic_mutex.Pointer());
  uint64_t result = *p;
  if (result == oldval) *p = newval;
  return result;
}


inline uint64_t LoadSeqCst(uint64_t* p) {
  base::LockGuard<base::Mutex> lock_guard(atomic_mutex.Pointer());
  return *p;
}


inline void StoreSeqCst(uint64_t* p, uint64_t value) {
  base::LockGuard<base::Mutex> lock_guard(atomic_mutex.Pointer());
  *p = value;
}

#endif  // REQUIRE_LOCK_64_BIT

#elif V8_CC_MSVC

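// The 32-bit Interlocked intrinsics have no size suffix, unlike their 8-,
// 16-, and 64-bit counterparts, so alias them here to let ATOMIC_OPS below
// paste a uniform ##suffix.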
#define _InterlockedCompareExchange32 _InterlockedCompareExchange
#define _InterlockedExchange32 _InterlockedExchange
#define _InterlockedExchangeAdd32 _InterlockedExchangeAdd
#define _InterlockedAnd32 _InterlockedAnd
#define _InterlockedOr32 _InterlockedOr
#define _InterlockedXor32 _InterlockedXor

#define INTEGER_TYPES(V)                           \
  V(int8_t, 8, char)                               \
  V(uint8_t, 8, char)                              \
  V(int16_t, 16, short) /* NOLINT(runtime/int) */  \
  V(uint16_t, 16, short) /* NOLINT(runtime/int) */ \
  V(int32_t, 32, long) /* NOLINT(runtime/int) */   \
  V(uint32_t, 32, long) /* NOLINT(runtime/int) */  \
  V(int64_t, 64, LONGLONG)                         \
  V(uint64_t, 64, LONGLONG)

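// ATOMIC_OPS stamps out the full helper set for one integer type. Note the
// _Interlocked* argument order, (destination, exchange, comparand): |newval|
// is passed before |oldval|. LoadSeqCst is a plain volatile read, which
// relies on MSVC's volatile semantics plus the fact that every mutating
// operation here is a full-barrier interlocked intrinsic.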
#define ATOMIC_OPS(type, suffix, vctype)                                       \
  inline type CompareExchangeSeqCst(volatile type* p, type oldval,             \
                                    type newval) {                             \
    return _InterlockedCompareExchange##suffix(                                \
        reinterpret_cast<volatile vctype*>(p), bit_cast<vctype>(newval),       \
        bit_cast<vctype>(oldval));                                             \
  }                                                                            \
  inline type LoadSeqCst(volatile type* p) { return *p; }                      \
  inline void StoreSeqCst(volatile type* p, type value) {                      \
    _InterlockedExchange##suffix(reinterpret_cast<volatile vctype*>(p),        \
                                 bit_cast<vctype>(value));                     \
  }                                                                            \
  inline type AddSeqCst(volatile type* p, type value) {                        \
    return _InterlockedExchangeAdd##suffix(                                    \
        reinterpret_cast<volatile vctype*>(p), bit_cast<vctype>(value));       \
  }                                                                            \
  inline type SubSeqCst(volatile type* p, type value) {                        \
    return _InterlockedExchangeAdd##suffix(                                    \
        reinterpret_cast<volatile vctype*>(p), -bit_cast<vctype>(value));      \
  }                                                                            \
  inline type AndSeqCst(volatile type* p, type value) {                        \
    return _InterlockedAnd##suffix(reinterpret_cast<volatile vctype*>(p),      \
                                   bit_cast<vctype>(value));                   \
  }                                                                            \
  inline type OrSeqCst(volatile type* p, type value) {                         \
    return _InterlockedOr##suffix(reinterpret_cast<volatile vctype*>(p),       \
                                  bit_cast<vctype>(value));                    \
  }                                                                            \
  inline type XorSeqCst(volatile type* p, type value) {                        \
    return _InterlockedXor##suffix(reinterpret_cast<volatile vctype*>(p),      \
                                   bit_cast<vctype>(value));                   \
  }                                                                            \
  inline type ExchangeSeqCst(volatile type* p, type value) {                   \
    return _InterlockedExchange##suffix(reinterpret_cast<volatile vctype*>(p), \
                                        bit_cast<vctype>(value));              \
  }
INTEGER_TYPES(ATOMIC_OPS)
#undef ATOMIC_OPS

#undef INTEGER_TYPES
#undef _InterlockedCompareExchange32
#undef _InterlockedExchange32
#undef _InterlockedExchangeAdd32
#undef _InterlockedAnd32
#undef _InterlockedOr32
#undef _InterlockedXor32

#else

#error Unsupported platform!

#endif

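// Conversions between JS numbers and raw machine values. FromObject extracts
// a C value from a Handle<Object>; ToAtomic/FromAtomic convert between an
// element type and the same-width integer the atomic helpers operate on,
// using bit_cast for the floating-point cases so the bit pattern is
// preserved exactly.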
template <typename T>
T FromObject(Handle<Object> number);

template <>
inline uint32_t FromObject<uint32_t>(Handle<Object> number) {
  return NumberToUint32(*number);
}

template <>
inline int32_t FromObject<int32_t>(Handle<Object> number) {
  return NumberToInt32(*number);
}

template <>
inline float FromObject<float>(Handle<Object> number) {
  return static_cast<float>(number->Number());
}

template <>
inline double FromObject<double>(Handle<Object> number) {
  return number->Number();
}

template <typename T, typename F>
inline T ToAtomic(F from) {
  return static_cast<T>(from);
}

template <>
inline uint32_t ToAtomic<uint32_t, float>(float from) {
  return bit_cast<uint32_t, float>(from);
}

template <>
inline uint64_t ToAtomic<uint64_t, double>(double from) {
  return bit_cast<uint64_t, double>(from);
}

template <typename T, typename F>
inline T FromAtomic(F from) {
  return static_cast<T>(from);
}

template <>
inline float FromAtomic<float, uint32_t>(uint32_t from) {
  return bit_cast<float, uint32_t>(from);
}

template <>
inline double FromAtomic<double, uint64_t>(uint64_t from) {
  return bit_cast<double, uint64_t>(from);
}

template <typename T>
inline Object* ToObject(Isolate* isolate, T t);

template <>
inline Object* ToObject<int8_t>(Isolate* isolate, int8_t t) {
  return Smi::FromInt(t);
}

template <>
inline Object* ToObject<uint8_t>(Isolate* isolate, uint8_t t) {
  return Smi::FromInt(t);
}

template <>
inline Object* ToObject<int16_t>(Isolate* isolate, int16_t t) {
  return Smi::FromInt(t);
}

template <>
inline Object* ToObject<uint16_t>(Isolate* isolate, uint16_t t) {
  return Smi::FromInt(t);
}

template <>
inline Object* ToObject<int32_t>(Isolate* isolate, int32_t t) {
  return *isolate->factory()->NewNumber(t);
}

template <>
inline Object* ToObject<uint32_t>(Isolate* isolate, uint32_t t) {
  return *isolate->factory()->NewNumber(t);
}

template <>
inline Object* ToObject<float>(Isolate* isolate, float t) {
  return *isolate->factory()->NewNumber(t);
}

template <>
inline Object* ToObject<double>(Isolate* isolate, double t) {
  return *isolate->factory()->NewNumber(t);
}

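// FromObjectTraits maps each element type to the type used when converting
// a JS number (convert_type) and to the integer type the atomic helpers
// actually operate on (atomic_type).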
template <typename T>
struct FromObjectTraits {};

template <>
struct FromObjectTraits<int8_t> {
  typedef int32_t convert_type;
  typedef int8_t atomic_type;
};

template <>
struct FromObjectTraits<uint8_t> {
  typedef uint32_t convert_type;
  typedef uint8_t atomic_type;
};

template <>
struct FromObjectTraits<int16_t> {
  typedef int32_t convert_type;
  typedef int16_t atomic_type;
};

template <>
struct FromObjectTraits<uint16_t> {
  typedef uint32_t convert_type;
  typedef uint16_t atomic_type;
};

template <>
struct FromObjectTraits<int32_t> {
  typedef int32_t convert_type;
  typedef int32_t atomic_type;
};

template <>
struct FromObjectTraits<uint32_t> {
  typedef uint32_t convert_type;
  typedef uint32_t atomic_type;
};

template <>
struct FromObjectTraits<float> {
  typedef float convert_type;
  typedef uint32_t atomic_type;
};

template <>
struct FromObjectTraits<double> {
  typedef double convert_type;
  typedef uint64_t atomic_type;
};


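// The Do* helpers below tie the pieces together: convert the incoming JS
// numbers to their raw atomic representation, perform the seq-cst operation
// on the typed array's backing store, and box the result back into a JS
// number.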
template <typename T>
inline Object* DoCompareExchange(Isolate* isolate, void* buffer, size_t index,
                                 Handle<Object> oldobj, Handle<Object> newobj) {
  typedef typename FromObjectTraits<T>::atomic_type atomic_type;
  typedef typename FromObjectTraits<T>::convert_type convert_type;
  atomic_type oldval = ToAtomic<atomic_type>(FromObject<convert_type>(oldobj));
  atomic_type newval = ToAtomic<atomic_type>(FromObject<convert_type>(newobj));
  atomic_type result = CompareExchangeSeqCst(
      static_cast<atomic_type*>(buffer) + index, oldval, newval);
  return ToObject<T>(isolate, FromAtomic<T>(result));
}


template <typename T>
inline Object* DoLoad(Isolate* isolate, void* buffer, size_t index) {
  typedef typename FromObjectTraits<T>::atomic_type atomic_type;
  atomic_type result = LoadSeqCst(static_cast<atomic_type*>(buffer) + index);
  return ToObject<T>(isolate, FromAtomic<T>(result));
}


template <typename T>
inline Object* DoStore(Isolate* isolate, void* buffer, size_t index,
                       Handle<Object> obj) {
  typedef typename FromObjectTraits<T>::atomic_type atomic_type;
  typedef typename FromObjectTraits<T>::convert_type convert_type;
  atomic_type value = ToAtomic<atomic_type>(FromObject<convert_type>(obj));
  StoreSeqCst(static_cast<atomic_type*>(buffer) + index, value);
  return *obj;
}


template <typename T>
inline Object* DoAdd(Isolate* isolate, void* buffer, size_t index,
                     Handle<Object> obj) {
  typedef typename FromObjectTraits<T>::atomic_type atomic_type;
  typedef typename FromObjectTraits<T>::convert_type convert_type;
  atomic_type value = ToAtomic<atomic_type>(FromObject<convert_type>(obj));
  atomic_type result =
      AddSeqCst(static_cast<atomic_type*>(buffer) + index, value);
  return ToObject<T>(isolate, FromAtomic<T>(result));
}


template <typename T>
inline Object* DoSub(Isolate* isolate, void* buffer, size_t index,
                     Handle<Object> obj) {
  typedef typename FromObjectTraits<T>::atomic_type atomic_type;
  typedef typename FromObjectTraits<T>::convert_type convert_type;
  atomic_type value = ToAtomic<atomic_type>(FromObject<convert_type>(obj));
  atomic_type result =
      SubSeqCst(static_cast<atomic_type*>(buffer) + index, value);
  return ToObject<T>(isolate, FromAtomic<T>(result));
}


template <typename T>
inline Object* DoAnd(Isolate* isolate, void* buffer, size_t index,
                     Handle<Object> obj) {
  typedef typename FromObjectTraits<T>::atomic_type atomic_type;
  typedef typename FromObjectTraits<T>::convert_type convert_type;
  atomic_type value = ToAtomic<atomic_type>(FromObject<convert_type>(obj));
  atomic_type result =
      AndSeqCst(static_cast<atomic_type*>(buffer) + index, value);
  return ToObject<T>(isolate, FromAtomic<T>(result));
}


template <typename T>
inline Object* DoOr(Isolate* isolate, void* buffer, size_t index,
                    Handle<Object> obj) {
  typedef typename FromObjectTraits<T>::atomic_type atomic_type;
  typedef typename FromObjectTraits<T>::convert_type convert_type;
  atomic_type value = ToAtomic<atomic_type>(FromObject<convert_type>(obj));
  atomic_type result =
      OrSeqCst(static_cast<atomic_type*>(buffer) + index, value);
  return ToObject<T>(isolate, FromAtomic<T>(result));
}


template <typename T>
inline Object* DoXor(Isolate* isolate, void* buffer, size_t index,
                     Handle<Object> obj) {
  typedef typename FromObjectTraits<T>::atomic_type atomic_type;
  typedef typename FromObjectTraits<T>::convert_type convert_type;
  atomic_type value = ToAtomic<atomic_type>(FromObject<convert_type>(obj));
  atomic_type result =
      XorSeqCst(static_cast<atomic_type*>(buffer) + index, value);
  return ToObject<T>(isolate, FromAtomic<T>(result));
}


template <typename T>
inline Object* DoExchange(Isolate* isolate, void* buffer, size_t index,
                          Handle<Object> obj) {
  typedef typename FromObjectTraits<T>::atomic_type atomic_type;
  typedef typename FromObjectTraits<T>::convert_type convert_type;
  atomic_type value = ToAtomic<atomic_type>(FromObject<convert_type>(obj));
  atomic_type result =
      ExchangeSeqCst(static_cast<atomic_type*>(buffer) + index, value);
  return ToObject<T>(isolate, FromAtomic<T>(result));
}


// Duplicated from objects.h
// V has parameters (Type, type, TYPE, C type, element_size)
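// Float32 and Float64 are deliberately absent: the arithmetic and bitwise
// runtime functions below dispatch over this list only, so Atomics.add and
// friends never operate on a floating-point array.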
#define INTEGER_TYPED_ARRAYS(V)          \
  V(Uint8, uint8, UINT8, uint8_t, 1)     \
  V(Int8, int8, INT8, int8_t, 1)         \
  V(Uint16, uint16, UINT16, uint16_t, 2) \
  V(Int16, int16, INT16, int16_t, 2)     \
  V(Uint32, uint32, UINT32, uint32_t, 4) \
  V(Int32, int32, INT32, int32_t, 4)     \
  V(Uint8Clamped, uint8_clamped, UINT8_CLAMPED, uint8_t, 1)


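// Runtime entry points. Argument validation is DCHECK-only; the JS-level
// callers are expected to have already verified that the typed array is
// backed by a shared buffer and that the index is in bounds.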
RUNTIME_FUNCTION(Runtime_AtomicsCompareExchange) {
  HandleScope scope(isolate);
  DCHECK(args.length() == 4);
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(oldobj, 2);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(newobj, 3);
  DCHECK(sta->GetBuffer()->is_shared());
  DCHECK(index < NumberToSize(isolate, sta->length()));

  void* buffer = sta->GetBuffer()->backing_store();

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoCompareExchange<ctype>(isolate, buffer, index, oldobj, newobj);

    TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    default:
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}


RUNTIME_FUNCTION(Runtime_AtomicsLoad) {
  HandleScope scope(isolate);
  DCHECK(args.length() == 2);
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  DCHECK(sta->GetBuffer()->is_shared());
  DCHECK(index < NumberToSize(isolate, sta->length()));

  void* buffer = sta->GetBuffer()->backing_store();

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoLoad<ctype>(isolate, buffer, index);

    TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    default:
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}


RUNTIME_FUNCTION(Runtime_AtomicsStore) {
  HandleScope scope(isolate);
  DCHECK(args.length() == 3);
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
  DCHECK(sta->GetBuffer()->is_shared());
  DCHECK(index < NumberToSize(isolate, sta->length()));

  void* buffer = sta->GetBuffer()->backing_store();

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoStore<ctype>(isolate, buffer, index, value);

    TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    default:
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}


RUNTIME_FUNCTION(Runtime_AtomicsAdd) {
  HandleScope scope(isolate);
  DCHECK(args.length() == 3);
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
  DCHECK(sta->GetBuffer()->is_shared());
  DCHECK(index < NumberToSize(isolate, sta->length()));

  void* buffer = sta->GetBuffer()->backing_store();

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoAdd<ctype>(isolate, buffer, index, value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    case kExternalFloat32Array:
    case kExternalFloat64Array:
    default:
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}


RUNTIME_FUNCTION(Runtime_AtomicsSub) {
  HandleScope scope(isolate);
  DCHECK(args.length() == 3);
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
  DCHECK(sta->GetBuffer()->is_shared());
  DCHECK(index < NumberToSize(isolate, sta->length()));

  void* buffer = sta->GetBuffer()->backing_store();

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoSub<ctype>(isolate, buffer, index, value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    case kExternalFloat32Array:
    case kExternalFloat64Array:
    default:
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}


RUNTIME_FUNCTION(Runtime_AtomicsAnd) {
  HandleScope scope(isolate);
  DCHECK(args.length() == 3);
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
  DCHECK(sta->GetBuffer()->is_shared());
  DCHECK(index < NumberToSize(isolate, sta->length()));

  void* buffer = sta->GetBuffer()->backing_store();

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoAnd<ctype>(isolate, buffer, index, value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    case kExternalFloat32Array:
    case kExternalFloat64Array:
    default:
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}


RUNTIME_FUNCTION(Runtime_AtomicsOr) {
  HandleScope scope(isolate);
  DCHECK(args.length() == 3);
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
  DCHECK(sta->GetBuffer()->is_shared());
  DCHECK(index < NumberToSize(isolate, sta->length()));

  void* buffer = sta->GetBuffer()->backing_store();

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoOr<ctype>(isolate, buffer, index, value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    case kExternalFloat32Array:
    case kExternalFloat64Array:
    default:
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}


RUNTIME_FUNCTION(Runtime_AtomicsXor) {
  HandleScope scope(isolate);
  DCHECK(args.length() == 3);
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
  DCHECK(sta->GetBuffer()->is_shared());
  DCHECK(index < NumberToSize(isolate, sta->length()));

  void* buffer = sta->GetBuffer()->backing_store();

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoXor<ctype>(isolate, buffer, index, value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    case kExternalFloat32Array:
    case kExternalFloat64Array:
    default:
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}


RUNTIME_FUNCTION(Runtime_AtomicsExchange) {
  HandleScope scope(isolate);
  DCHECK(args.length() == 3);
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
  DCHECK(sta->GetBuffer()->is_shared());
  DCHECK(index < NumberToSize(isolate, sta->length()));

  void* buffer = sta->GetBuffer()->backing_store();

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoExchange<ctype>(isolate, buffer, index, value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    case kExternalFloat32Array:
    case kExternalFloat64Array:
    default:
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}


RUNTIME_FUNCTION(Runtime_AtomicsIsLockFree) {
  HandleScope scope(isolate);
  DCHECK(args.length() == 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(size, 0);
  uint32_t usize = NumberToUint32(*size);

  return IsLockFree(usize) ? isolate->heap()->true_value()
                           : isolate->heap()->false_value();
}

#undef REQUIRE_LOCK_64_BIT
}
}  // namespace v8::internal