Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(9)

Side by Side Diff: src/runtime/runtime-atomics.cc

Issue 1162503002: Implement Atomics API (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: add more symbols to anonymous namespace Created 5 years, 6 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « src/runtime/runtime.h ('k') | src/runtime/runtime-typedarray.cc » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
(Empty)
1 // Copyright 2015 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "src/v8.h"
6
7 #include "src/arguments.h"
8 #include "src/base/macros.h"
9 #include "src/base/platform/mutex.h"
10 #include "src/conversions.h"
11 #include "src/runtime/runtime-utils.h"
12
13 // Implement Atomic accesses to SharedArrayBuffers as defined in the
14 // SharedArrayBuffer draft spec, found here
// https://docs.google.com/document/d/1NDGA_gZJ7M7w1Bh8S0AoDyEqwDdRh4uSoTPSNn77PFk
16
17 namespace v8 {
18 namespace internal {
19
20 namespace {
21
22 #if V8_CC_GNU
23
// Seq-cst compare-and-swap built on the GCC/Clang __atomic intrinsics.
// Returns the value *p held before the call; *p is overwritten with
// |newval| only when that value equals |oldval|.
template <typename T>
inline T CompareExchangeSeqCst(T* p, T oldval, T newval) {
  T expected = oldval;
  // Strong (no spurious failure) exchange; the boolean result is ignored
  // because |expected| already holds the observed value either way.
  __atomic_compare_exchange_n(p, &expected, newval, false, __ATOMIC_SEQ_CST,
                              __ATOMIC_SEQ_CST);
  return expected;
}
30
// Seq-cst load of *p.
template <typename T>
inline T LoadSeqCst(T* p) {
  return __atomic_load_n(p, __ATOMIC_SEQ_CST);
}
37
// Seq-cst store of |value| into *p.
template <typename T>
inline void StoreSeqCst(T* p, T value) {
  __atomic_store(p, &value, __ATOMIC_SEQ_CST);
}
42
// Seq-cst fetch-and-add; returns the value *addr held before the addition.
template <typename T>
inline T AddSeqCst(T* addr, T operand) {
  T const previous = __atomic_fetch_add(addr, operand, __ATOMIC_SEQ_CST);
  return previous;
}
47
// Seq-cst fetch-and-subtract; returns the value *addr held before the
// subtraction (unsigned types wrap modulo 2^N, as __atomic_fetch_sub does).
template <typename T>
inline T SubSeqCst(T* addr, T operand) {
  T const previous = __atomic_fetch_sub(addr, operand, __ATOMIC_SEQ_CST);
  return previous;
}
52
// Seq-cst fetch-and-bitwise-AND; returns the pre-operation value.
template <typename T>
inline T AndSeqCst(T* addr, T operand) {
  T const previous = __atomic_fetch_and(addr, operand, __ATOMIC_SEQ_CST);
  return previous;
}
57
// Seq-cst fetch-and-bitwise-OR; returns the pre-operation value.
template <typename T>
inline T OrSeqCst(T* addr, T operand) {
  T const previous = __atomic_fetch_or(addr, operand, __ATOMIC_SEQ_CST);
  return previous;
}
62
// Seq-cst fetch-and-bitwise-XOR; returns the pre-operation value.
template <typename T>
inline T XorSeqCst(T* addr, T operand) {
  T const previous = __atomic_fetch_xor(addr, operand, __ATOMIC_SEQ_CST);
  return previous;
}
67
// Seq-cst swap: stores |operand| into *addr and returns the previous value.
template <typename T>
inline T ExchangeSeqCst(T* addr, T operand) {
  T const previous = __atomic_exchange_n(addr, operand, __ATOMIC_SEQ_CST);
  return previous;
}
72
#if ATOMICS_REQUIRE_LOCK_64_BIT

// We only need to implement the following functions, because the rest of the
// atomic operations only work on integer types, and the only 64-bit type is
// float64. Similarly, because the values are being bit_cast from double ->
// uint64_t, we don't need to implement these functions for int64_t either.

// All emulated 64-bit accesses funnel through this one process-wide mutex,
// which is what makes them appear sequentially consistent to each other.
// NOTE(review): lock-free 32-bit accesses to the same memory (e.g. via an
// aliasing Int32Array view) are not serialized against these -- confirm
// that is acceptable per the draft spec.
static base::LazyMutex atomic_mutex = LAZY_MUTEX_INITIALIZER;

// Emulated 64-bit compare-and-swap: returns the value observed at *p; the
// store of |newval| happens only if that value equals |oldval|.
inline uint64_t CompareExchangeSeqCst(uint64_t* p, uint64_t oldval,
                                      uint64_t newval) {
  base::LockGuard<base::Mutex> lock_guard(atomic_mutex.Pointer());
  uint64_t result = *p;
  if (result == oldval) *p = newval;
  return result;
}


// Emulated 64-bit seq-cst load.
inline uint64_t LoadSeqCst(uint64_t* p) {
  base::LockGuard<base::Mutex> lock_guard(atomic_mutex.Pointer());
  return *p;
}


// Emulated 64-bit seq-cst store.
inline void StoreSeqCst(uint64_t* p, uint64_t value) {
  base::LockGuard<base::Mutex> lock_guard(atomic_mutex.Pointer());
  *p = value;
}

#endif  // ATOMICS_REQUIRE_LOCK_64_BIT
103
#elif V8_CC_MSVC

// MSVC has no __atomic builtins; each operation is mapped onto the matching
// _Interlocked* intrinsic. The 32-bit intrinsics carry no "32" suffix, so
// alias them here to keep the token-pasting in ATOMIC_OPS uniform.
#define _InterlockedCompareExchange32 _InterlockedCompareExchange
#define _InterlockedExchange32 _InterlockedExchange
#define _InterlockedExchangeAdd32 _InterlockedExchangeAdd
#define _InterlockedAnd32 _InterlockedAnd
#define _InterlockedOr32 _InterlockedOr
#define _InterlockedXor32 _InterlockedXor

// V(C type, intrinsic suffix, intrinsic operand type). Signed/unsigned pairs
// share one intrinsic; values are bit_cast between the representations.
#define INTEGER_TYPES(V)                           \
  V(int8_t, 8, char)                               \
  V(uint8_t, 8, char)                              \
  V(int16_t, 16, short)  /* NOLINT(runtime/int) */ \
  V(uint16_t, 16, short) /* NOLINT(runtime/int) */ \
  V(int32_t, 32, long)   /* NOLINT(runtime/int) */ \
  V(uint32_t, 32, long)  /* NOLINT(runtime/int) */ \
  V(int64_t, 64, LONGLONG)                         \
  V(uint64_t, 64, LONGLONG)

// Instantiates the whole SeqCst op family for one integer type. Notes:
//  - _InterlockedCompareExchange takes (destination, exchange, comparand),
//    hence |newval| is passed before |oldval| below.
//  - LoadSeqCst is a plain volatile read. NOTE(review): this relies on MSVC
//    volatile semantics plus the target's memory ordering; confirm it is
//    sufficient for seq-cst on non-x86 targets.
//  - SubSeqCst adds the negated operand. NOTE(review): negating the minimum
//    signed value assumes two's-complement wraparound -- confirm.
#define ATOMIC_OPS(type, suffix, vctype)                                    \
  inline type CompareExchangeSeqCst(volatile type* p, type oldval,          \
                                    type newval) {                          \
    return _InterlockedCompareExchange##suffix(                             \
        reinterpret_cast<volatile vctype*>(p), bit_cast<vctype>(newval),    \
        bit_cast<vctype>(oldval));                                          \
  }                                                                         \
  inline type LoadSeqCst(volatile type* p) { return *p; }                   \
  inline void StoreSeqCst(volatile type* p, type value) {                   \
    _InterlockedExchange##suffix(reinterpret_cast<volatile vctype*>(p),     \
                                 bit_cast<vctype>(value));                  \
  }                                                                         \
  inline type AddSeqCst(volatile type* p, type value) {                     \
    return _InterlockedExchangeAdd##suffix(                                 \
        reinterpret_cast<volatile vctype*>(p), bit_cast<vctype>(value));    \
  }                                                                         \
  inline type SubSeqCst(volatile type* p, type value) {                     \
    return _InterlockedExchangeAdd##suffix(                                 \
        reinterpret_cast<volatile vctype*>(p), -bit_cast<vctype>(value));   \
  }                                                                         \
  inline type AndSeqCst(volatile type* p, type value) {                     \
    return _InterlockedAnd##suffix(reinterpret_cast<volatile vctype*>(p),   \
                                   bit_cast<vctype>(value));                \
  }                                                                         \
  inline type OrSeqCst(volatile type* p, type value) {                      \
    return _InterlockedOr##suffix(reinterpret_cast<volatile vctype*>(p),    \
                                  bit_cast<vctype>(value));                 \
  }                                                                         \
  inline type XorSeqCst(volatile type* p, type value) {                     \
    return _InterlockedXor##suffix(reinterpret_cast<volatile vctype*>(p),   \
                                   bit_cast<vctype>(value));                \
  }                                                                         \
  inline type ExchangeSeqCst(volatile type* p, type value) {                \
    return _InterlockedExchange##suffix(reinterpret_cast<volatile vctype*>(p), \
                                        bit_cast<vctype>(value));           \
  }
INTEGER_TYPES(ATOMIC_OPS)
#undef ATOMIC_OPS

// Undo the helper aliases so they cannot leak into other translation units.
#undef INTEGER_TYPES
#undef _InterlockedCompareExchange32
#undef _InterlockedExchange32
#undef _InterlockedExchangeAdd32
#undef _InterlockedAnd32
#undef _InterlockedOr32
#undef _InterlockedXor32

#else

#error Unsupported platform!

#endif
175
// FromObject<T> extracts a raw C value from a JS number Handle, using the
// NumberTo* helpers from src/conversions.h for the integral cases. Only
// declared for the general case; the specializations below are the only
// instantiations used.
template <typename T>
T FromObject(Handle<Object> number);

template <>
inline uint32_t FromObject<uint32_t>(Handle<Object> number) {
  return NumberToUint32(*number);
}

template <>
inline int32_t FromObject<int32_t>(Handle<Object> number) {
  return NumberToInt32(*number);
}

// Float element kinds take the number's double value directly (narrowed to
// float for Float32 arrays).
template <>
inline float FromObject<float>(Handle<Object> number) {
  return static_cast<float>(number->Number());
}

template <>
inline double FromObject<double>(Handle<Object> number) {
  return number->Number();
}
198
// ToAtomic converts a value of convert_type into the integer type the
// memory operation actually runs on. The default is a plain static_cast
// (covers all integral element kinds); float/double are reinterpreted
// bit-for-bit so their payload passes through the integer atomic op intact.
template <typename T, typename F>
inline T ToAtomic(F from) {
  return static_cast<T>(from);
}

template <>
inline uint32_t ToAtomic<uint32_t, float>(float from) {
  return bit_cast<uint32_t, float>(from);
}

template <>
inline uint64_t ToAtomic<uint64_t, double>(double from) {
  return bit_cast<uint64_t, double>(from);
}
213
// FromAtomic is the inverse of ToAtomic: it recovers the element value from
// the raw integer an atomic operation produced. Integral kinds static_cast;
// float/double are reconstructed from their bit pattern.
template <typename T, typename F>
inline T FromAtomic(F from) {
  return static_cast<T>(from);
}

template <>
inline float FromAtomic<float, uint32_t>(uint32_t from) {
  return bit_cast<float, uint32_t>(from);
}

template <>
inline double FromAtomic<double, uint64_t>(uint64_t from) {
  return bit_cast<double, uint64_t>(from);
}
228
// ToObject<T> boxes a raw element value as a JS number. Only declared for
// the general case; the specializations below are the only instantiations.
template <typename T>
inline Object* ToObject(Isolate* isolate, T t);

// 8- and 16-bit values always fit in a Smi, so no allocation is needed.
template <>
inline Object* ToObject<int8_t>(Isolate* isolate, int8_t t) {
  return Smi::FromInt(t);
}

template <>
inline Object* ToObject<uint8_t>(Isolate* isolate, uint8_t t) {
  return Smi::FromInt(t);
}

template <>
inline Object* ToObject<int16_t>(Isolate* isolate, int16_t t) {
  return Smi::FromInt(t);
}

template <>
inline Object* ToObject<uint16_t>(Isolate* isolate, uint16_t t) {
  return Smi::FromInt(t);
}

// 32-bit and floating-point values may not fit in a Smi; NewNumber allocates
// a heap number when necessary (presumably why callers keep Handles live).
template <>
inline Object* ToObject<int32_t>(Isolate* isolate, int32_t t) {
  return *isolate->factory()->NewNumber(t);
}

template <>
inline Object* ToObject<uint32_t>(Isolate* isolate, uint32_t t) {
  return *isolate->factory()->NewNumber(t);
}

template <>
inline Object* ToObject<float>(Isolate* isolate, float t) {
  return *isolate->factory()->NewNumber(t);
}

template <>
inline Object* ToObject<double>(Isolate* isolate, double t) {
  return *isolate->factory()->NewNumber(t);
}
271
// FromObjectTraits<T> maps an element type T to:
//  - convert_type: the C type the JS number is converted through
//    (int32/uint32 for integral elements, the float type itself otherwise);
//  - atomic_type: the same-width integer type the memory operation runs on
//    (floats are bit_cast to uint32_t/uint64_t via To/FromAtomic).
template <typename T>
struct FromObjectTraits {};

template <>
struct FromObjectTraits<int8_t> {
  typedef int32_t convert_type;
  typedef int8_t atomic_type;
};

template <>
struct FromObjectTraits<uint8_t> {
  typedef uint32_t convert_type;
  typedef uint8_t atomic_type;
};

template <>
struct FromObjectTraits<int16_t> {
  typedef int32_t convert_type;
  typedef int16_t atomic_type;
};

template <>
struct FromObjectTraits<uint16_t> {
  typedef uint32_t convert_type;
  typedef uint16_t atomic_type;
};

template <>
struct FromObjectTraits<int32_t> {
  typedef int32_t convert_type;
  typedef int32_t atomic_type;
};

template <>
struct FromObjectTraits<uint32_t> {
  typedef uint32_t convert_type;
  typedef uint32_t atomic_type;
};

template <>
struct FromObjectTraits<float> {
  typedef float convert_type;
  typedef uint32_t atomic_type;
};

template <>
struct FromObjectTraits<double> {
  typedef double convert_type;
  typedef uint64_t atomic_type;
};
322
323
324 template <typename T>
325 inline Object* DoCompareExchange(Isolate* isolate, void* buffer, size_t index,
326 Handle<Object> oldobj, Handle<Object> newobj) {
327 typedef typename FromObjectTraits<T>::atomic_type atomic_type;
328 typedef typename FromObjectTraits<T>::convert_type convert_type;
329 atomic_type oldval = ToAtomic<atomic_type>(FromObject<convert_type>(oldobj));
330 atomic_type newval = ToAtomic<atomic_type>(FromObject<convert_type>(newobj));
331 atomic_type result = CompareExchangeSeqCst(
332 static_cast<atomic_type*>(buffer) + index, oldval, newval);
333 return ToObject<T>(isolate, FromAtomic<T>(result));
334 }
335
336
337 template <typename T>
338 inline Object* DoLoad(Isolate* isolate, void* buffer, size_t index) {
339 typedef typename FromObjectTraits<T>::atomic_type atomic_type;
340 atomic_type result = LoadSeqCst(static_cast<atomic_type*>(buffer) + index);
341 return ToObject<T>(isolate, FromAtomic<T>(result));
342 }
343
344
345 template <typename T>
346 inline Object* DoStore(Isolate* isolate, void* buffer, size_t index,
347 Handle<Object> obj) {
348 typedef typename FromObjectTraits<T>::atomic_type atomic_type;
349 typedef typename FromObjectTraits<T>::convert_type convert_type;
350 atomic_type value = ToAtomic<atomic_type>(FromObject<convert_type>(obj));
351 StoreSeqCst(static_cast<atomic_type*>(buffer) + index, value);
352 return *obj;
353 }
354
355
356 template <typename T>
357 inline Object* DoAdd(Isolate* isolate, void* buffer, size_t index,
358 Handle<Object> obj) {
359 typedef typename FromObjectTraits<T>::atomic_type atomic_type;
360 typedef typename FromObjectTraits<T>::convert_type convert_type;
361 atomic_type value = ToAtomic<atomic_type>(FromObject<convert_type>(obj));
362 atomic_type result =
363 AddSeqCst(static_cast<atomic_type*>(buffer) + index, value);
364 return ToObject<T>(isolate, FromAtomic<T>(result));
365 }
366
367
368 template <typename T>
369 inline Object* DoSub(Isolate* isolate, void* buffer, size_t index,
370 Handle<Object> obj) {
371 typedef typename FromObjectTraits<T>::atomic_type atomic_type;
372 typedef typename FromObjectTraits<T>::convert_type convert_type;
373 atomic_type value = ToAtomic<atomic_type>(FromObject<convert_type>(obj));
374 atomic_type result =
375 SubSeqCst(static_cast<atomic_type*>(buffer) + index, value);
376 return ToObject<T>(isolate, FromAtomic<T>(result));
377 }
378
379
380 template <typename T>
381 inline Object* DoAnd(Isolate* isolate, void* buffer, size_t index,
382 Handle<Object> obj) {
383 typedef typename FromObjectTraits<T>::atomic_type atomic_type;
384 typedef typename FromObjectTraits<T>::convert_type convert_type;
385 atomic_type value = ToAtomic<atomic_type>(FromObject<convert_type>(obj));
386 atomic_type result =
387 AndSeqCst(static_cast<atomic_type*>(buffer) + index, value);
388 return ToObject<T>(isolate, FromAtomic<T>(result));
389 }
390
391
392 template <typename T>
393 inline Object* DoOr(Isolate* isolate, void* buffer, size_t index,
394 Handle<Object> obj) {
395 typedef typename FromObjectTraits<T>::atomic_type atomic_type;
396 typedef typename FromObjectTraits<T>::convert_type convert_type;
397 atomic_type value = ToAtomic<atomic_type>(FromObject<convert_type>(obj));
398 atomic_type result =
399 OrSeqCst(static_cast<atomic_type*>(buffer) + index, value);
400 return ToObject<T>(isolate, FromAtomic<T>(result));
401 }
402
403
404 template <typename T>
405 inline Object* DoXor(Isolate* isolate, void* buffer, size_t index,
406 Handle<Object> obj) {
407 typedef typename FromObjectTraits<T>::atomic_type atomic_type;
408 typedef typename FromObjectTraits<T>::convert_type convert_type;
409 atomic_type value = ToAtomic<atomic_type>(FromObject<convert_type>(obj));
410 atomic_type result =
411 XorSeqCst(static_cast<atomic_type*>(buffer) + index, value);
412 return ToObject<T>(isolate, FromAtomic<T>(result));
413 }
414
415
416 template <typename T>
417 inline Object* DoExchange(Isolate* isolate, void* buffer, size_t index,
418 Handle<Object> obj) {
419 typedef typename FromObjectTraits<T>::atomic_type atomic_type;
420 typedef typename FromObjectTraits<T>::convert_type convert_type;
421 atomic_type value = ToAtomic<atomic_type>(FromObject<convert_type>(obj));
422 atomic_type result =
423 ExchangeSeqCst(static_cast<atomic_type*>(buffer) + index, value);
424 return ToObject<T>(isolate, FromAtomic<T>(result));
425 }
426
427 } // anonymous namespace
428
// Duplicated from objects.h
// V has parameters (Type, type, TYPE, C type, element_size)
// Integer element kinds only: Add/Sub/And/Or/Xor/Exchange are not defined
// for Float32/Float64 arrays.
// NOTE(review): Uint8Clamped is listed with plain uint8_t semantics, so e.g.
// Atomics.add on a Uint8ClampedArray wraps modulo 256 instead of clamping to
// [0, 255] -- confirm against the SharedArrayBuffer draft spec.
#define INTEGER_TYPED_ARRAYS(V)          \
  V(Uint8, uint8, UINT8, uint8_t, 1)     \
  V(Int8, int8, INT8, int8_t, 1)         \
  V(Uint16, uint16, UINT16, uint16_t, 2) \
  V(Int16, int16, INT16, int16_t, 2)     \
  V(Uint32, uint32, UINT32, uint32_t, 4) \
  V(Int32, int32, INT32, int32_t, 4)     \
  V(Uint8Clamped, uint8_clamped, UINT8_CLAMPED, uint8_t, 1)
439
440
// Atomics.compareExchange(sta, index, oldval, newval): atomically replaces
// the element at |index| with |newval| when it equals |oldval|, returning
// the previous element value. Defined for every typed-array element kind.
RUNTIME_FUNCTION(Runtime_AtomicsCompareExchange) {
  HandleScope scope(isolate);
  DCHECK(args.length() == 4);
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(oldobj, 2);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(newobj, 3);
  // Shared-buffer and bounds validation is assumed done by the caller;
  // these checks only fire in debug builds.
  DCHECK(sta->GetBuffer()->is_shared());
  DCHECK(index < NumberToSize(isolate, sta->length()));

  void* buffer = sta->GetBuffer()->backing_store();

  // Dispatch on the element kind; each case tail-calls the typed helper.
  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoCompareExchange<ctype>(isolate, buffer, index, oldobj, newobj);

    TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    default:
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}
468
469
// Atomics.load(sta, index): atomically reads and returns the element at
// |index|. Defined for every typed-array element kind.
RUNTIME_FUNCTION(Runtime_AtomicsLoad) {
  HandleScope scope(isolate);
  DCHECK(args.length() == 2);
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  // Shared-buffer and bounds validation is assumed done by the caller.
  DCHECK(sta->GetBuffer()->is_shared());
  DCHECK(index < NumberToSize(isolate, sta->length()));

  void* buffer = sta->GetBuffer()->backing_store();

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoLoad<ctype>(isolate, buffer, index);

    TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    default:
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}
495
496
// Atomics.store(sta, index, value): atomically writes |value| to the
// element at |index| and returns |value|. Defined for every element kind.
RUNTIME_FUNCTION(Runtime_AtomicsStore) {
  HandleScope scope(isolate);
  DCHECK(args.length() == 3);
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
  // Shared-buffer and bounds validation is assumed done by the caller.
  DCHECK(sta->GetBuffer()->is_shared());
  DCHECK(index < NumberToSize(isolate, sta->length()));

  void* buffer = sta->GetBuffer()->backing_store();

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoStore<ctype>(isolate, buffer, index, value);

    TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    default:
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}
523
524
// Atomics.add(sta, index, value): atomically adds |value| to the element at
// |index| and returns the previous element value. Integer arrays only.
RUNTIME_FUNCTION(Runtime_AtomicsAdd) {
  HandleScope scope(isolate);
  DCHECK(args.length() == 3);
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
  // Shared-buffer and bounds validation is assumed done by the caller.
  DCHECK(sta->GetBuffer()->is_shared());
  DCHECK(index < NumberToSize(isolate, sta->length()));

  void* buffer = sta->GetBuffer()->backing_store();

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoAdd<ctype>(isolate, buffer, index, value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    // Float arrays have no integer add; fall through to the unreachable
    // path below.
    case kExternalFloat32Array:
    case kExternalFloat64Array:
    default:
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}
553
554
// Atomics.sub(sta, index, value): atomically subtracts |value| from the
// element at |index| and returns the previous element value. Integer only.
RUNTIME_FUNCTION(Runtime_AtomicsSub) {
  HandleScope scope(isolate);
  DCHECK(args.length() == 3);
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
  // Shared-buffer and bounds validation is assumed done by the caller.
  DCHECK(sta->GetBuffer()->is_shared());
  DCHECK(index < NumberToSize(isolate, sta->length()));

  void* buffer = sta->GetBuffer()->backing_store();

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoSub<ctype>(isolate, buffer, index, value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    case kExternalFloat32Array:
    case kExternalFloat64Array:
    default:
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}
583
584
// Atomics.and(sta, index, value): atomically ANDs |value| into the element
// at |index| and returns the previous element value. Integer arrays only.
RUNTIME_FUNCTION(Runtime_AtomicsAnd) {
  HandleScope scope(isolate);
  DCHECK(args.length() == 3);
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
  // Shared-buffer and bounds validation is assumed done by the caller.
  DCHECK(sta->GetBuffer()->is_shared());
  DCHECK(index < NumberToSize(isolate, sta->length()));

  void* buffer = sta->GetBuffer()->backing_store();

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoAnd<ctype>(isolate, buffer, index, value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    case kExternalFloat32Array:
    case kExternalFloat64Array:
    default:
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}
613
614
// Atomics.or(sta, index, value): atomically ORs |value| into the element at
// |index| and returns the previous element value. Integer arrays only.
RUNTIME_FUNCTION(Runtime_AtomicsOr) {
  HandleScope scope(isolate);
  DCHECK(args.length() == 3);
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
  // Shared-buffer and bounds validation is assumed done by the caller.
  DCHECK(sta->GetBuffer()->is_shared());
  DCHECK(index < NumberToSize(isolate, sta->length()));

  void* buffer = sta->GetBuffer()->backing_store();

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoOr<ctype>(isolate, buffer, index, value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    case kExternalFloat32Array:
    case kExternalFloat64Array:
    default:
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}
643
644
// Atomics.xor(sta, index, value): atomically XORs |value| into the element
// at |index| and returns the previous element value. Integer arrays only.
RUNTIME_FUNCTION(Runtime_AtomicsXor) {
  HandleScope scope(isolate);
  DCHECK(args.length() == 3);
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
  // Shared-buffer and bounds validation is assumed done by the caller.
  DCHECK(sta->GetBuffer()->is_shared());
  DCHECK(index < NumberToSize(isolate, sta->length()));

  void* buffer = sta->GetBuffer()->backing_store();

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoXor<ctype>(isolate, buffer, index, value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    case kExternalFloat32Array:
    case kExternalFloat64Array:
    default:
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}
673
674
// Atomics.exchange(sta, index, value): atomically swaps |value| into the
// element at |index| and returns the previous element value. Integer only.
RUNTIME_FUNCTION(Runtime_AtomicsExchange) {
  HandleScope scope(isolate);
  DCHECK(args.length() == 3);
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
  // Shared-buffer and bounds validation is assumed done by the caller.
  DCHECK(sta->GetBuffer()->is_shared());
  DCHECK(index < NumberToSize(isolate, sta->length()));

  void* buffer = sta->GetBuffer()->backing_store();

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoExchange<ctype>(isolate, buffer, index, value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    case kExternalFloat32Array:
    case kExternalFloat64Array:
    default:
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}
703
704
// Atomics.isLockFree(size): returns true when atomic operations on elements
// of |size| bytes are lock-free on this platform (delegates to
// Runtime::AtomicIsLockFree).
RUNTIME_FUNCTION(Runtime_AtomicsIsLockFree) {
  HandleScope scope(isolate);
  DCHECK(args.length() == 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(size, 0);
  uint32_t usize = NumberToUint32(*size);

  return Runtime::AtomicIsLockFree(usize) ? isolate->heap()->true_value()
                                          : isolate->heap()->false_value();
}
714 }
715 } // namespace v8::internal
OLDNEW
« no previous file with comments | « src/runtime/runtime.h ('k') | src/runtime/runtime-typedarray.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698