// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#include "src/arguments.h"
#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
#include "src/conversions.h"
#include "src/runtime/runtime-utils.h"

// Implement Atomic accesses to SharedArrayBuffers as defined in the
// SharedArrayBuffer draft spec, found here:
// https://docs.google.com/document/d/1NDGA_gZJ7M7w1Bh8S0AoDyEqwDdRh4uSoTPSNn77PFk

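// For orientation, a hedged sketch (not part of this file) of the JS-level
// calls that the runtime functions below implement, per the draft spec:
//
//   var sab = new SharedArrayBuffer(16);
//   var i32 = new Int32Array(sab);
//   Atomics.compareExchange(i32, 0, 0, 1);  // Runtime_AtomicsCompareExchange
//   Atomics.load(i32, 0);                   // Runtime_AtomicsLoad
//   Atomics.add(i32, 0, 10);                // Runtime_AtomicsAdd
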
namespace v8 {
namespace internal {

namespace {

#if V8_CC_GNU

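// GCC/Clang provide the __atomic_* builtins used below; every operation in
// this file requests sequential consistency (__ATOMIC_SEQ_CST), as the draft
// spec requires for all atomic accesses.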
template <typename T>
inline T CompareExchangeSeqCst(T* p, T oldval, T newval) {
  (void)__atomic_compare_exchange_n(p, &oldval, newval, 0, __ATOMIC_SEQ_CST,
                                    __ATOMIC_SEQ_CST);
  return oldval;
}

template <typename T>
inline T LoadSeqCst(T* p) {
  T result;
  __atomic_load(p, &result, __ATOMIC_SEQ_CST);
  return result;
}

template <typename T>
inline void StoreSeqCst(T* p, T value) {
  __atomic_store_n(p, value, __ATOMIC_SEQ_CST);
}

template <typename T>
inline T AddSeqCst(T* p, T value) {
  return __atomic_fetch_add(p, value, __ATOMIC_SEQ_CST);
}

template <typename T>
inline T SubSeqCst(T* p, T value) {
  return __atomic_fetch_sub(p, value, __ATOMIC_SEQ_CST);
}

template <typename T>
inline T AndSeqCst(T* p, T value) {
  return __atomic_fetch_and(p, value, __ATOMIC_SEQ_CST);
}

template <typename T>
inline T OrSeqCst(T* p, T value) {
  return __atomic_fetch_or(p, value, __ATOMIC_SEQ_CST);
}

template <typename T>
inline T XorSeqCst(T* p, T value) {
  return __atomic_fetch_xor(p, value, __ATOMIC_SEQ_CST);
}

template <typename T>
inline T ExchangeSeqCst(T* p, T value) {
  return __atomic_exchange_n(p, value, __ATOMIC_SEQ_CST);
}

#if ATOMICS_REQUIRE_LOCK_64_BIT

// We only need to implement the following functions, because the rest of the
// atomic operations only work on integer types, and the only 64-bit type is
// float64. Similarly, because the values are bit_cast from double ->
// uint64_t, we don't need to implement these functions for int64_t either.
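//
// As an illustrative sketch (assumed values, not part of the runtime):
// storing 1.0 into a shared Float64Array on such a platform becomes
//
//   uint64_t bits = bit_cast<uint64_t>(1.0);  // 0x3FF0000000000000
//   StoreSeqCst(p, bits);                     // serialized on atomic_mutex
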
static base::LazyMutex atomic_mutex = LAZY_MUTEX_INITIALIZER;

inline uint64_t CompareExchangeSeqCst(uint64_t* p, uint64_t oldval,
                                      uint64_t newval) {
  base::LockGuard<base::Mutex> lock_guard(atomic_mutex.Pointer());
  uint64_t result = *p;
  if (result == oldval) *p = newval;
  return result;
}


inline uint64_t LoadSeqCst(uint64_t* p) {
  base::LockGuard<base::Mutex> lock_guard(atomic_mutex.Pointer());
  return *p;
}


inline void StoreSeqCst(uint64_t* p, uint64_t value) {
  base::LockGuard<base::Mutex> lock_guard(atomic_mutex.Pointer());
  *p = value;
}

#endif  // ATOMICS_REQUIRE_LOCK_64_BIT

#elif V8_CC_MSVC

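// MSVC's 32-bit interlocked intrinsics carry no size suffix, unlike the 8-,
// 16- and 64-bit variants. Aliasing them with an explicit "32" lets the
// token-pasting ATOMIC_OPS macro below treat all widths uniformly.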
#define _InterlockedCompareExchange32 _InterlockedCompareExchange
#define _InterlockedExchange32 _InterlockedExchange
#define _InterlockedExchangeAdd32 _InterlockedExchangeAdd
#define _InterlockedAnd32 _InterlockedAnd
#define _InterlockedOr32 _InterlockedOr
#define _InterlockedXor32 _InterlockedXor

#define INTEGER_TYPES(V)                           \
  V(int8_t, 8, char)                               \
  V(uint8_t, 8, char)                              \
  V(int16_t, 16, short) /* NOLINT(runtime/int) */  \
  V(uint16_t, 16, short) /* NOLINT(runtime/int) */ \
  V(int32_t, 32, long) /* NOLINT(runtime/int) */   \
  V(uint32_t, 32, long) /* NOLINT(runtime/int) */  \
  V(int64_t, 64, LONGLONG)                         \
  V(uint64_t, 64, LONGLONG)

#define ATOMIC_OPS(type, suffix, vctype)                                       \
  inline type CompareExchangeSeqCst(volatile type* p, type oldval,             \
                                    type newval) {                             \
    return _InterlockedCompareExchange##suffix(                                \
        reinterpret_cast<volatile vctype*>(p), bit_cast<vctype>(newval),       \
        bit_cast<vctype>(oldval));                                             \
  }                                                                            \
  inline type LoadSeqCst(volatile type* p) { return *p; }                      \
  inline void StoreSeqCst(volatile type* p, type value) {                      \
    _InterlockedExchange##suffix(reinterpret_cast<volatile vctype*>(p),        \
                                 bit_cast<vctype>(value));                     \
  }                                                                            \
  inline type AddSeqCst(volatile type* p, type value) {                        \
    return _InterlockedExchangeAdd##suffix(                                    \
        reinterpret_cast<volatile vctype*>(p), bit_cast<vctype>(value));       \
  }                                                                            \
  inline type SubSeqCst(volatile type* p, type value) {                        \
    return _InterlockedExchangeAdd##suffix(                                    \
        reinterpret_cast<volatile vctype*>(p), -bit_cast<vctype>(value));      \
  }                                                                            \
  inline type AndSeqCst(volatile type* p, type value) {                        \
    return _InterlockedAnd##suffix(reinterpret_cast<volatile vctype*>(p),      \
                                   bit_cast<vctype>(value));                   \
  }                                                                            \
  inline type OrSeqCst(volatile type* p, type value) {                         \
    return _InterlockedOr##suffix(reinterpret_cast<volatile vctype*>(p),       \
                                  bit_cast<vctype>(value));                    \
  }                                                                            \
  inline type XorSeqCst(volatile type* p, type value) {                        \
    return _InterlockedXor##suffix(reinterpret_cast<volatile vctype*>(p),      \
                                   bit_cast<vctype>(value));                   \
  }                                                                            \
  inline type ExchangeSeqCst(volatile type* p, type value) {                   \
    return _InterlockedExchange##suffix(reinterpret_cast<volatile vctype*>(p), \
                                        bit_cast<vctype>(value));              \
  }
INTEGER_TYPES(ATOMIC_OPS)
#undef ATOMIC_OPS

#undef INTEGER_TYPES
#undef _InterlockedCompareExchange32
#undef _InterlockedExchange32
#undef _InterlockedExchangeAdd32
#undef _InterlockedAnd32
#undef _InterlockedOr32
#undef _InterlockedXor32

#else

#error Unsupported platform!

#endif

}  // anonymous namespace

Jarin
2015/06/03 13:30:15
Could not even the stuff below go into the anonymous namespace?

binji
2015/06/03 20:16:32
Done.

template <typename T>
T FromObject(Handle<Object> number);

template <>
inline uint32_t FromObject<uint32_t>(Handle<Object> number) {
  return NumberToUint32(*number);
}

template <>
inline int32_t FromObject<int32_t>(Handle<Object> number) {
  return NumberToInt32(*number);
}

template <>
inline float FromObject<float>(Handle<Object> number) {
  return static_cast<float>(number->Number());
}

template <>
inline double FromObject<double>(Handle<Object> number) {
  return number->Number();
}

template <typename T, typename F>
inline T ToAtomic(F from) {
  return static_cast<T>(from);
}

template <>
inline uint32_t ToAtomic<uint32_t, float>(float from) {
  return bit_cast<uint32_t, float>(from);
}

template <>
inline uint64_t ToAtomic<uint64_t, double>(double from) {
  return bit_cast<uint64_t, double>(from);
}

template <typename T, typename F>
inline T FromAtomic(F from) {
  return static_cast<T>(from);
}

template <>
inline float FromAtomic<float, uint32_t>(uint32_t from) {
  return bit_cast<float, uint32_t>(from);
}

template <>
inline double FromAtomic<double, uint64_t>(uint64_t from) {
  return bit_cast<double, uint64_t>(from);
}

template <typename T>
inline Object* ToObject(Isolate* isolate, T t);

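// int8_t through uint16_t values always fit in a Smi, so they can be
// returned without allocating. 32-bit and floating-point values may not fit,
// so they go through the factory, which allocates a HeapNumber when needed.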
template <>
inline Object* ToObject<int8_t>(Isolate* isolate, int8_t t) {
  return Smi::FromInt(t);
}

template <>
inline Object* ToObject<uint8_t>(Isolate* isolate, uint8_t t) {
  return Smi::FromInt(t);
}

template <>
inline Object* ToObject<int16_t>(Isolate* isolate, int16_t t) {
  return Smi::FromInt(t);
}

template <>
inline Object* ToObject<uint16_t>(Isolate* isolate, uint16_t t) {
  return Smi::FromInt(t);
}

template <>
inline Object* ToObject<int32_t>(Isolate* isolate, int32_t t) {
  return *isolate->factory()->NewNumber(t);
}

template <>
inline Object* ToObject<uint32_t>(Isolate* isolate, uint32_t t) {
  return *isolate->factory()->NewNumber(t);
}

template <>
inline Object* ToObject<float>(Isolate* isolate, float t) {
  return *isolate->factory()->NewNumber(t);
}

template <>
inline Object* ToObject<double>(Isolate* isolate, double t) {
  return *isolate->factory()->NewNumber(t);
}

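// FromObjectTraits maps an element type to two helper types: convert_type,
// the type a JS Number is first converted to, and atomic_type, the integral
// type the atomic operation actually runs on. For float and double these
// differ: the value converts as a float/double but is bit_cast to
// uint32_t/uint64_t for the atomic access.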
template <typename T>
struct FromObjectTraits {};

template <>
struct FromObjectTraits<int8_t> {
  typedef int32_t convert_type;
  typedef int8_t atomic_type;
};

template <>
struct FromObjectTraits<uint8_t> {
  typedef uint32_t convert_type;
  typedef uint8_t atomic_type;
};

template <>
struct FromObjectTraits<int16_t> {
  typedef int32_t convert_type;
  typedef int16_t atomic_type;
};

template <>
struct FromObjectTraits<uint16_t> {
  typedef uint32_t convert_type;
  typedef uint16_t atomic_type;
};

template <>
struct FromObjectTraits<int32_t> {
  typedef int32_t convert_type;
  typedef int32_t atomic_type;
};

template <>
struct FromObjectTraits<uint32_t> {
  typedef uint32_t convert_type;
  typedef uint32_t atomic_type;
};

template <>
struct FromObjectTraits<float> {
  typedef float convert_type;
  typedef uint32_t atomic_type;
};

template <>
struct FromObjectTraits<double> {
  typedef double convert_type;
  typedef uint64_t atomic_type;
};


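// Each Do* helper below follows the same pattern: convert the incoming JS
// Number to convert_type, reinterpret it as atomic_type, run the seq_cst
// operation on the typed array's backing store, and box the result back into
// a JS Object (the previous value for most operations; DoStore returns the
// stored value itself).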
template <typename T>
inline Object* DoCompareExchange(Isolate* isolate, void* buffer, size_t index,
                                 Handle<Object> oldobj, Handle<Object> newobj) {
  typedef typename FromObjectTraits<T>::atomic_type atomic_type;
  typedef typename FromObjectTraits<T>::convert_type convert_type;
  atomic_type oldval = ToAtomic<atomic_type>(FromObject<convert_type>(oldobj));
  atomic_type newval = ToAtomic<atomic_type>(FromObject<convert_type>(newobj));
  atomic_type result = CompareExchangeSeqCst(
      static_cast<atomic_type*>(buffer) + index, oldval, newval);
  return ToObject<T>(isolate, FromAtomic<T>(result));
}


template <typename T>
inline Object* DoLoad(Isolate* isolate, void* buffer, size_t index) {
  typedef typename FromObjectTraits<T>::atomic_type atomic_type;
  atomic_type result = LoadSeqCst(static_cast<atomic_type*>(buffer) + index);
  return ToObject<T>(isolate, FromAtomic<T>(result));
}


template <typename T>
inline Object* DoStore(Isolate* isolate, void* buffer, size_t index,
                       Handle<Object> obj) {
  typedef typename FromObjectTraits<T>::atomic_type atomic_type;
  typedef typename FromObjectTraits<T>::convert_type convert_type;
  atomic_type value = ToAtomic<atomic_type>(FromObject<convert_type>(obj));
  StoreSeqCst(static_cast<atomic_type*>(buffer) + index, value);
  return *obj;
}


template <typename T>
inline Object* DoAdd(Isolate* isolate, void* buffer, size_t index,
                     Handle<Object> obj) {
  typedef typename FromObjectTraits<T>::atomic_type atomic_type;
  typedef typename FromObjectTraits<T>::convert_type convert_type;
  atomic_type value = ToAtomic<atomic_type>(FromObject<convert_type>(obj));
  atomic_type result =
      AddSeqCst(static_cast<atomic_type*>(buffer) + index, value);
  return ToObject<T>(isolate, FromAtomic<T>(result));
}


template <typename T>
inline Object* DoSub(Isolate* isolate, void* buffer, size_t index,
                     Handle<Object> obj) {
  typedef typename FromObjectTraits<T>::atomic_type atomic_type;
  typedef typename FromObjectTraits<T>::convert_type convert_type;
  atomic_type value = ToAtomic<atomic_type>(FromObject<convert_type>(obj));
  atomic_type result =
      SubSeqCst(static_cast<atomic_type*>(buffer) + index, value);
  return ToObject<T>(isolate, FromAtomic<T>(result));
}


template <typename T>
inline Object* DoAnd(Isolate* isolate, void* buffer, size_t index,
                     Handle<Object> obj) {
  typedef typename FromObjectTraits<T>::atomic_type atomic_type;
  typedef typename FromObjectTraits<T>::convert_type convert_type;
  atomic_type value = ToAtomic<atomic_type>(FromObject<convert_type>(obj));
  atomic_type result =
      AndSeqCst(static_cast<atomic_type*>(buffer) + index, value);
  return ToObject<T>(isolate, FromAtomic<T>(result));
}


template <typename T>
inline Object* DoOr(Isolate* isolate, void* buffer, size_t index,
                    Handle<Object> obj) {
  typedef typename FromObjectTraits<T>::atomic_type atomic_type;
  typedef typename FromObjectTraits<T>::convert_type convert_type;
  atomic_type value = ToAtomic<atomic_type>(FromObject<convert_type>(obj));
  atomic_type result =
      OrSeqCst(static_cast<atomic_type*>(buffer) + index, value);
  return ToObject<T>(isolate, FromAtomic<T>(result));
}


template <typename T>
inline Object* DoXor(Isolate* isolate, void* buffer, size_t index,
                     Handle<Object> obj) {
  typedef typename FromObjectTraits<T>::atomic_type atomic_type;
  typedef typename FromObjectTraits<T>::convert_type convert_type;
  atomic_type value = ToAtomic<atomic_type>(FromObject<convert_type>(obj));
  atomic_type result =
      XorSeqCst(static_cast<atomic_type*>(buffer) + index, value);
  return ToObject<T>(isolate, FromAtomic<T>(result));
}


template <typename T>
inline Object* DoExchange(Isolate* isolate, void* buffer, size_t index,
                          Handle<Object> obj) {
  typedef typename FromObjectTraits<T>::atomic_type atomic_type;
  typedef typename FromObjectTraits<T>::convert_type convert_type;
  atomic_type value = ToAtomic<atomic_type>(FromObject<convert_type>(obj));
  atomic_type result =
      ExchangeSeqCst(static_cast<atomic_type*>(buffer) + index, value);
  return ToObject<T>(isolate, FromAtomic<T>(result));
}


// Duplicated from objects.h
// V has parameters (Type, type, TYPE, C type, element_size)
#define INTEGER_TYPED_ARRAYS(V)          \
  V(Uint8, uint8, UINT8, uint8_t, 1)     \
  V(Int8, int8, INT8, int8_t, 1)         \
  V(Uint16, uint16, UINT16, uint16_t, 2) \
  V(Int16, int16, INT16, int16_t, 2)     \
  V(Uint32, uint32, UINT32, uint32_t, 4) \
  V(Int32, int32, INT32, int32_t, 4)     \
  V(Uint8Clamped, uint8_clamped, UINT8_CLAMPED, uint8_t, 1)

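// Each runtime entry below dispatches on the array's element type through an
// X-macro. As a hedged sketch of the expansion, the Uint8 row of
// TYPED_ARRAY_CASE in Runtime_AtomicsCompareExchange becomes:
//
//   case kExternalUint8Array:
//     return DoCompareExchange<uint8_t>(isolate, buffer, index, oldobj,
//                                       newobj);
//
// Add, Sub, And, Or, Xor and Exchange use INTEGER_TYPED_ARRAYS only, since
// the draft spec defines them solely for integer arrays.
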
RUNTIME_FUNCTION(Runtime_AtomicsCompareExchange) {
  HandleScope scope(isolate);
  DCHECK(args.length() == 4);
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(oldobj, 2);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(newobj, 3);
  DCHECK(sta->GetBuffer()->is_shared());
  DCHECK(index < NumberToSize(isolate, sta->length()));

  void* buffer = sta->GetBuffer()->backing_store();

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoCompareExchange<ctype>(isolate, buffer, index, oldobj, newobj);

    TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    default:
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}


RUNTIME_FUNCTION(Runtime_AtomicsLoad) {
  HandleScope scope(isolate);
  DCHECK(args.length() == 2);
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  DCHECK(sta->GetBuffer()->is_shared());
  DCHECK(index < NumberToSize(isolate, sta->length()));

  void* buffer = sta->GetBuffer()->backing_store();

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoLoad<ctype>(isolate, buffer, index);

    TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    default:
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}


RUNTIME_FUNCTION(Runtime_AtomicsStore) {
  HandleScope scope(isolate);
  DCHECK(args.length() == 3);
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
  DCHECK(sta->GetBuffer()->is_shared());
  DCHECK(index < NumberToSize(isolate, sta->length()));

  void* buffer = sta->GetBuffer()->backing_store();

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoStore<ctype>(isolate, buffer, index, value);

    TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    default:
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}


RUNTIME_FUNCTION(Runtime_AtomicsAdd) {
  HandleScope scope(isolate);
  DCHECK(args.length() == 3);
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
  DCHECK(sta->GetBuffer()->is_shared());
  DCHECK(index < NumberToSize(isolate, sta->length()));

  void* buffer = sta->GetBuffer()->backing_store();

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoAdd<ctype>(isolate, buffer, index, value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    case kExternalFloat32Array:
    case kExternalFloat64Array:
    default:
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}


RUNTIME_FUNCTION(Runtime_AtomicsSub) {
  HandleScope scope(isolate);
  DCHECK(args.length() == 3);
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
  DCHECK(sta->GetBuffer()->is_shared());
  DCHECK(index < NumberToSize(isolate, sta->length()));

  void* buffer = sta->GetBuffer()->backing_store();

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoSub<ctype>(isolate, buffer, index, value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    case kExternalFloat32Array:
    case kExternalFloat64Array:
    default:
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}


RUNTIME_FUNCTION(Runtime_AtomicsAnd) {
  HandleScope scope(isolate);
  DCHECK(args.length() == 3);
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
  DCHECK(sta->GetBuffer()->is_shared());
  DCHECK(index < NumberToSize(isolate, sta->length()));

  void* buffer = sta->GetBuffer()->backing_store();

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoAnd<ctype>(isolate, buffer, index, value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    case kExternalFloat32Array:
    case kExternalFloat64Array:
    default:
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}


RUNTIME_FUNCTION(Runtime_AtomicsOr) {
  HandleScope scope(isolate);
  DCHECK(args.length() == 3);
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
  DCHECK(sta->GetBuffer()->is_shared());
  DCHECK(index < NumberToSize(isolate, sta->length()));

  void* buffer = sta->GetBuffer()->backing_store();

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoOr<ctype>(isolate, buffer, index, value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    case kExternalFloat32Array:
    case kExternalFloat64Array:
    default:
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}


RUNTIME_FUNCTION(Runtime_AtomicsXor) {
  HandleScope scope(isolate);
  DCHECK(args.length() == 3);
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
  DCHECK(sta->GetBuffer()->is_shared());
  DCHECK(index < NumberToSize(isolate, sta->length()));

  void* buffer = sta->GetBuffer()->backing_store();

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoXor<ctype>(isolate, buffer, index, value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    case kExternalFloat32Array:
    case kExternalFloat64Array:
    default:
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}


RUNTIME_FUNCTION(Runtime_AtomicsExchange) {
  HandleScope scope(isolate);
  DCHECK(args.length() == 3);
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
  DCHECK(sta->GetBuffer()->is_shared());
  DCHECK(index < NumberToSize(isolate, sta->length()));

  void* buffer = sta->GetBuffer()->backing_store();

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoExchange<ctype>(isolate, buffer, index, value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    case kExternalFloat32Array:
    case kExternalFloat64Array:
    default:
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}


RUNTIME_FUNCTION(Runtime_AtomicsIsLockFree) {
  HandleScope scope(isolate);
  DCHECK(args.length() == 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(size, 0);
  uint32_t usize = NumberToUint32(*size);

  return Runtime::AtomicIsLockFree(usize) ? isolate->heap()->true_value()
                                          : isolate->heap()->false_value();
}
}
}  // namespace v8::internal