OLD | NEW |
---|---|
1 // Copyright 2015 the V8 project authors. All rights reserved. | 1 // Copyright 2015 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/runtime/runtime-utils.h" | 5 #include "src/runtime/runtime-utils.h" |
6 | 6 |
7 #include "src/arguments.h" | 7 #include "src/arguments.h" |
8 #include "src/base/macros.h" | 8 #include "src/base/macros.h" |
9 #include "src/base/platform/mutex.h" | 9 #include "src/base/platform/mutex.h" |
10 #include "src/conversions-inl.h" | 10 #include "src/conversions-inl.h" |
11 #include "src/factory.h" | 11 #include "src/factory.h" |
12 | 12 |
13 // Implement Atomic accesses to SharedArrayBuffers as defined in the | 13 // Implement Atomic accesses to SharedArrayBuffers as defined in the |
14 // SharedArrayBuffer draft spec, found here | 14 // SharedArrayBuffer draft spec, found here |
15 // https://docs.google.com/document/d/1NDGA_gZJ7M7w1Bh8S0AoDyEqwDdRh4uSoTPSNn77PFk | 15 // https://github.com/lars-t-hansen/ecmascript_sharedmem |
16 | 16 |
17 namespace v8 { | 17 namespace v8 { |
18 namespace internal { | 18 namespace internal { |
19 | 19 |
20 namespace { | 20 namespace { |
21 | 21 |
22 #if V8_CC_GNU | 22 #if V8_CC_GNU |
23 | 23 |
24 template <typename T> | 24 template <typename T> |
25 inline T CompareExchangeSeqCst(T* p, T oldval, T newval) { | 25 inline T CompareExchangeSeqCst(T* p, T oldval, T newval) { |
(...skipping 37 matching lines...) | |
63 template <typename T> | 63 template <typename T> |
64 inline T XorSeqCst(T* p, T value) { | 64 inline T XorSeqCst(T* p, T value) { |
65 return __atomic_fetch_xor(p, value, __ATOMIC_SEQ_CST); | 65 return __atomic_fetch_xor(p, value, __ATOMIC_SEQ_CST); |
66 } | 66 } |
67 | 67 |
68 template <typename T> | 68 template <typename T> |
69 inline T ExchangeSeqCst(T* p, T value) { | 69 inline T ExchangeSeqCst(T* p, T value) { |
70 return __atomic_exchange_n(p, value, __ATOMIC_SEQ_CST); | 70 return __atomic_exchange_n(p, value, __ATOMIC_SEQ_CST); |
71 } | 71 } |
72 | 72 |
73 #if ATOMICS_REQUIRE_LOCK_64_BIT | |
74 | |
75 // We only need to implement the following functions, because the rest of the | |
76 // atomic operations only work on integer types, and the only 64-bit type is | |
77 // float64. Similarly, because the values are being bit_cast from double -> | |
78 // uint64_t, we don't need to implement these functions for int64_t either. | |
79 | |
80 static base::LazyMutex atomic_mutex = LAZY_MUTEX_INITIALIZER; | |
81 | |
82 inline uint64_t CompareExchangeSeqCst(uint64_t* p, uint64_t oldval, | |
83 uint64_t newval) { | |
84 base::LockGuard<base::Mutex> lock_guard(atomic_mutex.Pointer()); | |
85 uint64_t result = *p; | |
86 if (result == oldval) *p = newval; | |
87 return result; | |
88 } | |
89 | |
90 | |
91 inline uint64_t LoadSeqCst(uint64_t* p) { | |
92 base::LockGuard<base::Mutex> lock_guard(atomic_mutex.Pointer()); | |
93 return *p; | |
94 } | |
95 | |
96 | |
97 inline void StoreSeqCst(uint64_t* p, uint64_t value) { | |
98 base::LockGuard<base::Mutex> lock_guard(atomic_mutex.Pointer()); | |
99 *p = value; | |
100 } | |
101 | |
102 #endif // ATOMICS_REQUIRE_LOCK_64_BIT | |
103 | |
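For context on the GCC path above: each SeqCst helper is a thin wrapper over a __atomic_* builtin with __ATOMIC_SEQ_CST ordering, and the builtins return the value observed before the operation, which is what Atomics.add and Atomics.compareExchange must return. A minimal standalone sketch of the same pattern (not part of this patch; compiles with g++ or clang++):

// Standalone sketch: how the __atomic builtins compose into SeqCst helpers.
#include <cstdint>
#include <cstdio>

template <typename T>
inline T AddSeqCst(T* p, T value) {
  // Returns the value *before* the add, matching Atomics.add semantics.
  return __atomic_fetch_add(p, value, __ATOMIC_SEQ_CST);
}

template <typename T>
inline T CompareExchangeSeqCst(T* p, T oldval, T newval) {
  // On failure, the builtin writes the observed value back into oldval, so
  // the previous value is returned in both the success and failure cases.
  (void)__atomic_compare_exchange_n(p, &oldval, newval, false,
                                    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
  return oldval;
}

int main() {
  int32_t cell = 40;
  int32_t before = AddSeqCst(&cell, int32_t{2});  // before == 40, cell == 42
  int32_t prev = CompareExchangeSeqCst(&cell, int32_t{42}, int32_t{7});
  std::printf("before=%d prev=%d cell=%d\n", before, prev, cell);  // 40 42 7
  return 0;
}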
104 #elif V8_CC_MSVC | 73 #elif V8_CC_MSVC |
105 | 74 |
106 #define InterlockedCompareExchange32 _InterlockedCompareExchange | 75 #define InterlockedCompareExchange32 _InterlockedCompareExchange |
107 #define InterlockedExchange32 _InterlockedExchange | 76 #define InterlockedExchange32 _InterlockedExchange |
108 #define InterlockedExchangeAdd32 _InterlockedExchangeAdd | 77 #define InterlockedExchangeAdd32 _InterlockedExchangeAdd |
109 #define InterlockedAnd32 _InterlockedAnd | 78 #define InterlockedAnd32 _InterlockedAnd |
110 #define InterlockedOr32 _InterlockedOr | 79 #define InterlockedOr32 _InterlockedOr |
111 #define InterlockedXor32 _InterlockedXor | 80 #define InterlockedXor32 _InterlockedXor |
112 #define InterlockedExchangeAdd16 _InterlockedExchangeAdd16 | 81 #define InterlockedExchangeAdd16 _InterlockedExchangeAdd16 |
113 #define InterlockedCompareExchange8 _InterlockedCompareExchange8 | 82 #define InterlockedCompareExchange8 _InterlockedCompareExchange8 |
114 #define InterlockedExchangeAdd8 _InterlockedExchangeAdd8 | 83 #define InterlockedExchangeAdd8 _InterlockedExchangeAdd8 |
115 | 84 |
116 #define ATOMIC_OPS_INTEGER(type, suffix, vctype) \ | 85 #define ATOMIC_OPS(type, suffix, vctype) \ |
117 inline type AddSeqCst(type* p, type value) { \ | 86 inline type AddSeqCst(type* p, type value) { \ |
118 return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p), \ | 87 return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p), \ |
119 bit_cast<vctype>(value)); \ | 88 bit_cast<vctype>(value)); \ |
120 } \ | 89 } \ |
121 inline type SubSeqCst(type* p, type value) { \ | 90 inline type SubSeqCst(type* p, type value) { \ |
122 return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p), \ | 91 return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p), \ |
123 -bit_cast<vctype>(value)); \ | 92 -bit_cast<vctype>(value)); \ |
124 } \ | 93 } \ |
125 inline type AndSeqCst(type* p, type value) { \ | 94 inline type AndSeqCst(type* p, type value) { \ |
126 return InterlockedAnd##suffix(reinterpret_cast<vctype*>(p), \ | 95 return InterlockedAnd##suffix(reinterpret_cast<vctype*>(p), \ |
127 bit_cast<vctype>(value)); \ | 96 bit_cast<vctype>(value)); \ |
128 } \ | 97 } \ |
129 inline type OrSeqCst(type* p, type value) { \ | 98 inline type OrSeqCst(type* p, type value) { \ |
130 return InterlockedOr##suffix(reinterpret_cast<vctype*>(p), \ | 99 return InterlockedOr##suffix(reinterpret_cast<vctype*>(p), \ |
131 bit_cast<vctype>(value)); \ | 100 bit_cast<vctype>(value)); \ |
132 } \ | 101 } \ |
133 inline type XorSeqCst(type* p, type value) { \ | 102 inline type XorSeqCst(type* p, type value) { \ |
134 return InterlockedXor##suffix(reinterpret_cast<vctype*>(p), \ | 103 return InterlockedXor##suffix(reinterpret_cast<vctype*>(p), \ |
135 bit_cast<vctype>(value)); \ | 104 bit_cast<vctype>(value)); \ |
136 } \ | 105 } \ |
137 inline type ExchangeSeqCst(type* p, type value) { \ | 106 inline type ExchangeSeqCst(type* p, type value) { \ |
138 return InterlockedExchange##suffix(reinterpret_cast<vctype*>(p), \ | 107 return InterlockedExchange##suffix(reinterpret_cast<vctype*>(p), \ |
139 bit_cast<vctype>(value)); \ | 108 bit_cast<vctype>(value)); \ |
140 } | 109 } \ |
141 | 110 \ |
142 #define ATOMIC_OPS_FLOAT(type, suffix, vctype) \ | |
143 inline type CompareExchangeSeqCst(type* p, type oldval, type newval) { \ | 111 inline type CompareExchangeSeqCst(type* p, type oldval, type newval) { \ |
144 return InterlockedCompareExchange##suffix(reinterpret_cast<vctype*>(p), \ | 112 return InterlockedCompareExchange##suffix(reinterpret_cast<vctype*>(p), \ |
145 bit_cast<vctype>(newval), \ | 113 bit_cast<vctype>(newval), \ |
146 bit_cast<vctype>(oldval)); \ | 114 bit_cast<vctype>(oldval)); \ |
147 } \ | 115 } \ |
148 inline type LoadSeqCst(type* p) { return *p; } \ | 116 inline type LoadSeqCst(type* p) { return *p; } \ |
149 inline void StoreSeqCst(type* p, type value) { \ | 117 inline void StoreSeqCst(type* p, type value) { \ |
150 InterlockedExchange##suffix(reinterpret_cast<vctype*>(p), \ | 118 InterlockedExchange##suffix(reinterpret_cast<vctype*>(p), \ |
151 bit_cast<vctype>(value)); \ | 119 bit_cast<vctype>(value)); \ |
152 } | 120 } |
153 | 121 |
154 #define ATOMIC_OPS(type, suffix, vctype) \ | |
155 ATOMIC_OPS_INTEGER(type, suffix, vctype) \ | |
156 ATOMIC_OPS_FLOAT(type, suffix, vctype) | |
157 | |
158 ATOMIC_OPS(int8_t, 8, char) | 122 ATOMIC_OPS(int8_t, 8, char) |
159 ATOMIC_OPS(uint8_t, 8, char) | 123 ATOMIC_OPS(uint8_t, 8, char) |
160 ATOMIC_OPS(int16_t, 16, short) /* NOLINT(runtime/int) */ | 124 ATOMIC_OPS(int16_t, 16, short) /* NOLINT(runtime/int) */ |
161 ATOMIC_OPS(uint16_t, 16, short) /* NOLINT(runtime/int) */ | 125 ATOMIC_OPS(uint16_t, 16, short) /* NOLINT(runtime/int) */ |
162 ATOMIC_OPS(int32_t, 32, long) /* NOLINT(runtime/int) */ | 126 ATOMIC_OPS(int32_t, 32, long) /* NOLINT(runtime/int) */ |
163 ATOMIC_OPS(uint32_t, 32, long) /* NOLINT(runtime/int) */ | 127 ATOMIC_OPS(uint32_t, 32, long) /* NOLINT(runtime/int) */ |
164 ATOMIC_OPS_FLOAT(uint64_t, 64, LONGLONG) | |
165 | 128 |
166 #undef ATOMIC_OPS_INTEGER | 129 #undef ATOMIC_OPS_INTEGER |
167 #undef ATOMIC_OPS_FLOAT | |
168 #undef ATOMIC_OPS | 130 #undef ATOMIC_OPS |
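The merge of ATOMIC_OPS_INTEGER and ATOMIC_OPS_FLOAT into a single ATOMIC_OPS works because, with the float paths gone, every remaining element type needs the full set of operations. To make the MSVC expansion concrete, a self-contained sketch (MSVC only; hypothetical file, with V8's bit_cast approximated by memcpy) of what one instantiation such as ATOMIC_OPS(int8_t, 8, char) boils down to:

// Sketch of the 8-bit expansion; And/Or/Xor/Exchange follow the same shape.
#include <intrin.h>
#include <cstdint>
#include <cstdio>
#include <cstring>

template <typename To, typename From>
To bit_cast_sketch(From from) {  // stand-in for V8's bit_cast
  static_assert(sizeof(To) == sizeof(From), "widths must match");
  To to;
  std::memcpy(&to, &from, sizeof(to));
  return to;
}

inline int8_t AddSeqCst(int8_t* p, int8_t value) {
  return _InterlockedExchangeAdd8(reinterpret_cast<char*>(p),
                                  bit_cast_sketch<char>(value));
}

inline int8_t CompareExchangeSeqCst(int8_t* p, int8_t oldval, int8_t newval) {
  return _InterlockedCompareExchange8(reinterpret_cast<char*>(p),
                                      bit_cast_sketch<char>(newval),
                                      bit_cast_sketch<char>(oldval));
}

int main() {
  int8_t cell = 1;
  AddSeqCst(&cell, int8_t{2});  // cell == 3
  int8_t prev = CompareExchangeSeqCst(&cell, int8_t{3}, int8_t{9});
  std::printf("prev=%d cell=%d\n", prev, cell);  // prev=3 cell=9
  return 0;
}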
169 | 131 |
170 #undef InterlockedCompareExchange32 | 132 #undef InterlockedCompareExchange32 |
171 #undef InterlockedExchange32 | 133 #undef InterlockedExchange32 |
172 #undef InterlockedExchangeAdd32 | 134 #undef InterlockedExchangeAdd32 |
173 #undef InterlockedAnd32 | 135 #undef InterlockedAnd32 |
174 #undef InterlockedOr32 | 136 #undef InterlockedOr32 |
175 #undef InterlockedXor32 | 137 #undef InterlockedXor32 |
176 #undef InterlockedExchangeAdd16 | 138 #undef InterlockedExchangeAdd16 |
177 #undef InterlockedCompareExchange8 | 139 #undef InterlockedCompareExchange8 |
(...skipping 11 matching lines...) |
189 template <> | 151 template <> |
190 inline uint32_t FromObject<uint32_t>(Handle<Object> number) { | 152 inline uint32_t FromObject<uint32_t>(Handle<Object> number) { |
191 return NumberToUint32(*number); | 153 return NumberToUint32(*number); |
192 } | 154 } |
193 | 155 |
194 template <> | 156 template <> |
195 inline int32_t FromObject<int32_t>(Handle<Object> number) { | 157 inline int32_t FromObject<int32_t>(Handle<Object> number) { |
196 return NumberToInt32(*number); | 158 return NumberToInt32(*number); |
197 } | 159 } |
198 | 160 |
199 template <> | |
200 inline float FromObject<float>(Handle<Object> number) { | |
201 return static_cast<float>(number->Number()); | |
202 } | |
203 | |
204 template <> | |
205 inline double FromObject<double>(Handle<Object> number) { | |
206 return number->Number(); | |
207 } | |
208 | |
209 template <typename T, typename F> | 161 template <typename T, typename F> |
210 inline T ToAtomic(F from) { | 162 inline T ToAtomic(F from) { |
211 return static_cast<T>(from); | 163 return static_cast<T>(from); |
212 } | 164 } |
213 | 165 |
214 template <> | |
215 inline uint32_t ToAtomic<uint32_t, float>(float from) { | |
216 return bit_cast<uint32_t, float>(from); | |
217 } | |
218 | |
219 template <> | |
220 inline uint64_t ToAtomic<uint64_t, double>(double from) { | |
221 return bit_cast<uint64_t, double>(from); | |
222 } | |
223 | |
224 template <typename T, typename F> | 166 template <typename T, typename F> |
225 inline T FromAtomic(F from) { | 167 inline T FromAtomic(F from) { |
226 return static_cast<T>(from); | 168 return static_cast<T>(from); |
227 } | 169 } |
228 | 170 |
229 template <> | |
230 inline float FromAtomic<float, uint32_t>(uint32_t from) { | |
231 return bit_cast<float, uint32_t>(from); | |
232 } | |
233 | |
234 template <> | |
235 inline double FromAtomic<double, uint64_t>(uint64_t from) { | |
236 return bit_cast<double, uint64_t>(from); | |
237 } | |
238 | |
239 template <typename T> | 171 template <typename T> |
240 inline Object* ToObject(Isolate* isolate, T t); | 172 inline Object* ToObject(Isolate* isolate, T t); |
241 | 173 |
242 template <> | 174 template <> |
243 inline Object* ToObject<int8_t>(Isolate* isolate, int8_t t) { | 175 inline Object* ToObject<int8_t>(Isolate* isolate, int8_t t) { |
244 return Smi::FromInt(t); | 176 return Smi::FromInt(t); |
245 } | 177 } |
246 | 178 |
247 template <> | 179 template <> |
248 inline Object* ToObject<uint8_t>(Isolate* isolate, uint8_t t) { | 180 inline Object* ToObject<uint8_t>(Isolate* isolate, uint8_t t) { |
(...skipping 13 matching lines...) |
262 template <> | 194 template <> |
263 inline Object* ToObject<int32_t>(Isolate* isolate, int32_t t) { | 195 inline Object* ToObject<int32_t>(Isolate* isolate, int32_t t) { |
264 return *isolate->factory()->NewNumber(t); | 196 return *isolate->factory()->NewNumber(t); |
265 } | 197 } |
266 | 198 |
267 template <> | 199 template <> |
268 inline Object* ToObject<uint32_t>(Isolate* isolate, uint32_t t) { | 200 inline Object* ToObject<uint32_t>(Isolate* isolate, uint32_t t) { |
269 return *isolate->factory()->NewNumber(t); | 201 return *isolate->factory()->NewNumber(t); |
270 } | 202 } |
271 | 203 |
272 template <> | |
273 inline Object* ToObject<float>(Isolate* isolate, float t) { | |
274 return *isolate->factory()->NewNumber(t); | |
275 } | |
276 | |
277 template <> | |
278 inline Object* ToObject<double>(Isolate* isolate, double t) { | |
279 return *isolate->factory()->NewNumber(t); | |
280 } | |
281 | |
282 template <typename T> | 204 template <typename T> |
283 struct FromObjectTraits {}; | 205 struct FromObjectTraits {}; |
284 | 206 |
285 template <> | 207 template <> |
286 struct FromObjectTraits<int8_t> { | 208 struct FromObjectTraits<int8_t> { |
287 typedef int32_t convert_type; | 209 typedef int32_t convert_type; |
288 typedef int8_t atomic_type; | 210 typedef int8_t atomic_type; |
289 }; | 211 }; |
290 | 212 |
291 template <> | 213 template <> |
(...skipping 19 matching lines...) |
311 typedef int32_t convert_type; | 233 typedef int32_t convert_type; |
312 typedef int32_t atomic_type; | 234 typedef int32_t atomic_type; |
313 }; | 235 }; |
314 | 236 |
315 template <> | 237 template <> |
316 struct FromObjectTraits<uint32_t> { | 238 struct FromObjectTraits<uint32_t> { |
317 typedef uint32_t convert_type; | 239 typedef uint32_t convert_type; |
318 typedef uint32_t atomic_type; | 240 typedef uint32_t atomic_type; |
319 }; | 241 }; |
320 | 242 |
321 template <> | |
322 struct FromObjectTraits<float> { | |
323 typedef float convert_type; | |
324 typedef uint32_t atomic_type; | |
325 }; | |
326 | |
327 template <> | |
328 struct FromObjectTraits<double> { | |
329 typedef double convert_type; | |
330 typedef uint64_t atomic_type; | |
331 }; | |
332 | |
333 | 243 |
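The FromObject/ToAtomic/FromObjectTraits machinery above converts a JS number in two steps: convert_type says how the number is first read (e.g. through NumberToInt32), and atomic_type is the width the hardware operation actually uses. A standalone sketch of that data flow, with the V8 handle machinery replaced by a plain double (hypothetical names, not V8 API):

// Sketch: trait-driven narrowing from a JS number to the element type.
#include <cstdint>
#include <cstdio>

template <typename T>
struct ElementTraits;  // stand-in for FromObjectTraits

template <>
struct ElementTraits<int8_t> {
  typedef int32_t convert_type;  // JS numbers are read as int32 first...
  typedef int8_t atomic_type;    // ...then truncated to the element width.
};

template <typename T>
typename ElementTraits<T>::atomic_type ToAtomicValue(double js_number) {
  typedef typename ElementTraits<T>::convert_type convert_type;
  typedef typename ElementTraits<T>::atomic_type atomic_type;
  return static_cast<atomic_type>(static_cast<convert_type>(js_number));
}

int main() {
  // 300 wraps to 44 modulo 2^8, mirroring Int8Array's wrap-around semantics.
  std::printf("%d\n", ToAtomicValue<int8_t>(300.0));
  return 0;
}

With the float cases removed by this patch, convert_type and atomic_type differ only in width or signedness, never in representation, which is why the bit_cast specializations of ToAtomic and FromAtomic can be deleted as well.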
334 template <typename T> | 244 template <typename T> |
335 inline Object* DoCompareExchange(Isolate* isolate, void* buffer, size_t index, | 245 inline Object* DoCompareExchange(Isolate* isolate, void* buffer, size_t index, |
336 Handle<Object> oldobj, Handle<Object> newobj) { | 246 Handle<Object> oldobj, Handle<Object> newobj) { |
337 typedef typename FromObjectTraits<T>::atomic_type atomic_type; | 247 typedef typename FromObjectTraits<T>::atomic_type atomic_type; |
338 typedef typename FromObjectTraits<T>::convert_type convert_type; | 248 typedef typename FromObjectTraits<T>::convert_type convert_type; |
339 atomic_type oldval = ToAtomic<atomic_type>(FromObject<convert_type>(oldobj)); | 249 atomic_type oldval = ToAtomic<atomic_type>(FromObject<convert_type>(oldobj)); |
340 atomic_type newval = ToAtomic<atomic_type>(FromObject<convert_type>(newobj)); | 250 atomic_type newval = ToAtomic<atomic_type>(FromObject<convert_type>(newobj)); |
341 atomic_type result = CompareExchangeSeqCst( | 251 atomic_type result = CompareExchangeSeqCst( |
342 static_cast<atomic_type*>(buffer) + index, oldval, newval); | 252 static_cast<atomic_type*>(buffer) + index, oldval, newval); |
(...skipping 190 matching lines...) | |
533 void* buffer = sta->GetBuffer()->backing_store(); | 443 void* buffer = sta->GetBuffer()->backing_store(); |
534 | 444 |
535 switch (sta->type()) { | 445 switch (sta->type()) { |
536 #define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \ | 446 #define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \ |
537 case kExternal##Type##Array: \ | 447 case kExternal##Type##Array: \ |
538 return DoCompareExchange<ctype>(isolate, buffer, index, oldobj, newobj); | 448 return DoCompareExchange<ctype>(isolate, buffer, index, oldobj, newobj); |
539 | 449 |
540 INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE) | 450 INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE) |
541 #undef TYPED_ARRAY_CASE | 451 #undef TYPED_ARRAY_CASE |
542 | 452 |
543 case kExternalFloat32Array: | |
544 return DoCompareExchange<float>(isolate, buffer, index, oldobj, newobj); | |
545 | |
546 case kExternalFloat64Array: | |
547 return DoCompareExchange<double>(isolate, buffer, index, oldobj, newobj); | |
548 | |
549 case kExternalUint8ClampedArray: | 453 case kExternalUint8ClampedArray: |
550 return DoCompareExchangeUint8Clamped(isolate, buffer, index, oldobj, | 454 return DoCompareExchangeUint8Clamped(isolate, buffer, index, oldobj, |
551 newobj); | 455 newobj); |
552 | 456 |
457 case kExternalFloat32Array: | |
458 case kExternalFloat64Array: | |
553 default: | 459 default: |
554 break; | 460 break; |
555 } | 461 } |
556 | 462 |
557 UNREACHABLE(); | 463 UNREACHABLE(); |
558 return isolate->heap()->undefined_value(); | 464 return isolate->heap()->undefined_value(); |
559 } | 465 } |
560 | 466 |
561 | 467 |
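The switch in Runtime_AtomicsCompareExchange above dispatches through TYPED_ARRAY_CASE, an X-macro that stamps out one case per integer element type and instantiates the Do* template for the matching C type. A self-contained sketch of the same X-macro dispatch technique (hypothetical names, not V8's macros):

// Sketch: an X-macro generating both an enum and the switch that dispatches
// on it, mirroring INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE) above.
#include <cstdint>
#include <cstdio>

#define INTEGER_TYPES(V) \
  V(Int8, int8_t)        \
  V(Uint8, uint8_t)      \
  V(Int32, int32_t)

enum ElementType {
#define ENUM_ENTRY(Name, ctype) k##Name,
  INTEGER_TYPES(ENUM_ENTRY)
#undef ENUM_ENTRY
};

template <typename T>
void DoPrintSize() {
  std::printf("%zu byte(s)\n", sizeof(T));
}

void Dispatch(ElementType type) {
  switch (type) {
#define DISPATCH_CASE(Name, ctype) \
  case k##Name:                    \
    return DoPrintSize<ctype>();
    INTEGER_TYPES(DISPATCH_CASE)
#undef DISPATCH_CASE
  }
}

int main() {
  Dispatch(kInt32);  // prints "4 byte(s)"
  return 0;
}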
562 RUNTIME_FUNCTION(Runtime_AtomicsLoad) { | 468 RUNTIME_FUNCTION(Runtime_AtomicsLoad) { |
563 HandleScope scope(isolate); | 469 HandleScope scope(isolate); |
564 DCHECK(args.length() == 2); | 470 DCHECK(args.length() == 2); |
565 CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0); | 471 CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0); |
566 CONVERT_SIZE_ARG_CHECKED(index, 1); | 472 CONVERT_SIZE_ARG_CHECKED(index, 1); |
567 RUNTIME_ASSERT(sta->GetBuffer()->is_shared()); | 473 RUNTIME_ASSERT(sta->GetBuffer()->is_shared()); |
568 RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length())); | 474 RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length())); |
569 | 475 |
570 void* buffer = sta->GetBuffer()->backing_store(); | 476 void* buffer = sta->GetBuffer()->backing_store(); |
571 | 477 |
572 switch (sta->type()) { | 478 switch (sta->type()) { |
573 #define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \ | 479 #define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \ |
574 case kExternal##Type##Array: \ | 480 case kExternal##Type##Array: \ |
575 return DoLoad<ctype>(isolate, buffer, index); | 481 return DoLoad<ctype>(isolate, buffer, index); |
576 | 482 |
577 TYPED_ARRAYS(TYPED_ARRAY_CASE) | 483 INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE) |
578 #undef TYPED_ARRAY_CASE | 484 #undef TYPED_ARRAY_CASE |
579 | 485 |
486 case kExternalUint8ClampedArray: | |
487 return DoLoad<uint8_t>(isolate, buffer, index); | |
488 | |
489 case kExternalFloat32Array: | |
490 case kExternalFloat64Array: | |
Jarin 2015/09/03 08:23:32: Does it really make sense to mention the float cases here?
binji 2015/09/16 17:15:44: Removed.
| |
580 default: | 491 default: |
581 break; | 492 break; |
582 } | 493 } |
583 | 494 |
584 UNREACHABLE(); | 495 UNREACHABLE(); |
585 return isolate->heap()->undefined_value(); | 496 return isolate->heap()->undefined_value(); |
586 } | 497 } |
587 | 498 |
588 | 499 |
589 RUNTIME_FUNCTION(Runtime_AtomicsStore) { | 500 RUNTIME_FUNCTION(Runtime_AtomicsStore) { |
590 HandleScope scope(isolate); | 501 HandleScope scope(isolate); |
591 DCHECK(args.length() == 3); | 502 DCHECK(args.length() == 3); |
592 CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0); | 503 CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0); |
593 CONVERT_SIZE_ARG_CHECKED(index, 1); | 504 CONVERT_SIZE_ARG_CHECKED(index, 1); |
594 CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2); | 505 CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2); |
595 RUNTIME_ASSERT(sta->GetBuffer()->is_shared()); | 506 RUNTIME_ASSERT(sta->GetBuffer()->is_shared()); |
596 RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length())); | 507 RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length())); |
597 | 508 |
598 void* buffer = sta->GetBuffer()->backing_store(); | 509 void* buffer = sta->GetBuffer()->backing_store(); |
599 | 510 |
600 switch (sta->type()) { | 511 switch (sta->type()) { |
601 #define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \ | 512 #define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \ |
602 case kExternal##Type##Array: \ | 513 case kExternal##Type##Array: \ |
603 return DoStore<ctype>(isolate, buffer, index, value); | 514 return DoStore<ctype>(isolate, buffer, index, value); |
604 | 515 |
605 INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE) | 516 INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE) |
606 #undef TYPED_ARRAY_CASE | 517 #undef TYPED_ARRAY_CASE |
607 | 518 |
608 case kExternalFloat32Array: | |
609 return DoStore<float>(isolate, buffer, index, value); | |
610 | |
611 case kExternalFloat64Array: | |
612 return DoStore<double>(isolate, buffer, index, value); | |
613 | |
614 case kExternalUint8ClampedArray: | 519 case kExternalUint8ClampedArray: |
615 return DoStoreUint8Clamped(isolate, buffer, index, value); | 520 return DoStoreUint8Clamped(isolate, buffer, index, value); |
616 | 521 |
522 case kExternalFloat32Array: | |
523 case kExternalFloat64Array: | |
617 default: | 524 default: |
618 break; | 525 break; |
619 } | 526 } |
620 | 527 |
621 UNREACHABLE(); | 528 UNREACHABLE(); |
622 return isolate->heap()->undefined_value(); | 529 return isolate->heap()->undefined_value(); |
623 } | 530 } |
624 | 531 |
625 | 532 |
626 RUNTIME_FUNCTION(Runtime_AtomicsAdd) { | 533 RUNTIME_FUNCTION(Runtime_AtomicsAdd) { |
(...skipping 198 matching lines...) | |
825 HandleScope scope(isolate); | 732 HandleScope scope(isolate); |
826 DCHECK(args.length() == 1); | 733 DCHECK(args.length() == 1); |
827 CONVERT_NUMBER_ARG_HANDLE_CHECKED(size, 0); | 734 CONVERT_NUMBER_ARG_HANDLE_CHECKED(size, 0); |
828 uint32_t usize = NumberToUint32(*size); | 735 uint32_t usize = NumberToUint32(*size); |
829 | 736 |
830 return Runtime::AtomicIsLockFree(usize) ? isolate->heap()->true_value() | 737 return Runtime::AtomicIsLockFree(usize) ? isolate->heap()->true_value() |
831 : isolate->heap()->false_value(); | 738 : isolate->heap()->false_value(); |
832 } | 739 } |
833 } | 740 } |
834 } // namespace v8::internal | 741 } // namespace v8::internal |