Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(270)

Side by Side Diff: src/builtins/builtins-sharedarraybuffer.cc

Issue 2799863002: [Atomics] use TFJ builtins for atomic add, sub, and, or, and xor (Closed)
Patch Set: [Atomics] use TFJ builtins for atomic add, sub, and, or, and xor Created 3 years, 8 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright 2016 the V8 project authors. All rights reserved. 1 // Copyright 2016 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "src/base/macros.h" 5 #include "src/base/macros.h"
6 #include "src/base/platform/mutex.h" 6 #include "src/base/platform/mutex.h"
7 #include "src/base/platform/time.h" 7 #include "src/base/platform/time.h"
8 #include "src/builtins/builtins-utils.h" 8 #include "src/builtins/builtins-utils.h"
9 #include "src/builtins/builtins.h" 9 #include "src/builtins/builtins.h"
10 #include "src/code-factory.h" 10 #include "src/code-factory.h"
(...skipping 151 matching lines...) Expand 10 before | Expand all | Expand 10 after
162 isolate, NewTypeError(MessageTemplate::kAtomicsWaitNotAllowed)); 162 isolate, NewTypeError(MessageTemplate::kAtomicsWaitNotAllowed));
163 } 163 }
164 164
165 Handle<JSArrayBuffer> array_buffer = sta->GetBuffer(); 165 Handle<JSArrayBuffer> array_buffer = sta->GetBuffer();
166 size_t addr = (i << 2) + NumberToSize(sta->byte_offset()); 166 size_t addr = (i << 2) + NumberToSize(sta->byte_offset());
167 167
168 return FutexEmulation::Wait(isolate, array_buffer, addr, value_int32, 168 return FutexEmulation::Wait(isolate, array_buffer, addr, value_int32,
169 timeout_number); 169 timeout_number);
170 } 170 }
171 171
172 namespace {
173
174 #if V8_CC_GNU
175
176 template <typename T>
177 inline T AddSeqCst(T* p, T value) {
178 return __atomic_fetch_add(p, value, __ATOMIC_SEQ_CST);
179 }
180
181 template <typename T>
182 inline T SubSeqCst(T* p, T value) {
183 return __atomic_fetch_sub(p, value, __ATOMIC_SEQ_CST);
184 }
185
186 template <typename T>
187 inline T AndSeqCst(T* p, T value) {
188 return __atomic_fetch_and(p, value, __ATOMIC_SEQ_CST);
189 }
190
191 template <typename T>
192 inline T OrSeqCst(T* p, T value) {
193 return __atomic_fetch_or(p, value, __ATOMIC_SEQ_CST);
194 }
195
196 template <typename T>
197 inline T XorSeqCst(T* p, T value) {
198 return __atomic_fetch_xor(p, value, __ATOMIC_SEQ_CST);
199 }
200
201 #elif V8_CC_MSVC
202
203 #define InterlockedExchangeAdd32 _InterlockedExchangeAdd
204 #define InterlockedAnd32 _InterlockedAnd
205 #define InterlockedOr32 _InterlockedOr
206 #define InterlockedXor32 _InterlockedXor
207 #define InterlockedExchangeAdd16 _InterlockedExchangeAdd16
208 #define InterlockedExchangeAdd8 _InterlockedExchangeAdd8
209
210 #define ATOMIC_OPS(type, suffix, vctype) \
211 inline type AddSeqCst(type* p, type value) { \
212 return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p), \
213 bit_cast<vctype>(value)); \
214 } \
215 inline type SubSeqCst(type* p, type value) { \
216 return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p), \
217 -bit_cast<vctype>(value)); \
218 } \
219 inline type AndSeqCst(type* p, type value) { \
220 return InterlockedAnd##suffix(reinterpret_cast<vctype*>(p), \
221 bit_cast<vctype>(value)); \
222 } \
223 inline type OrSeqCst(type* p, type value) { \
224 return InterlockedOr##suffix(reinterpret_cast<vctype*>(p), \
225 bit_cast<vctype>(value)); \
226 } \
227 inline type XorSeqCst(type* p, type value) { \
228 return InterlockedXor##suffix(reinterpret_cast<vctype*>(p), \
229 bit_cast<vctype>(value)); \
230 }
231
232 ATOMIC_OPS(int8_t, 8, char)
233 ATOMIC_OPS(uint8_t, 8, char)
234 ATOMIC_OPS(int16_t, 16, short) /* NOLINT(runtime/int) */
235 ATOMIC_OPS(uint16_t, 16, short) /* NOLINT(runtime/int) */
236 ATOMIC_OPS(int32_t, 32, long) /* NOLINT(runtime/int) */
237 ATOMIC_OPS(uint32_t, 32, long) /* NOLINT(runtime/int) */
238
239 #undef ATOMIC_OPS_INTEGER
240 #undef ATOMIC_OPS
241
242 #undef InterlockedExchangeAdd32
243 #undef InterlockedAnd32
244 #undef InterlockedOr32
245 #undef InterlockedXor32
246 #undef InterlockedExchangeAdd16
247 #undef InterlockedExchangeAdd8
248
249 #else
250
251 #error Unsupported platform!
252
253 #endif
254
255 template <typename T>
256 T FromObject(Handle<Object> number);
257
258 template <>
259 inline uint8_t FromObject<uint8_t>(Handle<Object> number) {
260 return NumberToUint32(*number);
261 }
262
263 template <>
264 inline int8_t FromObject<int8_t>(Handle<Object> number) {
265 return NumberToInt32(*number);
266 }
267
268 template <>
269 inline uint16_t FromObject<uint16_t>(Handle<Object> number) {
270 return NumberToUint32(*number);
271 }
272
273 template <>
274 inline int16_t FromObject<int16_t>(Handle<Object> number) {
275 return NumberToInt32(*number);
276 }
277
278 template <>
279 inline uint32_t FromObject<uint32_t>(Handle<Object> number) {
280 return NumberToUint32(*number);
281 }
282
283 template <>
284 inline int32_t FromObject<int32_t>(Handle<Object> number) {
285 return NumberToInt32(*number);
286 }
287
288 inline Object* ToObject(Isolate* isolate, int8_t t) { return Smi::FromInt(t); }
289
290 inline Object* ToObject(Isolate* isolate, uint8_t t) { return Smi::FromInt(t); }
291
292 inline Object* ToObject(Isolate* isolate, int16_t t) { return Smi::FromInt(t); }
293
294 inline Object* ToObject(Isolate* isolate, uint16_t t) {
295 return Smi::FromInt(t);
296 }
297
298 inline Object* ToObject(Isolate* isolate, int32_t t) {
299 return *isolate->factory()->NewNumber(t);
300 }
301
302 inline Object* ToObject(Isolate* isolate, uint32_t t) {
303 return *isolate->factory()->NewNumber(t);
304 }
305
306 template <typename T>
307 inline Object* DoAdd(Isolate* isolate, void* buffer, size_t index,
308 Handle<Object> obj) {
309 T value = FromObject<T>(obj);
310 T result = AddSeqCst(static_cast<T*>(buffer) + index, value);
311 return ToObject(isolate, result);
312 }
313
314 template <typename T>
315 inline Object* DoSub(Isolate* isolate, void* buffer, size_t index,
316 Handle<Object> obj) {
317 T value = FromObject<T>(obj);
318 T result = SubSeqCst(static_cast<T*>(buffer) + index, value);
319 return ToObject(isolate, result);
320 }
321
322 template <typename T>
323 inline Object* DoAnd(Isolate* isolate, void* buffer, size_t index,
324 Handle<Object> obj) {
325 T value = FromObject<T>(obj);
326 T result = AndSeqCst(static_cast<T*>(buffer) + index, value);
327 return ToObject(isolate, result);
328 }
329
330 template <typename T>
331 inline Object* DoOr(Isolate* isolate, void* buffer, size_t index,
332 Handle<Object> obj) {
333 T value = FromObject<T>(obj);
334 T result = OrSeqCst(static_cast<T*>(buffer) + index, value);
335 return ToObject(isolate, result);
336 }
337
338 template <typename T>
339 inline Object* DoXor(Isolate* isolate, void* buffer, size_t index,
340 Handle<Object> obj) {
341 T value = FromObject<T>(obj);
342 T result = XorSeqCst(static_cast<T*>(buffer) + index, value);
343 return ToObject(isolate, result);
344 }
345
346 } // anonymous namespace
347
// Duplicated from objects.h
// V has parameters (Type, type, TYPE, C type, element_size)
// Only the six integer element kinds are listed: the Atomics operations
// below are not defined for Float32/Float64 (or clamped Uint8) arrays.
#define INTEGER_TYPED_ARRAYS(V)          \
  V(Uint8, uint8, UINT8, uint8_t, 1)     \
  V(Int8, int8, INT8, int8_t, 1)         \
  V(Uint16, uint16, UINT16, uint16_t, 2) \
  V(Int16, int16, INT16, int16_t, 2)     \
  V(Uint32, uint32, UINT32, uint32_t, 4) \
  V(Int32, int32, INT32, int32_t, 4)
358 // ES #sec-atomics.add
359 // Atomics.add( typedArray, index, value )
360 BUILTIN(AtomicsAdd) {
361 HandleScope scope(isolate);
362 Handle<Object> array = args.atOrUndefined(isolate, 1);
363 Handle<Object> index = args.atOrUndefined(isolate, 2);
364 Handle<Object> value = args.atOrUndefined(isolate, 3);
365
366 Handle<JSTypedArray> sta;
367 ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
368 isolate, sta, ValidateSharedIntegerTypedArray(isolate, array));
369
370 Maybe<size_t> maybe_index = ValidateAtomicAccess(isolate, sta, index);
371 if (maybe_index.IsNothing()) return isolate->heap()->exception();
372 size_t i = maybe_index.FromJust();
373
374 ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
375 Object::ToInteger(isolate, value));
376
377 uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
378 NumberToSize(sta->byte_offset());
379
380 switch (sta->type()) {
381 #define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
382 case kExternal##Type##Array: \
383 return DoAdd<ctype>(isolate, source, i, value);
384
385 INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
386 #undef TYPED_ARRAY_CASE
387
388 default:
389 break;
390 }
391
392 UNREACHABLE();
393 return isolate->heap()->undefined_value();
394 }
395
396 // ES #sec-atomics.sub
397 // Atomics.sub( typedArray, index, value )
398 BUILTIN(AtomicsSub) {
399 HandleScope scope(isolate);
400 Handle<Object> array = args.atOrUndefined(isolate, 1);
401 Handle<Object> index = args.atOrUndefined(isolate, 2);
402 Handle<Object> value = args.atOrUndefined(isolate, 3);
403
404 Handle<JSTypedArray> sta;
405 ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
406 isolate, sta, ValidateSharedIntegerTypedArray(isolate, array));
407
408 Maybe<size_t> maybe_index = ValidateAtomicAccess(isolate, sta, index);
409 if (maybe_index.IsNothing()) return isolate->heap()->exception();
410 size_t i = maybe_index.FromJust();
411
412 ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
413 Object::ToInteger(isolate, value));
414
415 uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
416 NumberToSize(sta->byte_offset());
417
418 switch (sta->type()) {
419 #define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
420 case kExternal##Type##Array: \
421 return DoSub<ctype>(isolate, source, i, value);
422
423 INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
424 #undef TYPED_ARRAY_CASE
425
426 default:
427 break;
428 }
429
430 UNREACHABLE();
431 return isolate->heap()->undefined_value();
432 }
433
434 // ES #sec-atomics.and
435 // Atomics.and( typedArray, index, value )
436 BUILTIN(AtomicsAnd) {
437 HandleScope scope(isolate);
438 Handle<Object> array = args.atOrUndefined(isolate, 1);
439 Handle<Object> index = args.atOrUndefined(isolate, 2);
440 Handle<Object> value = args.atOrUndefined(isolate, 3);
441
442 Handle<JSTypedArray> sta;
443 ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
444 isolate, sta, ValidateSharedIntegerTypedArray(isolate, array));
445
446 Maybe<size_t> maybe_index = ValidateAtomicAccess(isolate, sta, index);
447 if (maybe_index.IsNothing()) return isolate->heap()->exception();
448 size_t i = maybe_index.FromJust();
449
450 ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
451 Object::ToInteger(isolate, value));
452
453 uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
454 NumberToSize(sta->byte_offset());
455
456 switch (sta->type()) {
457 #define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
458 case kExternal##Type##Array: \
459 return DoAnd<ctype>(isolate, source, i, value);
460
461 INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
462 #undef TYPED_ARRAY_CASE
463
464 default:
465 break;
466 }
467
468 UNREACHABLE();
469 return isolate->heap()->undefined_value();
470 }
471
472 // ES #sec-atomics.or
473 // Atomics.or( typedArray, index, value )
474 BUILTIN(AtomicsOr) {
475 HandleScope scope(isolate);
476 Handle<Object> array = args.atOrUndefined(isolate, 1);
477 Handle<Object> index = args.atOrUndefined(isolate, 2);
478 Handle<Object> value = args.atOrUndefined(isolate, 3);
479
480 Handle<JSTypedArray> sta;
481 ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
482 isolate, sta, ValidateSharedIntegerTypedArray(isolate, array));
483
484 Maybe<size_t> maybe_index = ValidateAtomicAccess(isolate, sta, index);
485 if (maybe_index.IsNothing()) return isolate->heap()->exception();
486 size_t i = maybe_index.FromJust();
487
488 ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
489 Object::ToInteger(isolate, value));
490
491 uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
492 NumberToSize(sta->byte_offset());
493
494 switch (sta->type()) {
495 #define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
496 case kExternal##Type##Array: \
497 return DoOr<ctype>(isolate, source, i, value);
498
499 INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
500 #undef TYPED_ARRAY_CASE
501
502 default:
503 break;
504 }
505
506 UNREACHABLE();
507 return isolate->heap()->undefined_value();
508 }
509
510 // ES #sec-atomics.xor
511 // Atomics.xor( typedArray, index, value )
512 BUILTIN(AtomicsXor) {
513 HandleScope scope(isolate);
514 Handle<Object> array = args.atOrUndefined(isolate, 1);
515 Handle<Object> index = args.atOrUndefined(isolate, 2);
516 Handle<Object> value = args.atOrUndefined(isolate, 3);
517
518 Handle<JSTypedArray> sta;
519 ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
520 isolate, sta, ValidateSharedIntegerTypedArray(isolate, array));
521
522 Maybe<size_t> maybe_index = ValidateAtomicAccess(isolate, sta, index);
523 if (maybe_index.IsNothing()) return isolate->heap()->exception();
524 size_t i = maybe_index.FromJust();
525
526 ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
527 Object::ToInteger(isolate, value));
528
529 uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
530 NumberToSize(sta->byte_offset());
531
532 switch (sta->type()) {
533 #define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
534 case kExternal##Type##Array: \
535 return DoXor<ctype>(isolate, source, i, value);
536
537 INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
538 #undef TYPED_ARRAY_CASE
539
540 default:
541 break;
542 }
543
544 UNREACHABLE();
545 return isolate->heap()->undefined_value();
546 }
547
548 } // namespace internal 172 } // namespace internal
549 } // namespace v8 173 } // namespace v8
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698