Chromium Code Reviews

Side by Side Diff: src/arm/code-stubs-arm.cc

Issue 1617503003: [Atomics] code stubs for atomic operations (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: call code stub from TF Created 4 years, 11 months ago
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_ARM

#include "src/base/bits.h"
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
(...skipping 5426 matching lines...)
      ExternalReference::invoke_accessor_getter_callback(isolate());

  // +3 is to skip prolog, return address and name handle.
  MemOperand return_value_operand(
      fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
  CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
                           kStackUnwindSpace, NULL, return_value_operand, NULL);
}


static void GetTypedArrayBackingStore(MacroAssembler* masm,
Jarin 2016/01/28 08:11:12 Nit: Anonymous namespace for all the static methods …
binji 2016/01/28 16:20:00 Happy to do it, but it doesn't seem to be consistent …
Jarin 2016/02/01 07:42:43 I see your point, but I still think new code should …
                                      Register backing_store,
                                      Register object,
                                      Register scratch,
                                      LowDwVfpRegister double_scratch) {
  Label offset_is_not_smi, done;
  __ ldr(scratch, FieldMemOperand(object, JSTypedArray::kBufferOffset));
  __ ldr(backing_store,
         FieldMemOperand(scratch, JSArrayBuffer::kBackingStoreOffset));
  __ ldr(scratch,
         FieldMemOperand(object, JSArrayBufferView::kByteOffsetOffset));
  __ UntagAndJumpIfNotSmi(scratch, scratch, &offset_is_not_smi);
  // offset is smi
Jarin 2016/01/28 08:11:12 Nit: If a comment is a sentence, it should start with …
binji 2016/01/28 16:20:00 Done.
  __ add(backing_store, backing_store, scratch);
  __ jmp(&done);

  // offset is a heap number
  __ bind(&offset_is_not_smi);
  __ vldr(double_scratch, scratch, HeapNumber::kValueOffset - kHeapObjectTag);
  __ vcvt_u32_f64(double_scratch.low(), double_scratch);
  __ vmov(scratch, double_scratch.low());
  __ add(backing_store, backing_store, scratch);
  __ bind(&done);
}
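
Following up on the style thread above: the anonymous-namespace form Jarin suggests would look roughly like this. A minimal sketch of the wrapping only, not part of the patch:

namespace {

void GetTypedArrayBackingStore(MacroAssembler* masm, Register backing_store,
                               Register object, Register scratch,
                               LowDwVfpRegister double_scratch) {
  // Body as in the patch above; the anonymous namespace gives internal
  // linkage to everything declared inside it, so `static` can be dropped.
}

}  // namespace
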

static void TaggedToInteger32(MacroAssembler* masm,
                              Register value,
                              LowDwVfpRegister double_scratch) {
  Label not_smi, done;
  __ UntagAndJumpIfNotSmi(value, value, &not_smi);
  __ jmp(&done);

  __ bind(&not_smi);
  __ vldr(double_scratch, value, HeapNumber::kValueOffset - kHeapObjectTag);
  __ vmov(value, double_scratch.low());
  __ bind(&done);
}

static void TypedArrayJumpTable(MacroAssembler* masm,
                                Register object,
                                Register scratch,
                                Register scratch2,
                                Label* i8,
                                Label* u8,
                                Label* i16,
                                Label* u16,
                                Label* i32,
                                Label* u32,
                                Label* u8c) {
  STATIC_ASSERT(FIXED_UINT8_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 1);
  STATIC_ASSERT(FIXED_INT16_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 2);
  STATIC_ASSERT(FIXED_UINT16_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 3);
  STATIC_ASSERT(FIXED_INT32_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 4);
  STATIC_ASSERT(FIXED_UINT32_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 5);
  STATIC_ASSERT(FIXED_FLOAT32_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 6);
  STATIC_ASSERT(FIXED_FLOAT64_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 7);
  STATIC_ASSERT(FIXED_UINT8_CLAMPED_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 8);
Jarin 2016/01/28 08:11:12 I like this. Thanks!
binji 2016/01/28 16:20:00 Yeah, it's a bit paranoid, but doesn't really hurt
Jarin 2016/02/01 07:42:43 I actually like that it explicitly states what you …

  __ ldr(scratch, FieldMemOperand(object, JSObject::kElementsOffset));
  __ ldr(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
  __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
  __ mov(scratch2, Operand(static_cast<uint8_t>(FIXED_INT8_ARRAY_TYPE)));
  __ sub(scratch, scratch, scratch2, SetCC);
  __ Assert(ge, kOffsetOutOfRange);

  Label abort;

  {
    Assembler::BlockConstPoolScope scope(masm);
    __ add(pc, pc, Operand(scratch, LSL, 2));
    __ nop();
    __ b(i8);      // Int8Array
    __ b(u8);      // Uint8Array
    __ b(i16);     // Int16Array
    __ b(u16);     // Uint16Array
    __ b(i32);     // Int32Array
    __ b(u32);     // Uint32Array
    __ b(&abort);  // Float32Array
    __ b(&abort);  // Float64Array
    __ b(u8);      // Uint8ClampedArray
Jarin 2016/01/28 08:11:12 This should be u8c, no? (Also on all other platforms.)
binji 2016/01/28 16:20:00 oops. In this case it is fine (because AtomicLoad …)
  }

  __ bind(&abort);
  __ Abort(kNoReason);
}
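
A note on the computed branch above, since the idiom is easy to misread: in ARM state, reading pc yields the address of the current instruction plus 8, and the nop pads the table so the first entry lines up with that value. A worked example of my reading of the code, not part of the patch:

// Let A be the address of `add pc, pc, scratch, LSL #2` (4-byte instructions):
//   A + 0  : add pc, pc, scratch, LSL #2   ; pc reads as A + 8 here
//   A + 4  : nop
//   A + 8  : b i8    ; table entry 0 (FIXED_INT8_ARRAY_TYPE)
//   A + 12 : b u8    ; table entry 1
//   ...
// New pc = (A + 8) + 4 * scratch, so scratch, the instance type minus
// FIXED_INT8_ARRAY_TYPE, selects the matching branch; e.g. scratch == 2
// (an Int16Array) lands on `b i16` at A + 16.
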

static void ReturnInteger32(MacroAssembler* masm,
                            DwVfpRegister dst,
                            Register value,
                            SwVfpRegister single_scratch,
                            Label* use_heap_number) {
  Label not_smi;
  __ TrySmiTag(r0, value, &not_smi);
  __ Ret();

  __ bind(&not_smi);
  __ vmov(single_scratch, value);
  __ vcvt_f64_s32(dst, single_scratch);
  __ jmp(use_heap_number);
}

static void ReturnUnsignedInteger32(MacroAssembler* masm,
                                    DwVfpRegister dst,
                                    Register value,
                                    Register scratch,
                                    SwVfpRegister single_scratch,
                                    Label* use_heap_number) {
  Label not_smi;
  __ mov(scratch, Operand(0x40000000U));
  __ cmp(value, scratch);
  __ b(cs, &not_smi);
  __ SmiTag(r0, value);
  __ Ret();

  __ bind(&not_smi);
  __ vmov(single_scratch, value);
  __ vcvt_f64_u32(dst, single_scratch);
  __ jmp(use_heap_number);
}
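
The 0x40000000 comparison above is the unsigned Smi-range check: with V8's 32-bit tagging, a Smi carries a 31-bit signed payload, so only unsigned values below 2^30 can be tagged directly and everything else falls through to the heap-number path. A minimal sketch of the decision both Return helpers encode; the helper names are mine, illustration only and not part of the patch:

// Sketch, assuming the 32-bit Smi layout (31-bit signed payload).
bool FitsSmi(int32_t v) { return v >= -(1 << 30) && v <= (1 << 30) - 1; }
bool FitsUnsignedSmi(uint32_t v) { return v < 0x40000000u; }  // v < 2^30
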

static void ReturnAllocatedHeapNumber(MacroAssembler* masm,
                                      DwVfpRegister value,
                                      Register scratch,
                                      Register scratch2,
                                      Register scratch3) {
  Label call_runtime;
  __ LoadRoot(scratch3, Heap::kHeapNumberMapRootIndex);
  __ AllocateHeapNumber(r0, scratch, scratch2, scratch3, &call_runtime);
  __ vstr(value, FieldMemOperand(r0, HeapNumber::kValueOffset));
  __ Ret();

  __ bind(&call_runtime);
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
    __ vstr(value, FieldMemOperand(r0, HeapNumber::kValueOffset));
  }
  __ Ret();
}
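
ReturnAllocatedHeapNumber follows the usual fast-path/slow-path allocation shape: try an inline new-space allocation and, if that fails, set up an internal frame and ask the runtime for the number. A schematic of the control flow as I read it, not code from the patch:

// AllocateHeapNumber succeeds -> store the double into r0's payload, return.
// AllocateHeapNumber bails out -> enter an INTERNAL frame, call
//   Runtime::kAllocateHeapNumber (saving doubles), store the double into the
//   runtime-allocated result in r0, leave the frame, return.
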

void AtomicsLoadStub::Generate(MacroAssembler* masm) {
  Register object = r1;
  Register backing_store = r2;
  Register index = r0;
  Label i8, u8, i16, u16, i32, u32;

  GetTypedArrayBackingStore(masm, backing_store, object, r3, d0);
  TaggedToInteger32(masm, index, d0);
  TypedArrayJumpTable(masm, object, r3, r4, &i8, &u8, &i16, &u16, &i32, &u32,
                      &u8);

  __ bind(&i8);
  __ ldrb(r0, MemOperand(backing_store, index));
  __ dmb(ISH);
  __ sxtb(r0, r0);
  __ SmiTag(r0);
  __ Ret();

  __ bind(&u8);
  __ ldrb(r0, MemOperand(backing_store, index));
  __ dmb(ISH);
  __ SmiTag(r0);
  __ Ret();

  __ bind(&i16);
  __ ldrh(r0, MemOperand(backing_store, index, LSL, 1));
  __ dmb(ISH);
  __ sxth(r0, r0);
  __ SmiTag(r0);
  __ Ret();

  __ bind(&u16);
  __ ldrh(r0, MemOperand(backing_store, index, LSL, 1));
  __ dmb(ISH);
  __ SmiTag(r0);
  __ Ret();

  Label use_heap_number;

  __ bind(&i32);
  __ ldr(r0, MemOperand(backing_store, index, LSL, 2));
  __ dmb(ISH);
  ReturnInteger32(masm, d0, r0, s2, &use_heap_number);

  __ bind(&u32);
  __ ldr(r0, MemOperand(backing_store, index, LSL, 2));
  __ dmb(ISH);
  ReturnUnsignedInteger32(masm, d0, r0, r1, s2, &use_heap_number);

  __ bind(&use_heap_number);
  ReturnAllocatedHeapNumber(masm, d0, r1, r2, r3);
}
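
On the memory-ordering side, each case above is a plain load followed by dmb(ISH), which is the usual ARMv7 lowering of a sequentially consistent atomic load. As a rough C++11 analogue of the 32-bit case; the function name is mine, an illustration rather than part of the patch:

#include <atomic>
#include <cstdint>

// Approximate C++11 counterpart of the i32/u32 cases: a seq_cst load,
// which a typical ARMv7 compiler emits as ldr followed by dmb ish.
int32_t AtomicLoadI32(const std::atomic<int32_t>* p) {
  return p->load(std::memory_order_seq_cst);
}
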

#undef __

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_ARM
