Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(43)

Side by Side Diff: src/arm64/code-stubs-arm64.cc

Issue 2622643005: ARM64: Add NEON support (Closed)
Patch Set: Fix Math.abs properly Created 3 years, 8 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « src/arm64/assembler-arm64-inl.h ('k') | src/arm64/constants-arm64.h » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show/Hide Comments ('s')
OLDNEW
1 // Copyright 2013 the V8 project authors. All rights reserved. 1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #if V8_TARGET_ARCH_ARM64 5 #if V8_TARGET_ARCH_ARM64
6 6
7 #include "src/api-arguments.h" 7 #include "src/api-arguments.h"
8 #include "src/arm64/assembler-arm64-inl.h" 8 #include "src/arm64/assembler-arm64-inl.h"
9 #include "src/arm64/frames-arm64.h" 9 #include "src/arm64/frames-arm64.h"
10 #include "src/arm64/macro-assembler-arm64-inl.h" 10 #include "src/arm64/macro-assembler-arm64-inl.h"
(...skipping 129 matching lines...) Expand 10 before | Expand all | Expand 10 after
140 __ Pop(double_scratch); 140 __ Pop(double_scratch);
141 } 141 }
142 __ Pop(scratch2, scratch1); 142 __ Pop(scratch2, scratch1);
143 __ Ret(); 143 __ Ret();
144 } 144 }
145 145
146 146
147 // See call site for description. 147 // See call site for description.
148 static void EmitIdenticalObjectComparison(MacroAssembler* masm, Register left, 148 static void EmitIdenticalObjectComparison(MacroAssembler* masm, Register left,
149 Register right, Register scratch, 149 Register right, Register scratch,
150 FPRegister double_scratch, 150 VRegister double_scratch, Label* slow,
151 Label* slow, Condition cond) { 151 Condition cond) {
152 DCHECK(!AreAliased(left, right, scratch)); 152 DCHECK(!AreAliased(left, right, scratch));
153 Label not_identical, return_equal, heap_number; 153 Label not_identical, return_equal, heap_number;
154 Register result = x0; 154 Register result = x0;
155 155
156 __ Cmp(right, left); 156 __ Cmp(right, left);
157 __ B(ne, &not_identical); 157 __ B(ne, &not_identical);
158 158
159 // Test for NaN. Sadly, we can't just compare to factory::nan_value(), 159 // Test for NaN. Sadly, we can't just compare to factory::nan_value(),
160 // so we do the second best thing - test it ourselves. 160 // so we do the second best thing - test it ourselves.
161 // They are both equal and they are not both Smis so both of them are not 161 // They are both equal and they are not both Smis so both of them are not
(...skipping 123 matching lines...) Expand 10 before | Expand all | Expand 10 after
285 // same object. We have already tested that case, so if left and right are 285 // same object. We have already tested that case, so if left and right are
286 // both internalized strings, they cannot be equal. 286 // both internalized strings, they cannot be equal.
287 STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0)); 287 STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
288 __ Orr(scratch, left_type, right_type); 288 __ Orr(scratch, left_type, right_type);
289 __ TestAndBranchIfAllClear( 289 __ TestAndBranchIfAllClear(
290 scratch, kIsNotStringMask | kIsNotInternalizedMask, &return_not_equal); 290 scratch, kIsNotStringMask | kIsNotInternalizedMask, &return_not_equal);
291 } 291 }
292 292
293 293
294 // See call site for description. 294 // See call site for description.
295 static void EmitSmiNonsmiComparison(MacroAssembler* masm, 295 static void EmitSmiNonsmiComparison(MacroAssembler* masm, Register left,
296 Register left, 296 Register right, VRegister left_d,
297 Register right, 297 VRegister right_d, Label* slow,
298 FPRegister left_d,
299 FPRegister right_d,
300 Label* slow,
301 bool strict) { 298 bool strict) {
302 DCHECK(!AreAliased(left_d, right_d)); 299 DCHECK(!AreAliased(left_d, right_d));
303 DCHECK((left.is(x0) && right.is(x1)) || 300 DCHECK((left.is(x0) && right.is(x1)) ||
304 (right.is(x0) && left.is(x1))); 301 (right.is(x0) && left.is(x1)));
305 Register result = x0; 302 Register result = x0;
306 303
307 Label right_is_smi, done; 304 Label right_is_smi, done;
308 __ JumpIfSmi(right, &right_is_smi); 305 __ JumpIfSmi(right, &right_is_smi);
309 306
310 // Left is the smi. Check whether right is a heap number. 307 // Left is the smi. Check whether right is a heap number.
(...skipping 158 matching lines...) Expand 10 before | Expand all | Expand 10 after
469 __ JumpIfBothNotSmi(lhs, rhs, &not_smis); 466 __ JumpIfBothNotSmi(lhs, rhs, &not_smis);
470 467
471 // Exactly one operand is a smi. EmitSmiNonsmiComparison generates code that 468 // Exactly one operand is a smi. EmitSmiNonsmiComparison generates code that
472 // can: 469 // can:
473 // 1) Return the answer. 470 // 1) Return the answer.
474 // 2) Branch to the slow case. 471 // 2) Branch to the slow case.
475 // 3) Fall through to both_loaded_as_doubles. 472 // 3) Fall through to both_loaded_as_doubles.
476 // In case 3, we have found out that we were dealing with a number-number 473 // In case 3, we have found out that we were dealing with a number-number
477 // comparison. The double values of the numbers have been loaded, right into 474 // comparison. The double values of the numbers have been loaded, right into
478 // rhs_d, left into lhs_d. 475 // rhs_d, left into lhs_d.
479 FPRegister rhs_d = d0; 476 VRegister rhs_d = d0;
480 FPRegister lhs_d = d1; 477 VRegister lhs_d = d1;
481 EmitSmiNonsmiComparison(masm, lhs, rhs, lhs_d, rhs_d, &slow, strict()); 478 EmitSmiNonsmiComparison(masm, lhs, rhs, lhs_d, rhs_d, &slow, strict());
482 479
483 __ Bind(&both_loaded_as_doubles); 480 __ Bind(&both_loaded_as_doubles);
484 // The arguments have been converted to doubles and stored in rhs_d and 481 // The arguments have been converted to doubles and stored in rhs_d and
485 // lhs_d. 482 // lhs_d.
486 Label nan; 483 Label nan;
487 __ Fcmp(lhs_d, rhs_d); 484 __ Fcmp(lhs_d, rhs_d);
488 __ B(vs, &nan); // Overflow flag set if either is NaN. 485 __ B(vs, &nan); // Overflow flag set if either is NaN.
489 STATIC_ASSERT((LESS == -1) && (EQUAL == 0) && (GREATER == 1)); 486 STATIC_ASSERT((LESS == -1) && (EQUAL == 0) && (GREATER == 1));
490 __ Cset(result, gt); // gt => 1, otherwise (lt, eq) => 0 (EQUAL). 487 __ Cset(result, gt); // gt => 1, otherwise (lt, eq) => 0 (EQUAL).
(...skipping 115 matching lines...) Expand 10 before | Expand all | Expand 10 after
606 __ TailCallRuntime(Runtime::kCompare); 603 __ TailCallRuntime(Runtime::kCompare);
607 } 604 }
608 605
609 __ Bind(&miss); 606 __ Bind(&miss);
610 GenerateMiss(masm); 607 GenerateMiss(masm);
611 } 608 }
612 609
613 610
614 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { 611 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
615 CPURegList saved_regs = kCallerSaved; 612 CPURegList saved_regs = kCallerSaved;
616 CPURegList saved_fp_regs = kCallerSavedFP; 613 CPURegList saved_fp_regs = kCallerSavedV;
617 614
618 // We don't allow a GC during a store buffer overflow so there is no need to 615 // We don't allow a GC during a store buffer overflow so there is no need to
619 // store the registers in any particular way, but we do have to store and 616 // store the registers in any particular way, but we do have to store and
620 // restore them. 617 // restore them.
621 618
622 // We don't care if MacroAssembler scratch registers are corrupted. 619 // We don't care if MacroAssembler scratch registers are corrupted.
623 saved_regs.Remove(*(masm->TmpList())); 620 saved_regs.Remove(*(masm->TmpList()));
624 saved_fp_regs.Remove(*(masm->FPTmpList())); 621 saved_fp_regs.Remove(*(masm->FPTmpList()));
625 622
626 __ PushCPURegList(saved_regs); 623 __ PushCPURegList(saved_regs);
(...skipping 52 matching lines...) Expand 10 before | Expand all | Expand 10 after
679 // jssp[0]: Exponent (as a tagged value). 676 // jssp[0]: Exponent (as a tagged value).
680 // jssp[1]: Base (as a tagged value). 677 // jssp[1]: Base (as a tagged value).
681 // 678 //
682 // The (tagged) result will be returned in x0, as a heap number. 679 // The (tagged) result will be returned in x0, as a heap number.
683 680
684 Register exponent_tagged = MathPowTaggedDescriptor::exponent(); 681 Register exponent_tagged = MathPowTaggedDescriptor::exponent();
685 DCHECK(exponent_tagged.is(x11)); 682 DCHECK(exponent_tagged.is(x11));
686 Register exponent_integer = MathPowIntegerDescriptor::exponent(); 683 Register exponent_integer = MathPowIntegerDescriptor::exponent();
687 DCHECK(exponent_integer.is(x12)); 684 DCHECK(exponent_integer.is(x12));
688 Register saved_lr = x19; 685 Register saved_lr = x19;
689 FPRegister result_double = d0; 686 VRegister result_double = d0;
690 FPRegister base_double = d0; 687 VRegister base_double = d0;
691 FPRegister exponent_double = d1; 688 VRegister exponent_double = d1;
692 FPRegister base_double_copy = d2; 689 VRegister base_double_copy = d2;
693 FPRegister scratch1_double = d6; 690 VRegister scratch1_double = d6;
694 FPRegister scratch0_double = d7; 691 VRegister scratch0_double = d7;
695 692
696 // A fast-path for integer exponents. 693 // A fast-path for integer exponents.
697 Label exponent_is_smi, exponent_is_integer; 694 Label exponent_is_smi, exponent_is_integer;
698 // Allocate a heap number for the result, and return it. 695 // Allocate a heap number for the result, and return it.
699 Label done; 696 Label done;
700 697
701 // Unpack the inputs. 698 // Unpack the inputs.
702 if (exponent_type() == TAGGED) { 699 if (exponent_type() == TAGGED) {
703 __ JumpIfSmi(exponent_tagged, &exponent_is_smi); 700 __ JumpIfSmi(exponent_tagged, &exponent_is_smi);
704 __ Ldr(exponent_double, 701 __ Ldr(exponent_double,
(...skipping 937 matching lines...) Expand 10 before | Expand all | Expand 10 after
1642 DCHECK(state() == CompareICState::NUMBER); 1639 DCHECK(state() == CompareICState::NUMBER);
1643 ASM_LOCATION("CompareICStub[HeapNumbers]"); 1640 ASM_LOCATION("CompareICStub[HeapNumbers]");
1644 1641
1645 Label unordered, maybe_undefined1, maybe_undefined2; 1642 Label unordered, maybe_undefined1, maybe_undefined2;
1646 Label miss, handle_lhs, values_in_d_regs; 1643 Label miss, handle_lhs, values_in_d_regs;
1647 Label untag_rhs, untag_lhs; 1644 Label untag_rhs, untag_lhs;
1648 1645
1649 Register result = x0; 1646 Register result = x0;
1650 Register rhs = x0; 1647 Register rhs = x0;
1651 Register lhs = x1; 1648 Register lhs = x1;
1652 FPRegister rhs_d = d0; 1649 VRegister rhs_d = d0;
1653 FPRegister lhs_d = d1; 1650 VRegister lhs_d = d1;
1654 1651
1655 if (left() == CompareICState::SMI) { 1652 if (left() == CompareICState::SMI) {
1656 __ JumpIfNotSmi(lhs, &miss); 1653 __ JumpIfNotSmi(lhs, &miss);
1657 } 1654 }
1658 if (right() == CompareICState::SMI) { 1655 if (right() == CompareICState::SMI) {
1659 __ JumpIfNotSmi(rhs, &miss); 1656 __ JumpIfNotSmi(rhs, &miss);
1660 } 1657 }
1661 1658
1662 __ SmiUntagToDouble(rhs_d, rhs, kSpeculativeUntag); 1659 __ SmiUntagToDouble(rhs_d, rhs, kSpeculativeUntag);
1663 __ SmiUntagToDouble(lhs_d, lhs, kSpeculativeUntag); 1660 __ SmiUntagToDouble(lhs_d, lhs, kSpeculativeUntag);
(...skipping 438 matching lines...) Expand 10 before | Expand all | Expand 10 after
2102 __ TailCallStub(&stub); 2099 __ TailCallStub(&stub);
2103 } 2100 }
2104 2101
2105 RecordWriteStub::RegisterAllocation::RegisterAllocation(Register object, 2102 RecordWriteStub::RegisterAllocation::RegisterAllocation(Register object,
2106 Register address, 2103 Register address,
2107 Register scratch) 2104 Register scratch)
2108 : object_(object), 2105 : object_(object),
2109 address_(address), 2106 address_(address),
2110 scratch0_(scratch), 2107 scratch0_(scratch),
2111 saved_regs_(kCallerSaved), 2108 saved_regs_(kCallerSaved),
2112 saved_fp_regs_(kCallerSavedFP) { 2109 saved_fp_regs_(kCallerSavedV) {
2113 DCHECK(!AreAliased(scratch, object, address)); 2110 DCHECK(!AreAliased(scratch, object, address));
2114 2111
2115 // The SaveCallerSaveRegisters method needs to save caller-saved 2112 // The SaveCallerSaveRegisters method needs to save caller-saved
2116 // registers, but we don't bother saving MacroAssembler scratch registers. 2113 // registers, but we don't bother saving MacroAssembler scratch registers.
2117 saved_regs_.Remove(MacroAssembler::DefaultTmpList()); 2114 saved_regs_.Remove(MacroAssembler::DefaultTmpList());
2118 saved_fp_regs_.Remove(MacroAssembler::DefaultFPTmpList()); 2115 saved_fp_regs_.Remove(MacroAssembler::DefaultFPTmpList());
2119 2116
2120 // We would like to require more scratch registers for this stub, 2117 // We would like to require more scratch registers for this stub,
2121 // but the number of registers comes down to the ones used in 2118 // but the number of registers comes down to the ones used in
2122 // FullCodeGen::SetVar(), which is architecture independent. 2119 // FullCodeGen::SetVar(), which is architecture independent.
(...skipping 1107 matching lines...) Expand 10 before | Expand all | Expand 10 after
3230 kStackUnwindSpace, NULL, spill_offset, 3227 kStackUnwindSpace, NULL, spill_offset,
3231 return_value_operand, NULL); 3228 return_value_operand, NULL);
3232 } 3229 }
3233 3230
3234 #undef __ 3231 #undef __
3235 3232
3236 } // namespace internal 3233 } // namespace internal
3237 } // namespace v8 3234 } // namespace v8
3238 3235
3239 #endif // V8_TARGET_ARCH_ARM64 3236 #endif // V8_TARGET_ARCH_ARM64
OLDNEW
« no previous file with comments | « src/arm64/assembler-arm64-inl.h ('k') | src/arm64/constants-arm64.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698