OLD | NEW |
1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #if V8_TARGET_ARCH_ARM64 | 5 #if V8_TARGET_ARCH_ARM64 |
6 | 6 |
7 #include "src/code-stubs.h" | 7 #include "src/code-stubs.h" |
8 #include "src/api-arguments.h" | 8 #include "src/api-arguments.h" |
9 #include "src/bootstrapper.h" | 9 #include "src/bootstrapper.h" |
10 #include "src/codegen.h" | 10 #include "src/codegen.h" |
(...skipping 124 matching lines...)
135 __ Pop(double_scratch); | 135 __ Pop(double_scratch); |
136 } | 136 } |
137 __ Pop(scratch2, scratch1); | 137 __ Pop(scratch2, scratch1); |
138 __ Ret(); | 138 __ Ret(); |
139 } | 139 } |
140 | 140 |
141 | 141 |
142 // See call site for description. | 142 // See call site for description. |
143 static void EmitIdenticalObjectComparison(MacroAssembler* masm, Register left, | 143 static void EmitIdenticalObjectComparison(MacroAssembler* masm, Register left, |
144 Register right, Register scratch, | 144 Register right, Register scratch, |
145 FPRegister double_scratch, | 145 VRegister double_scratch, Label* slow, |
146 Label* slow, Condition cond) { | 146 Condition cond) { |
147 DCHECK(!AreAliased(left, right, scratch)); | 147 DCHECK(!AreAliased(left, right, scratch)); |
148 Label not_identical, return_equal, heap_number; | 148 Label not_identical, return_equal, heap_number; |
149 Register result = x0; | 149 Register result = x0; |
150 | 150 |
151 __ Cmp(right, left); | 151 __ Cmp(right, left); |
152 __ B(ne, &not_identical); | 152 __ B(ne, &not_identical); |
153 | 153 |
154 // Test for NaN. Sadly, we can't just compare to factory::nan_value(), | 154 // Test for NaN. Sadly, we can't just compare to factory::nan_value(), |
155 // so we do the second best thing - test it ourselves. | 155 // so we do the second best thing - test it ourselves. |
156 // They are both equal and they are not both Smis so both of them are not | 156 // They are both equal and they are not both Smis so both of them are not |
(...skipping 129 matching lines...)
286 // same object. We have already tested that case, so if left and right are | 286 // same object. We have already tested that case, so if left and right are |
287 // both internalized strings, they cannot be equal. | 287 // both internalized strings, they cannot be equal. |
288 STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0)); | 288 STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0)); |
289 __ Orr(scratch, left_type, right_type); | 289 __ Orr(scratch, left_type, right_type); |
290 __ TestAndBranchIfAllClear( | 290 __ TestAndBranchIfAllClear( |
291 scratch, kIsNotStringMask | kIsNotInternalizedMask, &return_not_equal); | 291 scratch, kIsNotStringMask | kIsNotInternalizedMask, &return_not_equal); |
292 } | 292 } |
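
Reviewer note: the bit trick in the hunk above can be written out in plain C++ as below. The bit values are placeholders, not V8's real instance-type encoding; the point is only that "string" and "internalized" are encoded as *cleared* bits, so OR-ing the two type bytes and checking that both bits are still clear proves that both operands are internalized strings.

```cpp
#include <cstdint>

constexpr uint32_t kIsNotStringMask = 1u << 6;        // placeholder bit, assumed
constexpr uint32_t kIsNotInternalizedMask = 1u << 7;  // placeholder bit, assumed

// True only if neither type byte has the "not a string" or "not internalized"
// bit set, i.e. both operands are internalized strings.
bool BothInternalizedStrings(uint32_t left_type, uint32_t right_type) {
  uint32_t combined = left_type | right_type;
  return (combined & (kIsNotStringMask | kIsNotInternalizedMask)) == 0;
}
```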
293 | 293 |
294 | 294 |
295 // See call site for description. | 295 // See call site for description. |
296 static void EmitSmiNonsmiComparison(MacroAssembler* masm, | 296 static void EmitSmiNonsmiComparison(MacroAssembler* masm, Register left, |
297 Register left, | 297 Register right, VRegister left_d, |
298 Register right, | 298 VRegister right_d, Label* slow, |
299 FPRegister left_d, | |
300 FPRegister right_d, | |
301 Label* slow, | |
302 bool strict) { | 299 bool strict) { |
303 DCHECK(!AreAliased(left_d, right_d)); | 300 DCHECK(!AreAliased(left_d, right_d)); |
304 DCHECK((left.is(x0) && right.is(x1)) || | 301 DCHECK((left.is(x0) && right.is(x1)) || |
305 (right.is(x0) && left.is(x1))); | 302 (right.is(x0) && left.is(x1))); |
306 Register result = x0; | 303 Register result = x0; |
307 | 304 |
308 Label right_is_smi, done; | 305 Label right_is_smi, done; |
309 __ JumpIfSmi(right, &right_is_smi); | 306 __ JumpIfSmi(right, &right_is_smi); |
310 | 307 |
311 // Left is the smi. Check whether right is a heap number. | 308 // Left is the smi. Check whether right is a heap number. |
(...skipping 158 matching lines...)
470 __ JumpIfBothNotSmi(lhs, rhs, &not_smis); | 467 __ JumpIfBothNotSmi(lhs, rhs, &not_smis); |
471 | 468 |
472 // Exactly one operand is a smi. EmitSmiNonsmiComparison generates code that | 469 // Exactly one operand is a smi. EmitSmiNonsmiComparison generates code that |
473 // can: | 470 // can: |
474 // 1) Return the answer. | 471 // 1) Return the answer. |
475 // 2) Branch to the slow case. | 472 // 2) Branch to the slow case. |
476 // 3) Fall through to both_loaded_as_doubles. | 473 // 3) Fall through to both_loaded_as_doubles. |
477 // In case 3, we have found out that we were dealing with a number-number | 474 // In case 3, we have found out that we were dealing with a number-number |
478 // comparison. The double values of the numbers have been loaded, right into | 475 // comparison. The double values of the numbers have been loaded, right into |
479 // rhs_d, left into lhs_d. | 476 // rhs_d, left into lhs_d. |
480 FPRegister rhs_d = d0; | 477 VRegister rhs_d = d0; |
481 FPRegister lhs_d = d1; | 478 VRegister lhs_d = d1; |
482 EmitSmiNonsmiComparison(masm, lhs, rhs, lhs_d, rhs_d, &slow, strict()); | 479 EmitSmiNonsmiComparison(masm, lhs, rhs, lhs_d, rhs_d, &slow, strict()); |
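
Reviewer note: a rough plain-C++ sketch of the three outcomes enumerated in the comment above. The Tagged struct and helper names are made up for illustration; the stub itself works on tagged pointers and ARM64 registers, not this representation.

```cpp
// Illustrative-only stand-in for a tagged value: either a smi or a pointer to
// a heap number's double payload.
struct Tagged {
  bool is_smi;
  int smi_value;
  const double* heap_number;  // non-null only for heap numbers
};

// The three ways the smi/non-smi comparison can resolve, per the comment above.
enum class Outcome { kAnswerInResult, kSlowPath, kBothLoadedAsDoubles };

Outcome SmiNonsmiCompare(const Tagged& lhs, const Tagged& rhs, bool strict,
                         double* lhs_d, double* rhs_d) {
  // Precondition: exactly one of lhs/rhs is a smi (the caller has already
  // dispatched the smi/smi and non-smi/non-smi cases).
  const Tagged& non_smi = lhs.is_smi ? rhs : lhs;
  if (non_smi.heap_number == nullptr) {
    // Not a heap number: strict equality can answer "not equal" right away,
    // anything else is handed to the runtime.
    return strict ? Outcome::kAnswerInResult : Outcome::kSlowPath;
  }
  // Case 3: both values end up as doubles, ready for an Fcmp-style compare.
  *lhs_d = lhs.is_smi ? lhs.smi_value : *lhs.heap_number;
  *rhs_d = rhs.is_smi ? rhs.smi_value : *rhs.heap_number;
  return Outcome::kBothLoadedAsDoubles;
}
```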
483 | 480 |
484 __ Bind(&both_loaded_as_doubles); | 481 __ Bind(&both_loaded_as_doubles); |
485 // The arguments have been converted to doubles and stored in rhs_d and | 482 // The arguments have been converted to doubles and stored in rhs_d and |
486 // lhs_d. | 483 // lhs_d. |
487 Label nan; | 484 Label nan; |
488 __ Fcmp(lhs_d, rhs_d); | 485 __ Fcmp(lhs_d, rhs_d); |
489 __ B(vs, &nan); // Overflow flag set if either is NaN. | 486 __ B(vs, &nan); // Overflow flag set if either is NaN. |
490 STATIC_ASSERT((LESS == -1) && (EQUAL == 0) && (GREATER == 1)); | 487 STATIC_ASSERT((LESS == -1) && (EQUAL == 0) && (GREATER == 1)); |
491 __ Cset(result, gt); // gt => 1, otherwise (lt, eq) => 0 (EQUAL). | 488 __ Cset(result, gt); // gt => 1, otherwise (lt, eq) => 0 (EQUAL). |
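
Reviewer note: the encoding the STATIC_ASSERT above relies on (LESS == -1, EQUAL == 0, GREATER == 1) in plain C++, for reference. The NaN case in the real stub branches to the separate `nan` label (its handling is not visible in this hunk), so it is only hinted at here.

```cpp
#include <cmath>

// Sketch of the non-NaN result encoding: gt => 1 (GREATER), eq => 0 (EQUAL),
// lt => -1 (LESS). The stub takes a separate path for NaN because Fcmp sets
// the overflow flag when either operand is NaN.
int EncodeDoubleCompare(double lhs, double rhs) {
  if (std::isnan(lhs) || std::isnan(rhs)) {
    return 1;  // placeholder; the stub's `nan` path picks LESS or GREATER so
               // that the comparison being materialized always fails
  }
  if (lhs > rhs) return 1;   // GREATER, mirrors Cset(result, gt)
  if (lhs < rhs) return -1;  // LESS
  return 0;                  // EQUAL
}
```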
(...skipping 115 matching lines...)
607 __ TailCallRuntime(Runtime::kCompare); | 604 __ TailCallRuntime(Runtime::kCompare); |
608 } | 605 } |
609 | 606 |
610 __ Bind(&miss); | 607 __ Bind(&miss); |
611 GenerateMiss(masm); | 608 GenerateMiss(masm); |
612 } | 609 } |
613 | 610 |
614 | 611 |
615 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { | 612 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { |
616 CPURegList saved_regs = kCallerSaved; | 613 CPURegList saved_regs = kCallerSaved; |
617 CPURegList saved_fp_regs = kCallerSavedFP; | 614 CPURegList saved_fp_regs = kCallerSavedV; |
618 | 615 |
619 // We don't allow a GC during a store buffer overflow so there is no need to | 616 // We don't allow a GC during a store buffer overflow so there is no need to |
620 // store the registers in any particular way, but we do have to store and | 617 // store the registers in any particular way, but we do have to store and |
621 // restore them. | 618 // restore them. |
622 | 619 |
623 // We don't care if MacroAssembler scratch registers are corrupted. | 620 // We don't care if MacroAssembler scratch registers are corrupted. |
624 saved_regs.Remove(*(masm->TmpList())); | 621 saved_regs.Remove(*(masm->TmpList())); |
625 saved_fp_regs.Remove(*(masm->FPTmpList())); | 622 saved_fp_regs.Remove(*(masm->FPTmpList())); |
626 | 623 |
627 __ PushCPURegList(saved_regs); | 624 __ PushCPURegList(saved_regs); |
(...skipping 52 matching lines...)
680 // jssp[0]: Exponent (as a tagged value). | 677 // jssp[0]: Exponent (as a tagged value). |
681 // jssp[1]: Base (as a tagged value). | 678 // jssp[1]: Base (as a tagged value). |
682 // | 679 // |
683 // The (tagged) result will be returned in x0, as a heap number. | 680 // The (tagged) result will be returned in x0, as a heap number. |
684 | 681 |
685 Register exponent_tagged = MathPowTaggedDescriptor::exponent(); | 682 Register exponent_tagged = MathPowTaggedDescriptor::exponent(); |
686 DCHECK(exponent_tagged.is(x11)); | 683 DCHECK(exponent_tagged.is(x11)); |
687 Register exponent_integer = MathPowIntegerDescriptor::exponent(); | 684 Register exponent_integer = MathPowIntegerDescriptor::exponent(); |
688 DCHECK(exponent_integer.is(x12)); | 685 DCHECK(exponent_integer.is(x12)); |
689 Register saved_lr = x19; | 686 Register saved_lr = x19; |
690 FPRegister result_double = d0; | 687 VRegister result_double = d0; |
691 FPRegister base_double = d0; | 688 VRegister base_double = d0; |
692 FPRegister exponent_double = d1; | 689 VRegister exponent_double = d1; |
693 FPRegister base_double_copy = d2; | 690 VRegister base_double_copy = d2; |
694 FPRegister scratch1_double = d6; | 691 VRegister scratch1_double = d6; |
695 FPRegister scratch0_double = d7; | 692 VRegister scratch0_double = d7; |
696 | 693 |
697 // A fast-path for integer exponents. | 694 // A fast-path for integer exponents. |
698 Label exponent_is_smi, exponent_is_integer; | 695 Label exponent_is_smi, exponent_is_integer; |
699 // Allocate a heap number for the result, and return it. | 696 // Allocate a heap number for the result, and return it. |
700 Label done; | 697 Label done; |
701 | 698 |
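
Reviewer note: the "fast-path for integer exponents" mentioned above is, at its core, exponentiation by squaring. A hedged plain-C++ sketch of that idea (not the stub's actual ARM64 sequence, and ignoring the corner cases the real code has to handle):

```cpp
#include <cstdint>

// Exponentiation by squaring: O(log n) multiplications for an integer
// exponent n. A negative exponent is handled by a final reciprocal.
double PowInteger(double base, int32_t exponent) {
  uint64_t n = exponent < 0
                   ? static_cast<uint64_t>(-static_cast<int64_t>(exponent))
                   : static_cast<uint64_t>(exponent);
  double result = 1.0;
  double square = base;
  while (n != 0) {
    if (n & 1) result *= square;  // fold in this bit's contribution
    square *= square;             // prepare the next power of two
    n >>= 1;
  }
  return exponent < 0 ? 1.0 / result : result;
}
```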
702 // Unpack the inputs. | 699 // Unpack the inputs. |
703 if (exponent_type() == TAGGED) { | 700 if (exponent_type() == TAGGED) { |
704 __ JumpIfSmi(exponent_tagged, &exponent_is_smi); | 701 __ JumpIfSmi(exponent_tagged, &exponent_is_smi); |
705 __ Ldr(exponent_double, | 702 __ Ldr(exponent_double, |
(...skipping 1629 matching lines...)
2335 DCHECK(state() == CompareICState::NUMBER); | 2332 DCHECK(state() == CompareICState::NUMBER); |
2336 ASM_LOCATION("CompareICStub[HeapNumbers]"); | 2333 ASM_LOCATION("CompareICStub[HeapNumbers]"); |
2337 | 2334 |
2338 Label unordered, maybe_undefined1, maybe_undefined2; | 2335 Label unordered, maybe_undefined1, maybe_undefined2; |
2339 Label miss, handle_lhs, values_in_d_regs; | 2336 Label miss, handle_lhs, values_in_d_regs; |
2340 Label untag_rhs, untag_lhs; | 2337 Label untag_rhs, untag_lhs; |
2341 | 2338 |
2342 Register result = x0; | 2339 Register result = x0; |
2343 Register rhs = x0; | 2340 Register rhs = x0; |
2344 Register lhs = x1; | 2341 Register lhs = x1; |
2345 FPRegister rhs_d = d0; | 2342 VRegister rhs_d = d0; |
2346 FPRegister lhs_d = d1; | 2343 VRegister lhs_d = d1; |
2347 | 2344 |
2348 if (left() == CompareICState::SMI) { | 2345 if (left() == CompareICState::SMI) { |
2349 __ JumpIfNotSmi(lhs, &miss); | 2346 __ JumpIfNotSmi(lhs, &miss); |
2350 } | 2347 } |
2351 if (right() == CompareICState::SMI) { | 2348 if (right() == CompareICState::SMI) { |
2352 __ JumpIfNotSmi(rhs, &miss); | 2349 __ JumpIfNotSmi(rhs, &miss); |
2353 } | 2350 } |
2354 | 2351 |
2355 __ SmiUntagToDouble(rhs_d, rhs, kSpeculativeUntag); | 2352 __ SmiUntagToDouble(rhs_d, rhs, kSpeculativeUntag); |
2356 __ SmiUntagToDouble(lhs_d, lhs, kSpeculativeUntag); | 2353 __ SmiUntagToDouble(lhs_d, lhs, kSpeculativeUntag); |
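
Reviewer note on kSpeculativeUntag above: the smi-to-double conversion is issued unconditionally, before it is known whether the operand really is a smi; the later checks simply ignore the converted value on the non-smi paths. A simplified plain-C++ analogy, assuming the usual 64-bit scheme where a smi keeps its payload in the upper 32 bits with the tag bit clear (details assumed, not taken from this CL):

```cpp
#include <cstdint>

// Assumed, simplified tagging: tag bit 0 clear => smi, payload in bits 32..63.
bool IsSmi(uint64_t tagged) { return (tagged & 1) == 0; }

// Speculative untag: convert as if the value were a smi, without checking.
// The caller branches on IsSmi() afterwards and discards this result on the
// non-smi path, which is harmless because the conversion has no side effects.
double SpeculativeSmiUntagToDouble(uint64_t tagged) {
  return static_cast<double>(static_cast<int64_t>(tagged) >> 32);
}
```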
(...skipping 2198 matching lines...)
4555 kStackUnwindSpace, NULL, spill_offset, | 4552 kStackUnwindSpace, NULL, spill_offset, |
4556 return_value_operand, NULL); | 4553 return_value_operand, NULL); |
4557 } | 4554 } |
4558 | 4555 |
4559 #undef __ | 4556 #undef __ |
4560 | 4557 |
4561 } // namespace internal | 4558 } // namespace internal |
4562 } // namespace v8 | 4559 } // namespace v8 |
4563 | 4560 |
4564 #endif // V8_TARGET_ARCH_ARM64 | 4561 #endif // V8_TARGET_ARCH_ARM64 |