OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/v8.h" | 5 #include "src/v8.h" |
6 | 6 |
7 #if V8_TARGET_ARCH_MIPS | 7 #if V8_TARGET_ARCH_MIPS64 |
8 | 8 |
9 #include "src/bootstrapper.h" | 9 #include "src/bootstrapper.h" |
10 #include "src/code-stubs.h" | 10 #include "src/code-stubs.h" |
11 #include "src/codegen.h" | 11 #include "src/codegen.h" |
12 #include "src/regexp-macro-assembler.h" | 12 #include "src/regexp-macro-assembler.h" |
13 #include "src/stub-cache.h" | 13 #include "src/stub-cache.h" |
14 | 14 |
15 namespace v8 { | 15 namespace v8 { |
16 namespace internal { | 16 namespace internal { |
17 | 17 |
(...skipping 305 matching lines...)
323 Representation representations[] = { | 323 Representation representations[] = { |
324 Representation::Tagged(), // context | 324 Representation::Tagged(), // context |
325 Representation::Tagged(), // receiver | 325 Representation::Tagged(), // receiver |
326 }; | 326 }; |
327 descriptor->Initialize(ARRAY_SIZE(registers), registers, representations); | 327 descriptor->Initialize(ARRAY_SIZE(registers), registers, representations); |
328 } | 328 } |
329 { | 329 { |
330 CallInterfaceDescriptor* descriptor = | 330 CallInterfaceDescriptor* descriptor = |
331 isolate->call_descriptor(Isolate::ApiFunctionCall); | 331 isolate->call_descriptor(Isolate::ApiFunctionCall); |
332 Register registers[] = { a0, // callee | 332 Register registers[] = { a0, // callee |
333 t0, // call_data | 333 a4, // call_data |
334 a2, // holder | 334 a2, // holder |
335 a1, // api_function_address | 335 a1, // api_function_address |
336 cp, // context | 336 cp, // context |
337 }; | 337 }; |
338 Representation representations[] = { | 338 Representation representations[] = { |
339 Representation::Tagged(), // callee | 339 Representation::Tagged(), // callee |
340 Representation::Tagged(), // call_data | 340 Representation::Tagged(), // call_data |
341 Representation::Tagged(), // holder | 341 Representation::Tagged(), // holder |
342 Representation::External(), // api_function_address | 342 Representation::External(), // api_function_address |
343 Representation::Tagged(), // context | 343 Representation::Tagged(), // context |
(...skipping 25 matching lines...)
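Aside on the Initialize() calls in the hunk above: the register and Representation arrays are parallel, and ARRAY_SIZE is the usual compile-time element count. A minimal sketch of the idiom (V8's real macro adds a divisibility guard; the name here is illustrative):

    // Core of the ARRAY_SIZE idiom used with the parallel arrays above.
    #define ARRAY_SIZE_SKETCH(a) (sizeof(a) / sizeof((a)[0]))
    static const int regs[] = {0, 4, 2, 1, 28};  // stand-in for Register[]
    static_assert(ARRAY_SIZE_SKETCH(regs) == 5, "element count, not bytes");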
369 isolate()->counters()->code_stubs()->Increment(); | 369 isolate()->counters()->code_stubs()->Increment(); |
370 | 370 |
371 CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(); | 371 CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(); |
372 int param_count = descriptor->register_param_count(); | 372 int param_count = descriptor->register_param_count(); |
373 { | 373 { |
374 // Call the runtime system in a fresh internal frame. | 374 // Call the runtime system in a fresh internal frame. |
375 FrameScope scope(masm, StackFrame::INTERNAL); | 375 FrameScope scope(masm, StackFrame::INTERNAL); |
376 ASSERT(descriptor->register_param_count() == 0 || | 376 ASSERT(descriptor->register_param_count() == 0 || |
377 a0.is(descriptor->GetParameterRegister(param_count - 1))); | 377 a0.is(descriptor->GetParameterRegister(param_count - 1))); |
378 // Push arguments, adjust sp. | 378 // Push arguments, adjust sp. |
379 __ Subu(sp, sp, Operand(param_count * kPointerSize)); | 379 __ Dsubu(sp, sp, Operand(param_count * kPointerSize)); |
380 for (int i = 0; i < param_count; ++i) { | 380 for (int i = 0; i < param_count; ++i) { |
381 // Store argument to stack. | 381 // Store argument to stack. |
382 __ sw(descriptor->GetParameterRegister(i), | 382 __ sd(descriptor->GetParameterRegister(i), |
383 MemOperand(sp, (param_count-1-i) * kPointerSize)); | 383 MemOperand(sp, (param_count-1-i) * kPointerSize)); |
384 } | 384 } |
385 ExternalReference miss = descriptor->miss_handler(); | 385 ExternalReference miss = descriptor->miss_handler(); |
386 __ CallExternalReference(miss, descriptor->register_param_count()); | 386 __ CallExternalReference(miss, descriptor->register_param_count()); |
387 } | 387 } |
388 | 388 |
389 __ Ret(); | 389 __ Ret(); |
390 } | 390 } |
391 | 391 |
392 | 392 |
(...skipping 32 matching lines...)
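The widening in the hunk above (Subu→Dsubu, sw→sd) follows from kPointerSize growing to 8 on mips64; the indexing itself is unchanged. A host-side model of the push loop, assuming a word-addressed stack:

    #include <cstdint>
    #include <vector>
    // Model of the argument push above: sp drops by param_count slots
    // (Dsubu), then parameter i is stored (n-1-i) slots above the new sp
    // (sd), so parameter 0 ends up deepest and the last one at sp.
    void PushParams(std::vector<uint64_t>& mem, size_t& sp,
                    const std::vector<uint64_t>& params) {
      const size_t n = params.size();
      sp -= n;
      for (size_t i = 0; i < n; ++i) mem[sp + (n - 1 - i)] = params[i];
    }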
425 (result2_.code() << 4) + | 425 (result2_.code() << 4) + |
426 (source_.code() << 8) + | 426 (source_.code() << 8) + |
427 (zeros_.code() << 12); | 427 (zeros_.code() << 12); |
428 } | 428 } |
429 | 429 |
430 void Generate(MacroAssembler* masm); | 430 void Generate(MacroAssembler* masm); |
431 }; | 431 }; |
432 | 432 |
433 | 433 |
434 void ConvertToDoubleStub::Generate(MacroAssembler* masm) { | 434 void ConvertToDoubleStub::Generate(MacroAssembler* masm) { |
435 Register exponent, mantissa; | 435 #ifndef BIG_ENDIAN_FLOATING_POINT |
436 if (kArchEndian == kLittle) { | 436 Register exponent = result1_; |
437 exponent = result1_; | 437 Register mantissa = result2_; |
438 mantissa = result2_; | 438 #else |
439 } else { | 439 Register exponent = result2_; |
440 exponent = result2_; | 440 Register mantissa = result1_; |
441 mantissa = result1_; | 441 #endif |
442 } | |
443 Label not_special; | 442 Label not_special; |
444 // Convert from Smi to integer. | 443 // Convert from Smi to integer. |
445 __ sra(source_, source_, kSmiTagSize); | 444 __ SmiUntag(source_); |
446 // Move sign bit from source to destination. This works because the sign bit | 445 // Move sign bit from source to destination. This works because the sign bit |
447 // in the exponent word of the double has the same position and polarity as | 446 // in the exponent word of the double has the same position and polarity as |
448 // the 2's complement sign bit in a Smi. | 447 // the 2's complement sign bit in a Smi. |
449 STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u); | 448 STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u); |
450 __ And(exponent, source_, Operand(HeapNumber::kSignMask)); | 449 __ And(exponent, source_, Operand(HeapNumber::kSignMask)); |
451 // Subtract from 0 if source was negative. | 450 // Subtract from 0 if source was negative. |
452 __ subu(at, zero_reg, source_); | 451 __ subu(at, zero_reg, source_); |
453 __ Movn(source_, at, exponent); | 452 __ Movn(source_, at, exponent); |
454 | 453 |
455 // We have -1, 0 or 1, which we treat specially. Register source_ contains | 454 // We have -1, 0 or 1, which we treat specially. Register source_ contains |
(...skipping 47 matching lines...)
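On the sign-bit comment in ConvertToDoubleStub above: for a 32-bit value, bit 31 is both the two's-complement sign and the IEEE-754 sign bit of a double's upper word, which is exactly what the And against HeapNumber::kSignMask exploits. A quick host-side check:

    #include <cstdint>
    // Bit 31 of the untagged Smi doubles as the sign bit of the exponent
    // word; the stub then negates negative inputs (subu/Movn) and continues
    // with the magnitude.
    uint32_t ExponentWordSign(int32_t untagged_smi) {
      return static_cast<uint32_t>(untagged_smi) & 0x80000000u;
    }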
503 | 502 |
504 Register scratch = | 503 Register scratch = |
505 GetRegisterThatIsNotOneOf(input_reg, result_reg); | 504 GetRegisterThatIsNotOneOf(input_reg, result_reg); |
506 Register scratch2 = | 505 Register scratch2 = |
507 GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch); | 506 GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch); |
508 Register scratch3 = | 507 Register scratch3 = |
509 GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch2); | 508 GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch2); |
510 DoubleRegister double_scratch = kLithiumScratchDouble; | 509 DoubleRegister double_scratch = kLithiumScratchDouble; |
511 | 510 |
512 __ Push(scratch, scratch2, scratch3); | 511 __ Push(scratch, scratch2, scratch3); |
513 | |
514 if (!skip_fastpath()) { | 512 if (!skip_fastpath()) { |
515 // Load double input. | 513 // Load double input. |
516 __ ldc1(double_scratch, MemOperand(input_reg, double_offset)); | 514 __ ldc1(double_scratch, MemOperand(input_reg, double_offset)); |
517 | 515 |
518 // Clear cumulative exception flags and save the FCSR. | 516 // Clear cumulative exception flags and save the FCSR. |
519 __ cfc1(scratch2, FCSR); | 517 __ cfc1(scratch2, FCSR); |
520 __ ctc1(zero_reg, FCSR); | 518 __ ctc1(zero_reg, FCSR); |
521 | 519 |
522 // Try a conversion to a signed integer. | 520 // Try a conversion to a signed integer. |
523 __ Trunc_w_d(double_scratch, double_scratch); | 521 __ Trunc_w_d(double_scratch, double_scratch); |
(...skipping 14 matching lines...)
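The skipped lines after Trunc_w_d re-read FCSR and fall back to the manual path when the conversion raised an exception. In effect the fast path only trusts results that fit in int32; a sketch of the equivalent guard, with the bounds written out explicitly:

    #include <cmath>
    #include <cstdint>
    // The truncation result is usable only when the input (NaN excluded,
    // since any comparison with NaN is false) truncates into int32 range.
    bool TryTruncateToInt32(double input, int32_t* out) {
      if (!(input > -2147483649.0 && input < 2147483648.0)) return false;
      *out = static_cast<int32_t>(std::trunc(input));
      return true;
    }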
538 __ Branch(&error, ne, scratch, Operand(zero_reg)); | 536 __ Branch(&error, ne, scratch, Operand(zero_reg)); |
539 __ Move(result_reg, scratch3); | 537 __ Move(result_reg, scratch3); |
540 __ Branch(&done); | 538 __ Branch(&done); |
541 __ bind(&error); | 539 __ bind(&error); |
542 } | 540 } |
543 | 541 |
544 // Load the double value and perform a manual truncation. | 542 // Load the double value and perform a manual truncation. |
545 Register input_high = scratch2; | 543 Register input_high = scratch2; |
546 Register input_low = scratch3; | 544 Register input_low = scratch3; |
547 | 545 |
548 __ lw(input_low, | 546 __ lw(input_low, MemOperand(input_reg, double_offset)); |
549 MemOperand(input_reg, double_offset + Register::kMantissaOffset)); | 547 __ lw(input_high, MemOperand(input_reg, double_offset + kIntSize)); |
550 __ lw(input_high, | |
551 MemOperand(input_reg, double_offset + Register::kExponentOffset)); | |
552 | 548 |
553 Label normal_exponent, restore_sign; | 549 Label normal_exponent, restore_sign; |
554 // Extract the biased exponent in result. | 550 // Extract the biased exponent in result. |
555 __ Ext(result_reg, | 551 __ Ext(result_reg, |
556 input_high, | 552 input_high, |
557 HeapNumber::kExponentShift, | 553 HeapNumber::kExponentShift, |
558 HeapNumber::kExponentBits); | 554 HeapNumber::kExponentBits); |
559 | 555 |
560 // Check for Infinity and NaNs, which should return 0. | 556 // Check for Infinity and NaNs, which should return 0. |
561 __ Subu(scratch, result_reg, HeapNumber::kExponentMask); | 557 __ Subu(scratch, result_reg, HeapNumber::kExponentMask); |
(...skipping 131 matching lines...)
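The Ext above isolates the biased exponent; in the IEEE-754 layout these comments use, that is bits 20..30 of the high word (kExponentShift = 20, kExponentBits = 11). Host-side equivalent:

    #include <cstdint>
    // Extract the 11-bit biased exponent from the high word of a double.
    uint32_t BiasedExponent(uint32_t input_high) {
      return (input_high >> 20) & 0x7FF;
    }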
693 | 689 |
694 | 690 |
695 // Handle the case where the lhs and rhs are the same object. | 691 // Handle the case where the lhs and rhs are the same object. |
696 // Equality is almost reflexive (everything but NaN), so this is a test | 692 // Equality is almost reflexive (everything but NaN), so this is a test |
697 // for "identity and not NaN". | 693 // for "identity and not NaN". |
698 static void EmitIdenticalObjectComparison(MacroAssembler* masm, | 694 static void EmitIdenticalObjectComparison(MacroAssembler* masm, |
699 Label* slow, | 695 Label* slow, |
700 Condition cc) { | 696 Condition cc) { |
701 Label not_identical; | 697 Label not_identical; |
702 Label heap_number, return_equal; | 698 Label heap_number, return_equal; |
703 Register exp_mask_reg = t5; | 699 Register exp_mask_reg = t1; |
704 | 700 |
705 __ Branch(¬_identical, ne, a0, Operand(a1)); | 701 __ Branch(¬_identical, ne, a0, Operand(a1)); |
706 | 702 |
707 __ li(exp_mask_reg, Operand(HeapNumber::kExponentMask)); | 703 __ li(exp_mask_reg, Operand(HeapNumber::kExponentMask)); |
708 | 704 |
709 // Test for NaN. Sadly, we can't just compare to Factory::nan_value(), | 705 // Test for NaN. Sadly, we can't just compare to Factory::nan_value(), |
710 // so we do the second best thing - test it ourselves. | 706 // so we do the second best thing - test it ourselves. |
711 // They are both equal and they are not both Smis so both of them are not | 707 // They are both equal and they are not both Smis so both of them are not |
712 // Smis. If it's not a heap number, then return equal. | 708 // Smis. If it's not a heap number, then return equal. |
713 if (cc == less || cc == greater) { | 709 if (cc == less || cc == greater) { |
714 __ GetObjectType(a0, t4, t4); | 710 __ GetObjectType(a0, t0, t0); |
715 __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE)); | 711 __ Branch(slow, greater, t0, Operand(FIRST_SPEC_OBJECT_TYPE)); |
716 } else { | 712 } else { |
717 __ GetObjectType(a0, t4, t4); | 713 __ GetObjectType(a0, t0, t0); |
718 __ Branch(&heap_number, eq, t4, Operand(HEAP_NUMBER_TYPE)); | 714 __ Branch(&heap_number, eq, t0, Operand(HEAP_NUMBER_TYPE)); |
719 // Comparing JS objects with <=, >= is complicated. | 715 // Comparing JS objects with <=, >= is complicated. |
720 if (cc != eq) { | 716 if (cc != eq) { |
721 __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE)); | 717 __ Branch(slow, greater, t0, Operand(FIRST_SPEC_OBJECT_TYPE)); |
722 // Normally here we fall through to return_equal, but undefined is | 718 // Normally here we fall through to return_equal, but undefined is |
723 // special: (undefined == undefined) == true, but | 719 // special: (undefined == undefined) == true, but |
724 // (undefined <= undefined) == false! See ECMAScript 11.8.5. | 720 // (undefined <= undefined) == false! See ECMAScript 11.8.5. |
725 if (cc == less_equal || cc == greater_equal) { | 721 if (cc == less_equal || cc == greater_equal) { |
726 __ Branch(&return_equal, ne, t4, Operand(ODDBALL_TYPE)); | 722 __ Branch(&return_equal, ne, t0, Operand(ODDBALL_TYPE)); |
727 __ LoadRoot(t2, Heap::kUndefinedValueRootIndex); | 723 __ LoadRoot(a6, Heap::kUndefinedValueRootIndex); |
728 __ Branch(&return_equal, ne, a0, Operand(t2)); | 724 __ Branch(&return_equal, ne, a0, Operand(a6)); |
729 ASSERT(is_int16(GREATER) && is_int16(LESS)); | 725 ASSERT(is_int16(GREATER) && is_int16(LESS)); |
730 __ Ret(USE_DELAY_SLOT); | 726 __ Ret(USE_DELAY_SLOT); |
731 if (cc == le) { | 727 if (cc == le) { |
732 // undefined <= undefined should fail. | 728 // undefined <= undefined should fail. |
733 __ li(v0, Operand(GREATER)); | 729 __ li(v0, Operand(GREATER)); |
734 } else { | 730 } else { |
735 // undefined >= undefined should fail. | 731 // undefined >= undefined should fail. |
736 __ li(v0, Operand(LESS)); | 732 __ li(v0, Operand(LESS)); |
737 } | 733 } |
738 } | 734 } |
739 } | 735 } |
740 } | 736 } |
741 | 737 |
742 __ bind(&return_equal); | 738 __ bind(&return_equal); |
743 ASSERT(is_int16(GREATER) && is_int16(LESS)); | 739 ASSERT(is_int16(GREATER) && is_int16(LESS)); |
744 __ Ret(USE_DELAY_SLOT); | 740 __ Ret(USE_DELAY_SLOT); |
745 if (cc == less) { | 741 if (cc == less) { |
746 __ li(v0, Operand(GREATER)); // Things aren't less than themselves. | 742 __ li(v0, Operand(GREATER)); // Things aren't less than themselves. |
747 } else if (cc == greater) { | 743 } else if (cc == greater) { |
748 __ li(v0, Operand(LESS)); // Things aren't greater than themselves. | 744 __ li(v0, Operand(LESS)); // Things aren't greater than themselves. |
749 } else { | 745 } else { |
750 __ mov(v0, zero_reg); // Things are <=, >=, ==, === themselves. | 746 __ mov(v0, zero_reg); // Things are <=, >=, ==, === themselves. |
751 } | 747 } |
752 | |
753 // For less and greater we don't have to check for NaN since the result of | 748 // For less and greater we don't have to check for NaN since the result of |
754 // x < x is false regardless. For the others, here is some code to check | 749 // x < x is false regardless. For the others, here is some code to check |
755 // for NaN. | 750 // for NaN. |
756 if (cc != lt && cc != gt) { | 751 if (cc != lt && cc != gt) { |
757 __ bind(&heap_number); | 752 __ bind(&heap_number); |
758 // It is a heap number, so return non-equal if it's NaN and equal if it's | 753 // It is a heap number, so return non-equal if it's NaN and equal if it's |
759 // not NaN. | 754 // not NaN. |
760 | 755 |
761 // The representation of NaN values has all exponent bits (52..62) set, | 756 // The representation of NaN values has all exponent bits (52..62) set, |
762 // and not all mantissa bits (0..51) clear. | 757 // and not all mantissa bits (0..51) clear. |
763 // Read top bits of double representation (second word of value). | 758 // Read top bits of double representation (second word of value). |
764 __ lw(t2, FieldMemOperand(a0, HeapNumber::kExponentOffset)); | 759 __ lwu(a6, FieldMemOperand(a0, HeapNumber::kExponentOffset)); |
765 // Test that exponent bits are all set. | 760 // Test that exponent bits are all set. |
766 __ And(t3, t2, Operand(exp_mask_reg)); | 761 __ And(a7, a6, Operand(exp_mask_reg)); |
767 // If all bits not set (ne cond), then not a NaN, objects are equal. | 762 // If all bits not set (ne cond), then not a NaN, objects are equal. |
768 __ Branch(&return_equal, ne, t3, Operand(exp_mask_reg)); | 763 __ Branch(&return_equal, ne, a7, Operand(exp_mask_reg)); |
769 | 764 |
770 // Shift out flag and all exponent bits, retaining only mantissa. | 765 // Shift out flag and all exponent bits, retaining only mantissa. |
771 __ sll(t2, t2, HeapNumber::kNonMantissaBitsInTopWord); | 766 __ sll(a6, a6, HeapNumber::kNonMantissaBitsInTopWord); |
772 // Or with all low-bits of mantissa. | 767 // Or with all low-bits of mantissa. |
773 __ lw(t3, FieldMemOperand(a0, HeapNumber::kMantissaOffset)); | 768 __ lwu(a7, FieldMemOperand(a0, HeapNumber::kMantissaOffset)); |
774 __ Or(v0, t3, Operand(t2)); | 769 __ Or(v0, a7, Operand(a6)); |
775 // For equal we already have the right value in v0: Return zero (equal) | 770 // For equal we already have the right value in v0: Return zero (equal) |
776 // if all bits in mantissa are zero (it's an Infinity) and non-zero if | 771 // if all bits in mantissa are zero (it's an Infinity) and non-zero if |
777 // not (it's a NaN). For <= and >= we need to load v0 with the failing | 772 // not (it's a NaN). For <= and >= we need to load v0 with the failing |
778 // value if it's a NaN. | 773 // value if it's a NaN. |
779 if (cc != eq) { | 774 if (cc != eq) { |
780 // All-zero means Infinity means equal. | 775 // All-zero means Infinity means equal. |
781 __ Ret(eq, v0, Operand(zero_reg)); | 776 __ Ret(eq, v0, Operand(zero_reg)); |
782 ASSERT(is_int16(GREATER) && is_int16(LESS)); | 777 ASSERT(is_int16(GREATER) && is_int16(LESS)); |
783 __ Ret(USE_DELAY_SLOT); | 778 __ Ret(USE_DELAY_SLOT); |
784 if (cc == le) { | 779 if (cc == le) { |
(...skipping 15 matching lines...)
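The heap-number path above is the standard bit-level NaN test: all exponent bits set, some mantissa bit set. The stub shifts the exponent out and ORs the mantissa halves rather than masking, but the predicate is the same:

    #include <cstdint>
    // NaN iff exponent bits (52..62) are all set and the 52-bit mantissa is
    // nonzero; Infinity has the same exponent but an all-zero mantissa.
    bool IsNaNBits(uint32_t hi, uint32_t lo) {
      const uint32_t kExpMask = 0x7FF00000u;  // HeapNumber::kExponentMask
      return (hi & kExpMask) == kExpMask && (((hi & 0x000FFFFFu) | lo) != 0);
    }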
800 Label* both_loaded_as_doubles, | 795 Label* both_loaded_as_doubles, |
801 Label* slow, | 796 Label* slow, |
802 bool strict) { | 797 bool strict) { |
803 ASSERT((lhs.is(a0) && rhs.is(a1)) || | 798 ASSERT((lhs.is(a0) && rhs.is(a1)) || |
804 (lhs.is(a1) && rhs.is(a0))); | 799 (lhs.is(a1) && rhs.is(a0))); |
805 | 800 |
806 Label lhs_is_smi; | 801 Label lhs_is_smi; |
807 __ JumpIfSmi(lhs, &lhs_is_smi); | 802 __ JumpIfSmi(lhs, &lhs_is_smi); |
808 // Rhs is a Smi. | 803 // Rhs is a Smi. |
809 // Check whether the non-smi is a heap number. | 804 // Check whether the non-smi is a heap number. |
810 __ GetObjectType(lhs, t4, t4); | 805 __ GetObjectType(lhs, t0, t0); |
811 if (strict) { | 806 if (strict) { |
812 // If lhs was not a number and rhs was a Smi then strict equality cannot | 807 // If lhs was not a number and rhs was a Smi then strict equality cannot |
813 // succeed. Return non-equal (lhs is already not zero). | 808 // succeed. Return non-equal (lhs is already not zero). |
814 __ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE)); | 809 __ Ret(USE_DELAY_SLOT, ne, t0, Operand(HEAP_NUMBER_TYPE)); |
815 __ mov(v0, lhs); | 810 __ mov(v0, lhs); |
816 } else { | 811 } else { |
817 // Smi compared non-strictly with a non-Smi non-heap-number. Call | 812 // Smi compared non-strictly with a non-Smi non-heap-number. Call |
818 // the runtime. | 813 // the runtime. |
819 __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE)); | 814 __ Branch(slow, ne, t0, Operand(HEAP_NUMBER_TYPE)); |
820 } | 815 } |
821 | |
822 // Rhs is a smi, lhs is a number. | 816 // Rhs is a smi, lhs is a number. |
823 // Convert smi rhs to double. | 817 // Convert smi rhs to double. |
824 __ sra(at, rhs, kSmiTagSize); | 818 __ SmiUntag(at, rhs); |
825 __ mtc1(at, f14); | 819 __ mtc1(at, f14); |
826 __ cvt_d_w(f14, f14); | 820 __ cvt_d_w(f14, f14); |
827 __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset)); | 821 __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset)); |
828 | 822 |
829 // We now have both loaded as doubles. | 823 // We now have both loaded as doubles. |
830 __ jmp(both_loaded_as_doubles); | 824 __ jmp(both_loaded_as_doubles); |
831 | 825 |
832 __ bind(&lhs_is_smi); | 826 __ bind(&lhs_is_smi); |
833 // Lhs is a Smi. Check whether the non-smi is a heap number. | 827 // Lhs is a Smi. Check whether the non-smi is a heap number. |
834 __ GetObjectType(rhs, t4, t4); | 828 __ GetObjectType(rhs, t0, t0); |
835 if (strict) { | 829 if (strict) { |
836 // If lhs was not a number and rhs was a Smi then strict equality cannot | 830 // If lhs was not a number and rhs was a Smi then strict equality cannot |
837 // succeed. Return non-equal. | 831 // succeed. Return non-equal. |
838 __ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE)); | 832 __ Ret(USE_DELAY_SLOT, ne, t0, Operand(HEAP_NUMBER_TYPE)); |
839 __ li(v0, Operand(1)); | 833 __ li(v0, Operand(1)); |
840 } else { | 834 } else { |
841 // Smi compared non-strictly with a non-Smi non-heap-number. Call | 835 // Smi compared non-strictly with a non-Smi non-heap-number. Call |
842 // the runtime. | 836 // the runtime. |
843 __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE)); | 837 __ Branch(slow, ne, t0, Operand(HEAP_NUMBER_TYPE)); |
844 } | 838 } |
845 | 839 |
846 // Lhs is a smi, rhs is a number. | 840 // Lhs is a smi, rhs is a number. |
847 // Convert smi lhs to double. | 841 // Convert smi lhs to double. |
848 __ sra(at, lhs, kSmiTagSize); | 842 __ SmiUntag(at, lhs); |
849 __ mtc1(at, f12); | 843 __ mtc1(at, f12); |
850 __ cvt_d_w(f12, f12); | 844 __ cvt_d_w(f12, f12); |
851 __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset)); | 845 __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset)); |
852 // Fall through to both_loaded_as_doubles. | 846 // Fall through to both_loaded_as_doubles. |
853 } | 847 } |
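A note on the SmiUntag changes in EmitSmiNonsmiComparison above: with mips64's upper-word Smi encoding the untag is an arithmetic shift by 32 rather than the old sra by 1, and the result still fits the 32-bit int-to-double conversion (mtc1 + cvt_d_w). Sketch, assuming that encoding:

    #include <cstdint>
    // mips64 Smi (assumed): 32-bit payload in bits 32..63, tag zero below.
    double SmiToDouble(int64_t tagged_smi) {
      int32_t untagged = static_cast<int32_t>(tagged_smi >> 32);  // SmiUntag
      return static_cast<double>(untagged);                       // cvt_d_w
    }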
854 | 848 |
855 | 849 |
856 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, | 850 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, |
857 Register lhs, | 851 Register lhs, |
858 Register rhs) { | 852 Register rhs) { |
(...skipping 33 matching lines...)
892 | 886 |
893 | 887 |
894 static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm, | 888 static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm, |
895 Register lhs, | 889 Register lhs, |
896 Register rhs, | 890 Register rhs, |
897 Label* both_loaded_as_doubles, | 891 Label* both_loaded_as_doubles, |
898 Label* not_heap_numbers, | 892 Label* not_heap_numbers, |
899 Label* slow) { | 893 Label* slow) { |
900 __ GetObjectType(lhs, a3, a2); | 894 __ GetObjectType(lhs, a3, a2); |
901 __ Branch(not_heap_numbers, ne, a2, Operand(HEAP_NUMBER_TYPE)); | 895 __ Branch(not_heap_numbers, ne, a2, Operand(HEAP_NUMBER_TYPE)); |
902 __ lw(a2, FieldMemOperand(rhs, HeapObject::kMapOffset)); | 896 __ ld(a2, FieldMemOperand(rhs, HeapObject::kMapOffset)); |
903 // If first was a heap number & second wasn't, go to slow case. | 897 // If first was a heap number & second wasn't, go to slow case. |
904 __ Branch(slow, ne, a3, Operand(a2)); | 898 __ Branch(slow, ne, a3, Operand(a2)); |
905 | 899 |
906 // Both are heap numbers. Load them up then jump to the code we have | 900 // Both are heap numbers. Load them up then jump to the code we have |
907 // for that. | 901 // for that. |
908 __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset)); | 902 __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset)); |
909 __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset)); | 903 __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset)); |
910 | 904 |
911 __ jmp(both_loaded_as_doubles); | 905 __ jmp(both_loaded_as_doubles); |
912 } | 906 } |
(...skipping 26 matching lines...)
939 __ li(v0, Operand(1)); // Non-zero indicates not equal. | 933 __ li(v0, Operand(1)); // Non-zero indicates not equal. |
940 | 934 |
941 __ bind(&object_test); | 935 __ bind(&object_test); |
942 __ Branch(not_both_strings, lt, a2, Operand(FIRST_SPEC_OBJECT_TYPE)); | 936 __ Branch(not_both_strings, lt, a2, Operand(FIRST_SPEC_OBJECT_TYPE)); |
943 __ GetObjectType(rhs, a2, a3); | 937 __ GetObjectType(rhs, a2, a3); |
944 __ Branch(not_both_strings, lt, a3, Operand(FIRST_SPEC_OBJECT_TYPE)); | 938 __ Branch(not_both_strings, lt, a3, Operand(FIRST_SPEC_OBJECT_TYPE)); |
945 | 939 |
946 // If both objects are undetectable, they are equal. Otherwise, they | 940 // If both objects are undetectable, they are equal. Otherwise, they |
947 // are not equal, since they are different objects and an object is not | 941 // are not equal, since they are different objects and an object is not |
948 // equal to undefined. | 942 // equal to undefined. |
949 __ lw(a3, FieldMemOperand(lhs, HeapObject::kMapOffset)); | 943 __ ld(a3, FieldMemOperand(lhs, HeapObject::kMapOffset)); |
950 __ lbu(a2, FieldMemOperand(a2, Map::kBitFieldOffset)); | 944 __ lbu(a2, FieldMemOperand(a2, Map::kBitFieldOffset)); |
951 __ lbu(a3, FieldMemOperand(a3, Map::kBitFieldOffset)); | 945 __ lbu(a3, FieldMemOperand(a3, Map::kBitFieldOffset)); |
952 __ and_(a0, a2, a3); | 946 __ and_(a0, a2, a3); |
953 __ And(a0, a0, Operand(1 << Map::kIsUndetectable)); | 947 __ And(a0, a0, Operand(1 << Map::kIsUndetectable)); |
954 __ Ret(USE_DELAY_SLOT); | 948 __ Ret(USE_DELAY_SLOT); |
955 __ xori(v0, a0, 1 << Map::kIsUndetectable); | 949 __ xori(v0, a0, 1 << Map::kIsUndetectable); |
956 } | 950 } |
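The undetectable-object epilogue above composes three bit operations so that v0 is 0 ("equal") exactly when both maps have kIsUndetectable set. The same predicate in plain C++:

    #include <cstdint>
    // AND the two map bit fields, isolate kIsUndetectable, XOR with it:
    // result is 0 only when both objects are undetectable.
    uint32_t UndetectableCompare(uint32_t bits_lhs, uint32_t bits_rhs,
                                 uint32_t undetectable_mask) {
      return ((bits_lhs & bits_rhs) & undetectable_mask) ^ undetectable_mask;
    }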
957 | 951 |
958 | 952 |
959 static void ICCompareStub_CheckInputType(MacroAssembler* masm, | 953 static void ICCompareStub_CheckInputType(MacroAssembler* masm, |
(...skipping 26 matching lines...)
986 Label miss; | 980 Label miss; |
987 ICCompareStub_CheckInputType(masm, lhs, a2, left_, &miss); | 981 ICCompareStub_CheckInputType(masm, lhs, a2, left_, &miss); |
988 ICCompareStub_CheckInputType(masm, rhs, a3, right_, &miss); | 982 ICCompareStub_CheckInputType(masm, rhs, a3, right_, &miss); |
989 | 983 |
990 Label slow; // Call builtin. | 984 Label slow; // Call builtin. |
991 Label not_smis, both_loaded_as_doubles; | 985 Label not_smis, both_loaded_as_doubles; |
992 | 986 |
993 Label not_two_smis, smi_done; | 987 Label not_two_smis, smi_done; |
994 __ Or(a2, a1, a0); | 988 __ Or(a2, a1, a0); |
995 __ JumpIfNotSmi(a2, ¬_two_smis); | 989 __ JumpIfNotSmi(a2, ¬_two_smis); |
996 __ sra(a1, a1, 1); | 990 __ SmiUntag(a1); |
997 __ sra(a0, a0, 1); | 991 __ SmiUntag(a0); |
| 992 |
998 __ Ret(USE_DELAY_SLOT); | 993 __ Ret(USE_DELAY_SLOT); |
999 __ subu(v0, a1, a0); | 994 __ dsubu(v0, a1, a0); |
1000 __ bind(¬_two_smis); | 995 __ bind(¬_two_smis); |
1001 | 996 |
1002 // NOTICE! This code is only reached after a smi-fast-case check, so | 997 // NOTICE! This code is only reached after a smi-fast-case check, so |
1003 // it is certain that at least one operand isn't a smi. | 998 // it is certain that at least one operand isn't a smi. |
1004 | 999 |
1005 // Handle the case where the objects are identical. Either returns the answer | 1000 // Handle the case where the objects are identical. Either returns the answer |
1006 // or goes to slow. Only falls through if the objects were not identical. | 1001 // or goes to slow. Only falls through if the objects were not identical. |
1007 EmitIdenticalObjectComparison(masm, &slow, cc); | 1002 EmitIdenticalObjectComparison(masm, &slow, cc); |
1008 | 1003 |
1009 // If either is a Smi (we know that not both are), then they can only | 1004 // If either is a Smi (we know that not both are), then they can only |
1010 // be strictly equal if the other is a HeapNumber. | 1005 // be strictly equal if the other is a HeapNumber. |
1011 STATIC_ASSERT(kSmiTag == 0); | 1006 STATIC_ASSERT(kSmiTag == 0); |
1012 ASSERT_EQ(0, Smi::FromInt(0)); | 1007 ASSERT_EQ(0, Smi::FromInt(0)); |
1013 __ And(t2, lhs, Operand(rhs)); | 1008 __ And(a6, lhs, Operand(rhs)); |
1014 __ JumpIfNotSmi(t2, ¬_smis, t0); | 1009 __ JumpIfNotSmi(a6, ¬_smis, a4); |
1015 // One operand is a smi. EmitSmiNonsmiComparison generates code that can: | 1010 // One operand is a smi. EmitSmiNonsmiComparison generates code that can: |
1016 // 1) Return the answer. | 1011 // 1) Return the answer. |
1017 // 2) Go to slow. | 1012 // 2) Go to slow. |
1018 // 3) Fall through to both_loaded_as_doubles. | 1013 // 3) Fall through to both_loaded_as_doubles. |
1019 // 4) Jump to rhs_not_nan. | 1014 // 4) Jump to rhs_not_nan. |
1020 // In cases 3 and 4 we have found out we were dealing with a number-number | 1015 // In cases 3 and 4 we have found out we were dealing with a number-number |
1021 // comparison and the numbers have been loaded into f12 and f14 as doubles, | 1016 // comparison and the numbers have been loaded into f12 and f14 as doubles, |
1022 // or in GP registers (a0, a1, a2, a3) depending on the presence of the FPU. | 1017 // or in GP registers (a0, a1, a2, a3) depending on the presence of the FPU. |
1023 EmitSmiNonsmiComparison(masm, lhs, rhs, | 1018 EmitSmiNonsmiComparison(masm, lhs, rhs, |
1024 &both_loaded_as_doubles, &slow, strict()); | 1019 &both_loaded_as_doubles, &slow, strict()); |
1025 | 1020 |
1026 __ bind(&both_loaded_as_doubles); | 1021 __ bind(&both_loaded_as_doubles); |
1027 // f12, f14 are the double representations of the left hand side | 1022 // f12, f14 are the double representations of the left hand side |
1028 // and the right hand side if we have FPU. Otherwise a2, a3 represent | 1023 // and the right hand side if we have FPU. Otherwise a2, a3 represent |
1029 // left hand side and a0, a1 represent right hand side. | 1024 // left hand side and a0, a1 represent right hand side. |
| 1025 |
1030 Label nan; | 1026 Label nan; |
1031 __ li(t0, Operand(LESS)); | 1027 __ li(a4, Operand(LESS)); |
1032 __ li(t1, Operand(GREATER)); | 1028 __ li(a5, Operand(GREATER)); |
1033 __ li(t2, Operand(EQUAL)); | 1029 __ li(a6, Operand(EQUAL)); |
1034 | 1030 |
1035 // Check if either rhs or lhs is NaN. | 1031 // Check if either rhs or lhs is NaN. |
1036 __ BranchF(NULL, &nan, eq, f12, f14); | 1032 __ BranchF(NULL, &nan, eq, f12, f14); |
1037 | 1033 |
1038 // Check if the LESS condition is satisfied. If true, conditionally | 1034 // Check if the LESS condition is satisfied. If true, conditionally |
1039 // move the result to v0. | 1035 // move the result to v0. |
1040 __ c(OLT, D, f12, f14); | 1036 __ c(OLT, D, f12, f14); |
1041 __ Movt(v0, t0); | 1037 __ Movt(v0, a4); |
1042 // Use previous check to store conditionally to v0 oposite condition | 1038 // Use previous check to store conditionally to v0 oposite condition |
1043 // (GREATER). If rhs is equal to lhs, this will be corrected in next | 1039 // (GREATER). If rhs is equal to lhs, this will be corrected in next |
1044 // check. | 1040 // check. |
1045 __ Movf(v0, t1); | 1041 __ Movf(v0, a5); |
1046 // Check if the EQUAL condition is satisfied. If true, conditionally | 1042 // Check if the EQUAL condition is satisfied. If true, conditionally |
1047 // move the result to v0. | 1043 // move the result to v0. |
1048 __ c(EQ, D, f12, f14); | 1044 __ c(EQ, D, f12, f14); |
1049 __ Movt(v0, t2); | 1045 __ Movt(v0, a6); |
1050 | 1046 |
1051 __ Ret(); | 1047 __ Ret(); |
1052 | 1048 |
1053 __ bind(&nan); | 1049 __ bind(&nan); |
1054 // NaN comparisons always fail. | 1050 // NaN comparisons always fail. |
1055 // Load whatever we need in v0 to make the comparison fail. | 1051 // Load whatever we need in v0 to make the comparison fail. |
1056 ASSERT(is_int16(GREATER) && is_int16(LESS)); | 1052 ASSERT(is_int16(GREATER) && is_int16(LESS)); |
1057 __ Ret(USE_DELAY_SLOT); | 1053 __ Ret(USE_DELAY_SLOT); |
1058 if (cc == lt || cc == le) { | 1054 if (cc == lt || cc == le) { |
1059 __ li(v0, Operand(GREATER)); | 1055 __ li(v0, Operand(GREATER)); |
(...skipping 42 matching lines...)
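The two-smi fast path above returns the difference of the untagged operands, whose sign is the comparison result; switching to dsubu is safe because two 32-bit payloads cannot overflow a 64-bit subtraction. Equivalent:

    #include <cstdint>
    // Smi-smi compare: <0, 0 or >0, mirroring LESS/EQUAL/GREATER.
    int64_t SmiCompare(int64_t lhs_tagged, int64_t rhs_tagged) {
      return (lhs_tagged >> 32) - (rhs_tagged >> 32);
    }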
1102 __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs, rhs, a2, a3, &slow); | 1098 __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs, rhs, a2, a3, &slow); |
1103 | 1099 |
1104 __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, a2, | 1100 __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, a2, |
1105 a3); | 1101 a3); |
1106 if (cc == eq) { | 1102 if (cc == eq) { |
1107 StringCompareStub::GenerateFlatAsciiStringEquals(masm, | 1103 StringCompareStub::GenerateFlatAsciiStringEquals(masm, |
1108 lhs, | 1104 lhs, |
1109 rhs, | 1105 rhs, |
1110 a2, | 1106 a2, |
1111 a3, | 1107 a3, |
1112 t0); | 1108 a4); |
1113 } else { | 1109 } else { |
1114 StringCompareStub::GenerateCompareFlatAsciiStrings(masm, | 1110 StringCompareStub::GenerateCompareFlatAsciiStrings(masm, |
1115 lhs, | 1111 lhs, |
1116 rhs, | 1112 rhs, |
1117 a2, | 1113 a2, |
1118 a3, | 1114 a3, |
1119 t0, | 1115 a4, |
1120 t1); | 1116 a5); |
1121 } | 1117 } |
1122 // Never falls through to here. | 1118 // Never falls through to here. |
1123 | 1119 |
1124 __ bind(&slow); | 1120 __ bind(&slow); |
1125 // Prepare for call to builtin. Push object pointers, a0 (lhs) first, | 1121 // Prepare for call to builtin. Push object pointers, a0 (lhs) first, |
1126 // a1 (rhs) second. | 1122 // a1 (rhs) second. |
1127 __ Push(lhs, rhs); | 1123 __ Push(lhs, rhs); |
1128 // Figure out which native to call and setup the arguments. | 1124 // Figure out which native to call and setup the arguments. |
1129 Builtins::JavaScript native; | 1125 Builtins::JavaScript native; |
1130 if (cc == eq) { | 1126 if (cc == eq) { |
(...skipping 28 matching lines...)
1159 } else { | 1155 } else { |
1160 __ PushSafepointRegisters(); | 1156 __ PushSafepointRegisters(); |
1161 } | 1157 } |
1162 __ Jump(t9); | 1158 __ Jump(t9); |
1163 } | 1159 } |
1164 | 1160 |
1165 | 1161 |
1166 void RestoreRegistersStateStub::Generate(MacroAssembler* masm) { | 1162 void RestoreRegistersStateStub::Generate(MacroAssembler* masm) { |
1167 __ mov(t9, ra); | 1163 __ mov(t9, ra); |
1168 __ pop(ra); | 1164 __ pop(ra); |
1169 __ StoreToSafepointRegisterSlot(t9, t9); | |
1170 if (save_doubles_ == kSaveFPRegs) { | 1165 if (save_doubles_ == kSaveFPRegs) { |
1171 __ PopSafepointRegistersAndDoubles(); | 1166 __ PopSafepointRegistersAndDoubles(); |
1172 } else { | 1167 } else { |
1173 __ PopSafepointRegisters(); | 1168 __ PopSafepointRegisters(); |
1174 } | 1169 } |
1175 __ Jump(t9); | 1170 __ Jump(t9); |
1176 } | 1171 } |
1177 | 1172 |
1178 | 1173 |
1179 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { | 1174 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { |
(...skipping 19 matching lines...)
1199 } | 1194 } |
1200 | 1195 |
1201 __ MultiPop(kJSCallerSaved | ra.bit()); | 1196 __ MultiPop(kJSCallerSaved | ra.bit()); |
1202 __ Ret(); | 1197 __ Ret(); |
1203 } | 1198 } |
1204 | 1199 |
1205 | 1200 |
1206 void MathPowStub::Generate(MacroAssembler* masm) { | 1201 void MathPowStub::Generate(MacroAssembler* masm) { |
1207 const Register base = a1; | 1202 const Register base = a1; |
1208 const Register exponent = a2; | 1203 const Register exponent = a2; |
1209 const Register heapnumbermap = t1; | 1204 const Register heapnumbermap = a5; |
1210 const Register heapnumber = v0; | 1205 const Register heapnumber = v0; |
1211 const DoubleRegister double_base = f2; | 1206 const DoubleRegister double_base = f2; |
1212 const DoubleRegister double_exponent = f4; | 1207 const DoubleRegister double_exponent = f4; |
1213 const DoubleRegister double_result = f0; | 1208 const DoubleRegister double_result = f0; |
1214 const DoubleRegister double_scratch = f6; | 1209 const DoubleRegister double_scratch = f6; |
1215 const FPURegister single_scratch = f8; | 1210 const FPURegister single_scratch = f8; |
1216 const Register scratch = t5; | 1211 const Register scratch = t1; |
1217 const Register scratch2 = t3; | 1212 const Register scratch2 = a7; |
1218 | 1213 |
1219 Label call_runtime, done, int_exponent; | 1214 Label call_runtime, done, int_exponent; |
1220 if (exponent_type_ == ON_STACK) { | 1215 if (exponent_type_ == ON_STACK) { |
1221 Label base_is_smi, unpack_exponent; | 1216 Label base_is_smi, unpack_exponent; |
1222 // The exponent and base are supplied as arguments on the stack. | 1217 // The exponent and base are supplied as arguments on the stack. |
1223 // This can only happen if the stub is called from non-optimized code. | 1218 // This can only happen if the stub is called from non-optimized code. |
1224 // Load input parameters from stack to double registers. | 1219 // Load input parameters from stack to double registers. |
1225 __ lw(base, MemOperand(sp, 1 * kPointerSize)); | 1220 __ ld(base, MemOperand(sp, 1 * kPointerSize)); |
1226 __ lw(exponent, MemOperand(sp, 0 * kPointerSize)); | 1221 __ ld(exponent, MemOperand(sp, 0 * kPointerSize)); |
1227 | 1222 |
1228 __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex); | 1223 __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex); |
1229 | 1224 |
1230 __ UntagAndJumpIfSmi(scratch, base, &base_is_smi); | 1225 __ UntagAndJumpIfSmi(scratch, base, &base_is_smi); |
1231 __ lw(scratch, FieldMemOperand(base, JSObject::kMapOffset)); | 1226 __ ld(scratch, FieldMemOperand(base, JSObject::kMapOffset)); |
1232 __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap)); | 1227 __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap)); |
1233 | 1228 |
1234 __ ldc1(double_base, FieldMemOperand(base, HeapNumber::kValueOffset)); | 1229 __ ldc1(double_base, FieldMemOperand(base, HeapNumber::kValueOffset)); |
1235 __ jmp(&unpack_exponent); | 1230 __ jmp(&unpack_exponent); |
1236 | 1231 |
1237 __ bind(&base_is_smi); | 1232 __ bind(&base_is_smi); |
1238 __ mtc1(scratch, single_scratch); | 1233 __ mtc1(scratch, single_scratch); |
1239 __ cvt_d_w(double_base, single_scratch); | 1234 __ cvt_d_w(double_base, single_scratch); |
1240 __ bind(&unpack_exponent); | 1235 __ bind(&unpack_exponent); |
1241 | 1236 |
1242 __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent); | 1237 __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent); |
1243 | 1238 |
1244 __ lw(scratch, FieldMemOperand(exponent, JSObject::kMapOffset)); | 1239 __ ld(scratch, FieldMemOperand(exponent, JSObject::kMapOffset)); |
1245 __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap)); | 1240 __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap)); |
1246 __ ldc1(double_exponent, | 1241 __ ldc1(double_exponent, |
1247 FieldMemOperand(exponent, HeapNumber::kValueOffset)); | 1242 FieldMemOperand(exponent, HeapNumber::kValueOffset)); |
1248 } else if (exponent_type_ == TAGGED) { | 1243 } else if (exponent_type_ == TAGGED) { |
1249 // Base is already in double_base. | 1244 // Base is already in double_base. |
1250 __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent); | 1245 __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent); |
1251 | 1246 |
1252 __ ldc1(double_exponent, | 1247 __ ldc1(double_exponent, |
1253 FieldMemOperand(exponent, HeapNumber::kValueOffset)); | 1248 FieldMemOperand(exponent, HeapNumber::kValueOffset)); |
1254 } | 1249 } |
(...skipping 86 matching lines...)
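The ON_STACK unpacking above dispatches on the tag: Smis are untagged and converted, anything else must carry the heap-number map before its double payload is loaded. A host-side sketch under the same assumptions (HeapNumberView is a hypothetical stand-in for the map check plus ldc1):

    #include <cstdint>
    struct HeapNumberView { bool is_heap_number; double value; };
    // Mirrors UntagAndJumpIfSmi + map check + payload load; returning false
    // corresponds to the Branch(&call_runtime, ...) paths.
    bool UnpackNumber(int64_t tagged, const HeapNumberView& obj, double* out) {
      if ((tagged & 1) == 0) {                      // kSmiTag == 0
        *out = static_cast<int32_t>(tagged >> 32);  // untag, then convert
        return true;
      }
      if (!obj.is_heap_number) return false;
      *out = obj.value;
      return true;
    }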
1341 // Exponent has previously been stored into scratch as untagged integer. | 1336 // Exponent has previously been stored into scratch as untagged integer. |
1342 __ mov(exponent, scratch); | 1337 __ mov(exponent, scratch); |
1343 } | 1338 } |
1344 | 1339 |
1345 __ mov_d(double_scratch, double_base); // Back up base. | 1340 __ mov_d(double_scratch, double_base); // Back up base. |
1346 __ Move(double_result, 1.0); | 1341 __ Move(double_result, 1.0); |
1347 | 1342 |
1348 // Get absolute value of exponent. | 1343 // Get absolute value of exponent. |
1349 Label positive_exponent; | 1344 Label positive_exponent; |
1350 __ Branch(&positive_exponent, ge, scratch, Operand(zero_reg)); | 1345 __ Branch(&positive_exponent, ge, scratch, Operand(zero_reg)); |
1351 __ Subu(scratch, zero_reg, scratch); | 1346 __ Dsubu(scratch, zero_reg, scratch); |
1352 __ bind(&positive_exponent); | 1347 __ bind(&positive_exponent); |
1353 | 1348 |
1354 Label while_true, no_carry, loop_end; | 1349 Label while_true, no_carry, loop_end; |
1355 __ bind(&while_true); | 1350 __ bind(&while_true); |
1356 | 1351 |
1357 __ And(scratch2, scratch, 1); | 1352 __ And(scratch2, scratch, 1); |
1358 | 1353 |
1359 __ Branch(&no_carry, eq, scratch2, Operand(zero_reg)); | 1354 __ Branch(&no_carry, eq, scratch2, Operand(zero_reg)); |
1360 __ mul_d(double_result, double_result, double_scratch); | 1355 __ mul_d(double_result, double_result, double_scratch); |
1361 __ bind(&no_carry); | 1356 __ bind(&no_carry); |
1362 | 1357 |
1363 __ sra(scratch, scratch, 1); | 1358 __ dsra(scratch, scratch, 1); |
1364 | 1359 |
1365 __ Branch(&loop_end, eq, scratch, Operand(zero_reg)); | 1360 __ Branch(&loop_end, eq, scratch, Operand(zero_reg)); |
1366 __ mul_d(double_scratch, double_scratch, double_scratch); | 1361 __ mul_d(double_scratch, double_scratch, double_scratch); |
1367 | 1362 |
1368 __ Branch(&while_true); | 1363 __ Branch(&while_true); |
1369 | 1364 |
1370 __ bind(&loop_end); | 1365 __ bind(&loop_end); |
1371 | 1366 |
1372 __ Branch(&done, ge, exponent, Operand(zero_reg)); | 1367 __ Branch(&done, ge, exponent, Operand(zero_reg)); |
1373 __ Move(double_scratch, 1.0); | 1368 __ Move(double_scratch, 1.0); |
(...skipping 118 matching lines...)
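The int_exponent loop above is binary exponentiation over the absolute exponent; the skipped lines apply the reciprocal for negative exponents and bail to the runtime for the awkward edge cases. The same algorithm in C++:

    // Square the base each round; multiply into the result when the current
    // exponent bit is set. dsra halves the exponent, matching e >>= 1.
    double PowInt(double base, int exponent) {
      unsigned e = exponent < 0 ? 0u - static_cast<unsigned>(exponent)
                                : static_cast<unsigned>(exponent);
      double result = 1.0;
      for (double b = base; e != 0; e >>= 1) {
        if (e & 1) result *= b;
        b *= b;
      }
      return exponent < 0 ? 1.0 / result : result;
    }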
1492 // cp: current context (C callee-saved) | 1487 // cp: current context (C callee-saved) |
1493 | 1488 |
1494 ProfileEntryHookStub::MaybeCallEntryHook(masm); | 1489 ProfileEntryHookStub::MaybeCallEntryHook(masm); |
1495 | 1490 |
1496 // NOTE: s0-s2 hold the arguments of this function instead of a0-a2. | 1491 // NOTE: s0-s2 hold the arguments of this function instead of a0-a2. |
1497 // The reason for this is that these arguments would need to be saved anyway | 1492 // The reason for this is that these arguments would need to be saved anyway |
1498 // so it's faster to set them up directly. | 1493 // so it's faster to set them up directly. |
1499 // See MacroAssembler::PrepareCEntryArgs and PrepareCEntryFunction. | 1494 // See MacroAssembler::PrepareCEntryArgs and PrepareCEntryFunction. |
1500 | 1495 |
1501 // Compute the argv pointer in a callee-saved register. | 1496 // Compute the argv pointer in a callee-saved register. |
1502 __ Addu(s1, sp, s1); | 1497 __ Daddu(s1, sp, s1); |
1503 | 1498 |
1504 // Enter the exit frame that transitions from JavaScript to C++. | 1499 // Enter the exit frame that transitions from JavaScript to C++. |
1505 FrameScope scope(masm, StackFrame::MANUAL); | 1500 FrameScope scope(masm, StackFrame::MANUAL); |
1506 __ EnterExitFrame(save_doubles_); | 1501 __ EnterExitFrame(save_doubles_); |
1507 | 1502 |
1508 // s0: number of arguments including receiver (C callee-saved) | 1503 // s0: number of arguments including receiver (C callee-saved) |
1509 // s1: pointer to first argument (C callee-saved) | 1504 // s1: pointer to first argument (C callee-saved) |
1510 // s2: pointer to builtin function (C callee-saved) | 1505 // s2: pointer to builtin function (C callee-saved) |
1511 | 1506 |
1512 // Prepare arguments for C routine. | 1507 // Prepare arguments for C routine. |
(...skipping 19 matching lines...) Expand all Loading... |
1532 // coverage code can interfere with the proper calculation of ra. | 1527 // coverage code can interfere with the proper calculation of ra. |
1533 Label find_ra; | 1528 Label find_ra; |
1534 masm->bal(&find_ra); // bal exposes branch delay slot. | 1529 masm->bal(&find_ra); // bal exposes branch delay slot. |
1535 masm->mov(a1, s1); | 1530 masm->mov(a1, s1); |
1536 masm->bind(&find_ra); | 1531 masm->bind(&find_ra); |
1537 | 1532 |
1538 // Adjust the value in ra to point to the correct return location, 2nd | 1533 // Adjust the value in ra to point to the correct return location, 2nd |
1539 // instruction past the real call into C code (the jalr(t9)), and push it. | 1534 // instruction past the real call into C code (the jalr(t9)), and push it. |
1540 // This is the return address of the exit frame. | 1535 // This is the return address of the exit frame. |
1541 const int kNumInstructionsToJump = 5; | 1536 const int kNumInstructionsToJump = 5; |
1542 masm->Addu(ra, ra, kNumInstructionsToJump * kPointerSize); | 1537 masm->Daddu(ra, ra, kNumInstructionsToJump * kInt32Size); |
1543 masm->sw(ra, MemOperand(sp)); // This spot was reserved in EnterExitFrame. | 1538 masm->sd(ra, MemOperand(sp)); // This spot was reserved in EnterExitFrame. |
1544 // Stack space reservation moved to the branch delay slot below. | 1539 // Stack space reservation moved to the branch delay slot below. |
1545 // Stack is still aligned. | 1540 // Stack is still aligned. |
1546 | 1541 |
1547 // Call the C routine. | 1542 // Call the C routine. |
1548 masm->mov(t9, s2); // Function pointer to t9 to conform to ABI for PIC. | 1543 masm->mov(t9, s2); // Function pointer to t9 to conform to ABI for PIC. |
1549 masm->jalr(t9); | 1544 masm->jalr(t9); |
1550 // Set up sp in the delay slot. | 1545 // Set up sp in the delay slot. |
1551 masm->addiu(sp, sp, -kCArgsSlotsSize); | 1546 masm->daddiu(sp, sp, -kCArgsSlotsSize); |
1552 // Make sure the stored 'ra' points to this position. | 1547 // Make sure the stored 'ra' points to this position. |
1553 ASSERT_EQ(kNumInstructionsToJump, | 1548 ASSERT_EQ(kNumInstructionsToJump, |
1554 masm->InstructionsGeneratedSince(&find_ra)); | 1549 masm->InstructionsGeneratedSince(&find_ra)); |
1555 } | 1550 } |
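The kPointerSize→kInt32Size switch in this hunk is the subtle part: mips64 instructions are still 4 bytes each, so the return address is ra + 5 * 4 = 20 bytes past the bal, not 5 * 8; the ASSERT_EQ against InstructionsGeneratedSince keeps the constant honest.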
1556 | 1551 |
1557 | |
1558 // Runtime functions should not return 'the hole'. Allowing it to escape may | 1552 // Runtime functions should not return 'the hole'. Allowing it to escape may |
1559 // lead to crashes in the IC code later. | 1553 // lead to crashes in the IC code later. |
1560 if (FLAG_debug_code) { | 1554 if (FLAG_debug_code) { |
1561 Label okay; | 1555 Label okay; |
1562 __ LoadRoot(t0, Heap::kTheHoleValueRootIndex); | 1556 __ LoadRoot(a4, Heap::kTheHoleValueRootIndex); |
1563 __ Branch(&okay, ne, v0, Operand(t0)); | 1557 __ Branch(&okay, ne, v0, Operand(a4)); |
1564 __ stop("The hole escaped"); | 1558 __ stop("The hole escaped"); |
1565 __ bind(&okay); | 1559 __ bind(&okay); |
1566 } | 1560 } |
1567 | 1561 |
1568 // Check result for exception sentinel. | 1562 // Check result for exception sentinel. |
1569 Label exception_returned; | 1563 Label exception_returned; |
1570 __ LoadRoot(t0, Heap::kExceptionRootIndex); | 1564 __ LoadRoot(a4, Heap::kExceptionRootIndex); |
1571 __ Branch(&exception_returned, eq, t0, Operand(v0)); | 1565 __ Branch(&exception_returned, eq, a4, Operand(v0)); |
1572 | 1566 |
1573 ExternalReference pending_exception_address( | 1567 ExternalReference pending_exception_address( |
1574 Isolate::kPendingExceptionAddress, isolate()); | 1568 Isolate::kPendingExceptionAddress, isolate()); |
1575 | 1569 |
1576 // Check that there is no pending exception, otherwise we | 1570 // Check that there is no pending exception, otherwise we |
1577 // should have returned the exception sentinel. | 1571 // should have returned the exception sentinel. |
1578 if (FLAG_debug_code) { | 1572 if (FLAG_debug_code) { |
1579 Label okay; | 1573 Label okay; |
1580 __ li(a2, Operand(pending_exception_address)); | 1574 __ li(a2, Operand(pending_exception_address)); |
1581 __ lw(a2, MemOperand(a2)); | 1575 __ ld(a2, MemOperand(a2)); |
1582 __ LoadRoot(t0, Heap::kTheHoleValueRootIndex); | 1576 __ LoadRoot(a4, Heap::kTheHoleValueRootIndex); |
1583 // Cannot use check here as it attempts to generate call into runtime. | 1577 // Cannot use check here as it attempts to generate call into runtime. |
1584 __ Branch(&okay, eq, t0, Operand(a2)); | 1578 __ Branch(&okay, eq, a4, Operand(a2)); |
1585 __ stop("Unexpected pending exception"); | 1579 __ stop("Unexpected pending exception"); |
1586 __ bind(&okay); | 1580 __ bind(&okay); |
1587 } | 1581 } |
1588 | 1582 |
1589 // Exit C frame and return. | 1583 // Exit C frame and return. |
1590 // v0:v1: result | 1584 // v0:v1: result |
1591 // sp: stack pointer | 1585 // sp: stack pointer |
1592 // fp: frame pointer | 1586 // fp: frame pointer |
1593 // s0: still holds argc (callee-saved). | 1587 // s0: still holds argc (callee-saved). |
1594 __ LeaveExitFrame(save_doubles_, s0, true, EMIT_RETURN); | 1588 __ LeaveExitFrame(save_doubles_, s0, true, EMIT_RETURN); |
1595 | 1589 |
1596 // Handling of exception. | 1590 // Handling of exception. |
1597 __ bind(&exception_returned); | 1591 __ bind(&exception_returned); |
1598 | 1592 |
1599 // Retrieve the pending exception. | 1593 // Retrieve the pending exception. |
1600 __ li(a2, Operand(pending_exception_address)); | 1594 __ li(a2, Operand(pending_exception_address)); |
1601 __ lw(v0, MemOperand(a2)); | 1595 __ ld(v0, MemOperand(a2)); |
1602 | 1596 |
1603 // Clear the pending exception. | 1597 // Clear the pending exception. |
1604 __ li(a3, Operand(isolate()->factory()->the_hole_value())); | 1598 __ li(a3, Operand(isolate()->factory()->the_hole_value())); |
1605 __ sw(a3, MemOperand(a2)); | 1599 __ sd(a3, MemOperand(a2)); |
1606 | 1600 |
1607 // Special handling of termination exceptions which are uncatchable | 1601 // Special handling of termination exceptions which are uncatchable |
1608 // by javascript code. | 1602 // by javascript code. |
1609 Label throw_termination_exception; | 1603 Label throw_termination_exception; |
1610 __ LoadRoot(t0, Heap::kTerminationExceptionRootIndex); | 1604 __ LoadRoot(a4, Heap::kTerminationExceptionRootIndex); |
1611 __ Branch(&throw_termination_exception, eq, v0, Operand(t0)); | 1605 __ Branch(&throw_termination_exception, eq, v0, Operand(a4)); |
1612 | 1606 |
1613 // Handle normal exception. | 1607 // Handle normal exception. |
1614 __ Throw(v0); | 1608 __ Throw(v0); |
1615 | 1609 |
1616 __ bind(&throw_termination_exception); | 1610 __ bind(&throw_termination_exception); |
1617 __ ThrowUncatchable(v0); | 1611 __ ThrowUncatchable(v0); |
1618 } | 1612 } |
1619 | 1613 |
1620 | 1614 |
1621 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { | 1615 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { |
1622 Label invoke, handler_entry, exit; | 1616 Label invoke, handler_entry, exit; |
1623 Isolate* isolate = masm->isolate(); | 1617 Isolate* isolate = masm->isolate(); |
1624 | 1618 |
| 1619 // TODO(plind): unify the ABI description here. |
1625 // Registers: | 1620 // Registers: |
1626 // a0: entry address | 1621 // a0: entry address |
1627 // a1: function | 1622 // a1: function |
1628 // a2: receiver | 1623 // a2: receiver |
1629 // a3: argc | 1624 // a3: argc |
1630 // | 1625 // a4: argv (on mips64) |
| 1626 |
1631 // Stack: | 1627 // Stack: |
1632 // 4 args slots | 1628 // 0 arg slots on mips64 (4 args slots on mips) |
1633 // args | 1629 // args -- in a4 on mips64, on the stack on mips |
1634 | 1630 |
1635 ProfileEntryHookStub::MaybeCallEntryHook(masm); | 1631 ProfileEntryHookStub::MaybeCallEntryHook(masm); |
1636 | 1632 |
1637 // Save callee saved registers on the stack. | 1633 // Save callee saved registers on the stack. |
1638 __ MultiPush(kCalleeSaved | ra.bit()); | 1634 __ MultiPush(kCalleeSaved | ra.bit()); |
1639 | 1635 |
1640 // Save callee-saved FPU registers. | 1636 // Save callee-saved FPU registers. |
1641 __ MultiPushFPU(kCalleeSavedFPU); | 1637 __ MultiPushFPU(kCalleeSavedFPU); |
1642 // Set up the reserved register for 0.0. | 1638 // Set up the reserved register for 0.0. |
1643 __ Move(kDoubleRegZero, 0.0); | 1639 __ Move(kDoubleRegZero, 0.0); |
1644 | 1640 |
1645 | |
1646 // Load argv in s0 register. | 1641 // Load argv in s0 register. |
1647 int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize; | 1642 if (kMipsAbi == kN64) { |
1648 offset_to_argv += kNumCalleeSavedFPU * kDoubleSize; | 1643 __ mov(s0, a4); // 5th parameter is in a4 on mips64. |
 | 1644 } else { // O32 ABI. |
 | 1645 // 5th parameter is on the stack for the O32 ABI. |
| 1646 int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize; |
| 1647 offset_to_argv += kNumCalleeSavedFPU * kDoubleSize; |
| 1648 __ ld(s0, MemOperand(sp, offset_to_argv + kCArgsSlotsSize)); |
| 1649 } |
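The argv fork above encodes the two calling conventions: N64 passes the fifth integer argument in a4, while O32 passes it on the stack beyond the four reserved argument slots, which is what offset_to_argv plus kCArgsSlotsSize skips over. A C-level view of the entry call (prototype name hypothetical):

    #include <cstdint>
    // Under N64 'argv' arrives in a4 and is moved straight to s0; under O32
    // it sits on the stack past the arg slots and the registers this stub
    // has already pushed.
    extern "C" int64_t JSEntryCall(void* entry, void* function, void* receiver,
                                   int64_t argc, void** argv);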
1649 | 1650 |
1650 __ InitializeRootRegister(); | 1651 __ InitializeRootRegister(); |
1651 __ lw(s0, MemOperand(sp, offset_to_argv + kCArgsSlotsSize)); | |
1652 | 1652 |
1653 // We build an EntryFrame. | 1653 // We build an EntryFrame. |
1654 __ li(t3, Operand(-1)); // Push a bad frame pointer to fail if it is used. | 1654 __ li(a7, Operand(-1)); // Push a bad frame pointer to fail if it is used. |
1655 int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY; | 1655 int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY; |
1656 __ li(t2, Operand(Smi::FromInt(marker))); | 1656 __ li(a6, Operand(Smi::FromInt(marker))); |
1657 __ li(t1, Operand(Smi::FromInt(marker))); | 1657 __ li(a5, Operand(Smi::FromInt(marker))); |
1658 __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress, | 1658 ExternalReference c_entry_fp(Isolate::kCEntryFPAddress, isolate); |
1659 isolate))); | 1659 __ li(a4, Operand(c_entry_fp)); |
1660 __ lw(t0, MemOperand(t0)); | 1660 __ ld(a4, MemOperand(a4)); |
1661 __ Push(t3, t2, t1, t0); | 1661 __ Push(a7, a6, a5, a4); |
1662 // Set up frame pointer for the frame to be pushed. | 1662 // Set up frame pointer for the frame to be pushed. |
1663 __ addiu(fp, sp, -EntryFrameConstants::kCallerFPOffset); | 1663 __ daddiu(fp, sp, -EntryFrameConstants::kCallerFPOffset); |
1664 | 1664 |
1665 // Registers: | 1665 // Registers: |
1666 // a0: entry_address | 1666 // a0: entry_address |
1667 // a1: function | 1667 // a1: function |
1668 // a2: receiver_pointer | 1668 // a2: receiver_pointer |
1669 // a3: argc | 1669 // a3: argc |
1670 // s0: argv | 1670 // s0: argv |
1671 // | 1671 // |
1672 // Stack: | 1672 // Stack: |
1673 // caller fp | | 1673 // caller fp | |
1674 // function slot | entry frame | 1674 // function slot | entry frame |
1675 // context slot | | 1675 // context slot | |
1676 // bad fp (0xff...f) | | 1676 // bad fp (0xff...f) | |
1677 // callee saved registers + ra | 1677 // callee saved registers + ra |
1678 // 4 args slots | 1678 // [ O32: 4 args slots] |
1679 // args | 1679 // args |
1680 | 1680 |
1681 // If this is the outermost JS call, set js_entry_sp value. | 1681 // If this is the outermost JS call, set js_entry_sp value. |
1682 Label non_outermost_js; | 1682 Label non_outermost_js; |
1683 ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate); | 1683 ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate); |
1684 __ li(t1, Operand(ExternalReference(js_entry_sp))); | 1684 __ li(a5, Operand(ExternalReference(js_entry_sp))); |
1685 __ lw(t2, MemOperand(t1)); | 1685 __ ld(a6, MemOperand(a5)); |
1686 __ Branch(&non_outermost_js, ne, t2, Operand(zero_reg)); | 1686 __ Branch(&non_outermost_js, ne, a6, Operand(zero_reg)); |
1687 __ sw(fp, MemOperand(t1)); | 1687 __ sd(fp, MemOperand(a5)); |
1688 __ li(t0, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME))); | 1688 __ li(a4, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME))); |
1689 Label cont; | 1689 Label cont; |
1690 __ b(&cont); | 1690 __ b(&cont); |
1691 __ nop(); // Branch delay slot nop. | 1691 __ nop(); // Branch delay slot nop. |
1692 __ bind(&non_outermost_js); | 1692 __ bind(&non_outermost_js); |
1693 __ li(t0, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME))); | 1693 __ li(a4, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME))); |
1694 __ bind(&cont); | 1694 __ bind(&cont); |
1695 __ push(t0); | 1695 __ push(a4); |
1696 | 1696 |
1697 // Jump to a faked try block that does the invoke, with a faked catch | 1697 // Jump to a faked try block that does the invoke, with a faked catch |
1698 // block that sets the pending exception. | 1698 // block that sets the pending exception. |
1699 __ jmp(&invoke); | 1699 __ jmp(&invoke); |
1700 __ bind(&handler_entry); | 1700 __ bind(&handler_entry); |
1701 handler_offset_ = handler_entry.pos(); | 1701 handler_offset_ = handler_entry.pos(); |
1702 // Caught exception: Store result (exception) in the pending exception | 1702 // Caught exception: Store result (exception) in the pending exception |
1703 // field in the JSEnv and return a failure sentinel. Coming in here the | 1703 // field in the JSEnv and return a failure sentinel. Coming in here the |
1704 // fp will be invalid because the PushTryHandler below sets it to 0 to | 1704 // fp will be invalid because the PushTryHandler below sets it to 0 to |
1705 // signal the existence of the JSEntry frame. | 1705 // signal the existence of the JSEntry frame. |
1706 __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress, | 1706 __ li(a4, Operand(ExternalReference(Isolate::kPendingExceptionAddress, |
1707 isolate))); | 1707 isolate))); |
1708 __ sw(v0, MemOperand(t0)); // We come back from 'invoke'. Result is in v0. | 1708 __ sd(v0, MemOperand(a4)); // We come back from 'invoke'. Result is in v0. |
1709 __ LoadRoot(v0, Heap::kExceptionRootIndex); | 1709 __ LoadRoot(v0, Heap::kExceptionRootIndex); |
1710 __ b(&exit); // b exposes branch delay slot. | 1710 __ b(&exit); // b exposes branch delay slot. |
1711 __ nop(); // Branch delay slot nop. | 1711 __ nop(); // Branch delay slot nop. |
1712 | 1712 |
1713 // Invoke: Link this frame into the handler chain. There's only one | 1713 // Invoke: Link this frame into the handler chain. There's only one |
1714 // handler block in this code object, so its index is 0. | 1714 // handler block in this code object, so its index is 0. |
1715 __ bind(&invoke); | 1715 __ bind(&invoke); |
1716 __ PushTryHandler(StackHandler::JS_ENTRY, 0); | 1716 __ PushTryHandler(StackHandler::JS_ENTRY, 0); |
1717 // If an exception not caught by another handler occurs, this handler | 1717 // If an exception not caught by another handler occurs, this handler |
1718 // returns control to the code after the jmp(&invoke) above, which | 1717 // returns control to the code after the jmp(&invoke) above, which |
1719 // restores all kCalleeSaved registers (including cp and fp) to their | 1719 // restores all kCalleeSaved registers (including cp and fp) to their |
1720 // saved values before returning a failure to C. | 1720 // saved values before returning a failure to C. |
1721 | 1721 |
1722 // Clear any pending exceptions. | 1722 // Clear any pending exceptions. |
1723 __ LoadRoot(t1, Heap::kTheHoleValueRootIndex); | 1723 __ LoadRoot(a5, Heap::kTheHoleValueRootIndex); |
1724 __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress, | 1724 __ li(a4, Operand(ExternalReference(Isolate::kPendingExceptionAddress, |
1725 isolate))); | 1725 isolate))); |
1726 __ sw(t1, MemOperand(t0)); | 1726 __ sd(a5, MemOperand(a4)); |
1727 | 1727 |
1728 // Invoke the function by calling through JS entry trampoline builtin. | 1728 // Invoke the function by calling through JS entry trampoline builtin. |
1729 // Notice that we cannot store a reference to the trampoline code directly in | 1729 // Notice that we cannot store a reference to the trampoline code directly in |
1730 // this stub, because runtime stubs are not traversed when doing GC. | 1730 // this stub, because runtime stubs are not traversed when doing GC. |
1731 | 1731 |
1732 // Registers: | 1732 // Registers: |
1733 // a0: entry_address | 1733 // a0: entry_address |
1734 // a1: function | 1734 // a1: function |
1735 // a2: receiver_pointer | 1735 // a2: receiver_pointer |
1736 // a3: argc | 1736 // a3: argc |
1737 // s0: argv | 1737 // s0: argv |
1738 // | 1738 // |
1739 // Stack: | 1739 // Stack: |
1740 // handler frame | 1740 // handler frame |
1741 // entry frame | 1741 // entry frame |
1742 // callee saved registers + ra | 1742 // callee saved registers + ra |
1743 // 4 args slots | 1743 // [ O32: 4 args slots] |
1744 // args | 1744 // args |
1745 | 1745 |
1746 if (is_construct) { | 1746 if (is_construct) { |
1747 ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline, | 1747 ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline, |
1748 isolate); | 1748 isolate); |
1749 __ li(t0, Operand(construct_entry)); | 1749 __ li(a4, Operand(construct_entry)); |
1750 } else { | 1750 } else { |
1751 ExternalReference entry(Builtins::kJSEntryTrampoline, masm->isolate()); | 1751 ExternalReference entry(Builtins::kJSEntryTrampoline, masm->isolate()); |
1752 __ li(t0, Operand(entry)); | 1752 __ li(a4, Operand(entry)); |
1753 } | 1753 } |
1754 __ lw(t9, MemOperand(t0)); // Deref address. | 1754 __ ld(t9, MemOperand(a4)); // Deref address. |
1755 | |
1756 // Call JSEntryTrampoline. | 1755 // Call JSEntryTrampoline. |
1757 __ addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag); | 1756 __ daddiu(t9, t9, Code::kHeaderSize - kHeapObjectTag); |
1758 __ Call(t9); | 1757 __ Call(t9); |
1759 | 1758 |
1760 // Unlink this frame from the handler chain. | 1759 // Unlink this frame from the handler chain. |
1761 __ PopTryHandler(); | 1760 __ PopTryHandler(); |
1762 | 1761 |
1763 __ bind(&exit); // v0 holds result | 1762 __ bind(&exit); // v0 holds result |
1764 // Check if the current stack frame is marked as the outermost JS frame. | 1763 // Check if the current stack frame is marked as the outermost JS frame. |
1765 Label non_outermost_js_2; | 1764 Label non_outermost_js_2; |
1766 __ pop(t1); | 1765 __ pop(a5); |
1767 __ Branch(&non_outermost_js_2, | 1766 __ Branch(&non_outermost_js_2, |
1768 ne, | 1767 ne, |
1769 t1, | 1768 a5, |
1770 Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME))); | 1769 Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME))); |
1771 __ li(t1, Operand(ExternalReference(js_entry_sp))); | 1770 __ li(a5, Operand(ExternalReference(js_entry_sp))); |
1772 __ sw(zero_reg, MemOperand(t1)); | 1771 __ sd(zero_reg, MemOperand(a5)); |
1773 __ bind(&non_outermost_js_2); | 1772 __ bind(&non_outermost_js_2); |
1774 | 1773 |
1775 // Restore the top frame descriptors from the stack. | 1774 // Restore the top frame descriptors from the stack. |
1776 __ pop(t1); | 1775 __ pop(a5); |
1777 __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress, | 1776 __ li(a4, Operand(ExternalReference(Isolate::kCEntryFPAddress, |
1778 isolate))); | 1777 isolate))); |
1779 __ sw(t1, MemOperand(t0)); | 1778 __ sd(a5, MemOperand(a4)); |
1780 | 1779 |
1781 // Reset the stack to the callee saved registers. | 1780 // Reset the stack to the callee saved registers. |
1782 __ addiu(sp, sp, -EntryFrameConstants::kCallerFPOffset); | 1781 __ daddiu(sp, sp, -EntryFrameConstants::kCallerFPOffset); |
1783 | 1782 |
1784 // Restore callee-saved fpu registers. | 1783 // Restore callee-saved fpu registers. |
1785 __ MultiPopFPU(kCalleeSavedFPU); | 1784 __ MultiPopFPU(kCalleeSavedFPU); |
1786 | 1785 |
1787 // Restore callee saved registers from the stack. | 1786 // Restore callee saved registers from the stack. |
1788 __ MultiPop(kCalleeSaved | ra.bit()); | 1787 __ MultiPop(kCalleeSaved | ra.bit()); |
1789 // Return. | 1788 // Return. |
1790 __ Jump(ra); | 1789 __ Jump(ra); |
1791 } | 1790 } |
1792 | 1791 |
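The js_entry_sp bookkeeping above (outermost vs. inner JSEntry frames) reduces to a short C++ sketch; the type and function names here are illustrative stand-ins, not V8's real declarations.

struct IsolateState {
  void* js_entry_sp = nullptr;  // Non-null while JS frames are on the stack.
};

enum JsEntryFrameMarker { kOutermostJsEntry, kInnerJsEntry };

// On entry: if no JS is running yet, record our fp as the outermost entry
// and push the OUTERMOST marker; otherwise this is a nested C++ -> JS call.
JsEntryFrameMarker EnterJs(IsolateState* iso, void* fp) {
  if (iso->js_entry_sp == nullptr) {
    iso->js_entry_sp = fp;
    return kOutermostJsEntry;
  }
  return kInnerJsEntry;
}

// On exit: only the outermost frame clears js_entry_sp again.
void LeaveJs(IsolateState* iso, JsEntryFrameMarker marker) {
  if (marker == kOutermostJsEntry) iso->js_entry_sp = nullptr;
}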
1793 | 1792 |
1794 // Uses registers a0 to t0. | 1793 // Uses registers a0 to a4. |
1795 // Expected input (depending on whether args are in registers or on the stack): | 1794 // Expected input (depending on whether args are in registers or on the stack): |
1796 // * object: a0 or at sp + 1 * kPointerSize. | 1795 // * object: a0 or at sp + 1 * kPointerSize. |
1797 // * function: a1 or at sp. | 1796 // * function: a1 or at sp. |
1798 // | 1797 // |
1799 // An inlined call site may have been generated before calling this stub. | 1798 // An inlined call site may have been generated before calling this stub. |
1800 // In this case the offset to the inline site to patch is passed on the stack, | 1799 // In this case the offset to the inline site to patch is passed on the stack, |
1801 // in the safepoint slot for register t0. | 1800 // in the safepoint slot for register a4. |
1802 void InstanceofStub::Generate(MacroAssembler* masm) { | 1801 void InstanceofStub::Generate(MacroAssembler* masm) { |
1803 // Call site inlining and patching implies arguments in registers. | 1802 // Call site inlining and patching implies arguments in registers. |
1804 ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck()); | 1803 ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck()); |
1805 // ReturnTrueFalse is only implemented for inlined call sites. | 1804 // ReturnTrueFalse is only implemented for inlined call sites. |
1806 ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck()); | 1805 ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck()); |
1807 | 1806 |
1808 // Fixed register usage throughout the stub: | 1807 // Fixed register usage throughout the stub: |
1809 const Register object = a0; // Object (lhs). | 1808 const Register object = a0; // Object (lhs). |
1810 Register map = a3; // Map of the object. | 1809 Register map = a3; // Map of the object. |
1811 const Register function = a1; // Function (rhs). | 1810 const Register function = a1; // Function (rhs). |
1812 const Register prototype = t0; // Prototype of the function. | 1811 const Register prototype = a4; // Prototype of the function. |
1813 const Register inline_site = t5; | 1812 const Register inline_site = t1; |
1814 const Register scratch = a2; | 1813 const Register scratch = a2; |
1815 | 1814 |
1816 const int32_t kDeltaToLoadBoolResult = 5 * kPointerSize; | 1815 const int32_t kDeltaToLoadBoolResult = 7 * Assembler::kInstrSize; |
1817 | 1816 |
1818 Label slow, loop, is_instance, is_not_instance, not_js_object; | 1817 Label slow, loop, is_instance, is_not_instance, not_js_object; |
1819 | 1818 |
1820 if (!HasArgsInRegisters()) { | 1819 if (!HasArgsInRegisters()) { |
1821 __ lw(object, MemOperand(sp, 1 * kPointerSize)); | 1820 __ ld(object, MemOperand(sp, 1 * kPointerSize)); |
1822 __ lw(function, MemOperand(sp, 0)); | 1821 __ ld(function, MemOperand(sp, 0)); |
1823 } | 1822 } |
1824 | 1823 |
1825 // Check that the left hand is a JS object and load map. | 1824 // Check that the left hand is a JS object and load map. |
1826 __ JumpIfSmi(object, ¬_js_object); | 1825 __ JumpIfSmi(object, ¬_js_object); |
1827 __ IsObjectJSObjectType(object, map, scratch, ¬_js_object); | 1826 __ IsObjectJSObjectType(object, map, scratch, ¬_js_object); |
1828 | 1827 |
1829 // If there is a call site cache don't look in the global cache, but do the | 1828 // If there is a call site cache don't look in the global cache, but do the |
1830 // real lookup and update the call site cache. | 1829 // real lookup and update the call site cache. |
1831 if (!HasCallSiteInlineCheck()) { | 1830 if (!HasCallSiteInlineCheck()) { |
1832 Label miss; | 1831 Label miss; |
(...skipping 16 matching lines...)
1849 | 1848 |
1850 // Update the global instanceof or call site inlined cache with the current | 1849 // Update the global instanceof or call site inlined cache with the current |
1851 // map and function. The cached answer will be set when it is known below. | 1850 // map and function. The cached answer will be set when it is known below. |
1852 if (!HasCallSiteInlineCheck()) { | 1851 if (!HasCallSiteInlineCheck()) { |
1853 __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex); | 1852 __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex); |
1854 __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex); | 1853 __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex); |
1855 } else { | 1854 } else { |
1856 ASSERT(HasArgsInRegisters()); | 1855 ASSERT(HasArgsInRegisters()); |
1857 // Patch the (relocated) inlined map check. | 1856 // Patch the (relocated) inlined map check. |
1858 | 1857 |
1859 // The offset was stored in t0 safepoint slot. | 1858 // The offset was stored in a4 safepoint slot. |
1860 // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal). | 1859 // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal). |
1861 __ LoadFromSafepointRegisterSlot(scratch, t0); | 1860 __ LoadFromSafepointRegisterSlot(scratch, a4); |
1862 __ Subu(inline_site, ra, scratch); | 1861 __ Dsubu(inline_site, ra, scratch); |
1863 // Get the map location in scratch and patch it. | 1862 // Get the map location in scratch and patch it. |
1864 __ GetRelocatedValue(inline_site, scratch, v1); // v1 used as scratch. | 1863 __ GetRelocatedValue(inline_site, scratch, v1); // v1 used as scratch. |
1865 __ sw(map, FieldMemOperand(scratch, Cell::kValueOffset)); | 1864 __ sd(map, FieldMemOperand(scratch, Cell::kValueOffset)); |
1866 } | 1865 } |
1867 | 1866 |
1868 // Register mapping: a3 is object map and t0 is function prototype. | 1867 // Register mapping: a3 is object map and a4 is function prototype. |
1869 // Get prototype of object into a2. | 1868 // Get prototype of object into a2. |
1870 __ lw(scratch, FieldMemOperand(map, Map::kPrototypeOffset)); | 1869 __ ld(scratch, FieldMemOperand(map, Map::kPrototypeOffset)); |
1871 | 1870 |
1872 // We don't need map any more. Use it as a scratch register. | 1871 // We don't need map any more. Use it as a scratch register. |
1873 Register scratch2 = map; | 1872 Register scratch2 = map; |
1874 map = no_reg; | 1873 map = no_reg; |
1875 | 1874 |
1876 // Loop through the prototype chain looking for the function prototype. | 1875 // Loop through the prototype chain looking for the function prototype. |
1877 __ LoadRoot(scratch2, Heap::kNullValueRootIndex); | 1876 __ LoadRoot(scratch2, Heap::kNullValueRootIndex); |
1878 __ bind(&loop); | 1877 __ bind(&loop); |
1879 __ Branch(&is_instance, eq, scratch, Operand(prototype)); | 1878 __ Branch(&is_instance, eq, scratch, Operand(prototype)); |
1880 __ Branch(&is_not_instance, eq, scratch, Operand(scratch2)); | 1879 __ Branch(&is_not_instance, eq, scratch, Operand(scratch2)); |
1881 __ lw(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset)); | 1880 __ ld(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset)); |
1882 __ lw(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset)); | 1881 __ ld(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset)); |
1883 __ Branch(&loop); | 1882 __ Branch(&loop); |
1884 | 1883 |
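The loop above is a standard prototype-chain walk; as a hedged C++ sketch with a stand-in object type (not V8's real object model):

struct Obj {
  Obj* prototype;  // nullptr plays the role of the null sentinel in scratch2.
};

// Follow prototype links until we reach the function's prototype
// (is_instance) or fall off the end of the chain (is_not_instance).
bool IsInstance(const Obj* object, const Obj* function_prototype) {
  for (const Obj* p = object->prototype; p != nullptr; p = p->prototype) {
    if (p == function_prototype) return true;
  }
  return false;
}
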
1885 __ bind(&is_instance); | 1884 __ bind(&is_instance); |
1886 ASSERT(Smi::FromInt(0) == 0); | 1885 ASSERT(Smi::FromInt(0) == 0); |
1887 if (!HasCallSiteInlineCheck()) { | 1886 if (!HasCallSiteInlineCheck()) { |
1888 __ mov(v0, zero_reg); | 1887 __ mov(v0, zero_reg); |
1889 __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex); | 1888 __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex); |
1890 } else { | 1889 } else { |
1891 // Patch the call site to return true. | 1890 // Patch the call site to return true. |
1892 __ LoadRoot(v0, Heap::kTrueValueRootIndex); | 1891 __ LoadRoot(v0, Heap::kTrueValueRootIndex); |
1893 __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult)); | 1892 __ Daddu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult)); |
1894 // Get the boolean result location in scratch and patch it. | 1893 // Get the boolean result location in scratch and patch it. |
1895 __ PatchRelocatedValue(inline_site, scratch, v0); | 1894 __ PatchRelocatedValue(inline_site, scratch, v0); |
1896 | 1895 |
1897 if (!ReturnTrueFalseObject()) { | 1896 if (!ReturnTrueFalseObject()) { |
1898 ASSERT_EQ(Smi::FromInt(0), 0); | 1897 ASSERT_EQ(Smi::FromInt(0), 0); |
1899 __ mov(v0, zero_reg); | 1898 __ mov(v0, zero_reg); |
1900 } | 1899 } |
1901 } | 1900 } |
1902 __ DropAndRet(HasArgsInRegisters() ? 0 : 2); | 1901 __ DropAndRet(HasArgsInRegisters() ? 0 : 2); |
1903 | 1902 |
1904 __ bind(&is_not_instance); | 1903 __ bind(&is_not_instance); |
1905 if (!HasCallSiteInlineCheck()) { | 1904 if (!HasCallSiteInlineCheck()) { |
1906 __ li(v0, Operand(Smi::FromInt(1))); | 1905 __ li(v0, Operand(Smi::FromInt(1))); |
1907 __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex); | 1906 __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex); |
1908 } else { | 1907 } else { |
1909 // Patch the call site to return false. | 1908 // Patch the call site to return false. |
1910 __ LoadRoot(v0, Heap::kFalseValueRootIndex); | 1909 __ LoadRoot(v0, Heap::kFalseValueRootIndex); |
1911 __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult)); | 1910 __ Daddu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult)); |
1912 // Get the boolean result location in scratch and patch it. | 1911 // Get the boolean result location in scratch and patch it. |
1913 __ PatchRelocatedValue(inline_site, scratch, v0); | 1912 __ PatchRelocatedValue(inline_site, scratch, v0); |
1914 | 1913 |
1915 if (!ReturnTrueFalseObject()) { | 1914 if (!ReturnTrueFalseObject()) { |
1916 __ li(v0, Operand(Smi::FromInt(1))); | 1915 __ li(v0, Operand(Smi::FromInt(1))); |
1917 } | 1916 } |
1918 } | 1917 } |
1919 | 1918 |
1920 __ DropAndRet(HasArgsInRegisters() ? 0 : 2); | 1919 __ DropAndRet(HasArgsInRegisters() ? 0 : 2); |
1921 | 1920 |
(...skipping 53 matching lines...)
1975 Register name = LoadIC::NameRegister(); | 1974 Register name = LoadIC::NameRegister(); |
1976 | 1975 |
1977 ASSERT(kind() == Code::LOAD_IC || | 1976 ASSERT(kind() == Code::LOAD_IC || |
1978 kind() == Code::KEYED_LOAD_IC); | 1977 kind() == Code::KEYED_LOAD_IC); |
1979 | 1978 |
1980 if (kind() == Code::KEYED_LOAD_IC) { | 1979 if (kind() == Code::KEYED_LOAD_IC) { |
1981 __ Branch(&miss, ne, name, | 1980 __ Branch(&miss, ne, name, |
1982 Operand(isolate()->factory()->prototype_string())); | 1981 Operand(isolate()->factory()->prototype_string())); |
1983 } | 1982 } |
1984 | 1983 |
1985 StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, a3, t0, &miss); | 1984 StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, a3, a4, &miss); |
1986 __ bind(&miss); | 1985 __ bind(&miss); |
1987 StubCompiler::TailCallBuiltin( | 1986 StubCompiler::TailCallBuiltin( |
1988 masm, BaseLoadStoreStubCompiler::MissBuiltin(kind())); | 1987 masm, BaseLoadStoreStubCompiler::MissBuiltin(kind())); |
1989 } | 1988 } |
1990 | 1989 |
1991 | 1990 |
1992 Register InstanceofStub::left() { return a0; } | 1991 Register InstanceofStub::left() { return a0; } |
1993 | 1992 |
1994 | 1993 |
1995 Register InstanceofStub::right() { return a1; } | 1994 Register InstanceofStub::right() { return a1; } |
1996 | 1995 |
1997 | 1996 |
1998 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { | 1997 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { |
1999 // The displacement is the offset of the last parameter (if any) | 1998 // The displacement is the offset of the last parameter (if any) |
2000 // relative to the frame pointer. | 1999 // relative to the frame pointer. |
2001 const int kDisplacement = | 2000 const int kDisplacement = |
2002 StandardFrameConstants::kCallerSPOffset - kPointerSize; | 2001 StandardFrameConstants::kCallerSPOffset - kPointerSize; |
2003 | 2002 |
2004 // Check that the key is a smi. | 2003 // Check that the key is a smi. |
2005 Label slow; | 2004 Label slow; |
2006 __ JumpIfNotSmi(a1, &slow); | 2005 __ JumpIfNotSmi(a1, &slow); |
2007 | 2006 |
2008 // Check if the calling frame is an arguments adaptor frame. | 2007 // Check if the calling frame is an arguments adaptor frame. |
2009 Label adaptor; | 2008 Label adaptor; |
2010 __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); | 2009 __ ld(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); |
2011 __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset)); | 2010 __ ld(a3, MemOperand(a2, StandardFrameConstants::kContextOffset)); |
2012 __ Branch(&adaptor, | 2011 __ Branch(&adaptor, |
2013 eq, | 2012 eq, |
2014 a3, | 2013 a3, |
2015 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); | 2014 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); |
2016 | 2015 |
2017 // Check index (a1) against formal parameters count limit passed in | 2016 // Check index (a1) against formal parameters count limit passed in |
2018 // through register a0. Use unsigned comparison to get negative | 2017 // through register a0. Use unsigned comparison to get negative |
2019 // check for free. | 2018 // check for free. |
2020 __ Branch(&slow, hs, a1, Operand(a0)); | 2019 __ Branch(&slow, hs, a1, Operand(a0)); |
2021 | 2020 |
2022 // Read the argument from the stack and return it. | 2021 // Read the argument from the stack and return it. |
2023 __ subu(a3, a0, a1); | 2022 __ dsubu(a3, a0, a1); |
2024 __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize); | 2023 __ SmiScale(a7, a3, kPointerSizeLog2); |
2025 __ Addu(a3, fp, Operand(t3)); | 2024 __ Daddu(a3, fp, Operand(a7)); |
2026 __ Ret(USE_DELAY_SLOT); | 2025 __ Ret(USE_DELAY_SLOT); |
2027 __ lw(v0, MemOperand(a3, kDisplacement)); | 2026 __ ld(v0, MemOperand(a3, kDisplacement)); |
2028 | 2027 |
2029 // Arguments adaptor case: Check index (a1) against actual arguments | 2028 // Arguments adaptor case: Check index (a1) against actual arguments |
2030 // limit found in the arguments adaptor frame. Use unsigned | 2029 // limit found in the arguments adaptor frame. Use unsigned |
2031 // comparison to get negative check for free. | 2030 // comparison to get negative check for free. |
2032 __ bind(&adaptor); | 2031 __ bind(&adaptor); |
2033 __ lw(a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset)); | 2032 __ ld(a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset)); |
2034 __ Branch(&slow, Ugreater_equal, a1, Operand(a0)); | 2033 __ Branch(&slow, Ugreater_equal, a1, Operand(a0)); |
2035 | 2034 |
2036 // Read the argument from the adaptor frame and return it. | 2035 // Read the argument from the adaptor frame and return it. |
2037 __ subu(a3, a0, a1); | 2036 __ dsubu(a3, a0, a1); |
2038 __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize); | 2037 __ SmiScale(a7, a3, kPointerSizeLog2); |
2039 __ Addu(a3, a2, Operand(t3)); | 2038 __ Daddu(a3, a2, Operand(a7)); |
2040 __ Ret(USE_DELAY_SLOT); | 2039 __ Ret(USE_DELAY_SLOT); |
2041 __ lw(v0, MemOperand(a3, kDisplacement)); | 2040 __ ld(v0, MemOperand(a3, kDisplacement)); |
2042 | 2041 |
2043 // Slow-case: Handle non-smi or out-of-bounds access to arguments | 2042 // Slow-case: Handle non-smi or out-of-bounds access to arguments |
2044 // by calling the runtime system. | 2043 // by calling the runtime system. |
2045 __ bind(&slow); | 2044 __ bind(&slow); |
2046 __ push(a1); | 2045 __ push(a1); |
2047 __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1); | 2046 __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1); |
2048 } | 2047 } |
2049 | 2048 |
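Both return paths above compute the element address with the same smi arithmetic; a self-contained sketch, assuming MIPS64's upper-word smi encoding (payload in bits 32..63, so SmiScale(x, 3) is an arithmetic right shift by 29):

#include <cstdint>

constexpr int kPointerSizeLog2 = 3;  // 8-byte stack slots on MIPS64.
constexpr int kSmiShift = 32;        // Assumed smi payload position.

int64_t SmiScale(int64_t smi, int scale) {
  return smi >> (kSmiShift - scale);  // Untag and scale in one shift.
}

// Address of argument `index` out of `argc` (both smi-tagged), relative
// to a frame base plus the fixed displacement used by the stub.
int64_t ArgumentAddress(int64_t base, int64_t argc_smi, int64_t index_smi,
                        int64_t displacement) {
  return base + SmiScale(argc_smi - index_smi, kPointerSizeLog2) + displacement;
}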
2050 | 2049 |
2051 void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) { | 2050 void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) { |
2052 // sp[0] : number of parameters | 2051 // sp[0] : number of parameters |
2053 // sp[4] : receiver displacement | 2052 // sp[8] : receiver displacement |
2054 // sp[8] : function | 2053 // sp[16] : function |
2055 // Check if the calling frame is an arguments adaptor frame. | 2054 // Check if the calling frame is an arguments adaptor frame. |
2056 Label runtime; | 2055 Label runtime; |
2057 __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); | 2056 __ ld(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); |
2058 __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset)); | 2057 __ ld(a2, MemOperand(a3, StandardFrameConstants::kContextOffset)); |
2059 __ Branch(&runtime, | 2058 __ Branch(&runtime, |
2060 ne, | 2059 ne, |
2061 a2, | 2060 a2, |
2062 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); | 2061 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); |
2063 | 2062 |
2064 // Patch the arguments.length and the parameters pointer in the current frame. | 2063 // Patch the arguments.length and the parameters pointer in the current frame. |
2065 __ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset)); | 2064 __ ld(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset)); |
2066 __ sw(a2, MemOperand(sp, 0 * kPointerSize)); | 2065 __ sd(a2, MemOperand(sp, 0 * kPointerSize)); |
2067 __ sll(t3, a2, 1); | 2066 __ SmiScale(a7, a2, kPointerSizeLog2); |
2068 __ Addu(a3, a3, Operand(t3)); | 2067 __ Daddu(a3, a3, Operand(a7)); |
2069 __ addiu(a3, a3, StandardFrameConstants::kCallerSPOffset); | 2068 __ daddiu(a3, a3, StandardFrameConstants::kCallerSPOffset); |
2070 __ sw(a3, MemOperand(sp, 1 * kPointerSize)); | 2069 __ sd(a3, MemOperand(sp, 1 * kPointerSize)); |
2071 | 2070 |
2072 __ bind(&runtime); | 2071 __ bind(&runtime); |
2073 __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1); | 2072 __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1); |
2074 } | 2073 } |
2075 | 2074 |
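The slow path above and the fast path below open with the same probe: look one frame up and compare the caller's context slot against the ARGUMENTS_ADAPTOR sentinel. Roughly, with an illustrative frame layout:

#include <cstdint>

struct Frame {
  Frame* caller;           // StandardFrameConstants::kCallerFPOffset
  intptr_t context_slot;   // Holds a marker smi in adaptor frames.
  intptr_t length_slot;    // Actual argc (smi) when caller is an adaptor.
};

// `adaptor_marker` stands in for Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR).
bool CallerIsArgumentsAdaptor(const Frame* fp, intptr_t adaptor_marker) {
  return fp->caller->context_slot == adaptor_marker;
}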
2076 | 2075 |
2077 void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) { | 2076 void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) { |
2078 // Stack layout: | 2077 // Stack layout: |
2079 // sp[0] : number of parameters (tagged) | 2078 // sp[0] : number of parameters (tagged) |
2080 // sp[4] : address of receiver argument | 2079 // sp[8] : address of receiver argument |
2081 // sp[8] : function | 2080 // sp[16] : function |
2082 // Registers used over whole function: | 2081 // Registers used over whole function: |
2083 // t2 : allocated object (tagged) | 2082 // a6 : allocated object (tagged) |
2084 // t5 : mapped parameter count (tagged) | 2083 // t1 : mapped parameter count (tagged) |
2085 | 2084 |
2086 __ lw(a1, MemOperand(sp, 0 * kPointerSize)); | 2085 __ ld(a1, MemOperand(sp, 0 * kPointerSize)); |
2087 // a1 = parameter count (tagged) | 2086 // a1 = parameter count (tagged) |
2088 | 2087 |
2089 // Check if the calling frame is an arguments adaptor frame. | 2088 // Check if the calling frame is an arguments adaptor frame. |
2090 Label runtime; | 2089 Label runtime; |
2091 Label adaptor_frame, try_allocate; | 2090 Label adaptor_frame, try_allocate; |
2092 __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); | 2091 __ ld(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); |
2093 __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset)); | 2092 __ ld(a2, MemOperand(a3, StandardFrameConstants::kContextOffset)); |
2094 __ Branch(&adaptor_frame, | 2093 __ Branch(&adaptor_frame, |
2095 eq, | 2094 eq, |
2096 a2, | 2095 a2, |
2097 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); | 2096 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); |
2098 | 2097 |
2099 // No adaptor, parameter count = argument count. | 2098 // No adaptor, parameter count = argument count. |
2100 __ mov(a2, a1); | 2099 __ mov(a2, a1); |
2101 __ b(&try_allocate); | 2100 __ Branch(&try_allocate); |
2102 __ nop(); // Branch delay slot nop. | |
2103 | 2101 |
2104 // We have an adaptor frame. Patch the parameters pointer. | 2102 // We have an adaptor frame. Patch the parameters pointer. |
2105 __ bind(&adaptor_frame); | 2103 __ bind(&adaptor_frame); |
2106 __ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset)); | 2104 __ ld(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset)); |
2107 __ sll(t6, a2, 1); | 2105 __ SmiScale(t2, a2, kPointerSizeLog2); |
2108 __ Addu(a3, a3, Operand(t6)); | 2106 __ Daddu(a3, a3, Operand(t2)); |
2109 __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset)); | 2107 __ Daddu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset)); |
2110 __ sw(a3, MemOperand(sp, 1 * kPointerSize)); | 2108 __ sd(a3, MemOperand(sp, 1 * kPointerSize)); |
2111 | 2109 |
2112 // a1 = parameter count (tagged) | 2110 // a1 = parameter count (tagged) |
2113 // a2 = argument count (tagged) | 2111 // a2 = argument count (tagged) |
2114 // Compute the mapped parameter count = min(a1, a2) in a1. | 2112 // Compute the mapped parameter count = min(a1, a2) in a1. |
2115 Label skip_min; | 2113 Label skip_min; |
2116 __ Branch(&skip_min, lt, a1, Operand(a2)); | 2114 __ Branch(&skip_min, lt, a1, Operand(a2)); |
2117 __ mov(a1, a2); | 2115 __ mov(a1, a2); |
2118 __ bind(&skip_min); | 2116 __ bind(&skip_min); |
2119 | 2117 |
2120 __ bind(&try_allocate); | 2118 __ bind(&try_allocate); |
2121 | 2119 |
2122 // Compute the sizes of backing store, parameter map, and arguments object. | 2120 // Compute the sizes of backing store, parameter map, and arguments object. |
2123 // 1. Parameter map, has 2 extra words containing context and backing store. | 2121 // 1. Parameter map, has 2 extra words containing context and backing store. |
2124 const int kParameterMapHeaderSize = | 2122 const int kParameterMapHeaderSize = |
2125 FixedArray::kHeaderSize + 2 * kPointerSize; | 2123 FixedArray::kHeaderSize + 2 * kPointerSize; |
2126 // If there are no mapped parameters, we do not need the parameter_map. | 2124 // If there are no mapped parameters, we do not need the parameter_map. |
2127 Label param_map_size; | 2125 Label param_map_size; |
2128 ASSERT_EQ(0, Smi::FromInt(0)); | 2126 ASSERT_EQ(0, Smi::FromInt(0)); |
2129 __ Branch(USE_DELAY_SLOT, ¶m_map_size, eq, a1, Operand(zero_reg)); | 2127 __ Branch(USE_DELAY_SLOT, ¶m_map_size, eq, a1, Operand(zero_reg)); |
2130 __ mov(t5, zero_reg); // In delay slot: param map size = 0 when a1 == 0. | 2128 __ mov(t1, zero_reg); // In delay slot: param map size = 0 when a1 == 0. |
2131 __ sll(t5, a1, 1); | 2129 __ SmiScale(t1, a1, kPointerSizeLog2); |
2132 __ addiu(t5, t5, kParameterMapHeaderSize); | 2130 __ daddiu(t1, t1, kParameterMapHeaderSize); |
2133 __ bind(¶m_map_size); | 2131 __ bind(¶m_map_size); |
2134 | 2132 |
2135 // 2. Backing store. | 2133 // 2. Backing store. |
2136 __ sll(t6, a2, 1); | 2134 __ SmiScale(t2, a2, kPointerSizeLog2); |
2137 __ Addu(t5, t5, Operand(t6)); | 2135 __ Daddu(t1, t1, Operand(t2)); |
2138 __ Addu(t5, t5, Operand(FixedArray::kHeaderSize)); | 2136 __ Daddu(t1, t1, Operand(FixedArray::kHeaderSize)); |
2139 | 2137 |
2140 // 3. Arguments object. | 2138 // 3. Arguments object. |
2141 __ Addu(t5, t5, Operand(Heap::kSloppyArgumentsObjectSize)); | 2139 __ Daddu(t1, t1, Operand(Heap::kSloppyArgumentsObjectSize)); |
2142 | 2140 |
2143 // Do the allocation of all three objects in one go. | 2141 // Do the allocation of all three objects in one go. |
2144 __ Allocate(t5, v0, a3, t0, &runtime, TAG_OBJECT); | 2142 __ Allocate(t1, v0, a3, a4, &runtime, TAG_OBJECT); |
2145 | 2143 |
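Steps 1-3 above fold into one allocation size; the same arithmetic in plain C++, with placeholder constants rather than the real Heap/FixedArray values:

constexpr int kPtrSize = 8;                           // MIPS64 pointers.
constexpr int kFixedArrayHeader = 2 * kPtrSize;       // Map + length.
constexpr int kParamMapHeader = kFixedArrayHeader + 2 * kPtrSize;
constexpr int kSloppyArgumentsObject = 5 * kPtrSize;  // Assumed size.

int SloppyArgumentsAllocationSize(int mapped_count, int arg_count) {
  int size = 0;
  if (mapped_count > 0)  // No parameter map when nothing is mapped.
    size += kParamMapHeader + mapped_count * kPtrSize;
  size += kFixedArrayHeader + arg_count * kPtrSize;   // Backing store.
  return size + kSloppyArgumentsObject;               // The JSObject itself.
}
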
2146 // v0 = address of new object(s) (tagged) | 2144 // v0 = address of new object(s) (tagged) |
2147 // a2 = argument count (smi-tagged) | 2145 // a2 = argument count (smi-tagged) |
2148 // Get the arguments boilerplate from the current native context into t0. | 2146 // Get the arguments boilerplate from the current native context into a4. |
2149 const int kNormalOffset = | 2147 const int kNormalOffset = |
2150 Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX); | 2148 Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX); |
2151 const int kAliasedOffset = | 2149 const int kAliasedOffset = |
2152 Context::SlotOffset(Context::ALIASED_ARGUMENTS_MAP_INDEX); | 2150 Context::SlotOffset(Context::ALIASED_ARGUMENTS_MAP_INDEX); |
2153 | 2151 |
2154 __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); | 2152 __ ld(a4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); |
2155 __ lw(t0, FieldMemOperand(t0, GlobalObject::kNativeContextOffset)); | 2153 __ ld(a4, FieldMemOperand(a4, GlobalObject::kNativeContextOffset)); |
2156 Label skip2_ne, skip2_eq; | 2154 Label skip2_ne, skip2_eq; |
2157 __ Branch(&skip2_ne, ne, a1, Operand(zero_reg)); | 2155 __ Branch(&skip2_ne, ne, a1, Operand(zero_reg)); |
2158 __ lw(t0, MemOperand(t0, kNormalOffset)); | 2156 __ ld(a4, MemOperand(a4, kNormalOffset)); |
2159 __ bind(&skip2_ne); | 2157 __ bind(&skip2_ne); |
2160 | 2158 |
2161 __ Branch(&skip2_eq, eq, a1, Operand(zero_reg)); | 2159 __ Branch(&skip2_eq, eq, a1, Operand(zero_reg)); |
2162 __ lw(t0, MemOperand(t0, kAliasedOffset)); | 2160 __ ld(a4, MemOperand(a4, kAliasedOffset)); |
2163 __ bind(&skip2_eq); | 2161 __ bind(&skip2_eq); |
2164 | 2162 |
2165 // v0 = address of new object (tagged) | 2163 // v0 = address of new object (tagged) |
2166 // a1 = mapped parameter count (tagged) | 2164 // a1 = mapped parameter count (tagged) |
2167 // a2 = argument count (smi-tagged) | 2165 // a2 = argument count (smi-tagged) |
2168 // t0 = address of arguments map (tagged) | 2166 // a4 = address of arguments map (tagged) |
2169 __ sw(t0, FieldMemOperand(v0, JSObject::kMapOffset)); | 2167 __ sd(a4, FieldMemOperand(v0, JSObject::kMapOffset)); |
2170 __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex); | 2168 __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex); |
2171 __ sw(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset)); | 2169 __ sd(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset)); |
2172 __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset)); | 2170 __ sd(a3, FieldMemOperand(v0, JSObject::kElementsOffset)); |
2173 | 2171 |
2174 // Set up the callee in-object property. | 2172 // Set up the callee in-object property. |
2175 STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1); | 2173 STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1); |
2176 __ lw(a3, MemOperand(sp, 2 * kPointerSize)); | 2174 __ ld(a3, MemOperand(sp, 2 * kPointerSize)); |
2177 __ AssertNotSmi(a3); | 2175 __ AssertNotSmi(a3); |
2178 const int kCalleeOffset = JSObject::kHeaderSize + | 2176 const int kCalleeOffset = JSObject::kHeaderSize + |
2179 Heap::kArgumentsCalleeIndex * kPointerSize; | 2177 Heap::kArgumentsCalleeIndex * kPointerSize; |
2180 __ sw(a3, FieldMemOperand(v0, kCalleeOffset)); | 2178 __ sd(a3, FieldMemOperand(v0, kCalleeOffset)); |
2181 | 2179 |
2182 // Use the length (smi tagged) and set that as an in-object property too. | 2180 // Use the length (smi tagged) and set that as an in-object property too. |
2183 __ AssertSmi(a2); | |
2184 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0); | 2181 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0); |
2185 const int kLengthOffset = JSObject::kHeaderSize + | 2182 const int kLengthOffset = JSObject::kHeaderSize + |
2186 Heap::kArgumentsLengthIndex * kPointerSize; | 2183 Heap::kArgumentsLengthIndex * kPointerSize; |
2187 __ sw(a2, FieldMemOperand(v0, kLengthOffset)); | 2184 __ sd(a2, FieldMemOperand(v0, kLengthOffset)); |
2188 | 2185 |
2189 // Set up the elements pointer in the allocated arguments object. | 2186 // Set up the elements pointer in the allocated arguments object. |
2190 // If we allocated a parameter map, t0 will point there, otherwise | 2187 // If we allocated a parameter map, a4 will point there, otherwise |
2191 // it will point to the backing store. | 2188 // it will point to the backing store. |
2192 __ Addu(t0, v0, Operand(Heap::kSloppyArgumentsObjectSize)); | 2189 __ Daddu(a4, v0, Operand(Heap::kSloppyArgumentsObjectSize)); |
2193 __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset)); | 2190 __ sd(a4, FieldMemOperand(v0, JSObject::kElementsOffset)); |
2194 | 2191 |
2195 // v0 = address of new object (tagged) | 2192 // v0 = address of new object (tagged) |
2196 // a1 = mapped parameter count (tagged) | 2193 // a1 = mapped parameter count (tagged) |
2197 // a2 = argument count (tagged) | 2194 // a2 = argument count (tagged) |
2198 // t0 = address of parameter map or backing store (tagged) | 2195 // a4 = address of parameter map or backing store (tagged) |
2199 // Initialize parameter map. If there are no mapped arguments, we're done. | 2196 // Initialize parameter map. If there are no mapped arguments, we're done. |
2200 Label skip_parameter_map; | 2197 Label skip_parameter_map; |
2201 Label skip3; | 2198 Label skip3; |
2202 __ Branch(&skip3, ne, a1, Operand(Smi::FromInt(0))); | 2199 __ Branch(&skip3, ne, a1, Operand(Smi::FromInt(0))); |
2203 // Move backing store address to a3, because it is | 2200 // Move backing store address to a3, because it is |
2204 // expected there when filling in the unmapped arguments. | 2201 // expected there when filling in the unmapped arguments. |
2205 __ mov(a3, t0); | 2202 __ mov(a3, a4); |
2206 __ bind(&skip3); | 2203 __ bind(&skip3); |
2207 | 2204 |
2208 __ Branch(&skip_parameter_map, eq, a1, Operand(Smi::FromInt(0))); | 2205 __ Branch(&skip_parameter_map, eq, a1, Operand(Smi::FromInt(0))); |
2209 | 2206 |
2210 __ LoadRoot(t2, Heap::kSloppyArgumentsElementsMapRootIndex); | 2207 __ LoadRoot(a6, Heap::kSloppyArgumentsElementsMapRootIndex); |
2211 __ sw(t2, FieldMemOperand(t0, FixedArray::kMapOffset)); | 2208 __ sd(a6, FieldMemOperand(a4, FixedArray::kMapOffset)); |
2212 __ Addu(t2, a1, Operand(Smi::FromInt(2))); | 2209 __ Daddu(a6, a1, Operand(Smi::FromInt(2))); |
2213 __ sw(t2, FieldMemOperand(t0, FixedArray::kLengthOffset)); | 2210 __ sd(a6, FieldMemOperand(a4, FixedArray::kLengthOffset)); |
2214 __ sw(cp, FieldMemOperand(t0, FixedArray::kHeaderSize + 0 * kPointerSize)); | 2211 __ sd(cp, FieldMemOperand(a4, FixedArray::kHeaderSize + 0 * kPointerSize)); |
2215 __ sll(t6, a1, 1); | 2212 __ SmiScale(t2, a1, kPointerSizeLog2); |
2216 __ Addu(t2, t0, Operand(t6)); | 2213 __ Daddu(a6, a4, Operand(t2)); |
2217 __ Addu(t2, t2, Operand(kParameterMapHeaderSize)); | 2214 __ Daddu(a6, a6, Operand(kParameterMapHeaderSize)); |
2218 __ sw(t2, FieldMemOperand(t0, FixedArray::kHeaderSize + 1 * kPointerSize)); | 2215 __ sd(a6, FieldMemOperand(a4, FixedArray::kHeaderSize + 1 * kPointerSize)); |
2219 | 2216 |
2220 // Copy the parameter slots and the holes in the arguments. | 2217 // Copy the parameter slots and the holes in the arguments. |
2221 // We need to fill in mapped_parameter_count slots. They index the context, | 2218 // We need to fill in mapped_parameter_count slots. They index the context, |
2222 // where parameters are stored in reverse order, at | 2219 // where parameters are stored in reverse order, at |
2223 // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1 | 2220 // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1 |
2224 // The mapped parameters thus need to get indices | 2221 // The mapped parameters thus need to get indices |
2225 // MIN_CONTEXT_SLOTS+parameter_count-1 .. | 2222 // MIN_CONTEXT_SLOTS+parameter_count-1 .. |
2226 // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count | 2223 // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count |
2227 // We loop from right to left. | 2224 // We loop from right to left. |
2228 Label parameters_loop, parameters_test; | 2225 Label parameters_loop, parameters_test; |
2229 __ mov(t2, a1); | 2226 __ mov(a6, a1); |
2230 __ lw(t5, MemOperand(sp, 0 * kPointerSize)); | 2227 __ ld(t1, MemOperand(sp, 0 * kPointerSize)); |
2231 __ Addu(t5, t5, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS))); | 2228 __ Daddu(t1, t1, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS))); |
2232 __ Subu(t5, t5, Operand(a1)); | 2229 __ Dsubu(t1, t1, Operand(a1)); |
2233 __ LoadRoot(t3, Heap::kTheHoleValueRootIndex); | 2230 __ LoadRoot(a7, Heap::kTheHoleValueRootIndex); |
2234 __ sll(t6, t2, 1); | 2231 __ SmiScale(t2, a6, kPointerSizeLog2); |
2235 __ Addu(a3, t0, Operand(t6)); | 2232 __ Daddu(a3, a4, Operand(t2)); |
2236 __ Addu(a3, a3, Operand(kParameterMapHeaderSize)); | 2233 __ Daddu(a3, a3, Operand(kParameterMapHeaderSize)); |
2237 | 2234 |
2238 // t2 = loop variable (tagged) | 2235 // a6 = loop variable (tagged) |
2239 // a1 = mapping index (tagged) | 2236 // a1 = mapping index (tagged) |
2240 // a3 = address of backing store (tagged) | 2237 // a3 = address of backing store (tagged) |
2241 // t0 = address of parameter map (tagged) | 2238 // a4 = address of parameter map (tagged) |
2242 // t1 = temporary scratch (a.o., for address calculation) | 2239 // a5 = temporary scratch (a.o., for address calculation) |
2243 // t3 = the hole value | 2240 // a7 = the hole value |
2244 __ jmp(¶meters_test); | 2241 __ jmp(¶meters_test); |
2245 | 2242 |
2246 __ bind(¶meters_loop); | 2243 __ bind(¶meters_loop); |
2247 __ Subu(t2, t2, Operand(Smi::FromInt(1))); | 2244 |
2248 __ sll(t1, t2, 1); | 2245 __ Dsubu(a6, a6, Operand(Smi::FromInt(1))); |
2249 __ Addu(t1, t1, Operand(kParameterMapHeaderSize - kHeapObjectTag)); | 2246 __ SmiScale(a5, a6, kPointerSizeLog2); |
2250 __ Addu(t6, t0, t1); | 2247 __ Daddu(a5, a5, Operand(kParameterMapHeaderSize - kHeapObjectTag)); |
2251 __ sw(t5, MemOperand(t6)); | 2248 __ Daddu(t2, a4, a5); |
2252 __ Subu(t1, t1, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize)); | 2249 __ sd(t1, MemOperand(t2)); |
2253 __ Addu(t6, a3, t1); | 2250 __ Dsubu(a5, a5, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize)); |
2254 __ sw(t3, MemOperand(t6)); | 2251 __ Daddu(t2, a3, a5); |
2255 __ Addu(t5, t5, Operand(Smi::FromInt(1))); | 2252 __ sd(a7, MemOperand(t2)); |
| 2253 __ Daddu(t1, t1, Operand(Smi::FromInt(1))); |
2256 __ bind(¶meters_test); | 2254 __ bind(¶meters_test); |
2257 __ Branch(¶meters_loop, ne, t2, Operand(Smi::FromInt(0))); | 2255 __ Branch(¶meters_loop, ne, a6, Operand(Smi::FromInt(0))); |
2258 | 2256 |
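The right-to-left loop above fills the map with descending context indices; its net effect, sketched:

#include <vector>

// Slot i of the parameter map ends up pointing at the context slot that
// holds parameter i (parameters sit in the context in reverse order).
std::vector<int> BuildParameterMap(int parameter_count, int mapped_count,
                                   int min_context_slots) {
  std::vector<int> map(mapped_count);
  for (int i = 0; i < mapped_count; ++i) {
    map[i] = min_context_slots + parameter_count - 1 - i;
  }
  return map;  // The matching backing-store slots receive the-hole markers.
}
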
2259 __ bind(&skip_parameter_map); | 2257 __ bind(&skip_parameter_map); |
2260 // a2 = argument count (tagged) | 2258 // a2 = argument count (tagged) |
2261 // a3 = address of backing store (tagged) | 2259 // a3 = address of backing store (tagged) |
2262 // t1 = scratch | 2260 // a5 = scratch |
2263 // Copy arguments header and remaining slots (if there are any). | 2261 // Copy arguments header and remaining slots (if there are any). |
2264 __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex); | 2262 __ LoadRoot(a5, Heap::kFixedArrayMapRootIndex); |
2265 __ sw(t1, FieldMemOperand(a3, FixedArray::kMapOffset)); | 2263 __ sd(a5, FieldMemOperand(a3, FixedArray::kMapOffset)); |
2266 __ sw(a2, FieldMemOperand(a3, FixedArray::kLengthOffset)); | 2264 __ sd(a2, FieldMemOperand(a3, FixedArray::kLengthOffset)); |
2267 | 2265 |
2268 Label arguments_loop, arguments_test; | 2266 Label arguments_loop, arguments_test; |
2269 __ mov(t5, a1); | 2267 __ mov(t1, a1); |
2270 __ lw(t0, MemOperand(sp, 1 * kPointerSize)); | 2268 __ ld(a4, MemOperand(sp, 1 * kPointerSize)); |
2271 __ sll(t6, t5, 1); | 2269 __ SmiScale(t2, t1, kPointerSizeLog2); |
2272 __ Subu(t0, t0, Operand(t6)); | 2270 __ Dsubu(a4, a4, Operand(t2)); |
2273 __ jmp(&arguments_test); | 2271 __ jmp(&arguments_test); |
2274 | 2272 |
2275 __ bind(&arguments_loop); | 2273 __ bind(&arguments_loop); |
2276 __ Subu(t0, t0, Operand(kPointerSize)); | 2274 __ Dsubu(a4, a4, Operand(kPointerSize)); |
2277 __ lw(t2, MemOperand(t0, 0)); | 2275 __ ld(a6, MemOperand(a4, 0)); |
2278 __ sll(t6, t5, 1); | 2276 __ SmiScale(t2, t1, kPointerSizeLog2); |
2279 __ Addu(t1, a3, Operand(t6)); | 2277 __ Daddu(a5, a3, Operand(t2)); |
2280 __ sw(t2, FieldMemOperand(t1, FixedArray::kHeaderSize)); | 2278 __ sd(a6, FieldMemOperand(a5, FixedArray::kHeaderSize)); |
2281 __ Addu(t5, t5, Operand(Smi::FromInt(1))); | 2279 __ Daddu(t1, t1, Operand(Smi::FromInt(1))); |
2282 | 2280 |
2283 __ bind(&arguments_test); | 2281 __ bind(&arguments_test); |
2284 __ Branch(&arguments_loop, lt, t5, Operand(a2)); | 2282 __ Branch(&arguments_loop, lt, t1, Operand(a2)); |
2285 | 2283 |
2286 // Return and remove the on-stack parameters. | 2284 // Return and remove the on-stack parameters. |
2287 __ DropAndRet(3); | 2285 __ DropAndRet(3); |
2288 | 2286 |
2289 // Do the runtime call to allocate the arguments object. | 2287 // Do the runtime call to allocate the arguments object. |
2290 // a2 = argument count (tagged) | 2288 // a2 = argument count (tagged) |
2291 __ bind(&runtime); | 2289 __ bind(&runtime); |
2292 __ sw(a2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count. | 2290 __ sd(a2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count. |
2293 __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1); | 2291 __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1); |
2294 } | 2292 } |
2295 | 2293 |
2296 | 2294 |
2297 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) { | 2295 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) { |
2298 // sp[0] : number of parameters | 2296 // sp[0] : number of parameters |
2299 // sp[4] : receiver displacement | 2297 // sp[8] : receiver displacement |
2300 // sp[8] : function | 2298 // sp[16] : function |
2301 // Check if the calling frame is an arguments adaptor frame. | 2299 // Check if the calling frame is an arguments adaptor frame. |
2302 Label adaptor_frame, try_allocate, runtime; | 2300 Label adaptor_frame, try_allocate, runtime; |
2303 __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); | 2301 __ ld(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); |
2304 __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset)); | 2302 __ ld(a3, MemOperand(a2, StandardFrameConstants::kContextOffset)); |
2305 __ Branch(&adaptor_frame, | 2303 __ Branch(&adaptor_frame, |
2306 eq, | 2304 eq, |
2307 a3, | 2305 a3, |
2308 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); | 2306 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); |
2309 | 2307 |
2310 // Get the length from the frame. | 2308 // Get the length from the frame. |
2311 __ lw(a1, MemOperand(sp, 0)); | 2309 __ ld(a1, MemOperand(sp, 0)); |
2312 __ Branch(&try_allocate); | 2310 __ Branch(&try_allocate); |
2313 | 2311 |
2314 // Patch the arguments.length and the parameters pointer. | 2312 // Patch the arguments.length and the parameters pointer. |
2315 __ bind(&adaptor_frame); | 2313 __ bind(&adaptor_frame); |
2316 __ lw(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset)); | 2314 __ ld(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset)); |
2317 __ sw(a1, MemOperand(sp, 0)); | 2315 __ sd(a1, MemOperand(sp, 0)); |
2318 __ sll(at, a1, kPointerSizeLog2 - kSmiTagSize); | 2316 __ SmiScale(at, a1, kPointerSizeLog2); |
2319 __ Addu(a3, a2, Operand(at)); | |
2320 | 2317 |
2321 __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset)); | 2318 __ Daddu(a3, a2, Operand(at)); |
2322 __ sw(a3, MemOperand(sp, 1 * kPointerSize)); | 2319 |
| 2320 __ Daddu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset)); |
| 2321 __ sd(a3, MemOperand(sp, 1 * kPointerSize)); |
2323 | 2322 |
2324 // Try the new space allocation. Start out with computing the size | 2323 // Try the new space allocation. Start out with computing the size |
2325 // of the arguments object and the elements array in words. | 2324 // of the arguments object and the elements array in words. |
2326 Label add_arguments_object; | 2325 Label add_arguments_object; |
2327 __ bind(&try_allocate); | 2326 __ bind(&try_allocate); |
2328 __ Branch(&add_arguments_object, eq, a1, Operand(zero_reg)); | 2327 __ Branch(&add_arguments_object, eq, a1, Operand(zero_reg)); |
2329 __ srl(a1, a1, kSmiTagSize); | 2328 __ SmiUntag(a1); |
2330 | 2329 |
2331 __ Addu(a1, a1, Operand(FixedArray::kHeaderSize / kPointerSize)); | 2330 __ Daddu(a1, a1, Operand(FixedArray::kHeaderSize / kPointerSize)); |
2332 __ bind(&add_arguments_object); | 2331 __ bind(&add_arguments_object); |
2333 __ Addu(a1, a1, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize)); | 2332 __ Daddu(a1, a1, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize)); |
2334 | 2333 |
2335 // Do the allocation of both objects in one go. | 2334 // Do the allocation of both objects in one go. |
2336 __ Allocate(a1, v0, a2, a3, &runtime, | 2335 __ Allocate(a1, v0, a2, a3, &runtime, |
2337 static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS)); | 2336 static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS)); |
2338 | 2337 |
2339 // Get the arguments boilerplate from the current native context. | 2338 // Get the arguments boilerplate from the current native context. |
2340 __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); | 2339 __ ld(a4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); |
2341 __ lw(t0, FieldMemOperand(t0, GlobalObject::kNativeContextOffset)); | 2340 __ ld(a4, FieldMemOperand(a4, GlobalObject::kNativeContextOffset)); |
2342 __ lw(t0, MemOperand( | 2341 __ ld(a4, MemOperand(a4, Context::SlotOffset( |
2343 t0, Context::SlotOffset(Context::STRICT_ARGUMENTS_MAP_INDEX))); | 2342 Context::STRICT_ARGUMENTS_MAP_INDEX))); |
2344 | 2343 |
2345 __ sw(t0, FieldMemOperand(v0, JSObject::kMapOffset)); | 2344 __ sd(a4, FieldMemOperand(v0, JSObject::kMapOffset)); |
2346 __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex); | 2345 __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex); |
2347 __ sw(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset)); | 2346 __ sd(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset)); |
2348 __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset)); | 2347 __ sd(a3, FieldMemOperand(v0, JSObject::kElementsOffset)); |
2349 | 2348 |
2350 // Get the length (smi tagged) and set that as an in-object property too. | 2349 // Get the length (smi tagged) and set that as an in-object property too. |
2351 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0); | 2350 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0); |
2352 __ lw(a1, MemOperand(sp, 0 * kPointerSize)); | 2351 __ ld(a1, MemOperand(sp, 0 * kPointerSize)); |
2353 __ AssertSmi(a1); | 2352 __ AssertSmi(a1); |
2354 __ sw(a1, FieldMemOperand(v0, JSObject::kHeaderSize + | 2353 __ sd(a1, FieldMemOperand(v0, JSObject::kHeaderSize + |
2355 Heap::kArgumentsLengthIndex * kPointerSize)); | 2354 Heap::kArgumentsLengthIndex * kPointerSize)); |
2356 | 2355 |
2357 Label done; | 2356 Label done; |
2358 __ Branch(&done, eq, a1, Operand(zero_reg)); | 2357 __ Branch(&done, eq, a1, Operand(zero_reg)); |
2359 | 2358 |
2360 // Get the parameters pointer from the stack. | 2359 // Get the parameters pointer from the stack. |
2361 __ lw(a2, MemOperand(sp, 1 * kPointerSize)); | 2360 __ ld(a2, MemOperand(sp, 1 * kPointerSize)); |
2362 | 2361 |
2363 // Set up the elements pointer in the allocated arguments object and | 2362 // Set up the elements pointer in the allocated arguments object and |
2364 // initialize the header in the elements fixed array. | 2363 // initialize the header in the elements fixed array. |
2365 __ Addu(t0, v0, Operand(Heap::kStrictArgumentsObjectSize)); | 2364 __ Daddu(a4, v0, Operand(Heap::kStrictArgumentsObjectSize)); |
2366 __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset)); | 2365 __ sd(a4, FieldMemOperand(v0, JSObject::kElementsOffset)); |
2367 __ LoadRoot(a3, Heap::kFixedArrayMapRootIndex); | 2366 __ LoadRoot(a3, Heap::kFixedArrayMapRootIndex); |
2368 __ sw(a3, FieldMemOperand(t0, FixedArray::kMapOffset)); | 2367 __ sd(a3, FieldMemOperand(a4, FixedArray::kMapOffset)); |
2369 __ sw(a1, FieldMemOperand(t0, FixedArray::kLengthOffset)); | 2368 __ sd(a1, FieldMemOperand(a4, FixedArray::kLengthOffset)); |
2370 // Untag the length for the loop. | 2369 // Untag the length for the loop. |
2371 __ srl(a1, a1, kSmiTagSize); | 2370 __ SmiUntag(a1); |
| 2371 |
2372 | 2372 |
2373 // Copy the fixed array slots. | 2373 // Copy the fixed array slots. |
2374 Label loop; | 2374 Label loop; |
2375 // Set up t0 to point to the first array slot. | 2375 // Set up a4 to point to the first array slot. |
2376 __ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 2376 __ Daddu(a4, a4, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
2377 __ bind(&loop); | 2377 __ bind(&loop); |
2378 // Pre-decrement a2 with kPointerSize on each iteration. | 2378 // Pre-decrement a2 with kPointerSize on each iteration. |
2379 // Pre-decrement in order to skip receiver. | 2379 // Pre-decrement in order to skip receiver. |
2380 __ Addu(a2, a2, Operand(-kPointerSize)); | 2380 __ Daddu(a2, a2, Operand(-kPointerSize)); |
2381 __ lw(a3, MemOperand(a2)); | 2381 __ ld(a3, MemOperand(a2)); |
2382 // Post-increment t0 with kPointerSize on each iteration. | 2382 // Post-increment a4 with kPointerSize on each iteration. |
2383 __ sw(a3, MemOperand(t0)); | 2383 __ sd(a3, MemOperand(a4)); |
2384 __ Addu(t0, t0, Operand(kPointerSize)); | 2384 __ Daddu(a4, a4, Operand(kPointerSize)); |
2385 __ Subu(a1, a1, Operand(1)); | 2385 __ Dsubu(a1, a1, Operand(1)); |
2386 __ Branch(&loop, ne, a1, Operand(zero_reg)); | 2386 __ Branch(&loop, ne, a1, Operand(zero_reg)); |
2387 | 2387 |
2388 // Return and remove the on-stack parameters. | 2388 // Return and remove the on-stack parameters. |
2389 __ bind(&done); | 2389 __ bind(&done); |
2390 __ DropAndRet(3); | 2390 __ DropAndRet(3); |
2391 | 2391 |
2392 // Do the runtime call to allocate the arguments object. | 2392 // Do the runtime call to allocate the arguments object. |
2393 __ bind(&runtime); | 2393 __ bind(&runtime); |
2394 __ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1); | 2394 __ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1); |
2395 } | 2395 } |
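The try_allocate arithmetic above, restated in words rather than bytes (the two size constants are assumptions, not the real Heap values):

constexpr int kFixedArrayHeaderWords = 2;       // Map + length.
constexpr int kStrictArgumentsObjectWords = 4;  // Assumed object size.

// The arguments object is always allocated; the elements FixedArray
// (header plus one slot per argument) only when argc != 0.
int StrictArgumentsSizeInWords(int argc) {
  int words = kStrictArgumentsObjectWords;
  if (argc > 0) words += kFixedArrayHeaderWords + argc;
  return words;
}
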
(...skipping 29 matching lines...)
2425 Register regexp_data = s1; | 2425 Register regexp_data = s1; |
2426 Register last_match_info_elements = s2; | 2426 Register last_match_info_elements = s2; |
2427 | 2427 |
2428 // Ensure that a RegExp stack is allocated. | 2428 // Ensure that a RegExp stack is allocated. |
2429 ExternalReference address_of_regexp_stack_memory_address = | 2429 ExternalReference address_of_regexp_stack_memory_address = |
2430 ExternalReference::address_of_regexp_stack_memory_address( | 2430 ExternalReference::address_of_regexp_stack_memory_address( |
2431 isolate()); | 2431 isolate()); |
2432 ExternalReference address_of_regexp_stack_memory_size = | 2432 ExternalReference address_of_regexp_stack_memory_size = |
2433 ExternalReference::address_of_regexp_stack_memory_size(isolate()); | 2433 ExternalReference::address_of_regexp_stack_memory_size(isolate()); |
2434 __ li(a0, Operand(address_of_regexp_stack_memory_size)); | 2434 __ li(a0, Operand(address_of_regexp_stack_memory_size)); |
2435 __ lw(a0, MemOperand(a0, 0)); | 2435 __ ld(a0, MemOperand(a0, 0)); |
2436 __ Branch(&runtime, eq, a0, Operand(zero_reg)); | 2436 __ Branch(&runtime, eq, a0, Operand(zero_reg)); |
2437 | 2437 |
2438 // Check that the first argument is a JSRegExp object. | 2438 // Check that the first argument is a JSRegExp object. |
2439 __ lw(a0, MemOperand(sp, kJSRegExpOffset)); | 2439 __ ld(a0, MemOperand(sp, kJSRegExpOffset)); |
2440 STATIC_ASSERT(kSmiTag == 0); | 2440 STATIC_ASSERT(kSmiTag == 0); |
2441 __ JumpIfSmi(a0, &runtime); | 2441 __ JumpIfSmi(a0, &runtime); |
2442 __ GetObjectType(a0, a1, a1); | 2442 __ GetObjectType(a0, a1, a1); |
2443 __ Branch(&runtime, ne, a1, Operand(JS_REGEXP_TYPE)); | 2443 __ Branch(&runtime, ne, a1, Operand(JS_REGEXP_TYPE)); |
2444 | 2444 |
2445 // Check that the RegExp has been compiled (data contains a fixed array). | 2445 // Check that the RegExp has been compiled (data contains a fixed array). |
2446 __ lw(regexp_data, FieldMemOperand(a0, JSRegExp::kDataOffset)); | 2446 __ ld(regexp_data, FieldMemOperand(a0, JSRegExp::kDataOffset)); |
2447 if (FLAG_debug_code) { | 2447 if (FLAG_debug_code) { |
2448 __ SmiTst(regexp_data, t0); | 2448 __ SmiTst(regexp_data, a4); |
2449 __ Check(nz, | 2449 __ Check(nz, |
2450 kUnexpectedTypeForRegExpDataFixedArrayExpected, | 2450 kUnexpectedTypeForRegExpDataFixedArrayExpected, |
2451 t0, | 2451 a4, |
2452 Operand(zero_reg)); | 2452 Operand(zero_reg)); |
2453 __ GetObjectType(regexp_data, a0, a0); | 2453 __ GetObjectType(regexp_data, a0, a0); |
2454 __ Check(eq, | 2454 __ Check(eq, |
2455 kUnexpectedTypeForRegExpDataFixedArrayExpected, | 2455 kUnexpectedTypeForRegExpDataFixedArrayExpected, |
2456 a0, | 2456 a0, |
2457 Operand(FIXED_ARRAY_TYPE)); | 2457 Operand(FIXED_ARRAY_TYPE)); |
2458 } | 2458 } |
2459 | 2459 |
2460 // regexp_data: RegExp data (FixedArray) | 2460 // regexp_data: RegExp data (FixedArray) |
2461 // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP. | 2461 // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP. |
2462 __ lw(a0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset)); | 2462 __ ld(a0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset)); |
2463 __ Branch(&runtime, ne, a0, Operand(Smi::FromInt(JSRegExp::IRREGEXP))); | 2463 __ Branch(&runtime, ne, a0, Operand(Smi::FromInt(JSRegExp::IRREGEXP))); |
2464 | 2464 |
2465 // regexp_data: RegExp data (FixedArray) | 2465 // regexp_data: RegExp data (FixedArray) |
2466 // Check that the number of captures fits in the static offsets vector buffer. | 2466 // Check that the number of captures fits in the static offsets vector buffer. |
2467 __ lw(a2, | 2467 __ ld(a2, |
2468 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset)); | 2468 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset)); |
2469 // Check (number_of_captures + 1) * 2 <= offsets vector size | 2469 // Check (number_of_captures + 1) * 2 <= offsets vector size |
2470 // Or number_of_captures * 2 <= offsets vector size - 2 | 2470 // Or number_of_captures * 2 <= offsets vector size - 2 |
| 2471 // Or number_of_captures <= offsets vector size / 2 - 1 |
2471 // Multiplying by 2 comes for free since a2 is smi-tagged. | 2472 // Multiplying by 2 comes for free since a2 is smi-tagged. |
2472 STATIC_ASSERT(kSmiTag == 0); | |
2473 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); | |
2474 STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2); | 2473 STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2); |
2475 __ Branch( | 2474 int temp = Isolate::kJSRegexpStaticOffsetsVectorSize / 2 - 1; |
2476 &runtime, hi, a2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize - 2)); | 2475 __ Branch(&runtime, hi, a2, Operand(Smi::FromInt(temp))); |
2477 | 2476 |
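The three comment lines above are one inequality rearranged: with N capture groups, irregexp needs (N + 1) * 2 offset slots (the whole match counts as group 0). For the even vector size asserted above, the forms agree exactly:

// (N + 1) * 2 <= size  <=>  N * 2 <= size - 2  <=>  N <= size / 2 - 1
bool CapturesFit(int number_of_captures, int offsets_vector_size) {
  return number_of_captures <= offsets_vector_size / 2 - 1;
}
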
2478 // Reset offset for possibly sliced string. | 2477 // Reset offset for possibly sliced string. |
2479 __ mov(t0, zero_reg); | 2478 __ mov(t0, zero_reg); |
2480 __ lw(subject, MemOperand(sp, kSubjectOffset)); | 2479 __ ld(subject, MemOperand(sp, kSubjectOffset)); |
2481 __ JumpIfSmi(subject, &runtime); | 2480 __ JumpIfSmi(subject, &runtime); |
2482 __ mov(a3, subject); // Make a copy of the original subject string. | 2481 __ mov(a3, subject); // Make a copy of the original subject string. |
2483 __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset)); | 2482 __ ld(a0, FieldMemOperand(subject, HeapObject::kMapOffset)); |
2484 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset)); | 2483 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset)); |
2485 // subject: subject string | 2484 // subject: subject string |
2486 // a3: subject string | 2485 // a3: subject string |
2487 // a0: subject string instance type | 2486 // a0: subject string instance type |
2488 // regexp_data: RegExp data (FixedArray) | 2487 // regexp_data: RegExp data (FixedArray) |
2489 // Handle subject string according to its encoding and representation: | 2488 // Handle subject string according to its encoding and representation: |
2490 // (1) Sequential string? If yes, go to (5). | 2489 // (1) Sequential string? If yes, go to (5). |
2491 // (2) Anything but sequential or cons? If yes, go to (6). | 2490 // (2) Anything but sequential or cons? If yes, go to (6). |
2492 // (3) Cons string. If the string is flat, replace subject with first string. | 2491 // (3) Cons string. If the string is flat, replace subject with first string. |
2493 // Otherwise bail out. | 2492 // Otherwise bail out. |
2494 // (4) Is subject external? If yes, go to (7). | 2493 // (4) Is subject external? If yes, go to (7). |
2495 // (5) Sequential string. Load regexp code according to encoding. | 2494 // (5) Sequential string. Load regexp code according to encoding. |
2496 // (E) Carry on. | 2495 // (E) Carry on. |
2497 /// [...] | 2496 /// [...] |
2498 | 2497 |
2499 // Deferred code at the end of the stub: | 2498 // Deferred code at the end of the stub: |
2500 // (6) Not a long external string? If yes, go to (8). | 2499 // (6) Not a long external string? If yes, go to (8). |
2501 // (7) External string. Make it, offset-wise, look like a sequential string. | 2500 // (7) External string. Make it, offset-wise, look like a sequential string. |
2502 // Go to (5). | 2501 // Go to (5). |
2503 // (8) Short external string or not a string? If yes, bail out to runtime. | 2502 // (8) Short external string or not a string? If yes, bail out to runtime. |
2504 // (9) Sliced string. Replace subject with parent. Go to (4). | 2503 // (9) Sliced string. Replace subject with parent. Go to (4). |
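
The nine steps read more naturally as structured control flow. A hedged C++ model of the reduction (the enum and struct are simplified stand-ins; the real stub tests instance-type bit masks, and non-flat cons strings never reach this point):

    #include <cstdio>

    // Simplified stand-ins for V8's string representation tags.
    enum Rep { kSeq, kCons, kSliced, kExternalLong, kExternalShort, kNotString };

    struct Str {
      Rep rep;
      Str* first = nullptr;   // cons: first part (second must be empty here)
      Str* parent = nullptr;  // sliced: underlying string
    };

    // Models steps (1)-(9): reduce subject to a sequential(-looking) string,
    // or return nullptr to model the bailout to the runtime.
    Str* ReduceSubject(Str* s) {
      if (s->rep == kCons) s = s->first;             // (3) flat cons only
      while (true) {
        switch (s->rep) {
          case kSeq:          return s;              // (1)/(5)
          case kExternalLong: return s;              // (4)/(7) offset-adjusted
          case kSliced:       s = s->parent; break;  // (9), then back to (4)
          default:            return nullptr;        // (6)/(8) bail out
        }
      }
    }

    int main() {
      Str seq{kSeq};
      Str slice{kSliced};
      slice.parent = &seq;
      std::printf("%s\n", ReduceSubject(&slice) == &seq ? "sequential" : "runtime");
    }
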
2505 | 2504 |
2506 Label seq_string /* 5 */, external_string /* 7 */, | 2505 Label check_underlying; // (4) |
2507 check_underlying /* 4 */, not_seq_nor_cons /* 6 */, | 2506 Label seq_string; // (5) |
2508 not_long_external /* 8 */; | 2507 Label not_seq_nor_cons; // (6) |
| 2508 Label external_string; // (7) |
| 2509 Label not_long_external; // (8) |
2509 | 2510 |
2510 // (1) Sequential string? If yes, go to (5). | 2511 // (1) Sequential string? If yes, go to (5). |
2511 __ And(a1, | 2512 __ And(a1, |
2512 a0, | 2513 a0, |
2513 Operand(kIsNotStringMask | | 2514 Operand(kIsNotStringMask | |
2514 kStringRepresentationMask | | 2515 kStringRepresentationMask | |
2515 kShortExternalStringMask)); | 2516 kShortExternalStringMask)); |
2516 STATIC_ASSERT((kStringTag | kSeqStringTag) == 0); | 2517 STATIC_ASSERT((kStringTag | kSeqStringTag) == 0); |
2517 __ Branch(&seq_string, eq, a1, Operand(zero_reg)); // Go to (5). | 2518 __ Branch(&seq_string, eq, a1, Operand(zero_reg)); // Go to (5). |
2518 | 2519 |
2519 // (2) Anything but sequential or cons? If yes, go to (6). | 2520 // (2) Anything but sequential or cons? If yes, go to (6). |
2520 STATIC_ASSERT(kConsStringTag < kExternalStringTag); | 2521 STATIC_ASSERT(kConsStringTag < kExternalStringTag); |
2521 STATIC_ASSERT(kSlicedStringTag > kExternalStringTag); | 2522 STATIC_ASSERT(kSlicedStringTag > kExternalStringTag); |
2522 STATIC_ASSERT(kIsNotStringMask > kExternalStringTag); | 2523 STATIC_ASSERT(kIsNotStringMask > kExternalStringTag); |
2523 STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag); | 2524 STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag); |
2524 // Go to (6). | 2525 // Go to (6). |
2525 __ Branch(&not_seq_nor_cons, ge, a1, Operand(kExternalStringTag)); | 2526 __ Branch(&not_seq_nor_cons, ge, a1, Operand(kExternalStringTag)); |
2526 | 2527 |
2527 // (3) Cons string. Check that it's flat. | 2528 // (3) Cons string. Check that it's flat. |
2528 // Replace subject with first string and reload instance type. | 2529 // Replace subject with first string and reload instance type. |
2529 __ lw(a0, FieldMemOperand(subject, ConsString::kSecondOffset)); | 2530 __ ld(a0, FieldMemOperand(subject, ConsString::kSecondOffset)); |
2530 __ LoadRoot(a1, Heap::kempty_stringRootIndex); | 2531 __ LoadRoot(a1, Heap::kempty_stringRootIndex); |
2531 __ Branch(&runtime, ne, a0, Operand(a1)); | 2532 __ Branch(&runtime, ne, a0, Operand(a1)); |
2532 __ lw(subject, FieldMemOperand(subject, ConsString::kFirstOffset)); | 2533 __ ld(subject, FieldMemOperand(subject, ConsString::kFirstOffset)); |
2533 | 2534 |
2534 // (4) Is subject external? If yes, go to (7). | 2535 // (4) Is subject external? If yes, go to (7). |
2535 __ bind(&check_underlying); | 2536 __ bind(&check_underlying); |
2536 __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset)); | 2537 __ ld(a0, FieldMemOperand(subject, HeapObject::kMapOffset)); |
2537 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset)); | 2538 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset)); |
2538 STATIC_ASSERT(kSeqStringTag == 0); | 2539 STATIC_ASSERT(kSeqStringTag == 0); |
2539 __ And(at, a0, Operand(kStringRepresentationMask)); | 2540 __ And(at, a0, Operand(kStringRepresentationMask)); |
2540 // The underlying external string is never a short external string. | 2541 // The underlying external string is never a short external string. |
2541 STATIC_ASSERT(ExternalString::kMaxShortLength < ConsString::kMinLength); | 2542 STATIC_ASSERT(ExternalString::kMaxShortLength < ConsString::kMinLength); |
2542 STATIC_ASSERT(ExternalString::kMaxShortLength < SlicedString::kMinLength); | 2543 STATIC_ASSERT(ExternalString::kMaxShortLength < SlicedString::kMinLength); |
2543 __ Branch(&external_string, ne, at, Operand(zero_reg)); // Go to (7). | 2544 __ Branch(&external_string, ne, at, Operand(zero_reg)); // Go to (7). |
2544 | 2545 |
2545 // (5) Sequential string. Load regexp code according to encoding. | 2546 // (5) Sequential string. Load regexp code according to encoding. |
2546 __ bind(&seq_string); | 2547 __ bind(&seq_string); |
2547 // subject: sequential subject string (or look-alike, external string) | 2548 // subject: sequential subject string (or look-alike, external string) |
2548 // a3: original subject string | 2549 // a3: original subject string |
2549 // Load previous index and check range before a3 is overwritten. We have to | 2550 // Load previous index and check range before a3 is overwritten. We have to |
2550 // use a3 instead of subject here because subject might only have been made | 2551 // use a3 instead of subject here because subject might only have been made |
2551 // to look like a sequential string when it actually is an external string. | 2552 // to look like a sequential string when it actually is an external string. |
2552 __ lw(a1, MemOperand(sp, kPreviousIndexOffset)); | 2553 __ ld(a1, MemOperand(sp, kPreviousIndexOffset)); |
2553 __ JumpIfNotSmi(a1, &runtime); | 2554 __ JumpIfNotSmi(a1, &runtime); |
2554 __ lw(a3, FieldMemOperand(a3, String::kLengthOffset)); | 2555 __ ld(a3, FieldMemOperand(a3, String::kLengthOffset)); |
2555 __ Branch(&runtime, ls, a3, Operand(a1)); | 2556 __ Branch(&runtime, ls, a3, Operand(a1)); |
2556 __ sra(a1, a1, kSmiTagSize); // Untag the Smi. | 2557 __ SmiUntag(a1); |
2557 | 2558 |
2558 STATIC_ASSERT(kStringEncodingMask == 4); | 2559 STATIC_ASSERT(kStringEncodingMask == 4); |
2559 STATIC_ASSERT(kOneByteStringTag == 4); | 2560 STATIC_ASSERT(kOneByteStringTag == 4); |
2560 STATIC_ASSERT(kTwoByteStringTag == 0); | 2561 STATIC_ASSERT(kTwoByteStringTag == 0); |
2561 __ And(a0, a0, Operand(kStringEncodingMask)); // Non-zero for ASCII. | 2562 __ And(a0, a0, Operand(kStringEncodingMask)); // Non-zero for ASCII. |
2562 __ lw(t9, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset)); | 2563 __ ld(t9, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset)); |
2563 __ sra(a3, a0, 2); // a3 is 1 for ASCII, 0 for UC16 (used below). | 2564 __ dsra(a3, a0, 2); // a3 is 1 for ASCII, 0 for UC16 (used below). |
2564 __ lw(t1, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset)); | 2565 __ ld(a5, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset)); |
2565 __ Movz(t9, t1, a0); // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset. | 2566 __ Movz(t9, a5, a0); // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset. |
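
Movz is a conditional move: the destination is replaced only when the condition register is zero, so the two loads plus Movz select between the code objects branch-free. In plain C++ terms (names here are illustrative, not V8 API):

    #include <cstdint>
    #include <cstdio>

    const uint32_t kStringEncodingMask = 4;  // matches the STATIC_ASSERT above

    // Selects the irregexp code for the subject's encoding; ascii_code and
    // uc16_code stand in for the kDataAsciiCodeOffset/kDataUC16CodeOffset slots.
    uintptr_t SelectCode(uint32_t instance_type,
                         uintptr_t ascii_code, uintptr_t uc16_code) {
      uint32_t enc = instance_type & kStringEncodingMask;  // non-zero for ASCII
      return enc == 0 ? uc16_code : ascii_code;  // Movz: replace t9 when a0 == 0
    }

    int main() {
      std::printf("%d\n", (int)SelectCode(4, 1, 2));  // 1: one-byte code chosen
    }
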
2566 | 2567 |
2567 // (E) Carry on. String handling is done. | 2568 // (E) Carry on. String handling is done. |
2568 // t9: irregexp code | 2569 // t9: irregexp code |
2569 // Check that the irregexp code has been generated for the actual string | 2570 // Check that the irregexp code has been generated for the actual string |
2570 // encoding. If it has, the field contains a code object; otherwise it contains | 2571 // encoding. If it has, the field contains a code object; otherwise it contains |
2571 // a smi (code flushing support). | 2572 // a smi (code flushing support). |
2572 __ JumpIfSmi(t9, &runtime); | 2573 __ JumpIfSmi(t9, &runtime); |
2573 | 2574 |
2574 // a1: previous index | 2575 // a1: previous index |
2575 // a3: encoding of subject string (1 if ASCII, 0 if two_byte); | 2576 // a3: encoding of subject string (1 if ASCII, 0 if two_byte); |
2576 // t9: code | 2577 // t9: code |
2577 // subject: Subject string | 2578 // subject: Subject string |
2578 // regexp_data: RegExp data (FixedArray) | 2579 // regexp_data: RegExp data (FixedArray) |
2579 // All checks done. Now push arguments for native regexp code. | 2580 // All checks done. Now push arguments for native regexp code. |
2580 __ IncrementCounter(isolate()->counters()->regexp_entry_native(), | 2581 __ IncrementCounter(isolate()->counters()->regexp_entry_native(), |
2581 1, a0, a2); | 2582 1, a0, a2); |
2582 | 2583 |
2583 // Isolates: note we add an additional parameter here (isolate pointer). | 2584 // Isolates: note we add an additional parameter here (isolate pointer). |
2584 const int kRegExpExecuteArguments = 9; | 2585 const int kRegExpExecuteArguments = 9; |
2585 const int kParameterRegisters = 4; | 2586 const int kParameterRegisters = (kMipsAbi == kN64) ? 8 : 4; |
2586 __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters); | 2587 __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters); |
2587 | 2588 |
2588 // Stack pointer now points to cell where return address is to be written. | 2589 // Stack pointer now points to cell where return address is to be written. |
2589 // Arguments are before that on the stack or in registers, meaning we | 2590 // Arguments are before that on the stack or in registers, meaning we |
2590 // treat the return address as argument 5. Thus every argument after that | 2591 // treat the return address as argument 5. Thus every argument after that |
2591 // needs to be shifted back by 1. Since DirectCEntryStub will handle | 2592 // needs to be shifted back by 1. Since DirectCEntryStub will handle |
2592 // allocating space for the c argument slots, we don't need to calculate | 2593 // allocating space for the c argument slots, we don't need to calculate |
2593 // that into the argument positions on the stack. This is how the stack will | 2594 // that into the argument positions on the stack. This is how the stack will |
2594 // look (sp meaning the value of sp at this moment): | 2595 // look (sp meaning the value of sp at this moment): |
2595 // [sp + 5] - Argument 9 | 2596 // Abi n64: |
2596 // [sp + 4] - Argument 8 | 2597 // [sp + 1] - Argument 9 |
2597 // [sp + 3] - Argument 7 | 2598 // [sp + 0] - saved ra |
2598 // [sp + 2] - Argument 6 | 2599 // Abi O32: |
2599 // [sp + 1] - Argument 5 | 2600 // [sp + 5] - Argument 9 |
2600 // [sp + 0] - saved ra | 2601 // [sp + 4] - Argument 8 |
| 2602 // [sp + 3] - Argument 7 |
| 2603 // [sp + 2] - Argument 6 |
| 2604 // [sp + 1] - Argument 5 |
| 2605 // [sp + 0] - saved ra |
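
The two layouts differ because n64 passes eight arguments in registers (a0-a7) while O32 passes four, so under n64 only argument 9 spills to the stack. A small sketch of the slot arithmetic, assuming 8-byte stack slots as drawn above (slot 0 holds the saved ra):

    #include <cstdio>

    const int kPointerSize = 8;  // assumption: 64-bit build

    // Byte offset above sp for C argument `n`, mirroring the two layouts in
    // the comment above; -1 means the argument travels in a register.
    int StackOffsetOfArg(int n, bool is_n64) {
      int register_args = is_n64 ? 8 : 4;  // a0-a7 vs a0-a3
      if (n <= register_args) return -1;
      return (n - register_args) * kPointerSize;
    }

    int main() {
      std::printf("n64: arg 9 at sp+%d\n", StackOffsetOfArg(9, true));   // sp+8
      std::printf("O32: arg 9 at sp+%d\n", StackOffsetOfArg(9, false));  // sp+40
    }
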
2601 | 2606 |
2602 // Argument 9: Pass current isolate address. | 2607 if (kMipsAbi == kN64) { |
2603 // CFunctionArgumentOperand handles MIPS stack argument slots. | 2608 // Argument 9: Pass current isolate address. |
2604 __ li(a0, Operand(ExternalReference::isolate_address(isolate()))); | 2609 __ li(a0, Operand(ExternalReference::isolate_address(isolate()))); |
2605 __ sw(a0, MemOperand(sp, 5 * kPointerSize)); | 2610 __ sd(a0, MemOperand(sp, 1 * kPointerSize)); |
2606 | 2611 |
2607 // Argument 8: Indicate that this is a direct call from JavaScript. | 2612 // Argument 8: Indicate that this is a direct call from JavaScript. |
2608 __ li(a0, Operand(1)); | 2613 __ li(a7, Operand(1)); |
2609 __ sw(a0, MemOperand(sp, 4 * kPointerSize)); | |
2610 | 2614 |
2611 // Argument 7: Start (high end) of backtracking stack memory area. | 2615 // Argument 7: Start (high end) of backtracking stack memory area. |
2612 __ li(a0, Operand(address_of_regexp_stack_memory_address)); | 2616 __ li(a0, Operand(address_of_regexp_stack_memory_address)); |
2613 __ lw(a0, MemOperand(a0, 0)); | 2617 __ ld(a0, MemOperand(a0, 0)); |
2614 __ li(a2, Operand(address_of_regexp_stack_memory_size)); | 2618 __ li(a2, Operand(address_of_regexp_stack_memory_size)); |
2615 __ lw(a2, MemOperand(a2, 0)); | 2619 __ ld(a2, MemOperand(a2, 0)); |
2616 __ addu(a0, a0, a2); | 2620 __ daddu(a6, a0, a2); |
2617 __ sw(a0, MemOperand(sp, 3 * kPointerSize)); | |
2618 | 2621 |
2619 // Argument 6: Set the number of capture registers to zero to force global | 2622 // Argument 6: Set the number of capture registers to zero to force global |
2620 // regexps to behave as non-global. This does not affect non-global regexps. | 2623 // regexps to behave as non-global. This does not affect non-global regexps. |
2621 __ mov(a0, zero_reg); | 2624 __ mov(a5, zero_reg); |
2622 __ sw(a0, MemOperand(sp, 2 * kPointerSize)); | |
2623 | 2625 |
2624 // Argument 5: static offsets vector buffer. | 2626 // Argument 5: static offsets vector buffer. |
2625 __ li(a0, Operand( | 2627 __ li(a4, Operand( |
2626 ExternalReference::address_of_static_offsets_vector(isolate()))); | 2628 ExternalReference::address_of_static_offsets_vector(isolate()))); |
2627 __ sw(a0, MemOperand(sp, 1 * kPointerSize)); | 2629 } else { // O32. |
| 2630 ASSERT(kMipsAbi == kO32); |
| 2631 |
| 2632 // Argument 9: Pass current isolate address. |
| 2633 // CFunctionArgumentOperand handles MIPS stack argument slots. |
| 2634 __ li(a0, Operand(ExternalReference::isolate_address(isolate()))); |
| 2635 __ sd(a0, MemOperand(sp, 5 * kPointerSize)); |
| 2636 |
| 2637 // Argument 8: Indicate that this is a direct call from JavaScript. |
| 2638 __ li(a0, Operand(1)); |
| 2639 __ sd(a0, MemOperand(sp, 4 * kPointerSize)); |
| 2640 |
| 2641 // Argument 7: Start (high end) of backtracking stack memory area. |
| 2642 __ li(a0, Operand(address_of_regexp_stack_memory_address)); |
| 2643 __ ld(a0, MemOperand(a0, 0)); |
| 2644 __ li(a2, Operand(address_of_regexp_stack_memory_size)); |
| 2645 __ ld(a2, MemOperand(a2, 0)); |
| 2646 __ daddu(a0, a0, a2); |
| 2647 __ sd(a0, MemOperand(sp, 3 * kPointerSize)); |
| 2648 |
| 2649 // Argument 6: Set the number of capture registers to zero to force global |
| 2650 // regexps to behave as non-global. This does not affect non-global regexps. |
| 2651 __ mov(a0, zero_reg); |
| 2652 __ sd(a0, MemOperand(sp, 2 * kPointerSize)); |
| 2653 |
| 2654 // Argument 5: static offsets vector buffer. |
| 2655 __ li(a0, Operand( |
| 2656 ExternalReference::address_of_static_offsets_vector(isolate()))); |
| 2657 __ sd(a0, MemOperand(sp, 1 * kPointerSize)); |
| 2658 } |
2628 | 2659 |
2629 // For arguments 4 and 3 get string length, calculate start of string data | 2660 // For arguments 4 and 3 get string length, calculate start of string data |
2630 // and calculate the shift of the index (0 for ASCII and 1 for two byte). | 2661 // and calculate the shift of the index (0 for ASCII and 1 for two byte). |
2631 __ Addu(t2, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag)); | 2662 __ Daddu(t2, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag)); |
2632 __ Xor(a3, a3, Operand(1)); // 1 for 2-byte str, 0 for 1-byte. | 2663 __ Xor(a3, a3, Operand(1)); // 1 for 2-byte str, 0 for 1-byte. |
2633 // Load the length from the original subject string from the previous stack | 2664 // Load the length from the original subject string from the previous stack |
2634 // frame. Therefore we have to use fp, which points exactly to two pointer | 2665 // frame. Therefore we have to use fp, which points exactly to two pointer |
2635 // sizes below the previous sp. (Because creating a new stack frame pushes | 2666 // sizes below the previous sp. (Because creating a new stack frame pushes |
2636 // the previous fp onto the stack and moves up sp by 2 * kPointerSize.) | 2667 // the previous fp onto the stack and moves up sp by 2 * kPointerSize.) |
2637 __ lw(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize)); | 2668 __ ld(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize)); |
2638 // If slice offset is not 0, load the length from the original sliced string. | 2669 // If slice offset is not 0, load the length from the original sliced string. |
2639 // Argument 4, a3: End of string data | 2670 // Argument 4, a3: End of string data |
2640 // Argument 3, a2: Start of string data | 2671 // Argument 3, a2: Start of string data |
2641 // Prepare start and end index of the input. | 2672 // Prepare start and end index of the input. |
2642 __ sllv(t1, t0, a3); | 2673 __ dsllv(t1, t0, a3); |
2643 __ addu(t0, t2, t1); | 2674 __ daddu(t0, t2, t1); |
2644 __ sllv(t1, a1, a3); | 2675 __ dsllv(t1, a1, a3); |
2645 __ addu(a2, t0, t1); | 2676 __ daddu(a2, t0, t1); |
2646 | 2677 |
2647 __ lw(t2, FieldMemOperand(subject, String::kLengthOffset)); | 2678 __ ld(t2, FieldMemOperand(subject, String::kLengthOffset)); |
2648 __ sra(t2, t2, kSmiTagSize); | 2679 |
2649 __ sllv(t1, t2, a3); | 2680 __ SmiUntag(t2); |
2650 __ addu(a3, t0, t1); | 2681 __ dsllv(t1, t2, a3); |
| 2682 __ daddu(a3, t0, t1); |
2651 // Argument 2 (a1): Previous index. | 2683 // Argument 2 (a1): Previous index. |
2652 // Already there | 2684 // Already there |
2653 | 2685 |
2654 // Argument 1 (a0): Subject string. | 2686 // Argument 1 (a0): Subject string. |
2655 __ mov(a0, subject); | 2687 __ mov(a0, subject); |
2656 | 2688 |
2657 // Locate the code entry and call it. | 2689 // Locate the code entry and call it. |
2658 __ Addu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag)); | 2690 __ Daddu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag)); |
2659 DirectCEntryStub stub(isolate()); | 2691 DirectCEntryStub stub(isolate()); |
2660 stub.GenerateCall(masm, t9); | 2692 stub.GenerateCall(masm, t9); |
2661 | 2693 |
2662 __ LeaveExitFrame(false, no_reg, true); | 2694 __ LeaveExitFrame(false, no_reg, true); |
2663 | 2695 |
2664 // v0: result | 2696 // v0: result |
2665 // subject: subject string (callee saved) | 2697 // subject: subject string (callee saved) |
2666 // regexp_data: RegExp data (callee saved) | 2698 // regexp_data: RegExp data (callee saved) |
2667 // last_match_info_elements: Last match info elements (callee saved) | 2699 // last_match_info_elements: Last match info elements (callee saved) |
2668 // Check the result. | 2700 // Check the result. |
2669 Label success; | 2701 Label success; |
2670 __ Branch(&success, eq, v0, Operand(1)); | 2702 __ Branch(&success, eq, v0, Operand(1)); |
2671 // We expect exactly one result since we force the called regexp to behave | 2703 // We expect exactly one result since we force the called regexp to behave |
2672 // as non-global. | 2704 // as non-global. |
2673 Label failure; | 2705 Label failure; |
2674 __ Branch(&failure, eq, v0, Operand(NativeRegExpMacroAssembler::FAILURE)); | 2706 __ Branch(&failure, eq, v0, Operand(NativeRegExpMacroAssembler::FAILURE)); |
2676 // If not an exception, it can only be a retry. Handle that in the runtime system. | 2708 // If not an exception, it can only be a retry. Handle that in the runtime system. |
2676 __ Branch(&runtime, ne, v0, Operand(NativeRegExpMacroAssembler::EXCEPTION)); | 2708 __ Branch(&runtime, ne, v0, Operand(NativeRegExpMacroAssembler::EXCEPTION)); |
2677 // Result must now be exception. If there is no pending exception already, a | 2709 // Result must now be exception. If there is no pending exception already, a |
2678 // stack overflow (on the backtrack stack) was detected in RegExp code, but | 2710 // stack overflow (on the backtrack stack) was detected in RegExp code, but |
2679 // the exception has not been created yet. Handle that in the runtime system. | 2711 // the exception has not been created yet. Handle that in the runtime system. |
2680 // TODO(592): Rerunning the RegExp to get the stack overflow exception. | 2712 // TODO(592): Rerunning the RegExp to get the stack overflow exception. |
2681 __ li(a1, Operand(isolate()->factory()->the_hole_value())); | 2713 __ li(a1, Operand(isolate()->factory()->the_hole_value())); |
2682 __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress, | 2714 __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress, |
2683 isolate()))); | 2715 isolate()))); |
2684 __ lw(v0, MemOperand(a2, 0)); | 2716 __ ld(v0, MemOperand(a2, 0)); |
2685 __ Branch(&runtime, eq, v0, Operand(a1)); | 2717 __ Branch(&runtime, eq, v0, Operand(a1)); |
2686 | 2718 |
2687 __ sw(a1, MemOperand(a2, 0)); // Clear pending exception. | 2719 __ sd(a1, MemOperand(a2, 0)); // Clear pending exception. |
2688 | 2720 |
2689 // Check if the exception is a termination. If so, throw as uncatchable. | 2721 // Check if the exception is a termination. If so, throw as uncatchable. |
2690 __ LoadRoot(a0, Heap::kTerminationExceptionRootIndex); | 2722 __ LoadRoot(a0, Heap::kTerminationExceptionRootIndex); |
2691 Label termination_exception; | 2723 Label termination_exception; |
2692 __ Branch(&termination_exception, eq, v0, Operand(a0)); | 2724 __ Branch(&termination_exception, eq, v0, Operand(a0)); |
2693 | 2725 |
2694 __ Throw(v0); | 2726 __ Throw(v0); |
2695 | 2727 |
2696 __ bind(&termination_exception); | 2728 __ bind(&termination_exception); |
2697 __ ThrowUncatchable(v0); | 2729 __ ThrowUncatchable(v0); |
2698 | 2730 |
2699 __ bind(&failure); | 2731 __ bind(&failure); |
2700 // For failure and exception return null. | 2732 // For failure and exception return null. |
2701 __ li(v0, Operand(isolate()->factory()->null_value())); | 2733 __ li(v0, Operand(isolate()->factory()->null_value())); |
2702 __ DropAndRet(4); | 2734 __ DropAndRet(4); |
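
For reference, the checks above dispatch on the native engine's four possible results in order. A compact C++ restatement (the enum values mirror NativeRegExpMacroAssembler's Result as I understand it; treat them as an assumption):

    enum RegExpResult { RETRY = -2, EXCEPTION = -1, FAILURE = 0, SUCCESS = 1 };

    enum Outcome { kReturnMatchInfo, kReturnNull, kThrow, kCallRuntime };

    Outcome HandleResult(RegExpResult r) {
      if (r == SUCCESS) return kReturnMatchInfo;  // exactly one match expected
      if (r == FAILURE) return kReturnNull;       // no match: return null
      if (r == EXCEPTION) return kThrow;          // pending or stack-overflow exception
      return kCallRuntime;                        // RETRY: rerun in the runtime
    }

    int main() { return HandleResult(SUCCESS) == kReturnMatchInfo ? 0 : 1; }
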
2703 | 2735 |
2704 // Process the result from the native regexp code. | 2736 // Process the result from the native regexp code. |
2705 __ bind(&success); | 2737 __ bind(&success); |
2706 __ lw(a1, | 2738 |
2707 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset)); | 2739 __ lw(a1, UntagSmiFieldMemOperand( |
| 2740 regexp_data, JSRegExp::kIrregexpCaptureCountOffset)); |
2708 // Calculate number of capture registers (number_of_captures + 1) * 2. | 2741 // Calculate number of capture registers (number_of_captures + 1) * 2. |
2709 // Multiplying by 2 comes for free since r1 is smi-tagged. | 2742 __ Daddu(a1, a1, Operand(1)); |
2710 STATIC_ASSERT(kSmiTag == 0); | 2743 __ dsll(a1, a1, 1); // Multiply by 2. |
2711 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); | |
2712 __ Addu(a1, a1, Operand(2)); // a1 was a smi. | |
2713 | 2744 |
2714 __ lw(a0, MemOperand(sp, kLastMatchInfoOffset)); | 2745 __ ld(a0, MemOperand(sp, kLastMatchInfoOffset)); |
2715 __ JumpIfSmi(a0, &runtime); | 2746 __ JumpIfSmi(a0, &runtime); |
2716 __ GetObjectType(a0, a2, a2); | 2747 __ GetObjectType(a0, a2, a2); |
2717 __ Branch(&runtime, ne, a2, Operand(JS_ARRAY_TYPE)); | 2748 __ Branch(&runtime, ne, a2, Operand(JS_ARRAY_TYPE)); |
2718 // Check that the JSArray is in fast case. | 2749 // Check that the JSArray is in fast case. |
2719 __ lw(last_match_info_elements, | 2750 __ ld(last_match_info_elements, |
2720 FieldMemOperand(a0, JSArray::kElementsOffset)); | 2751 FieldMemOperand(a0, JSArray::kElementsOffset)); |
2721 __ lw(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset)); | 2752 __ ld(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset)); |
2722 __ LoadRoot(at, Heap::kFixedArrayMapRootIndex); | 2753 __ LoadRoot(at, Heap::kFixedArrayMapRootIndex); |
2723 __ Branch(&runtime, ne, a0, Operand(at)); | 2754 __ Branch(&runtime, ne, a0, Operand(at)); |
2724 // Check that the last match info has space for the capture registers and the | 2755 // Check that the last match info has space for the capture registers and the |
2725 // additional information. | 2756 // additional information. |
2726 __ lw(a0, | 2757 __ ld(a0, |
2727 FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset)); | 2758 FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset)); |
2728 __ Addu(a2, a1, Operand(RegExpImpl::kLastMatchOverhead)); | 2759 __ Daddu(a2, a1, Operand(RegExpImpl::kLastMatchOverhead)); |
2729 __ sra(at, a0, kSmiTagSize); | 2760 |
| 2761 __ SmiUntag(at, a0); |
2730 __ Branch(&runtime, gt, a2, Operand(at)); | 2762 __ Branch(&runtime, gt, a2, Operand(at)); |
2731 | 2763 |
2732 // a1: number of capture registers | 2764 // a1: number of capture registers |
2733 // subject: subject string | 2765 // subject: subject string |
2734 // Store the capture count. | 2766 // Store the capture count. |
2735 __ sll(a2, a1, kSmiTagSize + kSmiShiftSize); // To smi. | 2767 __ SmiTag(a2, a1); // To smi. |
2736 __ sw(a2, FieldMemOperand(last_match_info_elements, | 2768 __ sd(a2, FieldMemOperand(last_match_info_elements, |
2737 RegExpImpl::kLastCaptureCountOffset)); | 2769 RegExpImpl::kLastCaptureCountOffset)); |
2738 // Store last subject and last input. | 2770 // Store last subject and last input. |
2739 __ sw(subject, | 2771 __ sd(subject, |
2740 FieldMemOperand(last_match_info_elements, | 2772 FieldMemOperand(last_match_info_elements, |
2741 RegExpImpl::kLastSubjectOffset)); | 2773 RegExpImpl::kLastSubjectOffset)); |
2742 __ mov(a2, subject); | 2774 __ mov(a2, subject); |
2743 __ RecordWriteField(last_match_info_elements, | 2775 __ RecordWriteField(last_match_info_elements, |
2744 RegExpImpl::kLastSubjectOffset, | 2776 RegExpImpl::kLastSubjectOffset, |
2745 subject, | 2777 subject, |
2746 t3, | 2778 a7, |
2747 kRAHasNotBeenSaved, | 2779 kRAHasNotBeenSaved, |
2748 kDontSaveFPRegs); | 2780 kDontSaveFPRegs); |
2749 __ mov(subject, a2); | 2781 __ mov(subject, a2); |
2750 __ sw(subject, | 2782 __ sd(subject, |
2751 FieldMemOperand(last_match_info_elements, | 2783 FieldMemOperand(last_match_info_elements, |
2752 RegExpImpl::kLastInputOffset)); | 2784 RegExpImpl::kLastInputOffset)); |
2753 __ RecordWriteField(last_match_info_elements, | 2785 __ RecordWriteField(last_match_info_elements, |
2754 RegExpImpl::kLastInputOffset, | 2786 RegExpImpl::kLastInputOffset, |
2755 subject, | 2787 subject, |
2756 t3, | 2788 a7, |
2757 kRAHasNotBeenSaved, | 2789 kRAHasNotBeenSaved, |
2758 kDontSaveFPRegs); | 2790 kDontSaveFPRegs); |
2759 | 2791 |
2760 // Get the static offsets vector filled by the native regexp code. | 2792 // Get the static offsets vector filled by the native regexp code. |
2761 ExternalReference address_of_static_offsets_vector = | 2793 ExternalReference address_of_static_offsets_vector = |
2762 ExternalReference::address_of_static_offsets_vector(isolate()); | 2794 ExternalReference::address_of_static_offsets_vector(isolate()); |
2763 __ li(a2, Operand(address_of_static_offsets_vector)); | 2795 __ li(a2, Operand(address_of_static_offsets_vector)); |
2764 | 2796 |
2765 // a1: number of capture registers | 2797 // a1: number of capture registers |
2766 // a2: offsets vector | 2798 // a2: offsets vector |
2767 Label next_capture, done; | 2799 Label next_capture, done; |
2768 // Capture register counter starts from number of capture registers and | 2800 // Capture register counter starts from number of capture registers and |
2769 // counts down until wrapping after zero. | 2801 // counts down until wrapping after zero. |
2770 __ Addu(a0, | 2802 __ Daddu(a0, |
2771 last_match_info_elements, | 2803 last_match_info_elements, |
2772 Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag)); | 2804 Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag)); |
2773 __ bind(&next_capture); | 2805 __ bind(&next_capture); |
2774 __ Subu(a1, a1, Operand(1)); | 2806 __ Dsubu(a1, a1, Operand(1)); |
2775 __ Branch(&done, lt, a1, Operand(zero_reg)); | 2807 __ Branch(&done, lt, a1, Operand(zero_reg)); |
2776 // Read the value from the static offsets vector buffer. | 2808 // Read the value from the static offsets vector buffer. |
2777 __ lw(a3, MemOperand(a2, 0)); | 2809 __ lw(a3, MemOperand(a2, 0)); |
2778 __ addiu(a2, a2, kPointerSize); | 2810 __ daddiu(a2, a2, kIntSize); |
2779 // Store the smi value in the last match info. | 2811 // Store the smi value in the last match info. |
2780 __ sll(a3, a3, kSmiTagSize); // Convert to Smi. | 2812 __ SmiTag(a3); |
2781 __ sw(a3, MemOperand(a0, 0)); | 2813 __ sd(a3, MemOperand(a0, 0)); |
2782 __ Branch(&next_capture, USE_DELAY_SLOT); | 2814 __ Branch(&next_capture, USE_DELAY_SLOT); |
2783 __ addiu(a0, a0, kPointerSize); // In branch delay slot. | 2815 __ daddiu(a0, a0, kPointerSize); // In branch delay slot. |
2784 | 2816 |
2785 __ bind(&done); | 2817 __ bind(&done); |
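
The loop just finished reads 4-byte offsets and writes 8-byte smi-tagged slots, which is why the source pointer advances by kIntSize while the destination advances by kPointerSize. A minimal C++ equivalent, assuming the 64-bit smi layout (payload in the upper 32 bits):

    #include <cstdint>
    #include <cstdio>

    const int kSmiShift = 32;  // assumption: 64-bit smis keep the payload high

    // dst: slots in the last-match-info FixedArray (8 bytes each, smi-tagged)
    // src: the static offsets vector the regexp engine filled (4-byte ints)
    void CopyCaptures(int64_t* dst, const int32_t* src, int capture_registers) {
      for (int i = 0; i < capture_registers; ++i) {
        int32_t offset = src[i];                             // lw; daddiu by kIntSize
        dst[i] = static_cast<int64_t>(offset) << kSmiShift;  // SmiTag; sd
      }
    }

    int main() {
      int32_t offsets[2] = {0, 5};
      int64_t slots[2];
      CopyCaptures(slots, offsets, 2);
      std::printf("%lld\n", (long long)(slots[1] >> kSmiShift));  // 5
    }
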
2786 | 2818 |
2787 // Return last match info. | 2819 // Return last match info. |
2788 __ lw(v0, MemOperand(sp, kLastMatchInfoOffset)); | 2820 __ ld(v0, MemOperand(sp, kLastMatchInfoOffset)); |
2789 __ DropAndRet(4); | 2821 __ DropAndRet(4); |
2790 | 2822 |
2791 // Do the runtime call to execute the regexp. | 2823 // Do the runtime call to execute the regexp. |
2792 __ bind(&runtime); | 2824 __ bind(&runtime); |
2793 __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1); | 2825 __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1); |
2794 | 2826 |
2795 // Deferred code for string handling. | 2827 // Deferred code for string handling. |
2796 // (6) Not a long external string? If yes, go to (8). | 2828 // (6) Not a long external string? If yes, go to (8). |
2797 __ bind(&not_seq_nor_cons); | 2829 __ bind(&not_seq_nor_cons); |
2798 // Go to (8). | 2830 // Go to (8). |
2799 __ Branch(&not_long_external, gt, a1, Operand(kExternalStringTag)); | 2831 __ Branch(&not_long_external, gt, a1, Operand(kExternalStringTag)); |
2800 | 2832 |
2801 // (7) External string. Make it, offset-wise, look like a sequential string. | 2833 // (7) External string. Make it, offset-wise, look like a sequential string. |
2802 __ bind(&external_string); | 2834 __ bind(&external_string); |
2803 __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset)); | 2835 __ ld(a0, FieldMemOperand(subject, HeapObject::kMapOffset)); |
2804 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset)); | 2836 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset)); |
2805 if (FLAG_debug_code) { | 2837 if (FLAG_debug_code) { |
2806 // Assert that we do not have a cons or slice (indirect strings) here. | 2838 // Assert that we do not have a cons or slice (indirect strings) here. |
2807 // Sequential strings have already been ruled out. | 2839 // Sequential strings have already been ruled out. |
2808 __ And(at, a0, Operand(kIsIndirectStringMask)); | 2840 __ And(at, a0, Operand(kIsIndirectStringMask)); |
2809 __ Assert(eq, | 2841 __ Assert(eq, |
2810 kExternalStringExpectedButNotFound, | 2842 kExternalStringExpectedButNotFound, |
2811 at, | 2843 at, |
2812 Operand(zero_reg)); | 2844 Operand(zero_reg)); |
2813 } | 2845 } |
2814 __ lw(subject, | 2846 __ ld(subject, |
2815 FieldMemOperand(subject, ExternalString::kResourceDataOffset)); | 2847 FieldMemOperand(subject, ExternalString::kResourceDataOffset)); |
2816 // Move the pointer so that offset-wise, it looks like a sequential string. | 2848 // Move the pointer so that offset-wise, it looks like a sequential string. |
2817 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize); | 2849 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize); |
2818 __ Subu(subject, | 2850 __ Dsubu(subject, |
2819 subject, | 2851 subject, |
2820 SeqTwoByteString::kHeaderSize - kHeapObjectTag); | 2852 SeqTwoByteString::kHeaderSize - kHeapObjectTag); |
2821 __ jmp(&seq_string); // Go to (5). | 2853 __ jmp(&seq_string); // Go to (5). |
2822 | 2854 |
2823 // (8) Short external string or not a string? If yes, bail out to runtime. | 2855 // (8) Short external string or not a string? If yes, bail out to runtime. |
2824 __ bind(&not_long_external); | 2856 __ bind(&not_long_external); |
2825 STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0); | 2857 STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0); |
2826 __ And(at, a1, Operand(kIsNotStringMask | kShortExternalStringMask)); | 2858 __ And(at, a1, Operand(kIsNotStringMask | kShortExternalStringMask)); |
2827 __ Branch(&runtime, ne, at, Operand(zero_reg)); | 2859 __ Branch(&runtime, ne, at, Operand(zero_reg)); |
2828 | 2860 |
2829 // (9) Sliced string. Replace subject with parent. Go to (4). | 2861 // (9) Sliced string. Replace subject with parent. Go to (4). |
2830 // Load offset into t0 and replace subject string with parent. | 2862 // Load offset into t0 and replace subject string with parent. |
2831 __ lw(t0, FieldMemOperand(subject, SlicedString::kOffsetOffset)); | 2863 __ ld(t0, FieldMemOperand(subject, SlicedString::kOffsetOffset)); |
2832 __ sra(t0, t0, kSmiTagSize); | 2864 __ SmiUntag(t0); |
2833 __ lw(subject, FieldMemOperand(subject, SlicedString::kParentOffset)); | 2865 __ ld(subject, FieldMemOperand(subject, SlicedString::kParentOffset)); |
2834 __ jmp(&check_underlying); // Go to (4). | 2866 __ jmp(&check_underlying); // Go to (4). |
2835 #endif // V8_INTERPRETED_REGEXP | 2867 #endif // V8_INTERPRETED_REGEXP |
2836 } | 2868 } |
2837 | 2869 |
2838 | 2870 |
2839 static void GenerateRecordCallTarget(MacroAssembler* masm) { | 2871 static void GenerateRecordCallTarget(MacroAssembler* masm) { |
2840 // Cache the called function in a feedback vector slot. Cache states | 2872 // Cache the called function in a feedback vector slot. Cache states |
2841 // are uninitialized, monomorphic (indicated by a JSFunction), and | 2873 // are uninitialized, monomorphic (indicated by a JSFunction), and |
2842 // megamorphic. | 2874 // megamorphic. |
2843 // a0 : number of arguments to the construct function | 2875 // a0 : number of arguments to the construct function |
2844 // a1 : the function to call | 2876 // a1 : the function to call |
2845 // a2 : Feedback vector | 2877 // a2 : Feedback vector |
2846 // a3 : slot in feedback vector (Smi) | 2878 // a3 : slot in feedback vector (Smi) |
2847 Label initialize, done, miss, megamorphic, not_array_function; | 2879 Label initialize, done, miss, megamorphic, not_array_function; |
2848 | 2880 |
2849 ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()), | 2881 ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()), |
2850 masm->isolate()->heap()->megamorphic_symbol()); | 2882 masm->isolate()->heap()->megamorphic_symbol()); |
2851 ASSERT_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()), | 2883 ASSERT_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()), |
2852 masm->isolate()->heap()->uninitialized_symbol()); | 2884 masm->isolate()->heap()->uninitialized_symbol()); |
2853 | 2885 |
2854 // Load the cache state into t0. | 2886 // Load the cache state into a4. |
2855 __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize); | 2887 __ dsrl(a4, a3, 32 - kPointerSizeLog2); |
2856 __ Addu(t0, a2, Operand(t0)); | 2888 __ Daddu(a4, a2, Operand(a4)); |
2857 __ lw(t0, FieldMemOperand(t0, FixedArray::kHeaderSize)); | 2889 __ ld(a4, FieldMemOperand(a4, FixedArray::kHeaderSize)); |
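
The dsrl by (32 - kPointerSizeLog2) is the 64-bit replacement for the old sll: since a smi keeps its payload in the upper 32 bits, one right shift turns the smi-tagged slot index straight into a byte offset (index * kPointerSize) without untagging first. A quick check of the arithmetic:

    #include <cassert>
    #include <cstdint>

    int main() {
      const int kPointerSizeLog2 = 3;  // 8-byte pointers
      for (int64_t slot = 0; slot < 1000; ++slot) {
        uint64_t smi = static_cast<uint64_t>(slot) << 32;  // smi-tag (64-bit scheme)
        uint64_t offset = smi >> (32 - kPointerSizeLog2);  // dsrl as in the stub
        assert(offset == static_cast<uint64_t>(slot) * 8); // byte offset into vector
      }
      return 0;
    }
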
2858 | 2890 |
2859 // A monomorphic cache hit or an already megamorphic state: invoke the | 2891 // A monomorphic cache hit or an already megamorphic state: invoke the |
2860 // function without changing the state. | 2892 // function without changing the state. |
2861 __ Branch(&done, eq, t0, Operand(a1)); | 2893 __ Branch(&done, eq, a4, Operand(a1)); |
2862 | 2894 |
2863 if (!FLAG_pretenuring_call_new) { | 2895 if (!FLAG_pretenuring_call_new) { |
2864 // If we came here, we need to see if we are the array function. | 2896 // If we came here, we need to see if we are the array function. |
2865 // If we didn't have a matching function, and we didn't find the megamorphic | 2897 // If we didn't have a matching function, and we didn't find the megamorphic |
2866 // sentinel, then we have in the slot either some other function or an | 2898 // sentinel, then we have in the slot either some other function or an |
2867 // AllocationSite. Do a map check on the object in a3. | 2899 // AllocationSite. Do a map check on the object in a3. |
2868 __ lw(t1, FieldMemOperand(t0, 0)); | 2900 __ ld(a5, FieldMemOperand(a4, 0)); |
2869 __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex); | 2901 __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex); |
2870 __ Branch(&miss, ne, t1, Operand(at)); | 2902 __ Branch(&miss, ne, a5, Operand(at)); |
2871 | 2903 |
2872 // Make sure the function is the Array() function | 2904 // Make sure the function is the Array() function |
2873 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t0); | 2905 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, a4); |
2874 __ Branch(&megamorphic, ne, a1, Operand(t0)); | 2906 __ Branch(&megamorphic, ne, a1, Operand(a4)); |
2875 __ jmp(&done); | 2907 __ jmp(&done); |
2876 } | 2908 } |
2877 | 2909 |
2878 __ bind(&miss); | 2910 __ bind(&miss); |
2879 | 2911 |
2880 // A monomorphic miss (i.e., here the cache is not uninitialized) goes | 2912 // A monomorphic miss (i.e., here the cache is not uninitialized) goes |
2881 // megamorphic. | 2913 // megamorphic. |
2882 __ LoadRoot(at, Heap::kUninitializedSymbolRootIndex); | 2914 __ LoadRoot(at, Heap::kUninitializedSymbolRootIndex); |
2883 __ Branch(&initialize, eq, t0, Operand(at)); | 2915 __ Branch(&initialize, eq, a4, Operand(at)); |
2884 // MegamorphicSentinel is an immortal immovable object (undefined) so no | 2916 // MegamorphicSentinel is an immortal immovable object (undefined) so no |
2885 // write-barrier is needed. | 2917 // write-barrier is needed. |
2886 __ bind(&megamorphic); | 2918 __ bind(&megamorphic); |
2887 __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize); | 2919 __ dsrl(a4, a3, 32 - kPointerSizeLog2); |
2888 __ Addu(t0, a2, Operand(t0)); | 2920 __ Daddu(a4, a2, Operand(a4)); |
2889 __ LoadRoot(at, Heap::kMegamorphicSymbolRootIndex); | 2921 __ LoadRoot(at, Heap::kMegamorphicSymbolRootIndex); |
2890 __ sw(at, FieldMemOperand(t0, FixedArray::kHeaderSize)); | 2922 __ sd(at, FieldMemOperand(a4, FixedArray::kHeaderSize)); |
2891 __ jmp(&done); | 2923 __ jmp(&done); |
2892 | 2924 |
2893 // An uninitialized cache is patched with the function. | 2925 // An uninitialized cache is patched with the function. |
2894 __ bind(&initialize); | 2926 __ bind(&initialize); |
2895 if (!FLAG_pretenuring_call_new) { | 2927 if (!FLAG_pretenuring_call_new) { |
2896 // Make sure the function is the Array() function. | 2928 // Make sure the function is the Array() function. |
2897 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t0); | 2929 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, a4); |
2898 __ Branch(&not_array_function, ne, a1, Operand(t0)); | 2930 __ Branch(&not_array_function, ne, a1, Operand(a4)); |
2899 | 2931 |
2900 // The target function is the Array constructor, | 2932 // The target function is the Array constructor, |
2901 // Create an AllocationSite if we don't already have it, store it in the | 2933 // Create an AllocationSite if we don't already have it, store it in the |
2902 // slot. | 2934 // slot. |
2903 { | 2935 { |
2904 FrameScope scope(masm, StackFrame::INTERNAL); | 2936 FrameScope scope(masm, StackFrame::INTERNAL); |
2905 const RegList kSavedRegs = | 2937 const RegList kSavedRegs = |
2906 1 << 4 | // a0 | 2938 1 << 4 | // a0 |
2907 1 << 5 | // a1 | 2939 1 << 5 | // a1 |
2908 1 << 6 | // a2 | 2940 1 << 6 | // a2 |
2909 1 << 7; // a3 | 2941 1 << 7; // a3 |
2910 | 2942 |
2911 // Arguments register must be smi-tagged to call out. | 2943 // Arguments register must be smi-tagged to call out. |
2912 __ SmiTag(a0); | 2944 __ SmiTag(a0); |
2913 __ MultiPush(kSavedRegs); | 2945 __ MultiPush(kSavedRegs); |
2914 | 2946 |
2915 CreateAllocationSiteStub create_stub(masm->isolate()); | 2947 CreateAllocationSiteStub create_stub(masm->isolate()); |
2916 __ CallStub(&create_stub); | 2948 __ CallStub(&create_stub); |
2917 | 2949 |
2918 __ MultiPop(kSavedRegs); | 2950 __ MultiPop(kSavedRegs); |
2919 __ SmiUntag(a0); | 2951 __ SmiUntag(a0); |
2920 } | 2952 } |
2921 __ Branch(&done); | 2953 __ Branch(&done); |
2922 | 2954 |
2923 __ bind(&not_array_function); | 2955 __ bind(&not_array_function); |
2924 } | 2956 } |
2925 | 2957 |
2926 __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize); | 2958 __ dsrl(a4, a3, 32 - kPointerSizeLog2); |
2927 __ Addu(t0, a2, Operand(t0)); | 2959 __ Daddu(a4, a2, Operand(a4)); |
2928 __ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 2960 __ Daddu(a4, a4, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
2929 __ sw(a1, MemOperand(t0, 0)); | 2961 __ sd(a1, MemOperand(a4, 0)); |
2930 | 2962 |
2931 __ Push(t0, a2, a1); | 2963 __ Push(a4, a2, a1); |
2932 __ RecordWrite(a2, t0, a1, kRAHasNotBeenSaved, kDontSaveFPRegs, | 2964 __ RecordWrite(a2, a4, a1, kRAHasNotBeenSaved, kDontSaveFPRegs, |
2933 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); | 2965 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); |
2934 __ Pop(t0, a2, a1); | 2966 __ Pop(a4, a2, a1); |
2935 | 2967 |
2936 __ bind(&done); | 2968 __ bind(&done); |
2937 } | 2969 } |
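
Taken together, GenerateRecordCallTarget is a three-state cache per feedback slot. A hedged C++ model of the transitions (it omits the Array-function/AllocationSite special case and uses stand-in types; the real slot holds a JSFunction, an AllocationSite, or one of two sentinel symbols):

    enum class SlotState { kUninitialized, kMonomorphic, kMegamorphic };

    struct Slot {
      SlotState state = SlotState::kUninitialized;
      const void* target = nullptr;  // cached function (or AllocationSite)
    };

    void RecordCallTarget(Slot* slot, const void* fn) {
      switch (slot->state) {
        case SlotState::kMonomorphic:
          if (slot->target == fn) return;         // cache hit: nothing to do
          slot->state = SlotState::kMegamorphic;  // miss: generalize
          slot->target = nullptr;
          return;
        case SlotState::kMegamorphic:
          return;                                 // already generic
        case SlotState::kUninitialized:
          slot->state = SlotState::kMonomorphic;  // patch in the function
          slot->target = fn;
          return;
      }
    }

    int main() {
      Slot s;
      int fn1 = 0, fn2 = 0;
      RecordCallTarget(&s, &fn1);  // uninitialized -> monomorphic
      RecordCallTarget(&s, &fn2);  // mismatch -> megamorphic
      return s.state == SlotState::kMegamorphic ? 0 : 1;
    }
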
2938 | 2970 |
2939 | 2971 |
2940 static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) { | 2972 static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) { |
2941 __ lw(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); | 2973 __ ld(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); |
2942 __ lw(t0, FieldMemOperand(a3, SharedFunctionInfo::kCompilerHintsOffset)); | |
2943 | 2974 |
2944 // Do not transform the receiver for strict mode functions. | 2975 // Do not transform the receiver for strict mode functions. |
2945 int32_t strict_mode_function_mask = | 2976 int32_t strict_mode_function_mask = |
2946 1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize); | 2977 1 << SharedFunctionInfo::kStrictModeBitWithinByte; |
2947 // Do not transform the receiver for native (Compilerhints already in a3). | 2978 // Do not transform the receiver for native (Compilerhints already in a3). |
2948 int32_t native_mask = 1 << (SharedFunctionInfo::kNative + kSmiTagSize); | 2979 int32_t native_mask = 1 << SharedFunctionInfo::kNativeBitWithinByte; |
2949 __ And(at, t0, Operand(strict_mode_function_mask | native_mask)); | 2980 |
| 2981 __ lbu(a4, FieldMemOperand(a3, SharedFunctionInfo::kStrictModeByteOffset)); |
| 2982 __ And(at, a4, Operand(strict_mode_function_mask)); |
| 2983 __ Branch(cont, ne, at, Operand(zero_reg)); |
| 2984 __ lbu(a4, FieldMemOperand(a3, SharedFunctionInfo::kNativeByteOffset)); |
| 2985 __ And(at, a4, Operand(native_mask)); |
2950 __ Branch(cont, ne, at, Operand(zero_reg)); | 2986 __ Branch(cont, ne, at, Operand(zero_reg)); |
2951 } | 2987 } |
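
The port splits the old single word load of the compiler hints into two byte loads, one per flag. A sketch of the resulting predicate (the bit positions are illustrative assumptions, not SharedFunctionInfo's real layout):

    #include <cstdint>

    const int kStrictModeBitWithinByte = 6;  // assumption, for illustration
    const int kNativeBitWithinByte = 4;      // assumption, for illustration

    // True if the receiver should be left untouched (strict or native function).
    bool SkipReceiverTransform(uint8_t strict_mode_byte, uint8_t native_byte) {
      if (strict_mode_byte & (1 << kStrictModeBitWithinByte)) return true;  // lbu + And
      return (native_byte & (1 << kNativeBitWithinByte)) != 0;              // lbu + And
    }

    int main() {
      return SkipReceiverTransform(1 << kStrictModeBitWithinByte, 0) ? 0 : 1;
    }
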
2952 | 2988 |
2953 | 2989 |
2954 static void EmitSlowCase(MacroAssembler* masm, | 2990 static void EmitSlowCase(MacroAssembler* masm, |
2955 int argc, | 2991 int argc, |
2956 Label* non_function) { | 2992 Label* non_function) { |
2957 // Check for function proxy. | 2993 // Check for function proxy. |
2958 __ Branch(non_function, ne, t0, Operand(JS_FUNCTION_PROXY_TYPE)); | 2994 __ Branch(non_function, ne, a4, Operand(JS_FUNCTION_PROXY_TYPE)); |
2959 __ push(a1); // put proxy as additional argument | 2995 __ push(a1); // put proxy as additional argument |
2960 __ li(a0, Operand(argc + 1, RelocInfo::NONE32)); | 2996 __ li(a0, Operand(argc + 1, RelocInfo::NONE32)); |
2961 __ mov(a2, zero_reg); | 2997 __ mov(a2, zero_reg); |
2962 __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY); | 2998 __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY); |
2963 { | 2999 { |
2964 Handle<Code> adaptor = | 3000 Handle<Code> adaptor = |
2965 masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(); | 3001 masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(); |
2966 __ Jump(adaptor, RelocInfo::CODE_TARGET); | 3002 __ Jump(adaptor, RelocInfo::CODE_TARGET); |
2967 } | 3003 } |
2968 | 3004 |
2969 // CALL_NON_FUNCTION expects the non-function callee as receiver (instead | 3005 // CALL_NON_FUNCTION expects the non-function callee as receiver (instead |
2970 // of the original receiver from the call site). | 3006 // of the original receiver from the call site). |
2971 __ bind(non_function); | 3007 __ bind(non_function); |
2972 __ sw(a1, MemOperand(sp, argc * kPointerSize)); | 3008 __ sd(a1, MemOperand(sp, argc * kPointerSize)); |
2973 __ li(a0, Operand(argc)); // Set up the number of arguments. | 3009 __ li(a0, Operand(argc)); // Set up the number of arguments. |
2974 __ mov(a2, zero_reg); | 3010 __ mov(a2, zero_reg); |
2975 __ GetBuiltinFunction(a1, Builtins::CALL_NON_FUNCTION); | 3011 __ GetBuiltinFunction(a1, Builtins::CALL_NON_FUNCTION); |
2976 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), | 3012 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), |
2977 RelocInfo::CODE_TARGET); | 3013 RelocInfo::CODE_TARGET); |
2978 } | 3014 } |
2979 | 3015 |
2980 | 3016 |
2981 static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) { | 3017 static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) { |
2982 // Wrap the receiver and patch it back onto the stack. | 3018 // Wrap the receiver and patch it back onto the stack. |
2983 { FrameScope frame_scope(masm, StackFrame::INTERNAL); | 3019 { FrameScope frame_scope(masm, StackFrame::INTERNAL); |
2984 __ Push(a1, a3); | 3020 __ Push(a1, a3); |
2985 __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); | 3021 __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); |
2986 __ pop(a1); | 3022 __ pop(a1); |
2987 } | 3023 } |
2988 __ Branch(USE_DELAY_SLOT, cont); | 3024 __ Branch(USE_DELAY_SLOT, cont); |
2989 __ sw(v0, MemOperand(sp, argc * kPointerSize)); | 3025 __ sd(v0, MemOperand(sp, argc * kPointerSize)); |
2990 } | 3026 } |
2991 | 3027 |
2992 | 3028 |
2993 static void CallFunctionNoFeedback(MacroAssembler* masm, | 3029 static void CallFunctionNoFeedback(MacroAssembler* masm, |
2994 int argc, bool needs_checks, | 3030 int argc, bool needs_checks, |
2995 bool call_as_method) { | 3031 bool call_as_method) { |
2996 // a1 : the function to call | 3032 // a1 : the function to call |
2997 Label slow, non_function, wrap, cont; | 3033 Label slow, non_function, wrap, cont; |
2998 | 3034 |
2999 if (needs_checks) { | 3035 if (needs_checks) { |
3000 // Check that the function is really a JavaScript function. | 3036 // Check that the function is really a JavaScript function. |
3001 // a1: pushed function (to be verified) | 3037 // a1: pushed function (to be verified) |
3002 __ JumpIfSmi(a1, &non_function); | 3038 __ JumpIfSmi(a1, &non_function); |
3003 | 3039 |
3004 // Go to the slow case if we do not have a function. | 3040 // Go to the slow case if we do not have a function. |
3005 __ GetObjectType(a1, t0, t0); | 3041 __ GetObjectType(a1, a4, a4); |
3006 __ Branch(&slow, ne, t0, Operand(JS_FUNCTION_TYPE)); | 3042 __ Branch(&slow, ne, a4, Operand(JS_FUNCTION_TYPE)); |
3007 } | 3043 } |
3008 | 3044 |
3009 // Fast-case: Invoke the function now. | 3045 // Fast-case: Invoke the function now. |
3010 // a1: pushed function | 3046 // a1: pushed function |
3011 ParameterCount actual(argc); | 3047 ParameterCount actual(argc); |
3012 | 3048 |
3013 if (call_as_method) { | 3049 if (call_as_method) { |
3014 if (needs_checks) { | 3050 if (needs_checks) { |
3015 EmitContinueIfStrictOrNative(masm, &cont); | 3051 EmitContinueIfStrictOrNative(masm, &cont); |
3016 } | 3052 } |
3017 | 3053 |
3018 // Compute the receiver in sloppy mode. | 3054 // Compute the receiver in sloppy mode. |
3019 __ lw(a3, MemOperand(sp, argc * kPointerSize)); | 3055 __ ld(a3, MemOperand(sp, argc * kPointerSize)); |
3020 | 3056 |
3021 if (needs_checks) { | 3057 if (needs_checks) { |
3022 __ JumpIfSmi(a3, &wrap); | 3058 __ JumpIfSmi(a3, &wrap); |
3023 __ GetObjectType(a3, t0, t0); | 3059 __ GetObjectType(a3, a4, a4); |
3024 __ Branch(&wrap, lt, t0, Operand(FIRST_SPEC_OBJECT_TYPE)); | 3060 __ Branch(&wrap, lt, a4, Operand(FIRST_SPEC_OBJECT_TYPE)); |
3025 } else { | 3061 } else { |
3026 __ jmp(&wrap); | 3062 __ jmp(&wrap); |
3027 } | 3063 } |
3028 | 3064 |
3029 __ bind(&cont); | 3065 __ bind(&cont); |
3030 } | 3066 } |
3031 | |
3032 __ InvokeFunction(a1, actual, JUMP_FUNCTION, NullCallWrapper()); | 3067 __ InvokeFunction(a1, actual, JUMP_FUNCTION, NullCallWrapper()); |
3033 | 3068 |
3034 if (needs_checks) { | 3069 if (needs_checks) { |
3035 // Slow-case: Non-function called. | 3070 // Slow-case: Non-function called. |
3036 __ bind(&slow); | 3071 __ bind(&slow); |
3037 EmitSlowCase(masm, argc, &non_function); | 3072 EmitSlowCase(masm, argc, &non_function); |
3038 } | 3073 } |
3039 | 3074 |
3040 if (call_as_method) { | 3075 if (call_as_method) { |
3041 __ bind(&wrap); | 3076 __ bind(&wrap); |
3042 // Wrap the receiver and patch it back onto the stack. | 3077 // Wrap the receiver and patch it back onto the stack. |
3043 EmitWrapCase(masm, argc, &cont); | 3078 EmitWrapCase(masm, argc, &cont); |
3044 } | 3079 } |
3045 } | 3080 } |
3046 | 3081 |
3047 | 3082 |
3048 void CallFunctionStub::Generate(MacroAssembler* masm) { | 3083 void CallFunctionStub::Generate(MacroAssembler* masm) { |
3049 CallFunctionNoFeedback(masm, argc_, NeedsChecks(), CallAsMethod()); | 3084 CallFunctionNoFeedback(masm, argc_, NeedsChecks(), CallAsMethod()); |
3050 } | 3085 } |
3051 | 3086 |
3052 | 3087 |
3053 void CallConstructStub::Generate(MacroAssembler* masm) { | 3088 void CallConstructStub::Generate(MacroAssembler* masm) { |
3054 // a0 : number of arguments | 3089 // a0 : number of arguments |
3055 // a1 : the function to call | 3090 // a1 : the function to call |
3056 // a2 : feedback vector | 3091 // a2 : feedback vector |
3057 // a3 : (only if a2 is not undefined) slot in feedback vector (Smi) | 3092 // a3 : (only if a2 is not undefined) slot in feedback vector (Smi) |
3058 Label slow, non_function_call; | 3093 Label slow, non_function_call; |
3059 | |
3060 // Check that the function is not a smi. | 3094 // Check that the function is not a smi. |
3061 __ JumpIfSmi(a1, &non_function_call); | 3095 __ JumpIfSmi(a1, &non_function_call); |
3062 // Check that the function is a JSFunction. | 3096 // Check that the function is a JSFunction. |
3063 __ GetObjectType(a1, t0, t0); | 3097 __ GetObjectType(a1, a4, a4); |
3064 __ Branch(&slow, ne, t0, Operand(JS_FUNCTION_TYPE)); | 3098 __ Branch(&slow, ne, a4, Operand(JS_FUNCTION_TYPE)); |
3065 | 3099 |
3066 if (RecordCallTarget()) { | 3100 if (RecordCallTarget()) { |
3067 GenerateRecordCallTarget(masm); | 3101 GenerateRecordCallTarget(masm); |
3068 | 3102 |
3069 __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize); | 3103 __ dsrl(at, a3, 32 - kPointerSizeLog2); |
3070 __ Addu(t1, a2, at); | 3104 __ Daddu(a5, a2, at); |
3071 if (FLAG_pretenuring_call_new) { | 3105 if (FLAG_pretenuring_call_new) { |
3072 // Put the AllocationSite from the feedback vector into a2. | 3106 // Put the AllocationSite from the feedback vector into a2. |
3073 // By adding kPointerSize we encode that we know the AllocationSite | 3107 // By adding kPointerSize we encode that we know the AllocationSite |
3074 // entry is at the feedback vector slot given by a3 + 1. | 3108 // entry is at the feedback vector slot given by a3 + 1. |
3075 __ lw(a2, FieldMemOperand(t1, FixedArray::kHeaderSize + kPointerSize)); | 3109 __ ld(a2, FieldMemOperand(a5, FixedArray::kHeaderSize + kPointerSize)); |
3076 } else { | 3110 } else { |
3077 Label feedback_register_initialized; | 3111 Label feedback_register_initialized; |
3078 // Put the AllocationSite from the feedback vector into a2, or undefined. | 3112 // Put the AllocationSite from the feedback vector into a2, or undefined. |
3079 __ lw(a2, FieldMemOperand(t1, FixedArray::kHeaderSize)); | 3113 __ ld(a2, FieldMemOperand(a5, FixedArray::kHeaderSize)); |
3080 __ lw(t1, FieldMemOperand(a2, AllocationSite::kMapOffset)); | 3114 __ ld(a5, FieldMemOperand(a2, AllocationSite::kMapOffset)); |
3081 __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex); | 3115 __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex); |
3082 __ Branch(&feedback_register_initialized, eq, t1, Operand(at)); | 3116 __ Branch(&feedback_register_initialized, eq, a5, Operand(at)); |
3083 __ LoadRoot(a2, Heap::kUndefinedValueRootIndex); | 3117 __ LoadRoot(a2, Heap::kUndefinedValueRootIndex); |
3084 __ bind(&feedback_register_initialized); | 3118 __ bind(&feedback_register_initialized); |
3085 } | 3119 } |
3086 | 3120 |
3087 __ AssertUndefinedOrAllocationSite(a2, t1); | 3121 __ AssertUndefinedOrAllocationSite(a2, a5); |
3088 } | 3122 } |
3089 | 3123 |
3090 // Jump to the function-specific construct stub. | 3124 // Jump to the function-specific construct stub. |
3091 Register jmp_reg = t0; | 3125 Register jmp_reg = a4; |
3092 __ lw(jmp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); | 3126 __ ld(jmp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); |
3093 __ lw(jmp_reg, FieldMemOperand(jmp_reg, | 3127 __ ld(jmp_reg, FieldMemOperand(jmp_reg, |
3094 SharedFunctionInfo::kConstructStubOffset)); | 3128 SharedFunctionInfo::kConstructStubOffset)); |
3095 __ Addu(at, jmp_reg, Operand(Code::kHeaderSize - kHeapObjectTag)); | 3129 __ Daddu(at, jmp_reg, Operand(Code::kHeaderSize - kHeapObjectTag)); |
3096 __ Jump(at); | 3130 __ Jump(at); |
3097 | 3131 |
3098 // a0: number of arguments | 3132 // a0: number of arguments |
3099 // a1: called object | 3133 // a1: called object |
3100 // t0: object type | 3134 // a4: object type |
3101 Label do_call; | 3135 Label do_call; |
3102 __ bind(&slow); | 3136 __ bind(&slow); |
3103 __ Branch(&non_function_call, ne, t0, Operand(JS_FUNCTION_PROXY_TYPE)); | 3137 __ Branch(&non_function_call, ne, a4, Operand(JS_FUNCTION_PROXY_TYPE)); |
3104 __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR); | 3138 __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR); |
3105 __ jmp(&do_call); | 3139 __ jmp(&do_call); |
3106 | 3140 |
3107 __ bind(&non_function_call); | 3141 __ bind(&non_function_call); |
3108 __ GetBuiltinFunction(a1, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR); | 3142 __ GetBuiltinFunction(a1, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR); |
3109 __ bind(&do_call); | 3143 __ bind(&do_call); |
3110 // Set expected number of arguments to zero (not changing a0). | 3144 // Set expected number of arguments to zero (not changing a0). |
3111 __ li(a2, Operand(0, RelocInfo::NONE32)); | 3145 __ li(a2, Operand(0, RelocInfo::NONE32)); |
3112 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), | 3146 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), |
3113 RelocInfo::CODE_TARGET); | 3147 RelocInfo::CODE_TARGET); |
3114 } | 3148 } |
3115 | 3149 |
3116 | 3150 |
| 3151 // StringCharCodeAtGenerator. |
| 3152 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) { |
| 3153 Label flat_string; |
| 3154 Label ascii_string; |
| 3155 Label got_char_code; |
| 3156 Label sliced_string; |
| 3157 |
| 3158 ASSERT(!a4.is(index_)); |
| 3159 ASSERT(!a4.is(result_)); |
| 3160 ASSERT(!a4.is(object_)); |
| 3161 |
| 3162 // If the receiver is a smi trigger the non-string case. |
| 3163 __ JumpIfSmi(object_, receiver_not_string_); |
| 3164 |
| 3165 // Fetch the instance type of the receiver into result register. |
| 3166 __ ld(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); |
| 3167 __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); |
| 3168 // If the receiver is not a string trigger the non-string case. |
| 3169 __ And(a4, result_, Operand(kIsNotStringMask)); |
| 3170 __ Branch(receiver_not_string_, ne, a4, Operand(zero_reg)); |
| 3171 |
| 3172 // If the index is non-smi trigger the non-smi case. |
| 3173 __ JumpIfNotSmi(index_, &index_not_smi_); |
| 3174 |
| 3175 __ bind(&got_smi_index_); |
| 3176 |
| 3177 // Check for index out of range. |
| 3178 __ ld(a4, FieldMemOperand(object_, String::kLengthOffset)); |
| 3179 __ Branch(index_out_of_range_, ls, a4, Operand(index_)); |
| 3180 |
| 3181 __ SmiUntag(index_); |
| 3182 |
| 3183 StringCharLoadGenerator::Generate(masm, |
| 3184 object_, |
| 3185 index_, |
| 3186 result_, |
| 3187 &call_runtime_); |
| 3188 |
| 3189 __ SmiTag(result_); |
| 3190 __ bind(&exit_); |
| 3191 } |
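The bounds check above leans on an unsigned comparison: Branch(index_out_of_range_, ls, a4, Operand(index_)) branches when length <= index treated as unsigned, which rejects both an index past the end and a negative index in a single test, since a negative value reinterpreted as unsigned is enormous. A small sketch of the idea, using plain integers rather than smi-tagged values:

    #include <cassert>
    #include <cstdint>

    // Mirrors the "ls" (unsigned lower-or-same) branch: out of range when
    // length <= index as unsigned values.
    bool IndexInRange(int64_t length, int64_t index) {
      return static_cast<uint64_t>(length) > static_cast<uint64_t>(index);
    }

    int main() {
      assert(IndexInRange(5, 0));
      assert(IndexInRange(5, 4));
      assert(!IndexInRange(5, 5));   // one past the end
      assert(!IndexInRange(5, -1));  // negative: huge as unsigned
      return 0;
    }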
| 3192 |
| 3193 |
3117 static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) { | 3194 static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) { |
3118 __ lw(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); | 3195 __ ld(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); |
3119 __ lw(vector, FieldMemOperand(vector, | 3196 __ ld(vector, FieldMemOperand(vector, |
3120 JSFunction::kSharedFunctionInfoOffset)); | 3197 JSFunction::kSharedFunctionInfoOffset)); |
3121 __ lw(vector, FieldMemOperand(vector, | 3198 __ ld(vector, FieldMemOperand(vector, |
3122 SharedFunctionInfo::kFeedbackVectorOffset)); | 3199 SharedFunctionInfo::kFeedbackVectorOffset)); |
3123 } | 3200 } |
3124 | 3201 |
3125 | 3202 |
3126 void CallIC_ArrayStub::Generate(MacroAssembler* masm) { | 3203 void CallIC_ArrayStub::Generate(MacroAssembler* masm) { |
3127 // a1 - function | 3204 // a1 - function |
3128 // a3 - slot id | 3205 // a3 - slot id |
3129 Label miss; | 3206 Label miss; |
3130 | 3207 |
3131 EmitLoadTypeFeedbackVector(masm, a2); | 3208 EmitLoadTypeFeedbackVector(masm, a2); |
3132 | 3209 |
3133 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, at); | 3210 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, at); |
3134 __ Branch(&miss, ne, a1, Operand(at)); | 3211 __ Branch(&miss, ne, a1, Operand(at)); |
3135 | 3212 |
3136 __ li(a0, Operand(arg_count())); | 3213 __ li(a0, Operand(arg_count())); |
3137 __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize); | 3214 __ dsrl(at, a3, 32 - kPointerSizeLog2); |
3138 __ Addu(at, a2, Operand(at)); | 3215 __ Daddu(at, a2, Operand(at)); |
3139 __ lw(a2, FieldMemOperand(at, FixedArray::kHeaderSize)); | 3216 __ ld(a2, FieldMemOperand(at, FixedArray::kHeaderSize)); |
3140 // Verify that a2 contains an AllocationSite | 3217 // Verify that a2 contains an AllocationSite |
3141 __ AssertUndefinedOrAllocationSite(a2, at); | 3218 __ AssertUndefinedOrAllocationSite(a2, at); |
3142 ArrayConstructorStub stub(masm->isolate(), arg_count()); | 3219 ArrayConstructorStub stub(masm->isolate(), arg_count()); |
3143 __ TailCallStub(&stub); | 3220 __ TailCallStub(&stub); |
3144 | 3221 |
3145 __ bind(&miss); | 3222 __ bind(&miss); |
3146 GenerateMiss(masm, IC::kCallIC_Customization_Miss); | 3223 GenerateMiss(masm, IC::kCallIC_Customization_Miss); |
3147 | 3224 |
3148 // The slow case, we need this no matter what to complete a call after a miss. | 3225 // The slow case; we need this no matter what to complete a call after a miss. |
3149 CallFunctionNoFeedback(masm, | 3226 CallFunctionNoFeedback(masm, |
3150 arg_count(), | 3227 arg_count(), |
3151 true, | 3228 true, |
3152 CallAsMethod()); | 3229 CallAsMethod()); |
3153 | 3230 |
3154 // Unreachable. | 3231 // Unreachable. |
3155 __ stop("Unexpected code address"); | 3232 __ stop("Unexpected code address"); |
3156 } | 3233 } |
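The slot-to-offset computation above is the 64-bit counterpart of the old sll: with the smi layout this port uses (32-bit payload in the upper word of a 64-bit register), dsrl(at, a3, 32 - kPointerSizeLog2) shifts the payload directly into place as a byte offset, i.e. slot * kPointerSize, ready to add to the feedback-vector base. A minimal sketch, assuming that layout:

    #include <cassert>
    #include <cstdint>

    int main() {
      const int kPointerSizeLog2 = 3;  // 8-byte pointers on MIPS64
      int64_t slot = 5;                // feedback-vector slot id
      uint64_t smi = static_cast<uint64_t>(slot) << 32;  // tagged smi
      // dsrl(at, a3, 32 - kPointerSizeLog2) computes:
      uint64_t byte_offset = smi >> (32 - kPointerSizeLog2);
      assert(byte_offset == static_cast<uint64_t>(slot) << kPointerSizeLog2);
      return 0;
    }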
3157 | 3234 |
3158 | 3235 |
3159 void CallICStub::Generate(MacroAssembler* masm) { | 3236 void CallICStub::Generate(MacroAssembler* masm) { |
3160 // r1 - function | 3237 // a1 - function |
3161 // r3 - slot id (Smi) | 3238 // a3 - slot id (Smi) |
3162 Label extra_checks_or_miss, slow_start; | 3239 Label extra_checks_or_miss, slow_start; |
3163 Label slow, non_function, wrap, cont; | 3240 Label slow, non_function, wrap, cont; |
3164 Label have_js_function; | 3241 Label have_js_function; |
3165 int argc = state_.arg_count(); | 3242 int argc = state_.arg_count(); |
3166 ParameterCount actual(argc); | 3243 ParameterCount actual(argc); |
3167 | 3244 |
3168 EmitLoadTypeFeedbackVector(masm, a2); | 3245 EmitLoadTypeFeedbackVector(masm, a2); |
3169 | 3246 |
3170 // The checks. First, does r1 match the recorded monomorphic target? | 3247 // The checks. First, does a1 match the recorded monomorphic target? |
3171 __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize); | 3248 __ dsrl(a4, a3, 32 - kPointerSizeLog2); |
3172 __ Addu(t0, a2, Operand(t0)); | 3249 __ Daddu(a4, a2, Operand(a4)); |
3173 __ lw(t0, FieldMemOperand(t0, FixedArray::kHeaderSize)); | 3250 __ ld(a4, FieldMemOperand(a4, FixedArray::kHeaderSize)); |
3174 __ Branch(&extra_checks_or_miss, ne, a1, Operand(t0)); | 3251 __ Branch(&extra_checks_or_miss, ne, a1, Operand(a4)); |
3175 | 3252 |
3176 __ bind(&have_js_function); | 3253 __ bind(&have_js_function); |
3177 if (state_.CallAsMethod()) { | 3254 if (state_.CallAsMethod()) { |
3178 EmitContinueIfStrictOrNative(masm, &cont); | 3255 EmitContinueIfStrictOrNative(masm, &cont); |
3179 // Compute the receiver in sloppy mode. | 3256 // Compute the receiver in sloppy mode. |
3180 __ lw(a3, MemOperand(sp, argc * kPointerSize)); | 3257 __ ld(a3, MemOperand(sp, argc * kPointerSize)); |
3181 | 3258 |
3182 __ JumpIfSmi(a3, &wrap); | 3259 __ JumpIfSmi(a3, &wrap); |
3183 __ GetObjectType(a3, t0, t0); | 3260 __ GetObjectType(a3, a4, a4); |
3184 __ Branch(&wrap, lt, t0, Operand(FIRST_SPEC_OBJECT_TYPE)); | 3261 __ Branch(&wrap, lt, a4, Operand(FIRST_SPEC_OBJECT_TYPE)); |
3185 | 3262 |
3186 __ bind(&cont); | 3263 __ bind(&cont); |
3187 } | 3264 } |
3188 | 3265 |
3189 __ InvokeFunction(a1, actual, JUMP_FUNCTION, NullCallWrapper()); | 3266 __ InvokeFunction(a1, actual, JUMP_FUNCTION, NullCallWrapper()); |
3190 | 3267 |
3191 __ bind(&slow); | 3268 __ bind(&slow); |
3192 EmitSlowCase(masm, argc, &non_function); | 3269 EmitSlowCase(masm, argc, &non_function); |
3193 | 3270 |
3194 if (state_.CallAsMethod()) { | 3271 if (state_.CallAsMethod()) { |
3195 __ bind(&wrap); | 3272 __ bind(&wrap); |
3196 EmitWrapCase(masm, argc, &cont); | 3273 EmitWrapCase(masm, argc, &cont); |
3197 } | 3274 } |
3198 | 3275 |
3199 __ bind(&extra_checks_or_miss); | 3276 __ bind(&extra_checks_or_miss); |
3200 Label miss; | 3277 Label miss; |
3201 | 3278 |
3202 __ LoadRoot(at, Heap::kMegamorphicSymbolRootIndex); | 3279 __ LoadRoot(at, Heap::kMegamorphicSymbolRootIndex); |
3203 __ Branch(&slow_start, eq, t0, Operand(at)); | 3280 __ Branch(&slow_start, eq, a4, Operand(at)); |
3204 __ LoadRoot(at, Heap::kUninitializedSymbolRootIndex); | 3281 __ LoadRoot(at, Heap::kUninitializedSymbolRootIndex); |
3205 __ Branch(&miss, eq, t0, Operand(at)); | 3282 __ Branch(&miss, eq, a4, Operand(at)); |
3206 | 3283 |
3207 if (!FLAG_trace_ic) { | 3284 if (!FLAG_trace_ic) { |
3208 // We are going megamorphic, and we don't want to visit the runtime. | 3285 // We are going megamorphic, and we don't want to visit the runtime. |
3209 __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize); | 3286 __ dsrl(a4, a3, 32 - kPointerSizeLog2); |
3210 __ Addu(t0, a2, Operand(t0)); | 3287 __ Daddu(a4, a2, Operand(a4)); |
3211 __ LoadRoot(at, Heap::kMegamorphicSymbolRootIndex); | 3288 __ LoadRoot(at, Heap::kMegamorphicSymbolRootIndex); |
3212 __ sw(at, FieldMemOperand(t0, FixedArray::kHeaderSize)); | 3289 __ sd(at, FieldMemOperand(a4, FixedArray::kHeaderSize)); |
3213 __ Branch(&slow_start); | 3290 __ Branch(&slow_start); |
3214 } | 3291 } |
3215 | 3292 |
3216 // We are here because tracing is on or we are going monomorphic. | 3293 // We are here because tracing is on or we are going monomorphic. |
3217 __ bind(&miss); | 3294 __ bind(&miss); |
3218 GenerateMiss(masm, IC::kCallIC_Miss); | 3295 GenerateMiss(masm, IC::kCallIC_Miss); |
3219 | 3296 |
3220 // the slow case | 3297 // The slow case. |
3221 __ bind(&slow_start); | 3298 __ bind(&slow_start); |
3222 // Check that the function is really a JavaScript function. | 3299 // Check that the function is really a JavaScript function. |
3223 // r1: pushed function (to be verified) | 3300 // a1: pushed function (to be verified) |
3224 __ JumpIfSmi(a1, &non_function); | 3301 __ JumpIfSmi(a1, &non_function); |
3225 | 3302 |
3226 // Goto slow case if we do not have a function. | 3303 // Goto slow case if we do not have a function. |
3227 __ GetObjectType(a1, t0, t0); | 3304 __ GetObjectType(a1, a4, a4); |
3228 __ Branch(&slow, ne, t0, Operand(JS_FUNCTION_TYPE)); | 3305 __ Branch(&slow, ne, a4, Operand(JS_FUNCTION_TYPE)); |
3229 __ Branch(&have_js_function); | 3306 __ Branch(&have_js_function); |
3230 } | 3307 } |
3231 | 3308 |
3232 | 3309 |
3233 void CallICStub::GenerateMiss(MacroAssembler* masm, IC::UtilityId id) { | 3310 void CallICStub::GenerateMiss(MacroAssembler* masm, IC::UtilityId id) { |
3234 // Get the receiver of the function from the stack; 1 ~ return address. | 3311 // Get the receiver of the function from the stack; 1 ~ return address. |
3235 __ lw(t0, MemOperand(sp, (state_.arg_count() + 1) * kPointerSize)); | 3312 __ ld(a4, MemOperand(sp, (state_.arg_count() + 1) * kPointerSize)); |
3236 | 3313 |
3237 { | 3314 { |
3238 FrameScope scope(masm, StackFrame::INTERNAL); | 3315 FrameScope scope(masm, StackFrame::INTERNAL); |
3239 | 3316 |
3240 // Push the receiver and the function and feedback info. | 3317 // Push the receiver and the function and feedback info. |
3241 __ Push(t0, a1, a2, a3); | 3318 __ Push(a4, a1, a2, a3); |
3242 | 3319 |
3243 // Call the entry. | 3320 // Call the entry. |
3244 ExternalReference miss = ExternalReference(IC_Utility(id), | 3321 ExternalReference miss = ExternalReference(IC_Utility(id), |
3245 masm->isolate()); | 3322 masm->isolate()); |
3246 __ CallExternalReference(miss, 4); | 3323 __ CallExternalReference(miss, 4); |
3247 | 3324 |
3248 // Move result to a1 and exit the internal frame. | 3325 // Move result to a1 and exit the internal frame. |
3249 __ mov(a1, v0); | 3326 __ mov(a1, v0); |
3250 } | 3327 } |
3251 } | 3328 } |
3252 | 3329 |
3253 | 3330 |
3254 // StringCharCodeAtGenerator. | |
3255 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) { | |
3256 Label flat_string; | |
3257 Label ascii_string; | |
3258 Label got_char_code; | |
3259 Label sliced_string; | |
3260 | |
3261 ASSERT(!t0.is(index_)); | |
3262 ASSERT(!t0.is(result_)); | |
3263 ASSERT(!t0.is(object_)); | |
3264 | |
3265 // If the receiver is a smi trigger the non-string case. | |
3266 __ JumpIfSmi(object_, receiver_not_string_); | |
3267 | |
3268 // Fetch the instance type of the receiver into result register. | |
3269 __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); | |
3270 __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); | |
3271 // If the receiver is not a string trigger the non-string case. | |
3272 __ And(t0, result_, Operand(kIsNotStringMask)); | |
3273 __ Branch(receiver_not_string_, ne, t0, Operand(zero_reg)); | |
3274 | |
3275 // If the index is non-smi trigger the non-smi case. | |
3276 __ JumpIfNotSmi(index_, &index_not_smi_); | |
3277 | |
3278 __ bind(&got_smi_index_); | |
3279 | |
3280 // Check for index out of range. | |
3281 __ lw(t0, FieldMemOperand(object_, String::kLengthOffset)); | |
3282 __ Branch(index_out_of_range_, ls, t0, Operand(index_)); | |
3283 | |
3284 __ sra(index_, index_, kSmiTagSize); | |
3285 | |
3286 StringCharLoadGenerator::Generate(masm, | |
3287 object_, | |
3288 index_, | |
3289 result_, | |
3290 &call_runtime_); | |
3291 | |
3292 __ sll(result_, result_, kSmiTagSize); | |
3293 __ bind(&exit_); | |
3294 } | |
3295 | |
3296 | |
3297 void StringCharCodeAtGenerator::GenerateSlow( | 3331 void StringCharCodeAtGenerator::GenerateSlow( |
3298 MacroAssembler* masm, | 3332 MacroAssembler* masm, |
3299 const RuntimeCallHelper& call_helper) { | 3333 const RuntimeCallHelper& call_helper) { |
3300 __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase); | 3334 __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase); |
3301 | 3335 |
3302 // Index is not a smi. | 3336 // Index is not a smi. |
3303 __ bind(&index_not_smi_); | 3337 __ bind(&index_not_smi_); |
3304 // If index is a heap number, try converting it to an integer. | 3338 // If index is a heap number, try converting it to an integer. |
3305 __ CheckMap(index_, | 3339 __ CheckMap(index_, |
3306 result_, | 3340 result_, |
(...skipping 10 matching lines...) |
3317 // NumberToSmi discards numbers that are not exact integers. | 3351 // NumberToSmi discards numbers that are not exact integers. |
3318 __ CallRuntime(Runtime::kNumberToSmi, 1); | 3352 __ CallRuntime(Runtime::kNumberToSmi, 1); |
3319 } | 3353 } |
3320 | 3354 |
3321 // Save the conversion result before the pop instructions below | 3355 // Save the conversion result before the pop instructions below |
3322 // have a chance to overwrite it. | 3356 // have a chance to overwrite it. |
3323 | 3357 |
3324 __ Move(index_, v0); | 3358 __ Move(index_, v0); |
3325 __ pop(object_); | 3359 __ pop(object_); |
3326 // Reload the instance type. | 3360 // Reload the instance type. |
3327 __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); | 3361 __ ld(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); |
3328 __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); | 3362 __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); |
3329 call_helper.AfterCall(masm); | 3363 call_helper.AfterCall(masm); |
3330 // If index is still not a smi, it must be out of range. | 3364 // If index is still not a smi, it must be out of range. |
3331 __ JumpIfNotSmi(index_, index_out_of_range_); | 3365 __ JumpIfNotSmi(index_, index_out_of_range_); |
3332 // Otherwise, return to the fast path. | 3366 // Otherwise, return to the fast path. |
3333 __ Branch(&got_smi_index_); | 3367 __ Branch(&got_smi_index_); |
3334 | 3368 |
3335 // Call runtime. We get here when the receiver is a string and the | 3369 // Call runtime. We get here when the receiver is a string and the |
3336 // index is a number, but the code of getting the actual character | 3370 // index is a number, but the code of getting the actual character |
3337 // is too complex (e.g., when the string needs to be flattened). | 3371 // is too complex (e.g., when the string needs to be flattened). |
3338 __ bind(&call_runtime_); | 3372 __ bind(&call_runtime_); |
3339 call_helper.BeforeCall(masm); | 3373 call_helper.BeforeCall(masm); |
3340 __ sll(index_, index_, kSmiTagSize); | 3374 __ SmiTag(index_); |
3341 __ Push(object_, index_); | 3375 __ Push(object_, index_); |
3342 __ CallRuntime(Runtime::kStringCharCodeAtRT, 2); | 3376 __ CallRuntime(Runtime::kStringCharCodeAtRT, 2); |
3343 | 3377 |
3344 __ Move(result_, v0); | 3378 __ Move(result_, v0); |
3345 | 3379 |
3346 call_helper.AfterCall(masm); | 3380 call_helper.AfterCall(masm); |
3347 __ jmp(&exit_); | 3381 __ jmp(&exit_); |
3348 | 3382 |
3349 __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase); | 3383 __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase); |
3350 } | 3384 } |
3351 | 3385 |
3352 | 3386 |
3353 // ------------------------------------------------------------------------- | 3387 // ------------------------------------------------------------------------- |
3354 // StringCharFromCodeGenerator | 3388 // StringCharFromCodeGenerator |
3355 | 3389 |
3356 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) { | 3390 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) { |
3357 // Fast case of Heap::LookupSingleCharacterStringFromCode. | 3391 // Fast case of Heap::LookupSingleCharacterStringFromCode. |
3358 | 3392 |
3359 ASSERT(!t0.is(result_)); | 3393 ASSERT(!a4.is(result_)); |
3360 ASSERT(!t0.is(code_)); | 3394 ASSERT(!a4.is(code_)); |
3361 | 3395 |
3362 STATIC_ASSERT(kSmiTag == 0); | 3396 STATIC_ASSERT(kSmiTag == 0); |
3363 STATIC_ASSERT(kSmiShiftSize == 0); | |
3364 ASSERT(IsPowerOf2(String::kMaxOneByteCharCode + 1)); | 3397 ASSERT(IsPowerOf2(String::kMaxOneByteCharCode + 1)); |
3365 __ And(t0, | 3398 __ And(a4, |
3366 code_, | 3399 code_, |
3367 Operand(kSmiTagMask | | 3400 Operand(kSmiTagMask | |
3368 ((~String::kMaxOneByteCharCode) << kSmiTagSize))); | 3401 ((~String::kMaxOneByteCharCode) << kSmiTagSize))); |
3369 __ Branch(&slow_case_, ne, t0, Operand(zero_reg)); | 3402 __ Branch(&slow_case_, ne, a4, Operand(zero_reg)); |
3370 | 3404 |
3371 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex); | 3405 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex); |
3372 // At this point code register contains smi tagged ASCII char code. | 3406 // At this point code register contains smi tagged ASCII char code. |
3373 STATIC_ASSERT(kSmiTag == 0); | 3407 STATIC_ASSERT(kSmiTag == 0); |
3374 __ sll(t0, code_, kPointerSizeLog2 - kSmiTagSize); | 3408 __ SmiScale(a4, code_, kPointerSizeLog2); |
3375 __ Addu(result_, result_, t0); | 3409 __ Daddu(result_, result_, a4); |
3376 __ lw(result_, FieldMemOperand(result_, FixedArray::kHeaderSize)); | 3410 __ ld(result_, FieldMemOperand(result_, FixedArray::kHeaderSize)); |
3377 __ LoadRoot(t0, Heap::kUndefinedValueRootIndex); | 3411 __ LoadRoot(a4, Heap::kUndefinedValueRootIndex); |
3378 __ Branch(&slow_case_, eq, result_, Operand(t0)); | 3412 __ Branch(&slow_case_, eq, result_, Operand(a4)); |
3379 __ bind(&exit_); | 3413 __ bind(&exit_); |
3380 } | 3414 } |
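The slow-case filter above folds two tests into one And: the value must be smi-tagged and its payload must not exceed String::kMaxOneByteCharCode. The sketch below illustrates the mask with the 32-bit smi encoding of the left-hand column (tag in bit 0), which keeps the bit positions easy to read:

    #include <cassert>
    #include <cstdint>

    // Accept only smi-tagged values whose payload is a one-byte char code.
    bool IsOneByteCharSmi(uint32_t value) {
      const uint32_t kSmiTagMask = 1;
      const uint32_t kSmiTagSize = 1;
      const uint32_t kMaxOneByteCharCode = 0xFF;
      uint32_t mask = kSmiTagMask | (~kMaxOneByteCharCode << kSmiTagSize);
      return (value & mask) == 0;  // one And, one branch in the stub
    }

    int main() {
      assert(IsOneByteCharSmi(0x41u << 1));         // smi('A') passes
      assert(!IsOneByteCharSmi(0x100u << 1));       // char code 256: too big
      assert(!IsOneByteCharSmi((0x41u << 1) | 1));  // tag bit set: not a smi
      return 0;
    }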
3381 | 3415 |
3382 | 3416 |
3383 void StringCharFromCodeGenerator::GenerateSlow( | 3417 void StringCharFromCodeGenerator::GenerateSlow( |
3384 MacroAssembler* masm, | 3418 MacroAssembler* masm, |
3385 const RuntimeCallHelper& call_helper) { | 3419 const RuntimeCallHelper& call_helper) { |
3386 __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase); | 3420 __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase); |
3387 | 3421 |
3388 __ bind(&slow_case_); | 3422 __ bind(&slow_case_); |
(...skipping 28 matching lines...) |
3417 kDestinationOfCopyNotAligned, | 3451 kDestinationOfCopyNotAligned, |
3418 scratch, | 3452 scratch, |
3419 Operand(zero_reg)); | 3453 Operand(zero_reg)); |
3420 } | 3454 } |
3421 | 3455 |
3422 // Assumes word reads and writes are little endian. | 3456 // Assumes word reads and writes are little endian. |
3423 // Nothing to do for zero characters. | 3457 // Nothing to do for zero characters. |
3424 Label done; | 3458 Label done; |
3425 | 3459 |
3426 if (encoding == String::TWO_BYTE_ENCODING) { | 3460 if (encoding == String::TWO_BYTE_ENCODING) { |
3427 __ Addu(count, count, count); | 3461 __ Daddu(count, count, count); |
3428 } | 3462 } |
3429 | 3463 |
3430 Register limit = count; // Read until dest equals this. | 3464 Register limit = count; // Read until dest equals this. |
3431 __ Addu(limit, dest, Operand(count)); | 3465 __ Daddu(limit, dest, Operand(count)); |
3432 | 3466 |
3433 Label loop_entry, loop; | 3467 Label loop_entry, loop; |
3434 // Copy bytes from src to dest until dest hits limit. | 3468 // Copy bytes from src to dest until dest hits limit. |
3435 __ Branch(&loop_entry); | 3469 __ Branch(&loop_entry); |
3436 __ bind(&loop); | 3470 __ bind(&loop); |
3437 __ lbu(scratch, MemOperand(src)); | 3471 __ lbu(scratch, MemOperand(src)); |
3438 __ Addu(src, src, Operand(1)); | 3472 __ daddiu(src, src, 1); |
3439 __ sb(scratch, MemOperand(dest)); | 3473 __ sb(scratch, MemOperand(dest)); |
3440 __ Addu(dest, dest, Operand(1)); | 3474 __ daddiu(dest, dest, 1); |
3441 __ bind(&loop_entry); | 3475 __ bind(&loop_entry); |
3442 __ Branch(&loop, lt, dest, Operand(limit)); | 3476 __ Branch(&loop, lt, dest, Operand(limit)); |
3443 | 3477 |
3444 __ bind(&done); | 3478 __ bind(&done); |
3445 } | 3479 } |
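GenerateCopyCharacters above works in bytes: a two-byte string first doubles the character count (Daddu(count, count, count)), then a limit pointer is formed and single bytes are copied until dest reaches it. A byte-for-byte C++ sketch of the same control flow:

    #include <cassert>
    #include <cstring>

    enum Encoding { ONE_BYTE_ENCODING, TWO_BYTE_ENCODING };

    void CopyCharacters(char* dest, const char* src, long count,
                        Encoding encoding) {
      if (encoding == TWO_BYTE_ENCODING) count += count;  // chars -> bytes
      const char* limit = dest + count;  // copy until dest equals this
      while (dest < limit) *dest++ = *src++;
    }

    int main() {
      char dst[8] = {0};
      CopyCharacters(dst, "hello", 5, ONE_BYTE_ENCODING);
      assert(std::memcmp(dst, "hello", 5) == 0);
      return 0;
    }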
3446 | 3480 |
3447 | 3481 |
3448 void StringHelper::GenerateHashInit(MacroAssembler* masm, | 3482 void StringHelper::GenerateHashInit(MacroAssembler* masm, |
3449 Register hash, | 3483 Register hash, |
3450 Register character) { | 3484 Register character) { |
(...skipping 57 matching lines...) |
3508 // nothing can be assumed about the arguments. It is tested that: | 3542 // nothing can be assumed about the arguments. It is tested that: |
3509 // "string" is a sequential string, | 3543 // "string" is a sequential string, |
3510 // both "from" and "to" are smis, and | 3544 // both "from" and "to" are smis, and |
3511 // 0 <= from <= to <= string.length. | 3545 // 0 <= from <= to <= string.length. |
3512 // If any of these assumptions fail, we call the runtime system. | 3546 // If any of these assumptions fail, we call the runtime system. |
3513 | 3547 |
3514 const int kToOffset = 0 * kPointerSize; | 3548 const int kToOffset = 0 * kPointerSize; |
3515 const int kFromOffset = 1 * kPointerSize; | 3549 const int kFromOffset = 1 * kPointerSize; |
3516 const int kStringOffset = 2 * kPointerSize; | 3550 const int kStringOffset = 2 * kPointerSize; |
3517 | 3551 |
3518 __ lw(a2, MemOperand(sp, kToOffset)); | 3552 __ ld(a2, MemOperand(sp, kToOffset)); |
3519 __ lw(a3, MemOperand(sp, kFromOffset)); | 3553 __ ld(a3, MemOperand(sp, kFromOffset)); |
3520 STATIC_ASSERT(kFromOffset == kToOffset + 4); | 3554 // Does not hold on MIPS64: kFromOffset == kToOffset + 8. |
 | 3555 // STATIC_ASSERT(kFromOffset == kToOffset + 4); |
3521 STATIC_ASSERT(kSmiTag == 0); | 3556 STATIC_ASSERT(kSmiTag == 0); |
3522 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); | 3557 // Does not hold on MIPS64: kSmiTagSize + kSmiShiftSize == 32. |
 | 3558 // STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); |
3523 | 3559 |
3524 // Utilize delay slots. SmiUntag doesn't emit a jump, everything else is | 3560 // Check that both indices are smis; unlike the 32-bit version, they are |
3525 // safe in this case. | 3561 // untagged separately below, so the delay-slot trick does not apply. |
3526 __ UntagAndJumpIfNotSmi(a2, a2, &runtime); | 3562 __ JumpIfNotSmi(a2, &runtime); |
3527 __ UntagAndJumpIfNotSmi(a3, a3, &runtime); | 3563 __ JumpIfNotSmi(a3, &runtime); |
3528 // Both a2 and a3 are untagged integers. | 3564 |
3529 | 3565 __ SmiUntag(a2, a2); |
 | 3566 __ SmiUntag(a3, a3); |
 | 3567 // Both a2 and a3 are untagged integers. |
3530 __ Branch(&runtime, lt, a3, Operand(zero_reg)); // From < 0. | 3568 __ Branch(&runtime, lt, a3, Operand(zero_reg)); // From < 0. |
3531 | 3569 |
3532 __ Branch(&runtime, gt, a3, Operand(a2)); // Fail if from > to. | 3570 __ Branch(&runtime, gt, a3, Operand(a2)); // Fail if from > to. |
3533 __ Subu(a2, a2, a3); | 3571 __ Dsubu(a2, a2, a3); |
3534 | 3572 |
3535 // Make sure first argument is a string. | 3573 // Make sure first argument is a string. |
3536 __ lw(v0, MemOperand(sp, kStringOffset)); | 3574 __ ld(v0, MemOperand(sp, kStringOffset)); |
3537 __ JumpIfSmi(v0, &runtime); | 3575 __ JumpIfSmi(v0, &runtime); |
3538 __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset)); | 3576 __ ld(a1, FieldMemOperand(v0, HeapObject::kMapOffset)); |
3539 __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset)); | 3577 __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset)); |
3540 __ And(t0, a1, Operand(kIsNotStringMask)); | 3578 __ And(a4, a1, Operand(kIsNotStringMask)); |
3541 | 3579 |
3542 __ Branch(&runtime, ne, t0, Operand(zero_reg)); | 3580 __ Branch(&runtime, ne, a4, Operand(zero_reg)); |
3543 | 3581 |
3544 Label single_char; | 3582 Label single_char; |
3545 __ Branch(&single_char, eq, a2, Operand(1)); | 3583 __ Branch(&single_char, eq, a2, Operand(1)); |
3546 | 3584 |
3547 // Short-cut for the case of trivial substring. | 3585 // Short-cut for the case of trivial substring. |
3548 Label return_v0; | 3586 Label return_v0; |
3549 // v0: original string | 3587 // v0: original string |
3550 // a2: result string length | 3588 // a2: result string length |
3551 __ lw(t0, FieldMemOperand(v0, String::kLengthOffset)); | 3589 __ ld(a4, FieldMemOperand(v0, String::kLengthOffset)); |
3552 __ sra(t0, t0, 1); | 3590 __ SmiUntag(a4); |
3553 // Return original string. | 3591 // Return original string. |
3554 __ Branch(&return_v0, eq, a2, Operand(t0)); | 3592 __ Branch(&return_v0, eq, a2, Operand(a4)); |
3555 // Longer than original string's length or negative: unsafe arguments. | 3593 // Longer than original string's length or negative: unsafe arguments. |
3556 __ Branch(&runtime, hi, a2, Operand(t0)); | 3594 __ Branch(&runtime, hi, a2, Operand(a4)); |
3557 // Shorter than original string's length: an actual substring. | 3595 // Shorter than original string's length: an actual substring. |
3558 | 3596 |
3559 // Deal with different string types: update the index if necessary | 3597 // Deal with different string types: update the index if necessary |
3560 // and put the underlying string into t1. | 3598 // and put the underlying string into a5. |
3561 // v0: original string | 3599 // v0: original string |
3562 // a1: instance type | 3600 // a1: instance type |
3563 // a2: length | 3601 // a2: length |
3564 // a3: from index (untagged) | 3602 // a3: from index (untagged) |
3565 Label underlying_unpacked, sliced_string, seq_or_external_string; | 3603 Label underlying_unpacked, sliced_string, seq_or_external_string; |
3566 // If the string is not indirect, it can only be sequential or external. | 3604 // If the string is not indirect, it can only be sequential or external. |
3567 STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag)); | 3605 STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag)); |
3568 STATIC_ASSERT(kIsIndirectStringMask != 0); | 3606 STATIC_ASSERT(kIsIndirectStringMask != 0); |
3569 __ And(t0, a1, Operand(kIsIndirectStringMask)); | 3607 __ And(a4, a1, Operand(kIsIndirectStringMask)); |
3570 __ Branch(USE_DELAY_SLOT, &seq_or_external_string, eq, t0, Operand(zero_reg)); | 3608 __ Branch(USE_DELAY_SLOT, &seq_or_external_string, eq, a4, Operand(zero_reg)); |
3571 // t0 is used as a scratch register and can be overwritten in either case. | 3609 // a4 is used as a scratch register and can be overwritten in either case. |
3572 __ And(t0, a1, Operand(kSlicedNotConsMask)); | 3610 __ And(a4, a1, Operand(kSlicedNotConsMask)); |
3573 __ Branch(&sliced_string, ne, t0, Operand(zero_reg)); | 3611 __ Branch(&sliced_string, ne, a4, Operand(zero_reg)); |
3574 // Cons string. Check whether it is flat, then fetch first part. | 3612 // Cons string. Check whether it is flat, then fetch first part. |
3575 __ lw(t1, FieldMemOperand(v0, ConsString::kSecondOffset)); | 3613 __ ld(a5, FieldMemOperand(v0, ConsString::kSecondOffset)); |
3576 __ LoadRoot(t0, Heap::kempty_stringRootIndex); | 3614 __ LoadRoot(a4, Heap::kempty_stringRootIndex); |
3577 __ Branch(&runtime, ne, t1, Operand(t0)); | 3615 __ Branch(&runtime, ne, a5, Operand(a4)); |
3578 __ lw(t1, FieldMemOperand(v0, ConsString::kFirstOffset)); | 3616 __ ld(a5, FieldMemOperand(v0, ConsString::kFirstOffset)); |
3579 // Update instance type. | 3617 // Update instance type. |
3580 __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset)); | 3618 __ ld(a1, FieldMemOperand(a5, HeapObject::kMapOffset)); |
3581 __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset)); | 3619 __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset)); |
3582 __ jmp(&underlying_unpacked); | 3620 __ jmp(&underlying_unpacked); |
3583 | 3621 |
3584 __ bind(&sliced_string); | 3622 __ bind(&sliced_string); |
3585 // Sliced string. Fetch parent and correct start index by offset. | 3623 // Sliced string. Fetch parent and correct start index by offset. |
3586 __ lw(t1, FieldMemOperand(v0, SlicedString::kParentOffset)); | 3624 __ ld(a5, FieldMemOperand(v0, SlicedString::kParentOffset)); |
3587 __ lw(t0, FieldMemOperand(v0, SlicedString::kOffsetOffset)); | 3625 __ ld(a4, FieldMemOperand(v0, SlicedString::kOffsetOffset)); |
3588 __ sra(t0, t0, 1); // Add offset to index. | 3626 __ SmiUntag(a4); // Add offset to index. |
3589 __ Addu(a3, a3, t0); | 3627 __ Daddu(a3, a3, a4); |
3590 // Update instance type. | 3628 // Update instance type. |
3591 __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset)); | 3629 __ ld(a1, FieldMemOperand(a5, HeapObject::kMapOffset)); |
3592 __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset)); | 3630 __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset)); |
3593 __ jmp(&underlying_unpacked); | 3631 __ jmp(&underlying_unpacked); |
3594 | 3632 |
3595 __ bind(&seq_or_external_string); | 3633 __ bind(&seq_or_external_string); |
3596 // Sequential or external string. Just move string to the expected register. | 3634 // Sequential or external string. Just move string to the expected register. |
3597 __ mov(t1, v0); | 3635 __ mov(a5, v0); |
3598 | 3636 |
3599 __ bind(&underlying_unpacked); | 3637 __ bind(&underlying_unpacked); |
3600 | 3638 |
3601 if (FLAG_string_slices) { | 3639 if (FLAG_string_slices) { |
3602 Label copy_routine; | 3640 Label copy_routine; |
3603 // t1: underlying subject string | 3641 // a5: underlying subject string |
3604 // a1: instance type of underlying subject string | 3642 // a1: instance type of underlying subject string |
3605 // a2: length | 3643 // a2: length |
3606 // a3: adjusted start index (untagged) | 3644 // a3: adjusted start index (untagged) |
3607 // Short slice. Copy instead of slicing. | 3645 // Short slice. Copy instead of slicing. |
3608 __ Branch(©_routine, lt, a2, Operand(SlicedString::kMinLength)); | 3646 __ Branch(©_routine, lt, a2, Operand(SlicedString::kMinLength)); |
3609 // Allocate new sliced string. At this point we do not reload the instance | 3647 // Allocate new sliced string. At this point we do not reload the instance |
3610 // type including the string encoding because we simply rely on the info | 3648 // type including the string encoding because we simply rely on the info |
3611 // provided by the original string. It does not matter if the original | 3649 // provided by the original string. It does not matter if the original |
3612 // string's encoding is wrong because we always have to recheck encoding of | 3650 // string's encoding is wrong because we always have to recheck encoding of |
3613 // the newly created string's parent anyways due to externalized strings. | 3651 // the newly created string's parent anyways due to externalized strings. |
3614 Label two_byte_slice, set_slice_header; | 3652 Label two_byte_slice, set_slice_header; |
3615 STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0); | 3653 STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0); |
3616 STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0); | 3654 STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0); |
3617 __ And(t0, a1, Operand(kStringEncodingMask)); | 3655 __ And(a4, a1, Operand(kStringEncodingMask)); |
3618 __ Branch(&two_byte_slice, eq, t0, Operand(zero_reg)); | 3656 __ Branch(&two_byte_slice, eq, a4, Operand(zero_reg)); |
3619 __ AllocateAsciiSlicedString(v0, a2, t2, t3, &runtime); | 3657 __ AllocateAsciiSlicedString(v0, a2, a6, a7, &runtime); |
3620 __ jmp(&set_slice_header); | 3658 __ jmp(&set_slice_header); |
3621 __ bind(&two_byte_slice); | 3659 __ bind(&two_byte_slice); |
3622 __ AllocateTwoByteSlicedString(v0, a2, t2, t3, &runtime); | 3660 __ AllocateTwoByteSlicedString(v0, a2, a6, a7, &runtime); |
3623 __ bind(&set_slice_header); | 3661 __ bind(&set_slice_header); |
3624 __ sll(a3, a3, 1); | 3662 __ SmiTag(a3); |
3625 __ sw(t1, FieldMemOperand(v0, SlicedString::kParentOffset)); | 3663 __ sd(a5, FieldMemOperand(v0, SlicedString::kParentOffset)); |
3626 __ sw(a3, FieldMemOperand(v0, SlicedString::kOffsetOffset)); | 3664 __ sd(a3, FieldMemOperand(v0, SlicedString::kOffsetOffset)); |
3627 __ jmp(&return_v0); | 3665 __ jmp(&return_v0); |
3628 | 3666 |
3629 __ bind(©_routine); | 3667 __ bind(©_routine); |
3630 } | 3668 } |
3631 | 3669 |
3632 // t1: underlying subject string | 3670 // a5: underlying subject string |
3633 // a1: instance type of underlying subject string | 3671 // a1: instance type of underlying subject string |
3634 // a2: length | 3672 // a2: length |
3635 // a3: adjusted start index (untagged) | 3673 // a3: adjusted start index (untagged) |
3636 Label two_byte_sequential, sequential_string, allocate_result; | 3674 Label two_byte_sequential, sequential_string, allocate_result; |
3637 STATIC_ASSERT(kExternalStringTag != 0); | 3675 STATIC_ASSERT(kExternalStringTag != 0); |
3638 STATIC_ASSERT(kSeqStringTag == 0); | 3676 STATIC_ASSERT(kSeqStringTag == 0); |
3639 __ And(t0, a1, Operand(kExternalStringTag)); | 3677 __ And(a4, a1, Operand(kExternalStringTag)); |
3640 __ Branch(&sequential_string, eq, t0, Operand(zero_reg)); | 3678 __ Branch(&sequential_string, eq, a4, Operand(zero_reg)); |
3641 | 3679 |
3642 // Handle external string. | 3680 // Handle external string. |
3643 // Rule out short external strings. | 3681 // Rule out short external strings. |
3644 STATIC_ASSERT(kShortExternalStringTag != 0); | 3682 STATIC_ASSERT(kShortExternalStringTag != 0); |
3645 __ And(t0, a1, Operand(kShortExternalStringTag)); | 3683 __ And(a4, a1, Operand(kShortExternalStringTag)); |
3646 __ Branch(&runtime, ne, t0, Operand(zero_reg)); | 3684 __ Branch(&runtime, ne, a4, Operand(zero_reg)); |
3647 __ lw(t1, FieldMemOperand(t1, ExternalString::kResourceDataOffset)); | 3685 __ ld(a5, FieldMemOperand(a5, ExternalString::kResourceDataOffset)); |
3648 // t1 already points to the first character of underlying string. | 3686 // a5 already points to the first character of underlying string. |
3649 __ jmp(&allocate_result); | 3687 __ jmp(&allocate_result); |
3650 | 3688 |
3651 __ bind(&sequential_string); | 3689 __ bind(&sequential_string); |
3652 // Locate first character of underlying subject string. | 3690 // Locate first character of underlying subject string. |
3653 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize); | 3691 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize); |
3654 __ Addu(t1, t1, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); | 3692 __ Daddu(a5, a5, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); |
3655 | 3693 |
3656 __ bind(&allocate_result); | 3694 __ bind(&allocate_result); |
3657 // Sequential acii string. Allocate the result. | 3695 // Sequential ASCII string. Allocate the result. |
3658 STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0); | 3696 STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0); |
3659 __ And(t0, a1, Operand(kStringEncodingMask)); | 3697 __ And(a4, a1, Operand(kStringEncodingMask)); |
3660 __ Branch(&two_byte_sequential, eq, t0, Operand(zero_reg)); | 3698 __ Branch(&two_byte_sequential, eq, a4, Operand(zero_reg)); |
3661 | 3699 |
3662 // Allocate and copy the resulting ASCII string. | 3700 // Allocate and copy the resulting ASCII string. |
3663 __ AllocateAsciiString(v0, a2, t0, t2, t3, &runtime); | 3701 __ AllocateAsciiString(v0, a2, a4, a6, a7, &runtime); |
3664 | 3702 |
3665 // Locate first character of substring to copy. | 3703 // Locate first character of substring to copy. |
3666 __ Addu(t1, t1, a3); | 3704 __ Daddu(a5, a5, a3); |
3667 | 3705 |
3668 // Locate first character of result. | 3706 // Locate first character of result. |
3669 __ Addu(a1, v0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); | 3707 __ Daddu(a1, v0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); |
3670 | 3708 |
3671 // v0: result string | 3709 // v0: result string |
3672 // a1: first character of result string | 3710 // a1: first character of result string |
3673 // a2: result string length | 3711 // a2: result string length |
3674 // t1: first character of substring to copy | 3712 // a5: first character of substring to copy |
3675 STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0); | 3713 STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0); |
3676 StringHelper::GenerateCopyCharacters( | 3714 StringHelper::GenerateCopyCharacters( |
3677 masm, a1, t1, a2, a3, String::ONE_BYTE_ENCODING); | 3715 masm, a1, a5, a2, a3, String::ONE_BYTE_ENCODING); |
3678 __ jmp(&return_v0); | 3716 __ jmp(&return_v0); |
3679 | 3717 |
3680 // Allocate and copy the resulting two-byte string. | 3718 // Allocate and copy the resulting two-byte string. |
3681 __ bind(&two_byte_sequential); | 3719 __ bind(&two_byte_sequential); |
3682 __ AllocateTwoByteString(v0, a2, t0, t2, t3, &runtime); | 3720 __ AllocateTwoByteString(v0, a2, a4, a6, a7, &runtime); |
3683 | 3721 |
3684 // Locate first character of substring to copy. | 3722 // Locate first character of substring to copy. |
3685 STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0); | 3723 STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0); |
3686 __ sll(t0, a3, 1); | 3724 __ dsll(a4, a3, 1); |
3687 __ Addu(t1, t1, t0); | 3725 __ Daddu(a5, a5, a4); |
3688 // Locate first character of result. | 3726 // Locate first character of result. |
3689 __ Addu(a1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); | 3727 __ Daddu(a1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); |
3690 | 3728 |
3691 // v0: result string. | 3729 // v0: result string. |
3692 // a1: first character of result. | 3730 // a1: first character of result. |
3693 // a2: result length. | 3731 // a2: result length. |
3694 // t1: first character of substring to copy. | 3732 // a5: first character of substring to copy. |
3695 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); | 3733 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); |
3696 StringHelper::GenerateCopyCharacters( | 3734 StringHelper::GenerateCopyCharacters( |
3697 masm, a1, t1, a2, a3, String::TWO_BYTE_ENCODING); | 3735 masm, a1, a5, a2, a3, String::TWO_BYTE_ENCODING); |
3698 | 3736 |
3699 __ bind(&return_v0); | 3737 __ bind(&return_v0); |
3700 Counters* counters = isolate()->counters(); | 3738 Counters* counters = isolate()->counters(); |
3701 __ IncrementCounter(counters->sub_string_native(), 1, a3, t0); | 3739 __ IncrementCounter(counters->sub_string_native(), 1, a3, a4); |
3702 __ DropAndRet(3); | 3740 __ DropAndRet(3); |
3703 | 3741 |
3704 // Just jump to runtime to create the sub string. | 3742 // Just jump to runtime to create the sub string. |
3705 __ bind(&runtime); | 3743 __ bind(&runtime); |
3706 __ TailCallRuntime(Runtime::kSubString, 3, 1); | 3744 __ TailCallRuntime(Runtime::kSubString, 3, 1); |
3707 | 3745 |
3708 __ bind(&single_char); | 3746 __ bind(&single_char); |
3709 // v0: original string | 3747 // v0: original string |
3710 // a1: instance type | 3748 // a1: instance type |
3711 // a2: length | 3749 // a2: length |
3712 // a3: from index (untagged) | 3750 // a3: from index (untagged) |
3713 __ SmiTag(a3, a3); | |
3714 StringCharAtGenerator generator( | 3751 StringCharAtGenerator generator( |
3715 v0, a3, a2, v0, &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER); | 3752 v0, a3, a2, v0, &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER); |
3716 generator.GenerateFast(masm); | 3753 generator.GenerateFast(masm); |
3717 __ DropAndRet(3); | 3754 __ DropAndRet(3); |
3718 generator.SkipSlow(masm, &runtime); | 3755 generator.SkipSlow(masm, &runtime); |
3719 } | 3756 } |
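For orientation, the fast path above boils down to a short policy: use the single-character cache for length 1, return the receiver unchanged when the range covers the whole string, build a SlicedString over the unpacked parent for long enough substrings, and copy the characters otherwise. A hedged sketch of that decision order; kMinSliceLength stands in for SlicedString::kMinLength, and std::string copies where the stub would allocate a slice:

    #include <cassert>
    #include <string>

    const int kMinSliceLength = 13;  // assumed SlicedString::kMinLength

    std::string SubStringFastPath(const std::string& s, int from, int to) {
      int len = to - from;  // runtime fallback for bad ranges omitted
      if (len == 1) return std::string(1, s[from]);     // single-char cache
      if (len == static_cast<int>(s.size())) return s;  // trivial substring
      if (len >= kMinSliceLength) {
        // The stub allocates a SlicedString {parent, offset, length} here;
        // std::string has no slices, so this sketch copies instead.
        return s.substr(from, len);
      }
      return s.substr(from, len);  // short: copy the characters
    }

    int main() {
      std::string s = "hello, substring world";
      assert(SubStringFastPath(s, 0, static_cast<int>(s.size())) == s);
      assert(SubStringFastPath(s, 7, 8) == "s");
      assert(SubStringFastPath(s, 7, 16) == "substring");
      return 0;
    }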
3720 | 3757 |
3721 | 3758 |
3722 void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm, | 3759 void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm, |
3723 Register left, | 3760 Register left, |
3724 Register right, | 3761 Register right, |
3725 Register scratch1, | 3762 Register scratch1, |
3726 Register scratch2, | 3763 Register scratch2, |
3727 Register scratch3) { | 3764 Register scratch3) { |
3728 Register length = scratch1; | 3765 Register length = scratch1; |
3729 | 3766 |
3730 // Compare lengths. | 3767 // Compare lengths. |
3731 Label strings_not_equal, check_zero_length; | 3768 Label strings_not_equal, check_zero_length; |
3732 __ lw(length, FieldMemOperand(left, String::kLengthOffset)); | 3769 __ ld(length, FieldMemOperand(left, String::kLengthOffset)); |
3733 __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset)); | 3770 __ ld(scratch2, FieldMemOperand(right, String::kLengthOffset)); |
3734 __ Branch(&check_zero_length, eq, length, Operand(scratch2)); | 3771 __ Branch(&check_zero_length, eq, length, Operand(scratch2)); |
3735 __ bind(&strings_not_equal); | 3772 __ bind(&strings_not_equal); |
3736 ASSERT(is_int16(NOT_EQUAL)); | 3773 // Cannot use the delay slot here: li may expand to multiple instructions. |
3737 __ Ret(USE_DELAY_SLOT); | |
3738 __ li(v0, Operand(Smi::FromInt(NOT_EQUAL))); | 3774 __ li(v0, Operand(Smi::FromInt(NOT_EQUAL))); |
| 3775 __ Ret(); |
3739 | 3776 |
3740 // Check if the length is zero. | 3777 // Check if the length is zero. |
3741 Label compare_chars; | 3778 Label compare_chars; |
3742 __ bind(&check_zero_length); | 3779 __ bind(&check_zero_length); |
3743 STATIC_ASSERT(kSmiTag == 0); | 3780 STATIC_ASSERT(kSmiTag == 0); |
3744 __ Branch(&compare_chars, ne, length, Operand(zero_reg)); | 3781 __ Branch(&compare_chars, ne, length, Operand(zero_reg)); |
3745 ASSERT(is_int16(EQUAL)); | 3782 ASSERT(is_int16(reinterpret_cast<intptr_t>(Smi::FromInt(EQUAL)))); |
3746 __ Ret(USE_DELAY_SLOT); | 3783 __ Ret(USE_DELAY_SLOT); |
3747 __ li(v0, Operand(Smi::FromInt(EQUAL))); | 3784 __ li(v0, Operand(Smi::FromInt(EQUAL))); |
3748 | 3785 |
3749 // Compare characters. | 3786 // Compare characters. |
3750 __ bind(&compare_chars); | 3787 __ bind(&compare_chars); |
3751 | 3788 |
3752 GenerateAsciiCharsCompareLoop(masm, | 3789 GenerateAsciiCharsCompareLoop(masm, |
3753 left, right, length, scratch2, scratch3, v0, | 3790 left, right, length, scratch2, scratch3, v0, |
3754 &strings_not_equal); | 3791 &strings_not_equal); |
3755 | 3792 |
3756 // Characters are equal. | 3793 // Characters are equal. |
3757 __ Ret(USE_DELAY_SLOT); | 3794 __ Ret(USE_DELAY_SLOT); |
3758 __ li(v0, Operand(Smi::FromInt(EQUAL))); | 3795 __ li(v0, Operand(Smi::FromInt(EQUAL))); |
3759 } | 3796 } |
3760 | 3797 |
3761 | 3798 |
3762 void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm, | 3799 void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm, |
3763 Register left, | 3800 Register left, |
3764 Register right, | 3801 Register right, |
3765 Register scratch1, | 3802 Register scratch1, |
3766 Register scratch2, | 3803 Register scratch2, |
3767 Register scratch3, | 3804 Register scratch3, |
3768 Register scratch4) { | 3805 Register scratch4) { |
3769 Label result_not_equal, compare_lengths; | 3806 Label result_not_equal, compare_lengths; |
3770 // Find minimum length and length difference. | 3807 // Find minimum length and length difference. |
3771 __ lw(scratch1, FieldMemOperand(left, String::kLengthOffset)); | 3808 __ ld(scratch1, FieldMemOperand(left, String::kLengthOffset)); |
3772 __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset)); | 3809 __ ld(scratch2, FieldMemOperand(right, String::kLengthOffset)); |
3773 __ Subu(scratch3, scratch1, Operand(scratch2)); | 3810 __ Dsubu(scratch3, scratch1, Operand(scratch2)); |
3774 Register length_delta = scratch3; | 3811 Register length_delta = scratch3; |
3775 __ slt(scratch4, scratch2, scratch1); | 3812 __ slt(scratch4, scratch2, scratch1); |
3776 __ Movn(scratch1, scratch2, scratch4); | 3813 __ Movn(scratch1, scratch2, scratch4); |
3777 Register min_length = scratch1; | 3814 Register min_length = scratch1; |
3778 STATIC_ASSERT(kSmiTag == 0); | 3815 STATIC_ASSERT(kSmiTag == 0); |
3779 __ Branch(&compare_lengths, eq, min_length, Operand(zero_reg)); | 3816 __ Branch(&compare_lengths, eq, min_length, Operand(zero_reg)); |
3780 | 3817 |
3781 // Compare loop. | 3818 // Compare loop. |
3782 GenerateAsciiCharsCompareLoop(masm, | 3819 GenerateAsciiCharsCompareLoop(masm, |
3783 left, right, min_length, scratch2, scratch4, v0, | 3820 left, right, min_length, scratch2, scratch4, v0, |
(...skipping 26 matching lines...) |
3810 Register right, | 3847 Register right, |
3811 Register length, | 3848 Register length, |
3812 Register scratch1, | 3849 Register scratch1, |
3813 Register scratch2, | 3850 Register scratch2, |
3814 Register scratch3, | 3851 Register scratch3, |
3815 Label* chars_not_equal) { | 3852 Label* chars_not_equal) { |
3816 // Change index to run from -length to -1 by adding length to string | 3853 // Change index to run from -length to -1 by adding length to string |
3817 // start. This means that loop ends when index reaches zero, which | 3854 // start. This means that loop ends when index reaches zero, which |
3818 // doesn't need an additional compare. | 3855 // doesn't need an additional compare. |
3819 __ SmiUntag(length); | 3856 __ SmiUntag(length); |
3820 __ Addu(scratch1, length, | 3857 __ Daddu(scratch1, length, |
3821 Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); | 3858 Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); |
3822 __ Addu(left, left, Operand(scratch1)); | 3859 __ Daddu(left, left, Operand(scratch1)); |
3823 __ Addu(right, right, Operand(scratch1)); | 3860 __ Daddu(right, right, Operand(scratch1)); |
3824 __ Subu(length, zero_reg, length); | 3861 __ Dsubu(length, zero_reg, length); |
3825 Register index = length; // index = -length; | 3862 Register index = length; // index = -length; |
3826 | 3863 |
3827 | 3864 |
3828 // Compare loop. | 3865 // Compare loop. |
3829 Label loop; | 3866 Label loop; |
3830 __ bind(&loop); | 3867 __ bind(&loop); |
3831 __ Addu(scratch3, left, index); | 3868 __ Daddu(scratch3, left, index); |
3832 __ lbu(scratch1, MemOperand(scratch3)); | 3869 __ lbu(scratch1, MemOperand(scratch3)); |
3833 __ Addu(scratch3, right, index); | 3870 __ Daddu(scratch3, right, index); |
3834 __ lbu(scratch2, MemOperand(scratch3)); | 3871 __ lbu(scratch2, MemOperand(scratch3)); |
3835 __ Branch(chars_not_equal, ne, scratch1, Operand(scratch2)); | 3872 __ Branch(chars_not_equal, ne, scratch1, Operand(scratch2)); |
3836 __ Addu(index, index, 1); | 3873 __ Daddu(index, index, 1); |
3837 __ Branch(&loop, ne, index, Operand(zero_reg)); | 3874 __ Branch(&loop, ne, index, Operand(zero_reg)); |
3838 } | 3875 } |
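The compare loop above uses the negative-index trick: both string pointers are advanced one past their last character, and the index runs from -length up to zero, so reaching zero ends the loop and no separate end-of-string comparison is needed. The same shape in plain C++:

    #include <cassert>

    bool CharsEqual(const char* left, const char* right, long length) {
      left += length;   // point one past the last character
      right += length;
      for (long index = -length; index != 0; index++) {
        if (left[index] != right[index]) return false;
      }
      return true;
    }

    int main() {
      assert(CharsEqual("abc", "abc", 3));
      assert(!CharsEqual("abc", "abd", 3));
      return 0;
    }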
3839 | 3876 |
3840 | 3877 |
3841 void StringCompareStub::Generate(MacroAssembler* masm) { | 3878 void StringCompareStub::Generate(MacroAssembler* masm) { |
3842 Label runtime; | 3879 Label runtime; |
3843 | 3880 |
3844 Counters* counters = isolate()->counters(); | 3881 Counters* counters = isolate()->counters(); |
3845 | 3882 |
3846 // Stack frame on entry. | 3883 // Stack frame on entry. |
3847 // sp[0]: right string | 3884 // sp[0]: right string |
3848 // sp[4]: left string | 3885 // sp[8]: left string |
3849 __ lw(a1, MemOperand(sp, 1 * kPointerSize)); // Left. | 3886 __ ld(a1, MemOperand(sp, 1 * kPointerSize)); // Left. |
3850 __ lw(a0, MemOperand(sp, 0 * kPointerSize)); // Right. | 3887 __ ld(a0, MemOperand(sp, 0 * kPointerSize)); // Right. |
3851 | 3888 |
3852 Label not_same; | 3889 Label not_same; |
3853 __ Branch(¬_same, ne, a0, Operand(a1)); | 3890 __ Branch(¬_same, ne, a0, Operand(a1)); |
3854 STATIC_ASSERT(EQUAL == 0); | 3891 STATIC_ASSERT(EQUAL == 0); |
3855 STATIC_ASSERT(kSmiTag == 0); | 3892 STATIC_ASSERT(kSmiTag == 0); |
3856 __ li(v0, Operand(Smi::FromInt(EQUAL))); | 3893 __ li(v0, Operand(Smi::FromInt(EQUAL))); |
3857 __ IncrementCounter(counters->string_compare_native(), 1, a1, a2); | 3894 __ IncrementCounter(counters->string_compare_native(), 1, a1, a2); |
3858 __ DropAndRet(2); | 3895 __ DropAndRet(2); |
3859 | 3896 |
3860 __ bind(¬_same); | 3897 __ bind(¬_same); |
3861 | 3898 |
3862 // Check that both objects are sequential ASCII strings. | 3899 // Check that both objects are sequential ASCII strings. |
3863 __ JumpIfNotBothSequentialAsciiStrings(a1, a0, a2, a3, &runtime); | 3900 __ JumpIfNotBothSequentialAsciiStrings(a1, a0, a2, a3, &runtime); |
3864 | 3901 |
3865 // Compare flat ASCII strings natively. Remove arguments from stack first. | 3902 // Compare flat ASCII strings natively. Remove arguments from stack first. |
3866 __ IncrementCounter(counters->string_compare_native(), 1, a2, a3); | 3903 __ IncrementCounter(counters->string_compare_native(), 1, a2, a3); |
3867 __ Addu(sp, sp, Operand(2 * kPointerSize)); | 3904 __ Daddu(sp, sp, Operand(2 * kPointerSize)); |
3868 GenerateCompareFlatAsciiStrings(masm, a1, a0, a2, a3, t0, t1); | 3905 GenerateCompareFlatAsciiStrings(masm, a1, a0, a2, a3, a4, a5); |
3869 | 3906 |
3870 __ bind(&runtime); | 3907 __ bind(&runtime); |
3871 __ TailCallRuntime(Runtime::kStringCompare, 2, 1); | 3908 __ TailCallRuntime(Runtime::kStringCompare, 2, 1); |
3872 } | 3909 } |
3873 | 3910 |
3874 | 3911 |
3875 void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) { | 3912 void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) { |
3876 // ----------- S t a t e ------------- | 3913 // ----------- S t a t e ------------- |
3877 // -- a1 : left | 3914 // -- a1 : left |
3878 // -- a0 : right | 3915 // -- a0 : right |
3879 // -- ra : return address | 3916 // -- ra : return address |
3880 // ----------------------------------- | 3917 // ----------------------------------- |
3881 | 3918 |
3882 // Load a2 with the allocation site. We stick an undefined dummy value here | 3919 // Load a2 with the allocation site. We stick an undefined dummy value here |
3883 // and replace it with the real allocation site later when we instantiate this | 3920 // and replace it with the real allocation site later when we instantiate this |
3884 // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate(). | 3921 // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate(). |
3885 __ li(a2, handle(isolate()->heap()->undefined_value())); | 3922 __ li(a2, handle(isolate()->heap()->undefined_value())); |
3886 | 3923 |
3887 // Make sure that we actually patched the allocation site. | 3924 // Make sure that we actually patched the allocation site. |
3888 if (FLAG_debug_code) { | 3925 if (FLAG_debug_code) { |
3889 __ And(at, a2, Operand(kSmiTagMask)); | 3926 __ And(at, a2, Operand(kSmiTagMask)); |
3890 __ Assert(ne, kExpectedAllocationSite, at, Operand(zero_reg)); | 3927 __ Assert(ne, kExpectedAllocationSite, at, Operand(zero_reg)); |
3891 __ lw(t0, FieldMemOperand(a2, HeapObject::kMapOffset)); | 3928 __ ld(a4, FieldMemOperand(a2, HeapObject::kMapOffset)); |
3892 __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex); | 3929 __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex); |
3893 __ Assert(eq, kExpectedAllocationSite, t0, Operand(at)); | 3930 __ Assert(eq, kExpectedAllocationSite, a4, Operand(at)); |
3894 } | 3931 } |
3895 | 3932 |
3896 // Tail call into the stub that handles binary operations with allocation | 3933 // Tail call into the stub that handles binary operations with allocation |
3897 // sites. | 3934 // sites. |
3898 BinaryOpWithAllocationSiteStub stub(isolate(), state_); | 3935 BinaryOpWithAllocationSiteStub stub(isolate(), state_); |
3899 __ TailCallStub(&stub); | 3936 __ TailCallStub(&stub); |
3900 } | 3937 } |
3901 | 3938 |
3902 | 3939 |
3903 void ICCompareStub::GenerateSmis(MacroAssembler* masm) { | 3940 void ICCompareStub::GenerateSmis(MacroAssembler* masm) { |
3904 ASSERT(state_ == CompareIC::SMI); | 3941 ASSERT(state_ == CompareIC::SMI); |
3905 Label miss; | 3942 Label miss; |
3906 __ Or(a2, a1, a0); | 3943 __ Or(a2, a1, a0); |
3907 __ JumpIfNotSmi(a2, &miss); | 3944 __ JumpIfNotSmi(a2, &miss); |
3908 | 3945 |
3909 if (GetCondition() == eq) { | 3946 if (GetCondition() == eq) { |
3910 // For equality we do not care about the sign of the result. | 3947 // For equality we do not care about the sign of the result. |
3911 __ Ret(USE_DELAY_SLOT); | 3948 __ Ret(USE_DELAY_SLOT); |
3912 __ Subu(v0, a0, a1); | 3949 __ Dsubu(v0, a0, a1); |
3913 } else { | 3950 } else { |
3914 // Untag before subtracting to avoid handling overflow. | 3951 // Untag before subtracting to avoid handling overflow. |
3915 __ SmiUntag(a1); | 3952 __ SmiUntag(a1); |
3916 __ SmiUntag(a0); | 3953 __ SmiUntag(a0); |
3917 __ Ret(USE_DELAY_SLOT); | 3954 __ Ret(USE_DELAY_SLOT); |
3918 __ Subu(v0, a1, a0); | 3955 __ Dsubu(v0, a1, a0); |
3919 } | 3956 } |
3920 | 3957 |
3921 __ bind(&miss); | 3958 __ bind(&miss); |
3922 GenerateMiss(masm); | 3959 GenerateMiss(masm); |
3923 } | 3960 } |
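GenerateSmis above picks between two subtractions: for pure equality the tagged values are subtracted directly, since equal smis give zero and the sign is irrelevant, while for ordering both operands are untagged first so the subtraction cannot overflow. A minimal sketch, assuming the 64-bit smi layout used elsewhere in this file (payload in the upper 32 bits):

    #include <cassert>
    #include <cstdint>

    int64_t SmiUntag(int64_t smi) { return smi >> 32; }  // assumed layout

    // Equality: subtract the tagged values; only zero vs. non-zero matters.
    bool SmisEqual(int64_t a, int64_t b) {
      return (static_cast<uint64_t>(a) - static_cast<uint64_t>(b)) == 0;
    }

    // Ordering: untag first so the subtraction cannot overflow.
    int64_t SmisCompare(int64_t a, int64_t b) {
      return SmiUntag(a) - SmiUntag(b);
    }

    int main() {
      int64_t one = int64_t{1} << 32, two = int64_t{2} << 32;
      assert(SmisEqual(one, one));
      assert(!SmisEqual(one, two));
      assert(SmisCompare(one, two) < 0);
      return 0;
    }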
3924 | 3961 |
3925 | 3962 |
3926 void ICCompareStub::GenerateNumbers(MacroAssembler* masm) { | 3963 void ICCompareStub::GenerateNumbers(MacroAssembler* masm) { |
3927 ASSERT(state_ == CompareIC::NUMBER); | 3964 ASSERT(state_ == CompareIC::NUMBER); |
3928 | 3965 |
3929 Label generic_stub; | 3966 Label generic_stub; |
3930 Label unordered, maybe_undefined1, maybe_undefined2; | 3967 Label unordered, maybe_undefined1, maybe_undefined2; |
3931 Label miss; | 3968 Label miss; |
3932 | 3969 |
3933 if (left_ == CompareIC::SMI) { | 3970 if (left_ == CompareIC::SMI) { |
3934 __ JumpIfNotSmi(a1, &miss); | 3971 __ JumpIfNotSmi(a1, &miss); |
3935 } | 3972 } |
3936 if (right_ == CompareIC::SMI) { | 3973 if (right_ == CompareIC::SMI) { |
3937 __ JumpIfNotSmi(a0, &miss); | 3974 __ JumpIfNotSmi(a0, &miss); |
3938 } | 3975 } |
3939 | 3976 |
3940 // Inlining the double comparison and falling back to the general compare | 3977 // Inlining the double comparison and falling back to the general compare |
3941 // stub if NaN is involved. | 3978 // stub if NaN is involved. |
3942 // Load left and right operand. | 3979 // Load left and right operand. |
3943 Label done, left, left_smi, right_smi; | 3980 Label done, left, left_smi, right_smi; |
3944 __ JumpIfSmi(a0, &right_smi); | 3981 __ JumpIfSmi(a0, &right_smi); |
3945 __ CheckMap(a0, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1, | 3982 __ CheckMap(a0, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1, |
3946 DONT_DO_SMI_CHECK); | 3983 DONT_DO_SMI_CHECK); |
3947 __ Subu(a2, a0, Operand(kHeapObjectTag)); | 3984 __ Dsubu(a2, a0, Operand(kHeapObjectTag)); |
3948 __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset)); | 3985 __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset)); |
3949 __ Branch(&left); | 3986 __ Branch(&left); |
3950 __ bind(&right_smi); | 3987 __ bind(&right_smi); |
3951 __ SmiUntag(a2, a0); // Can't clobber a0 yet. | 3988 __ SmiUntag(a2, a0); // Can't clobber a0 yet. |
3952 FPURegister single_scratch = f6; | 3989 FPURegister single_scratch = f6; |
3953 __ mtc1(a2, single_scratch); | 3990 __ mtc1(a2, single_scratch); |
3954 __ cvt_d_w(f2, single_scratch); | 3991 __ cvt_d_w(f2, single_scratch); |
3955 | 3992 |
3956 __ bind(&left); | 3993 __ bind(&left); |
3957 __ JumpIfSmi(a1, &left_smi); | 3994 __ JumpIfSmi(a1, &left_smi); |
3958 __ CheckMap(a1, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2, | 3995 __ CheckMap(a1, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2, |
3959 DONT_DO_SMI_CHECK); | 3996 DONT_DO_SMI_CHECK); |
3960 __ Subu(a2, a1, Operand(kHeapObjectTag)); | 3997 __ Dsubu(a2, a1, Operand(kHeapObjectTag)); |
3961 __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset)); | 3998 __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset)); |
3962 __ Branch(&done); | 3999 __ Branch(&done); |
3963 __ bind(&left_smi); | 4000 __ bind(&left_smi); |
3964 __ SmiUntag(a2, a1); // Can't clobber a1 yet. | 4001 __ SmiUntag(a2, a1); // Can't clobber a1 yet. |
3965 single_scratch = f8; | 4002 single_scratch = f8; |
3966 __ mtc1(a2, single_scratch); | 4003 __ mtc1(a2, single_scratch); |
3967 __ cvt_d_w(f0, single_scratch); | 4004 __ cvt_d_w(f0, single_scratch); |
3968 | 4005 |
3969 __ bind(&done); | 4006 __ bind(&done); |
3970 | 4007 |
(...skipping 52 matching lines...) |
4023 // Registers containing left and right operands respectively. | 4060 // Registers containing left and right operands respectively. |
4024 Register left = a1; | 4061 Register left = a1; |
4025 Register right = a0; | 4062 Register right = a0; |
4026 Register tmp1 = a2; | 4063 Register tmp1 = a2; |
4027 Register tmp2 = a3; | 4064 Register tmp2 = a3; |
4028 | 4065 |
4029 // Check that both operands are heap objects. | 4066 // Check that both operands are heap objects. |
4030 __ JumpIfEitherSmi(left, right, &miss); | 4067 __ JumpIfEitherSmi(left, right, &miss); |
4031 | 4068 |
4032 // Check that both operands are internalized strings. | 4069 // Check that both operands are internalized strings. |
4033 __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset)); | 4070 __ ld(tmp1, FieldMemOperand(left, HeapObject::kMapOffset)); |
4034 __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset)); | 4071 __ ld(tmp2, FieldMemOperand(right, HeapObject::kMapOffset)); |
4035 __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset)); | 4072 __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset)); |
4036 __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset)); | 4073 __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset)); |
4037 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0); | 4074 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0); |
4038 __ Or(tmp1, tmp1, Operand(tmp2)); | 4075 __ Or(tmp1, tmp1, Operand(tmp2)); |
4039 __ And(at, tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask)); | 4076 __ And(at, tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask)); |
4040 __ Branch(&miss, ne, at, Operand(zero_reg)); | 4077 __ Branch(&miss, ne, at, Operand(zero_reg)); |
4041 | 4078 |
4042 // Make sure a0 is non-zero. At this point input operands are | 4079 // Make sure a0 is non-zero. At this point input operands are |
4043 // guaranteed to be non-zero. | 4080 // guaranteed to be non-zero. |
4044 ASSERT(right.is(a0)); | 4081 ASSERT(right.is(a0)); |
(...skipping 20 matching lines...)
4065 Register left = a1; | 4102 Register left = a1; |
4066 Register right = a0; | 4103 Register right = a0; |
4067 Register tmp1 = a2; | 4104 Register tmp1 = a2; |
4068 Register tmp2 = a3; | 4105 Register tmp2 = a3; |
4069 | 4106 |
4070 // Check that both operands are heap objects. | 4107 // Check that both operands are heap objects. |
4071 __ JumpIfEitherSmi(left, right, &miss); | 4108 __ JumpIfEitherSmi(left, right, &miss); |
4072 | 4109 |
4073 // Check that both operands are unique names. This leaves the instance | 4110 // Check that both operands are unique names. This leaves the instance |
4074 // types loaded in tmp1 and tmp2. | 4111 // types loaded in tmp1 and tmp2. |
4075 __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset)); | 4112 __ ld(tmp1, FieldMemOperand(left, HeapObject::kMapOffset)); |
4076 __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset)); | 4113 __ ld(tmp2, FieldMemOperand(right, HeapObject::kMapOffset)); |
4077 __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset)); | 4114 __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset)); |
4078 __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset)); | 4115 __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset)); |
4079 | 4116 |
4080 __ JumpIfNotUniqueName(tmp1, &miss); | 4117 __ JumpIfNotUniqueName(tmp1, &miss); |
4081 __ JumpIfNotUniqueName(tmp2, &miss); | 4118 __ JumpIfNotUniqueName(tmp2, &miss); |
4082 | 4119 |
4083 // Use a0 as result | 4120 // Use a0 as result |
4084 __ mov(v0, a0); | 4121 __ mov(v0, a0); |
4085 | 4122 |
4086 // Unique names are compared by identity. | 4123 // Unique names are compared by identity. |
(...skipping 17 matching lines...)
4104 ASSERT(state_ == CompareIC::STRING); | 4141 ASSERT(state_ == CompareIC::STRING); |
4105 Label miss; | 4142 Label miss; |
4106 | 4143 |
4107 bool equality = Token::IsEqualityOp(op_); | 4144 bool equality = Token::IsEqualityOp(op_); |
4108 | 4145 |
4109 // Registers containing left and right operands respectively. | 4146 // Registers containing left and right operands respectively. |
4110 Register left = a1; | 4147 Register left = a1; |
4111 Register right = a0; | 4148 Register right = a0; |
4112 Register tmp1 = a2; | 4149 Register tmp1 = a2; |
4113 Register tmp2 = a3; | 4150 Register tmp2 = a3; |
4114 Register tmp3 = t0; | 4151 Register tmp3 = a4; |
4115 Register tmp4 = t1; | 4152 Register tmp4 = a5; |
4116 Register tmp5 = t2; | 4153 Register tmp5 = a6; |
4117 | 4154 |
4118 // Check that both operands are heap objects. | 4155 // Check that both operands are heap objects. |
4119 __ JumpIfEitherSmi(left, right, &miss); | 4156 __ JumpIfEitherSmi(left, right, &miss); |
4120 | 4157 |
4121 // Check that both operands are strings. This leaves the instance | 4158 // Check that both operands are strings. This leaves the instance |
4122 // types loaded in tmp1 and tmp2. | 4159 // types loaded in tmp1 and tmp2. |
4123 __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset)); | 4160 __ ld(tmp1, FieldMemOperand(left, HeapObject::kMapOffset)); |
4124 __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset)); | 4161 __ ld(tmp2, FieldMemOperand(right, HeapObject::kMapOffset)); |
4125 __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset)); | 4162 __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset)); |
4126 __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset)); | 4163 __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset)); |
4127 STATIC_ASSERT(kNotStringTag != 0); | 4164 STATIC_ASSERT(kNotStringTag != 0); |
4128 __ Or(tmp3, tmp1, tmp2); | 4165 __ Or(tmp3, tmp1, tmp2); |
4129 __ And(tmp5, tmp3, Operand(kIsNotStringMask)); | 4166 __ And(tmp5, tmp3, Operand(kIsNotStringMask)); |
4130 __ Branch(&miss, ne, tmp5, Operand(zero_reg)); | 4167 __ Branch(&miss, ne, tmp5, Operand(zero_reg)); |
4131 | 4168 |
4132 // Fast check for identical strings. | 4169 // Fast check for identical strings. |
4133 Label left_ne_right; | 4170 Label left_ne_right; |
4134 STATIC_ASSERT(EQUAL == 0); | 4171 STATIC_ASSERT(EQUAL == 0); |
(...skipping 57 matching lines...)
4192 __ And(a2, a1, Operand(a0)); | 4229 __ And(a2, a1, Operand(a0)); |
4193 __ JumpIfSmi(a2, &miss); | 4230 __ JumpIfSmi(a2, &miss); |
4194 | 4231 |
4195 __ GetObjectType(a0, a2, a2); | 4232 __ GetObjectType(a0, a2, a2); |
4196 __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE)); | 4233 __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE)); |
4197 __ GetObjectType(a1, a2, a2); | 4234 __ GetObjectType(a1, a2, a2); |
4198 __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE)); | 4235 __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE)); |
4199 | 4236 |
4200 ASSERT(GetCondition() == eq); | 4237 ASSERT(GetCondition() == eq); |
4201 __ Ret(USE_DELAY_SLOT); | 4238 __ Ret(USE_DELAY_SLOT); |
4202 __ subu(v0, a0, a1); | 4239 __ dsubu(v0, a0, a1); |
4203 | 4240 |
4204 __ bind(&miss); | 4241 __ bind(&miss); |
4205 GenerateMiss(masm); | 4242 GenerateMiss(masm); |
4206 } | 4243 } |
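Both object-compare paths above return the raw pointer difference in v0, relying on GetCondition() == eq and EQUAL == 0 so that only zero-vs-nonzero matters. A minimal standalone analogue of that dsubu(v0, a0, a1) return value:

#include <cstdint>

// Sketch of compare-by-identity via subtraction: the result is zero
// exactly when both operands are the same heap object. (Illustration
// only; V8's encoding of comparison results is not modeled further.)
intptr_t CompareByIdentity(const void* right, const void* left) {
  return reinterpret_cast<intptr_t>(right) -
         reinterpret_cast<intptr_t>(left);  // 0 iff identical
}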
4207 | 4244 |
4208 | 4245 |
4209 void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) { | 4246 void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) { |
4210 Label miss; | 4247 Label miss; |
4211 __ And(a2, a1, a0); | 4248 __ And(a2, a1, a0); |
4212 __ JumpIfSmi(a2, &miss); | 4249 __ JumpIfSmi(a2, &miss); |
4213 __ lw(a2, FieldMemOperand(a0, HeapObject::kMapOffset)); | 4250 __ ld(a2, FieldMemOperand(a0, HeapObject::kMapOffset)); |
4214 __ lw(a3, FieldMemOperand(a1, HeapObject::kMapOffset)); | 4251 __ ld(a3, FieldMemOperand(a1, HeapObject::kMapOffset)); |
4215 __ Branch(&miss, ne, a2, Operand(known_map_)); | 4252 __ Branch(&miss, ne, a2, Operand(known_map_)); |
4216 __ Branch(&miss, ne, a3, Operand(known_map_)); | 4253 __ Branch(&miss, ne, a3, Operand(known_map_)); |
4217 | 4254 |
4218 __ Ret(USE_DELAY_SLOT); | 4255 __ Ret(USE_DELAY_SLOT); |
4219 __ subu(v0, a0, a1); | 4256 __ dsubu(v0, a0, a1); |
4220 | 4257 |
4221 __ bind(&miss); | 4258 __ bind(&miss); |
4222 GenerateMiss(masm); | 4259 GenerateMiss(masm); |
4223 } | 4260 } |
4224 | 4261 |
4225 | 4262 |
4226 void ICCompareStub::GenerateMiss(MacroAssembler* masm) { | 4263 void ICCompareStub::GenerateMiss(MacroAssembler* masm) { |
4227 { | 4264 { |
4228 // Call the runtime system in a fresh internal frame. | 4265 // Call the runtime system in a fresh internal frame. |
4229 ExternalReference miss = | 4266 ExternalReference miss = |
4230 ExternalReference(IC_Utility(IC::kCompareIC_Miss), isolate()); | 4267 ExternalReference(IC_Utility(IC::kCompareIC_Miss), isolate()); |
4231 FrameScope scope(masm, StackFrame::INTERNAL); | 4268 FrameScope scope(masm, StackFrame::INTERNAL); |
4232 __ Push(a1, a0); | 4269 __ Push(a1, a0); |
4233 __ Push(ra, a1, a0); | 4270 __ Push(ra, a1, a0); |
4234 __ li(t0, Operand(Smi::FromInt(op_))); | 4271 __ li(a4, Operand(Smi::FromInt(op_))); |
4235 __ addiu(sp, sp, -kPointerSize); | 4272 __ daddiu(sp, sp, -kPointerSize); |
4236 __ CallExternalReference(miss, 3, USE_DELAY_SLOT); | 4273 __ CallExternalReference(miss, 3, USE_DELAY_SLOT); |
4237 __ sw(t0, MemOperand(sp)); // In the delay slot. | 4274 __ sd(a4, MemOperand(sp)); // In the delay slot. |
4238 // Compute the entry point of the rewritten stub. | 4275 // Compute the entry point of the rewritten stub. |
4239 __ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag)); | 4276 __ Daddu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag)); |
4240 // Restore registers. | 4277 // Restore registers. |
4241 __ Pop(a1, a0, ra); | 4278 __ Pop(a1, a0, ra); |
4242 } | 4279 } |
4243 __ Jump(a2); | 4280 __ Jump(a2); |
4244 } | 4281 } |
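The miss handler hands back the rewritten stub as a tagged Code object in v0; the Daddu above turns that into a jumpable entry address by skipping the object header and dropping the heap-object tag. A sketch of the arithmetic, with placeholder constants (the real values come from V8's Code layout, not from this diff):

#include <cstdint>

constexpr intptr_t kHeapObjectTag = 1;    // assumed tag on heap pointers
constexpr intptr_t kCodeHeaderSize = 64;  // placeholder for Code::kHeaderSize

// Daddu(a2, v0, Code::kHeaderSize - kHeapObjectTag): address of the
// first instruction of a tagged Code object.
intptr_t EntryPoint(intptr_t tagged_code_object) {
  return tagged_code_object + kCodeHeaderSize - kHeapObjectTag;
}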
4245 | 4282 |
4246 | 4283 |
4247 void DirectCEntryStub::Generate(MacroAssembler* masm) { | 4284 void DirectCEntryStub::Generate(MacroAssembler* masm) { |
4248 // Make room for arguments to fit the C calling convention. Most callers | 4285 // Make room for arguments to fit the C calling convention. Most callers |
4249 // of DirectCEntryStub::GenerateCall use EnterExitFrame/LeaveExitFrame, | 4286 // of DirectCEntryStub::GenerateCall use EnterExitFrame/LeaveExitFrame, |
4250 // which handle stack restoring, so we don't have to do that here. | 4287 // which handle stack restoring, so we don't have to do that here. |
4251 // Any caller of DirectCEntryStub::GenerateCall must take care of dropping | 4288 // Any caller of DirectCEntryStub::GenerateCall must take care of dropping |
4252 // kCArgsSlotsSize stack space after the call. | 4289 // kCArgsSlotsSize stack space after the call. |
4253 __ Subu(sp, sp, Operand(kCArgsSlotsSize)); | 4290 __ daddiu(sp, sp, -kCArgsSlotsSize); |
4254 // Place the return address on the stack, making the call | 4291 // Place the return address on the stack, making the call |
4255 // GC safe. The RegExp backend also relies on this. | 4292 // GC safe. The RegExp backend also relies on this. |
4256 __ sw(ra, MemOperand(sp, kCArgsSlotsSize)); | 4293 __ sd(ra, MemOperand(sp, kCArgsSlotsSize)); |
4257 __ Call(t9); // Call the C++ function. | 4294 __ Call(t9); // Call the C++ function. |
4258 __ lw(t9, MemOperand(sp, kCArgsSlotsSize)); | 4295 __ ld(t9, MemOperand(sp, kCArgsSlotsSize)); |
4259 | 4296 |
4260 if (FLAG_debug_code && FLAG_enable_slow_asserts) { | 4297 if (FLAG_debug_code && FLAG_enable_slow_asserts) { |
4261 // In case of an error the return address may point to a memory area | 4298 // In case of an error the return address may point to a memory area |
4262 // filled with kZapValue by the GC. | 4299 // filled with kZapValue by the GC. |
4263 // Dereference the address and check for this. | 4300 // Dereference the address and check for this. |
4264 __ lw(t0, MemOperand(t9)); | 4301 __ Uld(a4, MemOperand(t9)); |
4265 __ Assert(ne, kReceivedInvalidReturnAddress, t0, | 4302 __ Assert(ne, kReceivedInvalidReturnAddress, a4, |
4266 Operand(reinterpret_cast<uint32_t>(kZapValue))); | 4303 Operand(reinterpret_cast<uint64_t>(kZapValue))); |
4267 } | 4304 } |
4268 __ Jump(t9); | 4305 __ Jump(t9); |
4269 } | 4306 } |
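The slow-assert above guards against jumping through a return address whose target was collected: the GC fills freed memory with a zap pattern, so dereferencing the saved slot and comparing catches that case. A hedged sketch (the zap constant here is a stand-in, not V8's actual value):

#include <cassert>
#include <cstdint>

constexpr uint64_t kZapValue = 0xdeadbeefdeadbeefULL;  // placeholder pattern

// Mirror of the Uld + Assert(ne, ...) pair: a zapped slot means the
// code we would return into no longer exists.
void CheckReturnAddressNotZapped(const uint64_t* return_site) {
  assert(*return_site != kZapValue);
}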
4270 | 4307 |
4271 | 4308 |
4272 void DirectCEntryStub::GenerateCall(MacroAssembler* masm, | 4309 void DirectCEntryStub::GenerateCall(MacroAssembler* masm, |
4273 Register target) { | 4310 Register target) { |
4274 intptr_t loc = | 4311 intptr_t loc = |
4275 reinterpret_cast<intptr_t>(GetCode().location()); | 4312 reinterpret_cast<intptr_t>(GetCode().location()); |
4276 __ Move(t9, target); | 4313 __ Move(t9, target); |
(...skipping 13 matching lines...)
4290 // If the names in slots 1 through kProbes - 1 for the hash value are | 4327 // If the names in slots 1 through kProbes - 1 for the hash value are |
4291 // not equal to the name, and the kProbes-th slot is unused (its name | 4328 // not equal to the name, and the kProbes-th slot is unused (its name |
4292 // is the undefined value), the hash table is guaranteed not to contain | 4329 // is the undefined value), the hash table is guaranteed not to contain |
4293 // the property. This holds even if some slots represent deleted | 4330 // the property. This holds even if some slots represent deleted |
4294 // properties (their names are the hole value). | 4331 // properties (their names are the hole value). |
4295 for (int i = 0; i < kInlinedProbes; i++) { | 4332 for (int i = 0; i < kInlinedProbes; i++) { |
4296 // scratch0 points to properties hash. | 4333 // scratch0 points to properties hash. |
4297 // Compute the masked index: (hash + i + i * i) & mask. | 4334 // Compute the masked index: (hash + i + i * i) & mask. |
4298 Register index = scratch0; | 4335 Register index = scratch0; |
4299 // Capacity is smi 2^n. | 4336 // Capacity is smi 2^n. |
4300 __ lw(index, FieldMemOperand(properties, kCapacityOffset)); | 4337 __ SmiLoadUntag(index, FieldMemOperand(properties, kCapacityOffset)); |
4301 __ Subu(index, index, Operand(1)); | 4338 __ Dsubu(index, index, Operand(1)); |
4302 __ And(index, index, Operand( | 4339 __ And(index, index, |
4303 Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i)))); | 4340 Operand(name->Hash() + NameDictionary::GetProbeOffset(i))); |
4304 | 4341 |
4305 // Scale the index by multiplying by the entry size. | 4342 // Scale the index by multiplying by the entry size. |
4306 ASSERT(NameDictionary::kEntrySize == 3); | 4343 ASSERT(NameDictionary::kEntrySize == 3); |
4307 __ sll(at, index, 1); | 4344 __ dsll(at, index, 1); |
4308 __ Addu(index, index, at); | 4345 __ Daddu(index, index, at); // index *= 3. |
4309 | 4346 |
4310 Register entity_name = scratch0; | 4347 Register entity_name = scratch0; |
4311 // Having undefined at this place means the name is not contained. | 4348 // Having undefined at this place means the name is not contained. |
4312 ASSERT_EQ(kSmiTagSize, 1); | 4349 ASSERT_EQ(kSmiTagSize, 1); |
4313 Register tmp = properties; | 4350 Register tmp = properties; |
4314 __ sll(scratch0, index, 1); | 4351 |
4315 __ Addu(tmp, properties, scratch0); | 4352 __ dsll(scratch0, index, kPointerSizeLog2); |
4316 __ lw(entity_name, FieldMemOperand(tmp, kElementsStartOffset)); | 4353 __ Daddu(tmp, properties, scratch0); |
| 4354 __ ld(entity_name, FieldMemOperand(tmp, kElementsStartOffset)); |
4317 | 4355 |
4318 ASSERT(!tmp.is(entity_name)); | 4356 ASSERT(!tmp.is(entity_name)); |
4319 __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex); | 4357 __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex); |
4320 __ Branch(done, eq, entity_name, Operand(tmp)); | 4358 __ Branch(done, eq, entity_name, Operand(tmp)); |
4321 | 4359 |
4322 // Load the hole ready for use below: | 4360 // Load the hole ready for use below: |
4323 __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex); | 4361 __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex); |
4324 | 4362 |
4325 // Stop if found the property. | 4363 // Stop if found the property. |
4326 __ Branch(miss, eq, entity_name, Operand(Handle<Name>(name))); | 4364 __ Branch(miss, eq, entity_name, Operand(Handle<Name>(name))); |
4327 | 4365 |
4328 Label good; | 4366 Label good; |
4329 __ Branch(&good, eq, entity_name, Operand(tmp)); | 4367 __ Branch(&good, eq, entity_name, Operand(tmp)); |
4330 | 4368 |
4331 // Check if the entry name is not a unique name. | 4369 // Check if the entry name is not a unique name. |
4332 __ lw(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset)); | 4370 __ ld(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset)); |
4333 __ lbu(entity_name, | 4371 __ lbu(entity_name, |
4334 FieldMemOperand(entity_name, Map::kInstanceTypeOffset)); | 4372 FieldMemOperand(entity_name, Map::kInstanceTypeOffset)); |
4335 __ JumpIfNotUniqueName(entity_name, miss); | 4373 __ JumpIfNotUniqueName(entity_name, miss); |
4336 __ bind(&good); | 4374 __ bind(&good); |
4337 | 4375 |
4338 // Restore the properties. | 4376 // Restore the properties. |
4339 __ lw(properties, | 4377 __ ld(properties, |
4340 FieldMemOperand(receiver, JSObject::kPropertiesOffset)); | 4378 FieldMemOperand(receiver, JSObject::kPropertiesOffset)); |
4341 } | 4379 } |
4342 | 4380 |
4343 const int spill_mask = | 4381 const int spill_mask = |
4344 (ra.bit() | t2.bit() | t1.bit() | t0.bit() | a3.bit() | | 4382 (ra.bit() | a6.bit() | a5.bit() | a4.bit() | a3.bit() | |
4345 a2.bit() | a1.bit() | a0.bit() | v0.bit()); | 4383 a2.bit() | a1.bit() | a0.bit() | v0.bit()); |
4346 | 4384 |
4347 __ MultiPush(spill_mask); | 4385 __ MultiPush(spill_mask); |
4348 __ lw(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); | 4386 __ ld(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); |
4349 __ li(a1, Operand(Handle<Name>(name))); | 4387 __ li(a1, Operand(Handle<Name>(name))); |
4350 NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP); | 4388 NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP); |
4351 __ CallStub(&stub); | 4389 __ CallStub(&stub); |
4352 __ mov(at, v0); | 4390 __ mov(at, v0); |
4353 __ MultiPop(spill_mask); | 4391 __ MultiPop(spill_mask); |
4354 | 4392 |
4355 __ Branch(done, eq, at, Operand(zero_reg)); | 4393 __ Branch(done, eq, at, Operand(zero_reg)); |
4356 __ Branch(miss, ne, at, Operand(zero_reg)); | 4394 __ Branch(miss, ne, at, Operand(zero_reg)); |
4357 } | 4395 } |
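The inlined probes above implement the masked sequence the comment names, (hash + i + i * i) & mask, over a capacity that is a power of two. A standalone sketch of the negative-lookup logic, with undefined/hole modeled as sentinel strings (a toy model, not V8's data layout):

#include <cstdint>
#include <string>
#include <vector>

const std::string kUndefined = "<undefined>";  // empty slot
const std::string kTheHole = "<hole>";         // deleted entry

// Negative lookup: an empty slot on the probe path proves absence;
// holes and mismatching names just continue the probe sequence.
bool DefinitelyAbsent(const std::vector<std::string>& keys,  // size 2^n
                      const std::string& name, uint32_t hash, int probes) {
  uint32_t mask = static_cast<uint32_t>(keys.size() - 1);
  for (int i = 0; i < probes; i++) {
    uint32_t index = (hash + i + i * i) & mask;  // masked probe index
    if (keys[index] == kUndefined) return true;  // not present
    if (keys[index] == name) return false;       // found
    if (keys[index] == kTheHole) continue;       // deleted: keep probing
  }
  return false;  // inconclusive; the out-of-line stub takes over
}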
4358 | 4396 |
(...skipping 10 matching lines...)
4369 Register scratch1, | 4407 Register scratch1, |
4370 Register scratch2) { | 4408 Register scratch2) { |
4371 ASSERT(!elements.is(scratch1)); | 4409 ASSERT(!elements.is(scratch1)); |
4372 ASSERT(!elements.is(scratch2)); | 4410 ASSERT(!elements.is(scratch2)); |
4373 ASSERT(!name.is(scratch1)); | 4411 ASSERT(!name.is(scratch1)); |
4374 ASSERT(!name.is(scratch2)); | 4412 ASSERT(!name.is(scratch2)); |
4375 | 4413 |
4376 __ AssertName(name); | 4414 __ AssertName(name); |
4377 | 4415 |
4378 // Compute the capacity mask. | 4416 // Compute the capacity mask. |
4379 __ lw(scratch1, FieldMemOperand(elements, kCapacityOffset)); | 4417 __ ld(scratch1, FieldMemOperand(elements, kCapacityOffset)); |
4380 __ sra(scratch1, scratch1, kSmiTagSize); // convert smi to int | 4418 __ SmiUntag(scratch1); |
4381 __ Subu(scratch1, scratch1, Operand(1)); | 4419 __ Dsubu(scratch1, scratch1, Operand(1)); |
4382 | 4420 |
4383 // Generate an unrolled loop that performs a few probes before | 4421 // Generate an unrolled loop that performs a few probes before |
4384 // giving up. Measurements done on Gmail indicate that 2 probes | 4422 // giving up. Measurements done on Gmail indicate that 2 probes |
4385 // cover ~93% of loads from dictionaries. | 4423 // cover ~93% of loads from dictionaries. |
4386 for (int i = 0; i < kInlinedProbes; i++) { | 4424 for (int i = 0; i < kInlinedProbes; i++) { |
4387 // Compute the masked index: (hash + i + i * i) & mask. | 4425 // Compute the masked index: (hash + i + i * i) & mask. |
4388 __ lw(scratch2, FieldMemOperand(name, Name::kHashFieldOffset)); | 4426 __ lwu(scratch2, FieldMemOperand(name, Name::kHashFieldOffset)); |
4389 if (i > 0) { | 4427 if (i > 0) { |
4390 // Add the probe offset (i + i * i) left shifted to avoid right shifting | 4428 // Add the probe offset (i + i * i) left shifted to avoid right shifting |
4391 // the hash in a separate instruction. The value hash + i + i * i is right | 4429 // the hash in a separate instruction. The value hash + i + i * i is right |
4392 // shifted in the following And instruction. | 4430 // shifted in the following And instruction. |
4393 ASSERT(NameDictionary::GetProbeOffset(i) < | 4431 ASSERT(NameDictionary::GetProbeOffset(i) < |
4394 1 << (32 - Name::kHashFieldOffset)); | 4432 1 << (32 - Name::kHashFieldOffset)); |
4395 __ Addu(scratch2, scratch2, Operand( | 4433 __ Daddu(scratch2, scratch2, Operand( |
4396 NameDictionary::GetProbeOffset(i) << Name::kHashShift)); | 4434 NameDictionary::GetProbeOffset(i) << Name::kHashShift)); |
4397 } | 4435 } |
4398 __ srl(scratch2, scratch2, Name::kHashShift); | 4436 __ dsrl(scratch2, scratch2, Name::kHashShift); |
4399 __ And(scratch2, scratch1, scratch2); | 4437 __ And(scratch2, scratch1, scratch2); |
4400 | 4438 |
4401 // Scale the index by multiplying by the element size. | 4439 // Scale the index by multiplying by the element size. |
4402 ASSERT(NameDictionary::kEntrySize == 3); | 4440 ASSERT(NameDictionary::kEntrySize == 3); |
4403 // scratch2 = scratch2 * 3. | 4441 // scratch2 = scratch2 * 3. |
4404 | 4442 |
4405 __ sll(at, scratch2, 1); | 4443 __ dsll(at, scratch2, 1); |
4406 __ Addu(scratch2, scratch2, at); | 4444 __ Daddu(scratch2, scratch2, at); |
4407 | 4445 |
4408 // Check if the key is identical to the name. | 4446 // Check if the key is identical to the name. |
4409 __ sll(at, scratch2, 2); | 4447 __ dsll(at, scratch2, kPointerSizeLog2); |
4410 __ Addu(scratch2, elements, at); | 4448 __ Daddu(scratch2, elements, at); |
4411 __ lw(at, FieldMemOperand(scratch2, kElementsStartOffset)); | 4449 __ ld(at, FieldMemOperand(scratch2, kElementsStartOffset)); |
4412 __ Branch(done, eq, name, Operand(at)); | 4450 __ Branch(done, eq, name, Operand(at)); |
4413 } | 4451 } |
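The shift-and-add pairs in this loop avoid a multiply: index * 3 is emitted as (index << 1) + index, and the result is scaled to bytes with a pointer-size shift before the ld. The address computation, restated (kElementsStartOffset is a placeholder value):

#include <cstdint>

constexpr int kEntrySize = 3;        // words per NameDictionary entry
constexpr int kPointerSizeLog2 = 3;  // 8-byte slots on the 64-bit port
constexpr intptr_t kElementsStartOffset = 24;  // placeholder header offset

// dsll at, i, 1; Daddu i, i, at      ==> i *= 3
// dsll at, i, kPointerSizeLog2; ...  ==> byte offset into the table
intptr_t KeySlotAddress(intptr_t elements, uint32_t index) {
  uint32_t entry = index * kEntrySize;  // emitted as (index << 1) + index
  return elements + (static_cast<intptr_t>(entry) << kPointerSizeLog2) +
         kElementsStartOffset;
}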
4414 | 4452 |
4415 const int spill_mask = | 4453 const int spill_mask = |
4416 (ra.bit() | t2.bit() | t1.bit() | t0.bit() | | 4454 (ra.bit() | a6.bit() | a5.bit() | a4.bit() | |
4417 a3.bit() | a2.bit() | a1.bit() | a0.bit() | v0.bit()) & | 4455 a3.bit() | a2.bit() | a1.bit() | a0.bit() | v0.bit()) & |
4418 ~(scratch1.bit() | scratch2.bit()); | 4456 ~(scratch1.bit() | scratch2.bit()); |
4419 | 4457 |
4420 __ MultiPush(spill_mask); | 4458 __ MultiPush(spill_mask); |
4421 if (name.is(a0)) { | 4459 if (name.is(a0)) { |
4422 ASSERT(!elements.is(a1)); | 4460 ASSERT(!elements.is(a1)); |
4423 __ Move(a1, name); | 4461 __ Move(a1, name); |
4424 __ Move(a0, elements); | 4462 __ Move(a0, elements); |
4425 } else { | 4463 } else { |
4426 __ Move(a0, elements); | 4464 __ Move(a0, elements); |
(...skipping 20 matching lines...)
4447 // index: will hold an index of entry if lookup is successful. | 4485 // index: will hold an index of entry if lookup is successful. |
4448 // might alias with result_. | 4486 // might alias with result_. |
4449 // Returns: | 4487 // Returns: |
4450 // result_ is zero if lookup failed, non-zero otherwise. | 4488 // result_ is zero if lookup failed, non-zero otherwise. |
4451 | 4489 |
4452 Register result = v0; | 4490 Register result = v0; |
4453 Register dictionary = a0; | 4491 Register dictionary = a0; |
4454 Register key = a1; | 4492 Register key = a1; |
4455 Register index = a2; | 4493 Register index = a2; |
4456 Register mask = a3; | 4494 Register mask = a3; |
4457 Register hash = t0; | 4495 Register hash = a4; |
4458 Register undefined = t1; | 4496 Register undefined = a5; |
4459 Register entry_key = t2; | 4497 Register entry_key = a6; |
4460 | 4498 |
4461 Label in_dictionary, maybe_in_dictionary, not_in_dictionary; | 4499 Label in_dictionary, maybe_in_dictionary, not_in_dictionary; |
4462 | 4500 |
4463 __ lw(mask, FieldMemOperand(dictionary, kCapacityOffset)); | 4501 __ ld(mask, FieldMemOperand(dictionary, kCapacityOffset)); |
4464 __ sra(mask, mask, kSmiTagSize); | 4502 __ SmiUntag(mask); |
4465 __ Subu(mask, mask, Operand(1)); | 4503 __ Dsubu(mask, mask, Operand(1)); |
4466 | 4504 |
4467 __ lw(hash, FieldMemOperand(key, Name::kHashFieldOffset)); | 4505 __ lwu(hash, FieldMemOperand(key, Name::kHashFieldOffset)); |
4468 | 4506 |
4469 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex); | 4507 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex); |
4470 | 4508 |
4471 for (int i = kInlinedProbes; i < kTotalProbes; i++) { | 4509 for (int i = kInlinedProbes; i < kTotalProbes; i++) { |
4472 // Compute the masked index: (hash + i + i * i) & mask. | 4510 // Compute the masked index: (hash + i + i * i) & mask. |
4473 // Capacity is smi 2^n. | 4511 // Capacity is smi 2^n. |
4474 if (i > 0) { | 4512 if (i > 0) { |
4475 // Add the probe offset (i + i * i) left shifted to avoid right shifting | 4513 // Add the probe offset (i + i * i) left shifted to avoid right shifting |
4476 // the hash in a separate instruction. The value hash + i + i * i is right | 4514 // the hash in a separate instruction. The value hash + i + i * i is right |
4477 // shifted in the following And instruction. | 4515 // shifted in the following And instruction. |
4478 ASSERT(NameDictionary::GetProbeOffset(i) < | 4516 ASSERT(NameDictionary::GetProbeOffset(i) < |
4479 1 << (32 - Name::kHashFieldOffset)); | 4517 1 << (32 - Name::kHashFieldOffset)); |
4480 __ Addu(index, hash, Operand( | 4518 __ Daddu(index, hash, Operand( |
4481 NameDictionary::GetProbeOffset(i) << Name::kHashShift)); | 4519 NameDictionary::GetProbeOffset(i) << Name::kHashShift)); |
4482 } else { | 4520 } else { |
4483 __ mov(index, hash); | 4521 __ mov(index, hash); |
4484 } | 4522 } |
4485 __ srl(index, index, Name::kHashShift); | 4523 __ dsrl(index, index, Name::kHashShift); |
4486 __ And(index, mask, index); | 4524 __ And(index, mask, index); |
4487 | 4525 |
4488 // Scale the index by multiplying by the entry size. | 4526 // Scale the index by multiplying by the entry size. |
4489 ASSERT(NameDictionary::kEntrySize == 3); | 4527 ASSERT(NameDictionary::kEntrySize == 3); |
4490 // index *= 3. | 4528 // index *= 3. |
4491 __ mov(at, index); | 4529 __ mov(at, index); |
4492 __ sll(index, index, 1); | 4530 __ dsll(index, index, 1); |
4493 __ Addu(index, index, at); | 4531 __ Daddu(index, index, at); |
4494 | 4532 |
4495 | 4533 |
4496 ASSERT_EQ(kSmiTagSize, 1); | 4534 ASSERT_EQ(kSmiTagSize, 1); |
4497 __ sll(index, index, 2); | 4535 __ dsll(index, index, kPointerSizeLog2); |
4498 __ Addu(index, index, dictionary); | 4536 __ Daddu(index, index, dictionary); |
4499 __ lw(entry_key, FieldMemOperand(index, kElementsStartOffset)); | 4537 __ ld(entry_key, FieldMemOperand(index, kElementsStartOffset)); |
4500 | 4538 |
4501 // Having undefined at this place means the name is not contained. | 4539 // Having undefined at this place means the name is not contained. |
4502 __ Branch(¬_in_dictionary, eq, entry_key, Operand(undefined)); | 4540 __ Branch(¬_in_dictionary, eq, entry_key, Operand(undefined)); |
4503 | 4541 |
4504 // Stop if we found the property. | 4542 // Stop if we found the property. |
4505 __ Branch(&in_dictionary, eq, entry_key, Operand(key)); | 4543 __ Branch(&in_dictionary, eq, entry_key, Operand(key)); |
4506 | 4544 |
4507 if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) { | 4545 if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) { |
4508 // Check if the entry name is not a unique name. | 4546 // Check if the entry name is not a unique name. |
4509 __ lw(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset)); | 4547 __ ld(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset)); |
4510 __ lbu(entry_key, | 4548 __ lbu(entry_key, |
4511 FieldMemOperand(entry_key, Map::kInstanceTypeOffset)); | 4549 FieldMemOperand(entry_key, Map::kInstanceTypeOffset)); |
4512 __ JumpIfNotUniqueName(entry_key, &maybe_in_dictionary); | 4550 __ JumpIfNotUniqueName(entry_key, &maybe_in_dictionary); |
4513 } | 4551 } |
4514 } | 4552 } |
4515 | 4553 |
4516 __ bind(&maybe_in_dictionary); | 4554 __ bind(&maybe_in_dictionary); |
4517 // If we are doing a negative lookup, then probing failure should be | 4555 // If we are doing a negative lookup, then probing failure should be |
4518 // treated as a lookup success. For a positive lookup, probing failure | 4556 // treated as a lookup success. For a positive lookup, probing failure |
4519 // should be treated as lookup failure. | 4557 // should be treated as lookup failure. |
(...skipping 63 matching lines...)
4583 PatchBranchIntoNop(masm, 2 * Assembler::kInstrSize); | 4621 PatchBranchIntoNop(masm, 2 * Assembler::kInstrSize); |
4584 } | 4622 } |
4585 | 4623 |
4586 | 4624 |
4587 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) { | 4625 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) { |
4588 regs_.Save(masm); | 4626 regs_.Save(masm); |
4589 | 4627 |
4590 if (remembered_set_action_ == EMIT_REMEMBERED_SET) { | 4628 if (remembered_set_action_ == EMIT_REMEMBERED_SET) { |
4591 Label dont_need_remembered_set; | 4629 Label dont_need_remembered_set; |
4592 | 4630 |
4593 __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0)); | 4631 __ ld(regs_.scratch0(), MemOperand(regs_.address(), 0)); |
4594 __ JumpIfNotInNewSpace(regs_.scratch0(), // Value. | 4632 __ JumpIfNotInNewSpace(regs_.scratch0(), // Value. |
4595 regs_.scratch0(), | 4633 regs_.scratch0(), |
4596 &dont_need_remembered_set); | 4634 &dont_need_remembered_set); |
4597 | 4635 |
4598 __ CheckPageFlag(regs_.object(), | 4636 __ CheckPageFlag(regs_.object(), |
4599 regs_.scratch0(), | 4637 regs_.scratch0(), |
4600 1 << MemoryChunk::SCAN_ON_SCAVENGE, | 4638 1 << MemoryChunk::SCAN_ON_SCAVENGE, |
4601 ne, | 4639 ne, |
4602 &dont_need_remembered_set); | 4640 &dont_need_remembered_set); |
4603 | 4641 |
(...skipping 43 matching lines...)
4647 | 4685 |
4648 void RecordWriteStub::CheckNeedsToInformIncrementalMarker( | 4686 void RecordWriteStub::CheckNeedsToInformIncrementalMarker( |
4649 MacroAssembler* masm, | 4687 MacroAssembler* masm, |
4650 OnNoNeedToInformIncrementalMarker on_no_need, | 4688 OnNoNeedToInformIncrementalMarker on_no_need, |
4651 Mode mode) { | 4689 Mode mode) { |
4652 Label on_black; | 4690 Label on_black; |
4653 Label need_incremental; | 4691 Label need_incremental; |
4654 Label need_incremental_pop_scratch; | 4692 Label need_incremental_pop_scratch; |
4655 | 4693 |
4656 __ And(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask)); | 4694 __ And(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask)); |
4657 __ lw(regs_.scratch1(), | 4695 __ ld(regs_.scratch1(), |
4658 MemOperand(regs_.scratch0(), | 4696 MemOperand(regs_.scratch0(), |
4659 MemoryChunk::kWriteBarrierCounterOffset)); | 4697 MemoryChunk::kWriteBarrierCounterOffset)); |
4660 __ Subu(regs_.scratch1(), regs_.scratch1(), Operand(1)); | 4698 __ Dsubu(regs_.scratch1(), regs_.scratch1(), Operand(1)); |
4661 __ sw(regs_.scratch1(), | 4699 __ sd(regs_.scratch1(), |
4662 MemOperand(regs_.scratch0(), | 4700 MemOperand(regs_.scratch0(), |
4663 MemoryChunk::kWriteBarrierCounterOffset)); | 4701 MemoryChunk::kWriteBarrierCounterOffset)); |
4664 __ Branch(&need_incremental, lt, regs_.scratch1(), Operand(zero_reg)); | 4702 __ Branch(&need_incremental, lt, regs_.scratch1(), Operand(zero_reg)); |
4665 | 4703 |
4666 // Let's look at the color of the object: If it is not black we don't have | 4704 // Let's look at the color of the object: If it is not black we don't have |
4667 // to inform the incremental marker. | 4705 // to inform the incremental marker. |
4668 __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black); | 4706 __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black); |
4669 | 4707 |
4670 regs_.Restore(masm); | 4708 regs_.Restore(masm); |
4671 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) { | 4709 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) { |
4672 __ RememberedSetHelper(object_, | 4710 __ RememberedSetHelper(object_, |
4673 address_, | 4711 address_, |
4674 value_, | 4712 value_, |
4675 save_fp_regs_mode_, | 4713 save_fp_regs_mode_, |
4676 MacroAssembler::kReturnAtEnd); | 4714 MacroAssembler::kReturnAtEnd); |
4677 } else { | 4715 } else { |
4678 __ Ret(); | 4716 __ Ret(); |
4679 } | 4717 } |
4680 | 4718 |
4681 __ bind(&on_black); | 4719 __ bind(&on_black); |
4682 | 4720 |
4683 // Get the value from the slot. | 4721 // Get the value from the slot. |
4684 __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0)); | 4722 __ ld(regs_.scratch0(), MemOperand(regs_.address(), 0)); |
4685 | 4723 |
4686 if (mode == INCREMENTAL_COMPACTION) { | 4724 if (mode == INCREMENTAL_COMPACTION) { |
4687 Label ensure_not_white; | 4725 Label ensure_not_white; |
4688 | 4726 |
4689 __ CheckPageFlag(regs_.scratch0(), // Contains value. | 4727 __ CheckPageFlag(regs_.scratch0(), // Contains value. |
4690 regs_.scratch1(), // Scratch. | 4728 regs_.scratch1(), // Scratch. |
4691 MemoryChunk::kEvacuationCandidateMask, | 4729 MemoryChunk::kEvacuationCandidateMask, |
4692 eq, | 4730 eq, |
4693 &ensure_not_white); | 4731 &ensure_not_white); |
4694 | 4732 |
(...skipping 35 matching lines...)
4730 // Fall through when we need to inform the incremental marker. | 4768 // Fall through when we need to inform the incremental marker. |
4731 } | 4769 } |
4732 | 4770 |
4733 | 4771 |
4734 void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) { | 4772 void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) { |
4735 // ----------- S t a t e ------------- | 4773 // ----------- S t a t e ------------- |
4736 // -- a0 : element value to store | 4774 // -- a0 : element value to store |
4737 // -- a3 : element index as smi | 4775 // -- a3 : element index as smi |
4738 // -- sp[0] : array literal index in function as smi | 4776 // -- sp[0] : array literal index in function as smi |
4739 // -- sp[4] : array literal | 4777 // -- sp[8] : array literal |
4740 // clobbers a1, a2, t0 | 4778 // clobbers a1, a2, a4 |
4741 // ----------------------------------- | 4779 // ----------------------------------- |
4742 | 4780 |
4743 Label element_done; | 4781 Label element_done; |
4744 Label double_elements; | 4782 Label double_elements; |
4745 Label smi_element; | 4783 Label smi_element; |
4746 Label slow_elements; | 4784 Label slow_elements; |
4747 Label fast_elements; | 4785 Label fast_elements; |
4748 | 4786 |
4749 // Get array literal index, array literal and its map. | 4787 // Get array literal index, array literal and its map. |
4750 __ lw(t0, MemOperand(sp, 0 * kPointerSize)); | 4788 __ ld(a4, MemOperand(sp, 0 * kPointerSize)); |
4751 __ lw(a1, MemOperand(sp, 1 * kPointerSize)); | 4789 __ ld(a1, MemOperand(sp, 1 * kPointerSize)); |
4752 __ lw(a2, FieldMemOperand(a1, JSObject::kMapOffset)); | 4790 __ ld(a2, FieldMemOperand(a1, JSObject::kMapOffset)); |
4753 | 4791 |
4754 __ CheckFastElements(a2, t1, &double_elements); | 4792 __ CheckFastElements(a2, a5, &double_elements); |
4755 // Check for FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS elements | 4793 // Check for FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS elements |
4756 __ JumpIfSmi(a0, &smi_element); | 4794 __ JumpIfSmi(a0, &smi_element); |
4757 __ CheckFastSmiElements(a2, t1, &fast_elements); | 4795 __ CheckFastSmiElements(a2, a5, &fast_elements); |
4758 | 4796 |
4759 // Store into the array literal requires an elements transition. Call into | 4797 // Store into the array literal requires an elements transition. Call into |
4760 // the runtime. | 4798 // the runtime. |
4761 __ bind(&slow_elements); | 4799 __ bind(&slow_elements); |
4762 // call. | 4800 // call. |
4763 __ Push(a1, a3, a0); | 4801 __ Push(a1, a3, a0); |
4764 __ lw(t1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); | 4802 __ ld(a5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); |
4765 __ lw(t1, FieldMemOperand(t1, JSFunction::kLiteralsOffset)); | 4803 __ ld(a5, FieldMemOperand(a5, JSFunction::kLiteralsOffset)); |
4766 __ Push(t1, t0); | 4804 __ Push(a5, a4); |
4767 __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1); | 4805 __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1); |
4768 | 4806 |
4769 // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object. | 4807 // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object. |
4770 __ bind(&fast_elements); | 4808 __ bind(&fast_elements); |
4771 __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset)); | 4809 __ ld(a5, FieldMemOperand(a1, JSObject::kElementsOffset)); |
4772 __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize); | 4810 __ SmiScale(a6, a3, kPointerSizeLog2); |
4773 __ Addu(t2, t1, t2); | 4811 __ Daddu(a6, a5, a6); |
4774 __ Addu(t2, t2, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 4812 __ Daddu(a6, a6, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
4775 __ sw(a0, MemOperand(t2, 0)); | 4813 __ sd(a0, MemOperand(a6, 0)); |
4776 // Update the write barrier for the array store. | 4814 // Update the write barrier for the array store. |
4777 __ RecordWrite(t1, t2, a0, kRAHasNotBeenSaved, kDontSaveFPRegs, | 4815 __ RecordWrite(a5, a6, a0, kRAHasNotBeenSaved, kDontSaveFPRegs, |
4778 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); | 4816 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); |
4779 __ Ret(USE_DELAY_SLOT); | 4817 __ Ret(USE_DELAY_SLOT); |
4780 __ mov(v0, a0); | 4818 __ mov(v0, a0); |
4781 | 4819 |
4782 // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS, | 4820 // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS, |
4783 // and value is Smi. | 4821 // and value is Smi. |
4784 __ bind(&smi_element); | 4822 __ bind(&smi_element); |
4785 __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset)); | 4823 __ ld(a5, FieldMemOperand(a1, JSObject::kElementsOffset)); |
4786 __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize); | 4824 __ SmiScale(a6, a3, kPointerSizeLog2); |
4787 __ Addu(t2, t1, t2); | 4825 __ Daddu(a6, a5, a6); |
4788 __ sw(a0, FieldMemOperand(t2, FixedArray::kHeaderSize)); | 4826 __ sd(a0, FieldMemOperand(a6, FixedArray::kHeaderSize)); |
4789 __ Ret(USE_DELAY_SLOT); | 4827 __ Ret(USE_DELAY_SLOT); |
4790 __ mov(v0, a0); | 4828 __ mov(v0, a0); |
4791 | 4829 |
4792 // Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS. | 4830 // Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS. |
4793 __ bind(&double_elements); | 4831 __ bind(&double_elements); |
4794 __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset)); | 4832 __ ld(a5, FieldMemOperand(a1, JSObject::kElementsOffset)); |
4795 __ StoreNumberToDoubleElements(a0, a3, t1, t3, t5, a2, &slow_elements); | 4833 __ StoreNumberToDoubleElements(a0, a3, a5, a7, t1, a2, &slow_elements); |
4796 __ Ret(USE_DELAY_SLOT); | 4834 __ Ret(USE_DELAY_SLOT); |
4797 __ mov(v0, a0); | 4835 __ mov(v0, a0); |
4798 } | 4836 } |
4799 | 4837 |
4800 | 4838 |
4801 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) { | 4839 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) { |
4802 CEntryStub ces(isolate(), 1, kSaveFPRegs); | 4840 CEntryStub ces(isolate(), 1, kSaveFPRegs); |
4803 __ Call(ces.GetCode(), RelocInfo::CODE_TARGET); | 4841 __ Call(ces.GetCode(), RelocInfo::CODE_TARGET); |
4804 int parameter_count_offset = | 4842 int parameter_count_offset = |
4805 StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset; | 4843 StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset; |
4806 __ lw(a1, MemOperand(fp, parameter_count_offset)); | 4844 __ ld(a1, MemOperand(fp, parameter_count_offset)); |
4807 if (function_mode_ == JS_FUNCTION_STUB_MODE) { | 4845 if (function_mode_ == JS_FUNCTION_STUB_MODE) { |
4808 __ Addu(a1, a1, Operand(1)); | 4846 __ Daddu(a1, a1, Operand(1)); |
4809 } | 4847 } |
4810 masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE); | 4848 masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE); |
4811 __ sll(a1, a1, kPointerSizeLog2); | 4849 __ dsll(a1, a1, kPointerSizeLog2); |
4812 __ Ret(USE_DELAY_SLOT); | 4850 __ Ret(USE_DELAY_SLOT); |
4813 __ Addu(sp, sp, a1); | 4851 __ Daddu(sp, sp, a1); |
4814 } | 4852 } |
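The epilogue above drops the caller's stack parameters in the return delay slot: the (receiver-adjusted) count is scaled to bytes with dsll and added to sp. In plain terms:

#include <cstdint>

constexpr int kPointerSizeLog2 = 3;  // 8-byte stack slots on the 64-bit port

// dsll a1, a1, kPointerSizeLog2; Daddu sp, sp, a1:
// pop `count` slots off the stack on the way out.
intptr_t PopParameterSlots(intptr_t sp, int64_t count) {
  return sp + (count << kPointerSizeLog2);
}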
4815 | 4853 |
4816 | 4854 |
4817 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) { | 4855 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) { |
4818 if (masm->isolate()->function_entry_hook() != NULL) { | 4856 if (masm->isolate()->function_entry_hook() != NULL) { |
4819 ProfileEntryHookStub stub(masm->isolate()); | 4857 ProfileEntryHookStub stub(masm->isolate()); |
4820 __ push(ra); | 4858 __ push(ra); |
4821 __ CallStub(&stub); | 4859 __ CallStub(&stub); |
4822 __ pop(ra); | 4860 __ pop(ra); |
4823 } | 4861 } |
(...skipping 11 matching lines...)
4835 kJSCallerSaved | // Caller saved registers. | 4873 kJSCallerSaved | // Caller saved registers. |
4836 s5.bit(); // Saved stack pointer. | 4874 s5.bit(); // Saved stack pointer. |
4837 | 4875 |
4838 // We also save ra, so the count here is one higher than the mask indicates. | 4876 // We also save ra, so the count here is one higher than the mask indicates. |
4839 const int32_t kNumSavedRegs = kNumJSCallerSaved + 2; | 4877 const int32_t kNumSavedRegs = kNumJSCallerSaved + 2; |
4840 | 4878 |
4841 // Save all caller-save registers as this may be called from anywhere. | 4879 // Save all caller-save registers as this may be called from anywhere. |
4842 __ MultiPush(kSavedRegs | ra.bit()); | 4880 __ MultiPush(kSavedRegs | ra.bit()); |
4843 | 4881 |
4844 // Compute the function's address for the first argument. | 4882 // Compute the function's address for the first argument. |
4845 __ Subu(a0, ra, Operand(kReturnAddressDistanceFromFunctionStart)); | 4883 __ Dsubu(a0, ra, Operand(kReturnAddressDistanceFromFunctionStart)); |
4846 | 4884 |
4847 // The caller's return address is above the saved temporaries. | 4885 // The caller's return address is above the saved temporaries. |
4848 // Grab that for the second argument to the hook. | 4886 // Grab that for the second argument to the hook. |
4849 __ Addu(a1, sp, Operand(kNumSavedRegs * kPointerSize)); | 4887 __ Daddu(a1, sp, Operand(kNumSavedRegs * kPointerSize)); |
4850 | 4888 |
4851 // Align the stack if necessary. | 4889 // Align the stack if necessary. |
4852 int frame_alignment = masm->ActivationFrameAlignment(); | 4890 int frame_alignment = masm->ActivationFrameAlignment(); |
4853 if (frame_alignment > kPointerSize) { | 4891 if (frame_alignment > kPointerSize) { |
4854 __ mov(s5, sp); | 4892 __ mov(s5, sp); |
4855 ASSERT(IsPowerOf2(frame_alignment)); | 4893 ASSERT(IsPowerOf2(frame_alignment)); |
4856 __ And(sp, sp, Operand(-frame_alignment)); | 4894 __ And(sp, sp, Operand(-frame_alignment)); |
4857 } | 4895 } |
4858 __ Subu(sp, sp, kCArgsSlotsSize); | 4896 |
4859 #if defined(V8_HOST_ARCH_MIPS) | 4897 __ Dsubu(sp, sp, kCArgsSlotsSize); |
4860 int32_t entry_hook = | 4898 #if defined(V8_HOST_ARCH_MIPS) || defined(V8_HOST_ARCH_MIPS64) |
4861 reinterpret_cast<int32_t>(isolate()->function_entry_hook()); | 4899 int64_t entry_hook = |
| 4900 reinterpret_cast<int64_t>(isolate()->function_entry_hook()); |
4862 __ li(t9, Operand(entry_hook)); | 4901 __ li(t9, Operand(entry_hook)); |
4863 #else | 4902 #else |
4864 // Under the simulator we need to indirect the entry hook through a | 4903 // Under the simulator we need to indirect the entry hook through a |
4865 // trampoline function at a known address. | 4904 // trampoline function at a known address. |
4866 // It additionally takes an isolate as a third parameter. | 4905 // It additionally takes an isolate as a third parameter. |
4867 __ li(a2, Operand(ExternalReference::isolate_address(isolate()))); | 4906 __ li(a2, Operand(ExternalReference::isolate_address(isolate()))); |
4868 | 4907 |
4869 ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline)); | 4908 ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline)); |
4870 __ li(t9, Operand(ExternalReference(&dispatcher, | 4909 __ li(t9, Operand(ExternalReference(&dispatcher, |
4871 ExternalReference::BUILTIN_CALL, | 4910 ExternalReference::BUILTIN_CALL, |
4872 isolate()))); | 4911 isolate()))); |
4873 #endif | 4912 #endif |
4874 // Call C function through t9 to conform ABI for PIC. | 4913 // Call C function through t9 to conform ABI for PIC. |
4875 __ Call(t9); | 4914 __ Call(t9); |
4876 | 4915 |
4877 // Restore the stack pointer if needed. | 4916 // Restore the stack pointer if needed. |
4878 if (frame_alignment > kPointerSize) { | 4917 if (frame_alignment > kPointerSize) { |
4879 __ mov(sp, s5); | 4918 __ mov(sp, s5); |
4880 } else { | 4919 } else { |
4881 __ Addu(sp, sp, kCArgsSlotsSize); | 4920 __ Daddu(sp, sp, kCArgsSlotsSize); |
4882 } | 4921 } |
4883 | 4922 |
4884 // Also pop ra to get Ret(0). | 4923 // Also pop ra to get Ret(0). |
4885 __ MultiPop(kSavedRegs | ra.bit()); | 4924 __ MultiPop(kSavedRegs | ra.bit()); |
4886 __ Ret(); | 4925 __ Ret(); |
4887 } | 4926 } |
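The alignment step above rounds sp down to the activation-frame alignment, which IsPowerOf2 guarantees can be done with a single And against the negated alignment; the old sp is parked in s5 for restoration. The bit trick in isolation:

#include <cassert>
#include <cstdint>

// And(sp, sp, -alignment) clears the low log2(alignment) bits, rounding
// the pointer *down* to the nearest aligned boundary.
intptr_t AlignDown(intptr_t sp, intptr_t frame_alignment) {
  assert((frame_alignment & (frame_alignment - 1)) == 0);  // power of two
  return sp & -frame_alignment;
}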
4888 | 4927 |
4889 | 4928 |
4890 template<class T> | 4929 template<class T> |
4891 static void CreateArrayDispatch(MacroAssembler* masm, | 4930 static void CreateArrayDispatch(MacroAssembler* masm, |
(...skipping 31 matching lines...)
4923 ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); | 4962 ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); |
4924 ASSERT(FAST_ELEMENTS == 2); | 4963 ASSERT(FAST_ELEMENTS == 2); |
4925 ASSERT(FAST_HOLEY_ELEMENTS == 3); | 4964 ASSERT(FAST_HOLEY_ELEMENTS == 3); |
4926 ASSERT(FAST_DOUBLE_ELEMENTS == 4); | 4965 ASSERT(FAST_DOUBLE_ELEMENTS == 4); |
4927 ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5); | 4966 ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5); |
4928 | 4967 |
4929 // is the low bit set? If so, we are holey and that is good. | 4968 // is the low bit set? If so, we are holey and that is good. |
4930 __ And(at, a3, Operand(1)); | 4969 __ And(at, a3, Operand(1)); |
4931 __ Branch(&normal_sequence, ne, at, Operand(zero_reg)); | 4970 __ Branch(&normal_sequence, ne, at, Operand(zero_reg)); |
4932 } | 4971 } |
4933 | |
4934 // look at the first argument | 4972 // look at the first argument |
4935 __ lw(t1, MemOperand(sp, 0)); | 4973 __ ld(a5, MemOperand(sp, 0)); |
4936 __ Branch(&normal_sequence, eq, t1, Operand(zero_reg)); | 4974 __ Branch(&normal_sequence, eq, a5, Operand(zero_reg)); |
4937 | 4975 |
4938 if (mode == DISABLE_ALLOCATION_SITES) { | 4976 if (mode == DISABLE_ALLOCATION_SITES) { |
4939 ElementsKind initial = GetInitialFastElementsKind(); | 4977 ElementsKind initial = GetInitialFastElementsKind(); |
4940 ElementsKind holey_initial = GetHoleyElementsKind(initial); | 4978 ElementsKind holey_initial = GetHoleyElementsKind(initial); |
4941 | 4979 |
4942 ArraySingleArgumentConstructorStub stub_holey(masm->isolate(), | 4980 ArraySingleArgumentConstructorStub stub_holey(masm->isolate(), |
4943 holey_initial, | 4981 holey_initial, |
4944 DISABLE_ALLOCATION_SITES); | 4982 DISABLE_ALLOCATION_SITES); |
4945 __ TailCallStub(&stub_holey); | 4983 __ TailCallStub(&stub_holey); |
4946 | 4984 |
4947 __ bind(&normal_sequence); | 4985 __ bind(&normal_sequence); |
4948 ArraySingleArgumentConstructorStub stub(masm->isolate(), | 4986 ArraySingleArgumentConstructorStub stub(masm->isolate(), |
4949 initial, | 4987 initial, |
4950 DISABLE_ALLOCATION_SITES); | 4988 DISABLE_ALLOCATION_SITES); |
4951 __ TailCallStub(&stub); | 4989 __ TailCallStub(&stub); |
4952 } else if (mode == DONT_OVERRIDE) { | 4990 } else if (mode == DONT_OVERRIDE) { |
4953 // We are going to create a holey array, but our kind is non-holey. | 4991 // We are going to create a holey array, but our kind is non-holey. |
4954 // Fix kind and retry (only if we have an allocation site in the slot). | 4992 // Fix kind and retry (only if we have an allocation site in the slot). |
4955 __ Addu(a3, a3, Operand(1)); | 4993 __ Daddu(a3, a3, Operand(1)); |
4956 | 4994 |
4957 if (FLAG_debug_code) { | 4995 if (FLAG_debug_code) { |
4958 __ lw(t1, FieldMemOperand(a2, 0)); | 4996 __ ld(a5, FieldMemOperand(a2, 0)); |
4959 __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex); | 4997 __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex); |
4960 __ Assert(eq, kExpectedAllocationSite, t1, Operand(at)); | 4998 __ Assert(eq, kExpectedAllocationSite, a5, Operand(at)); |
4961 } | 4999 } |
4962 | 5000 |
4963 // Save the resulting elements kind in type info. We can't just store a3 | 5001 // Save the resulting elements kind in type info. We can't just store a3 |
4964 // in the AllocationSite::transition_info field because elements kind is | 5002 // in the AllocationSite::transition_info field because elements kind is |
4965 // restricted to a portion of the field...upper bits need to be left alone. | 5003 // restricted to a portion of the field...upper bits need to be left alone. |
4966 STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0); | 5004 STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0); |
4967 __ lw(t0, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset)); | 5005 __ ld(a4, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset)); |
4968 __ Addu(t0, t0, Operand(Smi::FromInt(kFastElementsKindPackedToHoley))); | 5006 __ Daddu(a4, a4, Operand(Smi::FromInt(kFastElementsKindPackedToHoley))); |
4969 __ sw(t0, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset)); | 5007 __ sd(a4, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset)); |
4970 | 5008 |
4971 | 5009 |
4972 __ bind(&normal_sequence); | 5010 __ bind(&normal_sequence); |
4973 int last_index = GetSequenceIndexFromFastElementsKind( | 5011 int last_index = GetSequenceIndexFromFastElementsKind( |
4974 TERMINAL_FAST_ELEMENTS_KIND); | 5012 TERMINAL_FAST_ELEMENTS_KIND); |
4975 for (int i = 0; i <= last_index; ++i) { | 5013 for (int i = 0; i <= last_index; ++i) { |
4976 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i); | 5014 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i); |
4977 ArraySingleArgumentConstructorStub stub(masm->isolate(), kind); | 5015 ArraySingleArgumentConstructorStub stub(masm->isolate(), kind); |
4978 __ TailCallStub(&stub, eq, a3, Operand(kind)); | 5016 __ TailCallStub(&stub, eq, a3, Operand(kind)); |
4979 } | 5017 } |
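The kind arithmetic here leans on the numbering the ASSERTs spell out: each packed elements kind sits one below its holey twin, so testing the low bit answers "holey?" and Daddu(a3, a3, 1) converts packed to holey. Restated as a sketch (FAST_SMI_ELEMENTS == 0 is assumed, consistent with the asserted values):

#include <cassert>

enum ElementsKind {
  FAST_SMI_ELEMENTS = 0,    FAST_HOLEY_SMI_ELEMENTS = 1,
  FAST_ELEMENTS = 2,        FAST_HOLEY_ELEMENTS = 3,
  FAST_DOUBLE_ELEMENTS = 4, FAST_HOLEY_DOUBLE_ELEMENTS = 5,
};

bool IsHoley(ElementsKind kind) { return (kind & 1) != 0; }  // low-bit test

ElementsKind ToHoley(ElementsKind kind) {
  assert(!IsHoley(kind));  // the stub only adds 1 to a packed kind
  return static_cast<ElementsKind>(kind + 1);
}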
(...skipping 81 matching lines...)
5061 // -- a2 : AllocationSite or undefined | 5099 // -- a2 : AllocationSite or undefined |
5062 // -- sp[0] : return address | 5100 // -- sp[0] : return address |
5063 // -- sp[4] : last argument | 5101 // -- sp[4] : last argument |
5064 // ----------------------------------- | 5102 // ----------------------------------- |
5065 | 5103 |
5066 if (FLAG_debug_code) { | 5104 if (FLAG_debug_code) { |
5067 // The array construct code is only set for the global and natives | 5105 // The array construct code is only set for the global and natives |
5068 // builtin Array functions which always have maps. | 5106 // builtin Array functions which always have maps. |
5069 | 5107 |
5070 // Initial map for the builtin Array function should be a map. | 5108 // Initial map for the builtin Array function should be a map. |
5071 __ lw(t0, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset)); | 5109 __ ld(a4, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset)); |
5072 // Will both indicate a NULL and a Smi. | 5110 // Will both indicate a NULL and a Smi. |
5073 __ SmiTst(t0, at); | 5111 __ SmiTst(a4, at); |
5074 __ Assert(ne, kUnexpectedInitialMapForArrayFunction, | 5112 __ Assert(ne, kUnexpectedInitialMapForArrayFunction, |
5075 at, Operand(zero_reg)); | 5113 at, Operand(zero_reg)); |
5076 __ GetObjectType(t0, t0, t1); | 5114 __ GetObjectType(a4, a4, a5); |
5077 __ Assert(eq, kUnexpectedInitialMapForArrayFunction, | 5115 __ Assert(eq, kUnexpectedInitialMapForArrayFunction, |
5078 t1, Operand(MAP_TYPE)); | 5116 a5, Operand(MAP_TYPE)); |
5079 | 5117 |
5080 // We should either have undefined in a2 or a valid AllocationSite | 5118 // We should either have undefined in a2 or a valid AllocationSite |
5081 __ AssertUndefinedOrAllocationSite(a2, t0); | 5119 __ AssertUndefinedOrAllocationSite(a2, a4); |
5082 } | 5120 } |
5083 | 5121 |
5084 Label no_info; | 5122 Label no_info; |
5085 // Get the elements kind and case on that. | 5123 // Get the elements kind and case on that. |
5086 __ LoadRoot(at, Heap::kUndefinedValueRootIndex); | 5124 __ LoadRoot(at, Heap::kUndefinedValueRootIndex); |
5087 __ Branch(&no_info, eq, a2, Operand(at)); | 5125 __ Branch(&no_info, eq, a2, Operand(at)); |
5088 | 5126 |
5089 __ lw(a3, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset)); | 5127 __ ld(a3, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset)); |
5090 __ SmiUntag(a3); | 5128 __ SmiUntag(a3); |
5091 STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0); | 5129 STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0); |
5092 __ And(a3, a3, Operand(AllocationSite::ElementsKindBits::kMask)); | 5130 __ And(a3, a3, Operand(AllocationSite::ElementsKindBits::kMask)); |
5093 GenerateDispatchToArrayStub(masm, DONT_OVERRIDE); | 5131 GenerateDispatchToArrayStub(masm, DONT_OVERRIDE); |
5094 | 5132 |
5095 __ bind(&no_info); | 5133 __ bind(&no_info); |
5096 GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES); | 5134 GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES); |
5097 } | 5135 } |
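The dispatch above reads the elements kind out of the AllocationSite's transition_info smi: untag, then mask off ElementsKindBits (whose shift is asserted to be 0). A sketch with assumed widths (both the smi shift and the mask are placeholders for the port's real constants):

#include <cstdint>

constexpr int kSmiShift = 32;                 // assumed 64-bit smi encoding
constexpr uint64_t kElementsKindMask = 0x1f;  // placeholder for
                                              // ElementsKindBits::kMask

int DecodeElementsKind(int64_t transition_info_smi) {
  int64_t untagged = transition_info_smi >> kSmiShift;    // SmiUntag
  return static_cast<int>(untagged & kElementsKindMask);  // low bitfield
}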
5098 | 5136 |
5099 | 5137 |
5100 void InternalArrayConstructorStub::GenerateCase( | 5138 void InternalArrayConstructorStub::GenerateCase( |
5101 MacroAssembler* masm, ElementsKind kind) { | 5139 MacroAssembler* masm, ElementsKind kind) { |
5102 | 5140 |
5103 InternalArrayNoArgumentConstructorStub stub0(isolate(), kind); | 5141 InternalArrayNoArgumentConstructorStub stub0(isolate(), kind); |
5104 __ TailCallStub(&stub0, lo, a0, Operand(1)); | 5142 __ TailCallStub(&stub0, lo, a0, Operand(1)); |
5105 | 5143 |
5106 InternalArrayNArgumentsConstructorStub stubN(isolate(), kind); | 5144 InternalArrayNArgumentsConstructorStub stubN(isolate(), kind); |
5107 __ TailCallStub(&stubN, hi, a0, Operand(1)); | 5145 __ TailCallStub(&stubN, hi, a0, Operand(1)); |
5108 | 5146 |
5109 if (IsFastPackedElementsKind(kind)) { | 5147 if (IsFastPackedElementsKind(kind)) { |
5110 // We might need to create a holey array; | 5148 // We might need to create a holey array; |
5111 // look at the first argument. | 5149 // look at the first argument. |
5112 __ lw(at, MemOperand(sp, 0)); | 5150 __ ld(at, MemOperand(sp, 0)); |
5113 | 5151 |
5114 InternalArraySingleArgumentConstructorStub | 5152 InternalArraySingleArgumentConstructorStub |
5115 stub1_holey(isolate(), GetHoleyElementsKind(kind)); | 5153 stub1_holey(isolate(), GetHoleyElementsKind(kind)); |
5116 __ TailCallStub(&stub1_holey, ne, at, Operand(zero_reg)); | 5154 __ TailCallStub(&stub1_holey, ne, at, Operand(zero_reg)); |
5117 } | 5155 } |
5118 | 5156 |
5119 InternalArraySingleArgumentConstructorStub stub1(isolate(), kind); | 5157 InternalArraySingleArgumentConstructorStub stub1(isolate(), kind); |
5120 __ TailCallStub(&stub1); | 5158 __ TailCallStub(&stub1); |
5121 } | 5159 } |
5122 | 5160 |
5123 | 5161 |
5124 void InternalArrayConstructorStub::Generate(MacroAssembler* masm) { | 5162 void InternalArrayConstructorStub::Generate(MacroAssembler* masm) { |
5125 // ----------- S t a t e ------------- | 5163 // ----------- S t a t e ------------- |
5126 // -- a0 : argc | 5164 // -- a0 : argc |
5127 // -- a1 : constructor | 5165 // -- a1 : constructor |
5128 // -- sp[0] : return address | 5166 // -- sp[0] : return address |
5129 // -- sp[4] : last argument | 5167 // -- sp[8] : last argument |
5130 // ----------------------------------- | 5168 // ----------------------------------- |
5131 | 5169 |
5132 if (FLAG_debug_code) { | 5170 if (FLAG_debug_code) { |
5133 // The array construct code is only set for the global and natives | 5171 // The array construct code is only set for the global and natives |
5134 // builtin Array functions which always have maps. | 5172 // builtin Array functions which always have maps. |
5135 | 5173 |
5136 // Initial map for the builtin Array function should be a map. | 5174 // Initial map for the builtin Array function should be a map. |
5137 __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset)); | 5175 __ ld(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset)); |
5138 // Will both indicate a NULL and a Smi. | 5176 // Will both indicate a NULL and a Smi. |
5139 __ SmiTst(a3, at); | 5177 __ SmiTst(a3, at); |
5140 __ Assert(ne, kUnexpectedInitialMapForArrayFunction, | 5178 __ Assert(ne, kUnexpectedInitialMapForArrayFunction, |
5141 at, Operand(zero_reg)); | 5179 at, Operand(zero_reg)); |
5142 __ GetObjectType(a3, a3, t0); | 5180 __ GetObjectType(a3, a3, a4); |
5143 __ Assert(eq, kUnexpectedInitialMapForArrayFunction, | 5181 __ Assert(eq, kUnexpectedInitialMapForArrayFunction, |
5144 t0, Operand(MAP_TYPE)); | 5182 a4, Operand(MAP_TYPE)); |
5145 } | 5183 } |
5146 | 5184 |
5147 // Figure out the right elements kind. | 5185 // Figure out the right elements kind. |
5148 __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset)); | 5186 __ ld(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset)); |
5149 | 5187 |
5150 // Load the map's "bit field 2" into a3. We only need the first byte, | 5188 // Load the map's "bit field 2" into a3. We only need the first byte, |
5151 // but the following bit field extraction takes care of that anyway. | 5189 // but the following bit field extraction takes care of that anyway. |
5152 __ lbu(a3, FieldMemOperand(a3, Map::kBitField2Offset)); | 5190 __ lbu(a3, FieldMemOperand(a3, Map::kBitField2Offset)); |
5153 // Retrieve elements_kind from bit field 2. | 5191 // Retrieve elements_kind from bit field 2. |
5154 __ DecodeField<Map::ElementsKindBits>(a3); | 5192 __ DecodeField<Map::ElementsKindBits>(a3); |
5155 | 5193 |
5156 if (FLAG_debug_code) { | 5194 if (FLAG_debug_code) { |
5157 Label done; | 5195 Label done; |
5158 __ Branch(&done, eq, a3, Operand(FAST_ELEMENTS)); | 5196 __ Branch(&done, eq, a3, Operand(FAST_ELEMENTS)); |
5159 __ Assert( | 5197 __ Assert( |
5160 eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray, | 5198 eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray, |
5161 a3, Operand(FAST_HOLEY_ELEMENTS)); | 5199 a3, Operand(FAST_HOLEY_ELEMENTS)); |
5162 __ bind(&done); | 5200 __ bind(&done); |
5163 } | 5201 } |
5164 | 5202 |
5165 Label fast_elements_case; | 5203 Label fast_elements_case; |
5166 __ Branch(&fast_elements_case, eq, a3, Operand(FAST_ELEMENTS)); | 5204 __ Branch(&fast_elements_case, eq, a3, Operand(FAST_ELEMENTS)); |
5167 GenerateCase(masm, FAST_HOLEY_ELEMENTS); | 5205 GenerateCase(masm, FAST_HOLEY_ELEMENTS); |
5168 | 5206 |
5169 __ bind(&fast_elements_case); | 5207 __ bind(&fast_elements_case); |
5170 GenerateCase(masm, FAST_ELEMENTS); | 5208 GenerateCase(masm, FAST_ELEMENTS); |
5171 } | 5209 } |
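The dispatch above keys on the elements kind decoded from the map's bit field 2. A minimal sketch of that BitField-style decode, assuming illustrative shift/size constants (the stand-in names and positions below are not V8's actual Map::ElementsKindBits encoding):

    #include <cstdint>

    // Hypothetical stand-in for V8's BitField template.
    template <int kShift, int kSize>
    struct BitFieldSketch {
      static uint32_t decode(uint32_t value) {
        return (value >> kShift) & ((1u << kSize) - 1u);
      }
    };

    // Assumed field position; mirrors __ DecodeField<Map::ElementsKindBits>.
    typedef BitFieldSketch<3, 5> ElementsKindBitsSketch;

    uint32_t DecodeElementsKind(uint32_t bit_field2) {
      // Equivalent of the lbu + DecodeField sequence: mask out the kind bits.
      return ElementsKindBitsSketch::decode(bit_field2);
    }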
5172 | 5210 |
5173 | 5211 |
5174 void CallApiFunctionStub::Generate(MacroAssembler* masm) { | 5212 void CallApiFunctionStub::Generate(MacroAssembler* masm) { |
5175 // ----------- S t a t e ------------- | 5213 // ----------- S t a t e ------------- |
5176 // -- a0 : callee | 5214 // -- a0 : callee |
5177 // -- t0 : call_data | 5215 // -- a4 : call_data |
5178 // -- a2 : holder | 5216 // -- a2 : holder |
5179 // -- a1 : api_function_address | 5217 // -- a1 : api_function_address |
5180 // -- cp : context | 5218 // -- cp : context |
5181 // -- | 5219 // -- |
5182 // -- sp[0] : last argument | 5220 // -- sp[0] : last argument |
5183 // -- ... | 5221 // -- ... |
5184 // -- sp[(argc - 1) * 4] : first argument | 5222 // -- sp[(argc - 1) * 8] : first argument |
5185 // -- sp[argc * 4] : receiver | 5223 // -- sp[argc * 8] : receiver |
5186 // ----------------------------------- | 5224 // ----------------------------------- |
5187 | 5225 |
5188 Register callee = a0; | 5226 Register callee = a0; |
5189 Register call_data = t0; | 5227 Register call_data = a4; |
5190 Register holder = a2; | 5228 Register holder = a2; |
5191 Register api_function_address = a1; | 5229 Register api_function_address = a1; |
5192 Register context = cp; | 5230 Register context = cp; |
5193 | 5231 |
5194 int argc = ArgumentBits::decode(bit_field_); | 5232 int argc = ArgumentBits::decode(bit_field_); |
5195 bool is_store = IsStoreBits::decode(bit_field_); | 5233 bool is_store = IsStoreBits::decode(bit_field_); |
5196 bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_); | 5234 bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_); |
5197 | 5235 |
5198 typedef FunctionCallbackArguments FCA; | 5236 typedef FunctionCallbackArguments FCA; |
5199 | 5237 |
5200 STATIC_ASSERT(FCA::kContextSaveIndex == 6); | 5238 STATIC_ASSERT(FCA::kContextSaveIndex == 6); |
5201 STATIC_ASSERT(FCA::kCalleeIndex == 5); | 5239 STATIC_ASSERT(FCA::kCalleeIndex == 5); |
5202 STATIC_ASSERT(FCA::kDataIndex == 4); | 5240 STATIC_ASSERT(FCA::kDataIndex == 4); |
5203 STATIC_ASSERT(FCA::kReturnValueOffset == 3); | 5241 STATIC_ASSERT(FCA::kReturnValueOffset == 3); |
5204 STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2); | 5242 STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2); |
5205 STATIC_ASSERT(FCA::kIsolateIndex == 1); | 5243 STATIC_ASSERT(FCA::kIsolateIndex == 1); |
5206 STATIC_ASSERT(FCA::kHolderIndex == 0); | 5244 STATIC_ASSERT(FCA::kHolderIndex == 0); |
5207 STATIC_ASSERT(FCA::kArgsLength == 7); | 5245 STATIC_ASSERT(FCA::kArgsLength == 7); |
5208 | 5246 |
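The STATIC_ASSERTs above pin the order of the seven implicit slots the stub is about to push (holder at the lowest index, saved context at the highest). Restated as a plain enum for readability; the asserted indices are the source of truth, and the enum itself is only an editor's sketch:

    enum ImplicitArgsLayoutSketch {
      kSketchHolder = 0,
      kSketchIsolate = 1,
      kSketchReturnValueDefault = 2,
      kSketchReturnValue = 3,
      kSketchData = 4,
      kSketchCallee = 5,
      kSketchContextSave = 6,
      kSketchArgsLength = 7  // total slot count
    };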
5209 // Save context, callee and call data. | 5247 // Save context, callee and call data. |
5210 __ Push(context, callee, call_data); | 5248 __ Push(context, callee, call_data); |
5211 // Load context from callee. | 5249 // Load context from callee. |
5212 __ lw(context, FieldMemOperand(callee, JSFunction::kContextOffset)); | 5250 __ ld(context, FieldMemOperand(callee, JSFunction::kContextOffset)); |
5213 | 5251 |
5214 Register scratch = call_data; | 5252 Register scratch = call_data; |
5215 if (!call_data_undefined) { | 5253 if (!call_data_undefined) { |
5216 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); | 5254 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); |
5217 } | 5255 } |
5218 // Push return value and default return value. | 5256 // Push return value and default return value. |
5219 __ Push(scratch, scratch); | 5257 __ Push(scratch, scratch); |
5220 __ li(scratch, | 5258 __ li(scratch, |
5221 Operand(ExternalReference::isolate_address(isolate()))); | 5259 Operand(ExternalReference::isolate_address(isolate()))); |
5222 // Push isolate and holder. | 5260 // Push isolate and holder. |
5223 __ Push(scratch, holder); | 5261 __ Push(scratch, holder); |
5224 | 5262 |
5225 // Prepare arguments. | 5263 // Prepare arguments. |
5226 __ mov(scratch, sp); | 5264 __ mov(scratch, sp); |
5227 | 5265 |
5228 // Allocate the v8::Arguments structure in the arguments' space, since | 5266 // Allocate the v8::Arguments structure in the arguments' space, since |
5229 // it is not controlled by the GC. | 5267 // it is not controlled by the GC. |
5230 const int kApiStackSpace = 4; | 5268 const int kApiStackSpace = 4; |
5231 | 5269 |
5232 FrameScope frame_scope(masm, StackFrame::MANUAL); | 5270 FrameScope frame_scope(masm, StackFrame::MANUAL); |
5233 __ EnterExitFrame(false, kApiStackSpace); | 5271 __ EnterExitFrame(false, kApiStackSpace); |
5234 | 5272 |
5235 ASSERT(!api_function_address.is(a0) && !scratch.is(a0)); | 5273 ASSERT(!api_function_address.is(a0) && !scratch.is(a0)); |
5236 // a0 = FunctionCallbackInfo& | 5274 // a0 = FunctionCallbackInfo& |
5237 // The Arguments structure sits just after the return address. | 5275 // The Arguments structure sits just after the return address. |
5238 __ Addu(a0, sp, Operand(1 * kPointerSize)); | 5276 __ Daddu(a0, sp, Operand(1 * kPointerSize)); |
5239 // FunctionCallbackInfo::implicit_args_ | 5277 // FunctionCallbackInfo::implicit_args_ |
5240 __ sw(scratch, MemOperand(a0, 0 * kPointerSize)); | 5278 __ sd(scratch, MemOperand(a0, 0 * kPointerSize)); |
5241 // FunctionCallbackInfo::values_ | 5279 // FunctionCallbackInfo::values_ |
5242 __ Addu(at, scratch, Operand((FCA::kArgsLength - 1 + argc) * kPointerSize)); | 5280 __ Daddu(at, scratch, Operand((FCA::kArgsLength - 1 + argc) * kPointerSize)); |
5243 __ sw(at, MemOperand(a0, 1 * kPointerSize)); | 5281 __ sd(at, MemOperand(a0, 1 * kPointerSize)); |
5244 // FunctionCallbackInfo::length_ = argc | 5282 // FunctionCallbackInfo::length_ = argc |
5245 __ li(at, Operand(argc)); | 5283 __ li(at, Operand(argc)); |
5246 __ sw(at, MemOperand(a0, 2 * kPointerSize)); | 5284 __ sd(at, MemOperand(a0, 2 * kPointerSize)); |
5247 // FunctionCallbackInfo::is_construct_call = 0 | 5285 // FunctionCallbackInfo::is_construct_call = 0 |
5248 __ sw(zero_reg, MemOperand(a0, 3 * kPointerSize)); | 5286 __ sd(zero_reg, MemOperand(a0, 3 * kPointerSize)); |
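The four sd stores above fill in, word by word, a block equivalent to the sketch below (field names follow the inline comments; the actual FunctionCallbackInfo declaration in V8 may differ):

    struct FunctionCallbackInfoSketch {
      void** implicit_args;       // base of the 7 implicit slots pushed above
      void** values;              // address of the first JS argument
      int64_t length;             // argc
      int64_t is_construct_call;  // zeroed: this stub never constructs
    };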
5249 | 5287 |
5250 const int kStackUnwindSpace = argc + FCA::kArgsLength + 1; | 5288 const int kStackUnwindSpace = argc + FCA::kArgsLength + 1; |
5251 ExternalReference thunk_ref = | 5289 ExternalReference thunk_ref = |
5252 ExternalReference::invoke_function_callback(isolate()); | 5290 ExternalReference::invoke_function_callback(isolate()); |
5253 | 5291 |
5254 AllowExternalCallThatCantCauseGC scope(masm); | 5292 AllowExternalCallThatCantCauseGC scope(masm); |
5255 MemOperand context_restore_operand( | 5293 MemOperand context_restore_operand( |
5256 fp, (2 + FCA::kContextSaveIndex) * kPointerSize); | 5294 fp, (2 + FCA::kContextSaveIndex) * kPointerSize); |
5257 // For stores, the return value is the first JS argument. | 5295 // For stores, the return value is the first JS argument. |
5258 int return_value_offset = 0; | 5296 int return_value_offset = 0; |
(...skipping 16 matching lines...) |
5275 // ----------- S t a t e ------------- | 5313 // ----------- S t a t e ------------- |
5276 // -- sp[0] : name | 5314 // -- sp[0] : name |
5277 // -- sp[4 - kArgsLength*4] : PropertyCallbackArguments object | 5315 // -- sp[8 - kArgsLength*8] : PropertyCallbackArguments object |
5278 // -- ... | 5316 // -- ... |
5279 // -- a2 : api_function_address | 5317 // -- a2 : api_function_address |
5280 // ----------------------------------- | 5318 // ----------------------------------- |
5281 | 5319 |
5282 Register api_function_address = a2; | 5320 Register api_function_address = a2; |
5283 | 5321 |
5284 __ mov(a0, sp); // a0 = Handle<Name> | 5322 __ mov(a0, sp); // a0 = Handle<Name> |
5285 __ Addu(a1, a0, Operand(1 * kPointerSize)); // a1 = PCA | 5323 __ Daddu(a1, a0, Operand(1 * kPointerSize)); // a1 = PCA |
5286 | 5324 |
5287 const int kApiStackSpace = 1; | 5325 const int kApiStackSpace = 1; |
5288 FrameScope frame_scope(masm, StackFrame::MANUAL); | 5326 FrameScope frame_scope(masm, StackFrame::MANUAL); |
5289 __ EnterExitFrame(false, kApiStackSpace); | 5327 __ EnterExitFrame(false, kApiStackSpace); |
5290 | 5328 |
5291 // Create a PropertyAccessorInfo instance on the stack above the exit frame, | 5329 // Create a PropertyAccessorInfo instance on the stack above the exit frame, |
5292 // with a1 (internal::Object** args_) as the data. | 5330 // with a1 (internal::Object** args_) as the data. |
5293 __ sw(a1, MemOperand(sp, 1 * kPointerSize)); | 5331 __ sd(a1, MemOperand(sp, 1 * kPointerSize)); |
5294 __ Addu(a1, sp, Operand(1 * kPointerSize)); // a1 = AccessorInfo& | 5332 __ Daddu(a1, sp, Operand(1 * kPointerSize)); // a1 = AccessorInfo& |
5295 | 5333 |
5296 const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1; | 5334 const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1; |
5297 | 5335 |
5298 ExternalReference thunk_ref = | 5336 ExternalReference thunk_ref = |
5299 ExternalReference::invoke_accessor_getter_callback(isolate()); | 5337 ExternalReference::invoke_accessor_getter_callback(isolate()); |
5300 __ CallApiFunctionAndReturn(api_function_address, | 5338 __ CallApiFunctionAndReturn(api_function_address, |
5301 thunk_ref, | 5339 thunk_ref, |
5302 kStackUnwindSpace, | 5340 kStackUnwindSpace, |
5303 MemOperand(fp, 6 * kPointerSize), | 5341 MemOperand(fp, 6 * kPointerSize), |
5304 NULL); | 5342 NULL); |
5305 } | 5343 } |
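For context, the api_function_address invoked through invoke_accessor_getter_callback ultimately reaches an embedder callback with the public V8 accessor-getter signature. A minimal usage sketch (the getter body and names are illustrative, not part of this patch):

    #include <v8.h>

    static void NameGetterSketch(
        v8::Local<v8::String> property,
        const v8::PropertyCallbackInfo<v8::Value>& info) {
      // 'info' is backed by the PropertyCallbackArguments block the stub built.
      info.GetReturnValue().Set(
          v8::String::NewFromUtf8(info.GetIsolate(), "value"));
    }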
5306 | 5344 |
5307 | 5345 |
5308 #undef __ | 5346 #undef __ |
5309 | 5347 |
5310 } } // namespace v8::internal | 5348 } } // namespace v8::internal |
5311 | 5349 |
5312 #endif // V8_TARGET_ARCH_MIPS | 5350 #endif // V8_TARGET_ARCH_MIPS64 |