Chromium Code Reviews

Side by Side Diff: runtime/vm/intrinsifier_arm.cc

Issue 1421253004: Use the iOS ABI when running SIMARM on Mac or targeting iOS. (Closed) Base URL: git@github.com:dart-lang/sdk.git@master
Patch Set: Created 5 years, 1 month ago
1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
2 // for details. All rights reserved. Use of this source code is governed by a 2 // for details. All rights reserved. Use of this source code is governed by a
3 // BSD-style license that can be found in the LICENSE file. 3 // BSD-style license that can be found in the LICENSE file.
4 4
5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_ARM. 5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_ARM.
6 #if defined(TARGET_ARCH_ARM) 6 #if defined(TARGET_ARCH_ARM)
7 7
8 #include "vm/intrinsifier.h" 8 #include "vm/intrinsifier.h"
9 9
10 #include "vm/assembler.h" 10 #include "vm/assembler.h"
(...skipping 513 matching lines...)
524 524
525 // Arguments are Smi but the shift produced an overflow to Mint. 525 // Arguments are Smi but the shift produced an overflow to Mint.
526 __ CompareImmediate(R1, 0); 526 __ CompareImmediate(R1, 0);
527 __ b(&fall_through, LT); 527 __ b(&fall_through, LT);
528 __ SmiUntag(R1); 528 __ SmiUntag(R1);
529 529
530 // Pull off high bits that will be shifted off of R1 by making a mask 530 // Pull off high bits that will be shifted off of R1 by making a mask
531 // ((1 << R0) - 1), shifting it to the left, masking R1, then shifting back. 531 // ((1 << R0) - 1), shifting it to the left, masking R1, then shifting back.
532 // high bits = (((1 << R0) - 1) << (32 - R0)) & R1) >> (32 - R0) 532 // high bits = (((1 << R0) - 1) << (32 - R0)) & R1) >> (32 - R0)
533 // lo bits = R1 << R0 533 // lo bits = R1 << R0
534 __ LoadImmediate(R7, 1); 534 __ LoadImmediate(R11, 1);
535 __ mov(R7, Operand(R7, LSL, R0)); // R7 <- 1 << R0 535 __ mov(R11, Operand(R11, LSL, R0)); // R11 <- 1 << R0
536 __ sub(R7, R7, Operand(1)); // R7 <- R7 - 1 536 __ sub(R11, R11, Operand(1)); // R11 <- R11 - 1
537 __ rsb(R6, R0, Operand(32)); // R6 <- 32 - R0 537 __ rsb(R6, R0, Operand(32)); // R6 <- 32 - R0
538 __ mov(R7, Operand(R7, LSL, R6)); // R7 <- R7 << R6 538 __ mov(R11, Operand(R11, LSL, R6)); // R11 <- R11 << R6
539 __ and_(R7, R1, Operand(R7)); // R7 <- R7 & R1 539 __ and_(R11, R1, Operand(R11)); // R11 <- R11 & R1
540 __ mov(R7, Operand(R7, LSR, R6)); // R7 <- R7 >> R6 540 __ mov(R11, Operand(R11, LSR, R6)); // R11 <- R11 >> R6
541 // Now R7 has the bits that fall off of R1 on a left shift. 541 // Now R11 has the bits that fall off of R1 on a left shift.
542 __ mov(R1, Operand(R1, LSL, R0)); // R1 gets the low bits. 542 __ mov(R1, Operand(R1, LSL, R0)); // R1 gets the low bits.
543 543
544 const Class& mint_class = Class::Handle( 544 const Class& mint_class = Class::Handle(
545 Isolate::Current()->object_store()->mint_class()); 545 Isolate::Current()->object_store()->mint_class());
546 __ TryAllocate(mint_class, &fall_through, R0, R2); 546 __ TryAllocate(mint_class, &fall_through, R0, R2);
547 547
548 548
549 __ str(R1, FieldAddress(R0, Mint::value_offset())); 549 __ str(R1, FieldAddress(R0, Mint::value_offset()));
550 __ str(R7, FieldAddress(R0, Mint::value_offset() + kWordSize)); 550 __ str(R11, FieldAddress(R0, Mint::value_offset() + kWordSize));
551 __ Pop(R6); 551 __ Pop(R6);
552 __ Ret(); 552 __ Ret();
553 __ Bind(&fall_through); 553 __ Bind(&fall_through);
554 ASSERT(CODE_REG == R6); 554 ASSERT(CODE_REG == R6);
555 __ Pop(R6); 555 __ Pop(R6);
556 } 556 }
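For readers of the hunk above: the mask arithmetic splits a 32-bit left shift into the two halves of the 64-bit Mint value, exactly as the preceding comment describes. A minimal C++ sketch of the same decomposition (helper name and test values are illustrative, not from this CL; it assumes a shift count strictly between 0 and 32):

    #include <cassert>
    #include <cstdint>

    // Sketch only: split (uint64_t)value << s into 32-bit halves the way the
    // intrinsic does, with a mask of the top s bits. Assumes 0 < s < 32.
    static void ShiftToHiLo(uint32_t value, unsigned s, uint32_t* hi, uint32_t* lo) {
      uint32_t mask = ((1u << s) - 1u) << (32 - s);  // top s bits of value
      *hi = (value & mask) >> (32 - s);              // bits shifted out of the low word
      *lo = value << s;                              // bits that remain in the low word
    }

    // Example check: 0x80000001 << 3 == hi:lo == 0x00000004:0x00000008.
    static void Check() {
      uint32_t hi, lo;
      ShiftToHiLo(0x80000001u, 3, &hi, &lo);
      assert((((uint64_t)hi << 32) | lo) == ((uint64_t)0x80000001u << 3));
    }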
557 557
558 558
559 static void Get64SmiOrMint(Assembler* assembler, 559 static void Get64SmiOrMint(Assembler* assembler,
560 Register res_hi, 560 Register res_hi,
(...skipping 53 matching lines...)
614 break; 614 break;
615 default: 615 default:
616 UNREACHABLE(); 616 UNREACHABLE();
617 hi_true_cond = hi_false_cond = lo_false_cond = VS; 617 hi_true_cond = hi_false_cond = lo_false_cond = VS;
618 } 618 }
619 619
620 __ Bind(&try_mint_smi); 620 __ Bind(&try_mint_smi);
621 // Get left as 64 bit integer. 621 // Get left as 64 bit integer.
622 Get64SmiOrMint(assembler, R3, R2, R1, &fall_through); 622 Get64SmiOrMint(assembler, R3, R2, R1, &fall_through);
623 // Get right as 64 bit integer. 623 // Get right as 64 bit integer.
624 Get64SmiOrMint(assembler, R7, R8, R0, &fall_through); 624 Get64SmiOrMint(assembler, R11, R8, R0, &fall_through);
625 // R3: left high. 625 // R3: left high.
626 // R2: left low. 626 // R2: left low.
627 // R7: right high. 627 // R11: right high.
628 // R8: right low. 628 // R8: right low.
629 629
630 __ cmp(R3, Operand(R7)); // Compare left hi, right high. 630 __ cmp(R3, Operand(R11)); // Compare left hi, right high.
631 __ b(&is_false, hi_false_cond); 631 __ b(&is_false, hi_false_cond);
632 __ b(&is_true, hi_true_cond); 632 __ b(&is_true, hi_true_cond);
633 __ cmp(R2, Operand(R8)); // Compare left lo, right lo. 633 __ cmp(R2, Operand(R8)); // Compare left lo, right lo.
634 __ b(&is_false, lo_false_cond); 634 __ b(&is_false, lo_false_cond);
635 // Else is true. 635 // Else is true.
636 __ b(&is_true); 636 __ b(&is_true);
637 637
638 __ Bind(&fall_through); 638 __ Bind(&fall_through);
639 } 639 }
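The comparison sequence above is the usual word-pair pattern for 64-bit relational operators on a 32-bit target: decide on the signed high words first, and only compare the unsigned low words when the highs are equal. A hedged sketch for the less-than case (names are illustrative):

    #include <cstdint>

    // Sketch of a signed 64-bit "less than" built from 32-bit halves.
    static bool Less64(int32_t left_hi, uint32_t left_lo,
                       int32_t right_hi, uint32_t right_lo) {
      if (left_hi != right_hi) return left_hi < right_hi;  // signed compare of highs
      return left_lo < right_lo;                           // unsigned compare of lows
    }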
640 640
(...skipping 128 matching lines...)
769 769
770 // R0 = x_used, R1 = x_digits, x_used > 0, x_used is Smi. 770 // R0 = x_used, R1 = x_digits, x_used > 0, x_used is Smi.
771 __ ldrd(R0, R1, SP, 2 * kWordSize); 771 __ ldrd(R0, R1, SP, 2 * kWordSize);
772 // R2 = r_digits, R3 = n, n is Smi, n % _DIGIT_BITS != 0. 772 // R2 = r_digits, R3 = n, n is Smi, n % _DIGIT_BITS != 0.
773 __ ldrd(R2, R3, SP, 0 * kWordSize); 773 __ ldrd(R2, R3, SP, 0 * kWordSize);
774 __ SmiUntag(R3); 774 __ SmiUntag(R3);
775 // R4 = n ~/ _DIGIT_BITS 775 // R4 = n ~/ _DIGIT_BITS
776 __ Asr(R4, R3, Operand(5)); 776 __ Asr(R4, R3, Operand(5));
777 // R8 = &x_digits[0] 777 // R8 = &x_digits[0]
778 __ add(R8, R1, Operand(TypedData::data_offset() - kHeapObjectTag)); 778 __ add(R8, R1, Operand(TypedData::data_offset() - kHeapObjectTag));
779 // R7 = &x_digits[x_used] 779 // R11 = &x_digits[x_used]
780 __ add(R7, R8, Operand(R0, LSL, 1)); 780 __ add(R11, R8, Operand(R0, LSL, 1));
781 // R6 = &r_digits[1] 781 // R6 = &r_digits[1]
782 __ add(R6, R2, Operand(TypedData::data_offset() - kHeapObjectTag + 782 __ add(R6, R2, Operand(TypedData::data_offset() - kHeapObjectTag +
783 Bigint::kBytesPerDigit)); 783 Bigint::kBytesPerDigit));
784 // R6 = &r_digits[x_used + n ~/ _DIGIT_BITS + 1] 784 // R6 = &r_digits[x_used + n ~/ _DIGIT_BITS + 1]
785 __ add(R4, R4, Operand(R0, ASR, 1)); 785 __ add(R4, R4, Operand(R0, ASR, 1));
786 __ add(R6, R6, Operand(R4, LSL, 2)); 786 __ add(R6, R6, Operand(R4, LSL, 2));
787 // R1 = n % _DIGIT_BITS 787 // R1 = n % _DIGIT_BITS
788 __ and_(R1, R3, Operand(31)); 788 __ and_(R1, R3, Operand(31));
789 // R0 = 32 - R1 789 // R0 = 32 - R1
790 __ rsb(R0, R1, Operand(32)); 790 __ rsb(R0, R1, Operand(32));
791 __ mov(R9, Operand(0)); 791 __ mov(R9, Operand(0));
792 Label loop; 792 Label loop;
793 __ Bind(&loop); 793 __ Bind(&loop);
794 __ ldr(R4, Address(R7, -Bigint::kBytesPerDigit, Address::PreIndex)); 794 __ ldr(R4, Address(R11, -Bigint::kBytesPerDigit, Address::PreIndex));
795 __ orr(R9, R9, Operand(R4, LSR, R0)); 795 __ orr(R9, R9, Operand(R4, LSR, R0));
796 __ str(R9, Address(R6, -Bigint::kBytesPerDigit, Address::PreIndex)); 796 __ str(R9, Address(R6, -Bigint::kBytesPerDigit, Address::PreIndex));
797 __ mov(R9, Operand(R4, LSL, R1)); 797 __ mov(R9, Operand(R4, LSL, R1));
798 __ teq(R7, Operand(R8)); 798 __ teq(R11, Operand(R8));
799 __ b(&loop, NE); 799 __ b(&loop, NE);
800 __ str(R9, Address(R6, -Bigint::kBytesPerDigit, Address::PreIndex)); 800 __ str(R9, Address(R6, -Bigint::kBytesPerDigit, Address::PreIndex));
801 // Returning Object::null() is not required, since this method is private. 801 // Returning Object::null() is not required, since this method is private.
802 __ Ret(); 802 __ Ret();
803 } 803 }
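Bigint_lsh above shifts a little-endian array of 32-bit digits left by n bits, walking from the most significant digit down so each store combines bits from two source digits. A plain C++ sketch of the same loop (function name is illustrative; it assumes r_digits was freshly allocated and zero-filled, and that n % 32 != 0 as the intrinsic's precondition states):

    #include <cstdint>

    // Sketch only: r = x << n over uint32_t digits, n % 32 != 0.
    static void BigintLsh(const uint32_t* x, int x_used, int n, uint32_t* r) {
      const int digit_shift = n / 32;
      const int bit_shift = n % 32;           // in (0, 32)
      r[x_used + digit_shift] = x[x_used - 1] >> (32 - bit_shift);
      for (int i = x_used - 1; i > 0; --i) {
        r[i + digit_shift] = (x[i] << bit_shift) | (x[i - 1] >> (32 - bit_shift));
      }
      r[digit_shift] = x[0] << bit_shift;     // digits below digit_shift stay zero
    }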
804 804
805 805
806 void Intrinsifier::Bigint_rsh(Assembler* assembler) { 806 void Intrinsifier::Bigint_rsh(Assembler* assembler) {
807 // static void _lsh(Uint32List x_digits, int x_used, int n, 807 // static void _lsh(Uint32List x_digits, int x_used, int n,
808 // Uint32List r_digits) 808 // Uint32List r_digits)
809 809
810 // R0 = x_used, R1 = x_digits, x_used > 0, x_used is Smi. 810 // R0 = x_used, R1 = x_digits, x_used > 0, x_used is Smi.
811 __ ldrd(R0, R1, SP, 2 * kWordSize); 811 __ ldrd(R0, R1, SP, 2 * kWordSize);
812 // R2 = r_digits, R3 = n, n is Smi, n % _DIGIT_BITS != 0. 812 // R2 = r_digits, R3 = n, n is Smi, n % _DIGIT_BITS != 0.
813 __ ldrd(R2, R3, SP, 0 * kWordSize); 813 __ ldrd(R2, R3, SP, 0 * kWordSize);
814 __ SmiUntag(R3); 814 __ SmiUntag(R3);
815 // R4 = n ~/ _DIGIT_BITS 815 // R4 = n ~/ _DIGIT_BITS
816 __ Asr(R4, R3, Operand(5)); 816 __ Asr(R4, R3, Operand(5));
817 // R6 = &r_digits[0] 817 // R6 = &r_digits[0]
818 __ add(R6, R2, Operand(TypedData::data_offset() - kHeapObjectTag)); 818 __ add(R6, R2, Operand(TypedData::data_offset() - kHeapObjectTag));
819 // R7 = &x_digits[n ~/ _DIGIT_BITS] 819 // R11 = &x_digits[n ~/ _DIGIT_BITS]
820 __ add(R7, R1, Operand(TypedData::data_offset() - kHeapObjectTag)); 820 __ add(R11, R1, Operand(TypedData::data_offset() - kHeapObjectTag));
821 __ add(R7, R7, Operand(R4, LSL, 2)); 821 __ add(R11, R11, Operand(R4, LSL, 2));
822 // R8 = &r_digits[x_used - n ~/ _DIGIT_BITS - 1] 822 // R8 = &r_digits[x_used - n ~/ _DIGIT_BITS - 1]
823 __ add(R4, R4, Operand(1)); 823 __ add(R4, R4, Operand(1));
824 __ rsb(R4, R4, Operand(R0, ASR, 1)); 824 __ rsb(R4, R4, Operand(R0, ASR, 1));
825 __ add(R8, R6, Operand(R4, LSL, 2)); 825 __ add(R8, R6, Operand(R4, LSL, 2));
826 // R1 = n % _DIGIT_BITS 826 // R1 = n % _DIGIT_BITS
827 __ and_(R1, R3, Operand(31)); 827 __ and_(R1, R3, Operand(31));
828 // R0 = 32 - R1 828 // R0 = 32 - R1
829 __ rsb(R0, R1, Operand(32)); 829 __ rsb(R0, R1, Operand(32));
830 // R9 = x_digits[n ~/ _DIGIT_BITS] >> (n % _DIGIT_BITS) 830 // R9 = x_digits[n ~/ _DIGIT_BITS] >> (n % _DIGIT_BITS)
831 __ ldr(R9, Address(R7, Bigint::kBytesPerDigit, Address::PostIndex)); 831 __ ldr(R9, Address(R11, Bigint::kBytesPerDigit, Address::PostIndex));
832 __ mov(R9, Operand(R9, LSR, R1)); 832 __ mov(R9, Operand(R9, LSR, R1));
833 Label loop_entry; 833 Label loop_entry;
834 __ b(&loop_entry); 834 __ b(&loop_entry);
835 Label loop; 835 Label loop;
836 __ Bind(&loop); 836 __ Bind(&loop);
837 __ ldr(R4, Address(R7, Bigint::kBytesPerDigit, Address::PostIndex)); 837 __ ldr(R4, Address(R11, Bigint::kBytesPerDigit, Address::PostIndex));
838 __ orr(R9, R9, Operand(R4, LSL, R0)); 838 __ orr(R9, R9, Operand(R4, LSL, R0));
839 __ str(R9, Address(R6, Bigint::kBytesPerDigit, Address::PostIndex)); 839 __ str(R9, Address(R6, Bigint::kBytesPerDigit, Address::PostIndex));
840 __ mov(R9, Operand(R4, LSR, R1)); 840 __ mov(R9, Operand(R4, LSR, R1));
841 __ Bind(&loop_entry); 841 __ Bind(&loop_entry);
842 __ teq(R6, Operand(R8)); 842 __ teq(R6, Operand(R8));
843 __ b(&loop, NE); 843 __ b(&loop, NE);
844 __ str(R9, Address(R6, 0)); 844 __ str(R9, Address(R6, 0));
845 // Returning Object::null() is not required, since this method is private. 845 // Returning Object::null() is not required, since this method is private.
846 __ Ret(); 846 __ Ret();
847 } 847 }
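Bigint_rsh mirrors the left shift: it walks upward from x_digits[n ~/ 32] and each result digit combines the tails of two adjacent source digits. A matching sketch under the same assumptions (illustrative name, uint32_t digits, n % 32 != 0):

    #include <cstdint>

    // Sketch only: r = x >> n over uint32_t digits, n % 32 != 0.
    static void BigintRsh(const uint32_t* x, int x_used, int n, uint32_t* r) {
      const int digit_shift = n / 32;
      const int bit_shift = n % 32;           // in (0, 32)
      const int r_used = x_used - digit_shift;
      for (int i = 0; i < r_used - 1; ++i) {
        r[i] = (x[i + digit_shift] >> bit_shift) |
               (x[i + digit_shift + 1] << (32 - bit_shift));
      }
      r[r_used - 1] = x[x_used - 1] >> bit_shift;
    }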
(...skipping 12 matching lines...)
860 // R2 = a_used, R3 = a_digits 860 // R2 = a_used, R3 = a_digits
861 __ ldrd(R2, R3, SP, 1 * kWordSize); 861 __ ldrd(R2, R3, SP, 1 * kWordSize);
862 // R3 = &a_digits[0] 862 // R3 = &a_digits[0]
863 __ add(R3, R3, Operand(TypedData::data_offset() - kHeapObjectTag)); 863 __ add(R3, R3, Operand(TypedData::data_offset() - kHeapObjectTag));
864 864
865 // R8 = r_digits 865 // R8 = r_digits
866 __ ldr(R8, Address(SP, 0 * kWordSize)); 866 __ ldr(R8, Address(SP, 0 * kWordSize));
867 // R8 = &r_digits[0] 867 // R8 = &r_digits[0]
868 __ add(R8, R8, Operand(TypedData::data_offset() - kHeapObjectTag)); 868 __ add(R8, R8, Operand(TypedData::data_offset() - kHeapObjectTag));
869 869
870 // R7 = &digits[a_used >> 1], a_used is Smi. 870 // R11 = &digits[a_used >> 1], a_used is Smi.
871 __ add(R7, R1, Operand(R2, LSL, 1)); 871 __ add(R11, R1, Operand(R2, LSL, 1));
872 872
873 // R6 = &digits[used >> 1], used is Smi. 873 // R6 = &digits[used >> 1], used is Smi.
874 __ add(R6, R1, Operand(R0, LSL, 1)); 874 __ add(R6, R1, Operand(R0, LSL, 1));
875 875
876 __ adds(R4, R4, Operand(0)); // carry flag = 0 876 __ adds(R4, R4, Operand(0)); // carry flag = 0
877 Label add_loop; 877 Label add_loop;
878 __ Bind(&add_loop); 878 __ Bind(&add_loop);
879 // Loop a_used times, a_used > 0. 879 // Loop a_used times, a_used > 0.
880 __ ldr(R4, Address(R1, Bigint::kBytesPerDigit, Address::PostIndex)); 880 __ ldr(R4, Address(R1, Bigint::kBytesPerDigit, Address::PostIndex));
881 __ ldr(R9, Address(R3, Bigint::kBytesPerDigit, Address::PostIndex)); 881 __ ldr(R9, Address(R3, Bigint::kBytesPerDigit, Address::PostIndex));
882 __ adcs(R4, R4, Operand(R9)); 882 __ adcs(R4, R4, Operand(R9));
883 __ teq(R1, Operand(R7)); // Does not affect carry flag. 883 __ teq(R1, Operand(R11)); // Does not affect carry flag.
884 __ str(R4, Address(R8, Bigint::kBytesPerDigit, Address::PostIndex)); 884 __ str(R4, Address(R8, Bigint::kBytesPerDigit, Address::PostIndex));
885 __ b(&add_loop, NE); 885 __ b(&add_loop, NE);
886 886
887 Label last_carry; 887 Label last_carry;
888 __ teq(R1, Operand(R6)); // Does not affect carry flag. 888 __ teq(R1, Operand(R6)); // Does not affect carry flag.
889 __ b(&last_carry, EQ); // If used - a_used == 0. 889 __ b(&last_carry, EQ); // If used - a_used == 0.
890 890
891 Label carry_loop; 891 Label carry_loop;
892 __ Bind(&carry_loop); 892 __ Bind(&carry_loop);
893 // Loop used - a_used times, used - a_used > 0. 893 // Loop used - a_used times, used - a_used > 0.
(...skipping 26 matching lines...)
920 // R2 = a_used, R3 = a_digits 920 // R2 = a_used, R3 = a_digits
921 __ ldrd(R2, R3, SP, 1 * kWordSize); 921 __ ldrd(R2, R3, SP, 1 * kWordSize);
922 // R3 = &a_digits[0] 922 // R3 = &a_digits[0]
923 __ add(R3, R3, Operand(TypedData::data_offset() - kHeapObjectTag)); 923 __ add(R3, R3, Operand(TypedData::data_offset() - kHeapObjectTag));
924 924
925 // R8 = r_digits 925 // R8 = r_digits
926 __ ldr(R8, Address(SP, 0 * kWordSize)); 926 __ ldr(R8, Address(SP, 0 * kWordSize));
927 // R8 = &r_digits[0] 927 // R8 = &r_digits[0]
928 __ add(R8, R8, Operand(TypedData::data_offset() - kHeapObjectTag)); 928 __ add(R8, R8, Operand(TypedData::data_offset() - kHeapObjectTag));
929 929
930 // R7 = &digits[a_used >> 1], a_used is Smi. 930 // R11 = &digits[a_used >> 1], a_used is Smi.
931 __ add(R7, R1, Operand(R2, LSL, 1)); 931 __ add(R11, R1, Operand(R2, LSL, 1));
932 932
933 // R6 = &digits[used >> 1], used is Smi. 933 // R6 = &digits[used >> 1], used is Smi.
934 __ add(R6, R1, Operand(R0, LSL, 1)); 934 __ add(R6, R1, Operand(R0, LSL, 1));
935 935
936 __ subs(R4, R4, Operand(0)); // carry flag = 1 936 __ subs(R4, R4, Operand(0)); // carry flag = 1
937 Label sub_loop; 937 Label sub_loop;
938 __ Bind(&sub_loop); 938 __ Bind(&sub_loop);
939 // Loop a_used times, a_used > 0. 939 // Loop a_used times, a_used > 0.
940 __ ldr(R4, Address(R1, Bigint::kBytesPerDigit, Address::PostIndex)); 940 __ ldr(R4, Address(R1, Bigint::kBytesPerDigit, Address::PostIndex));
941 __ ldr(R9, Address(R3, Bigint::kBytesPerDigit, Address::PostIndex)); 941 __ ldr(R9, Address(R3, Bigint::kBytesPerDigit, Address::PostIndex));
942 __ sbcs(R4, R4, Operand(R9)); 942 __ sbcs(R4, R4, Operand(R9));
943 __ teq(R1, Operand(R7)); // Does not affect carry flag. 943 __ teq(R1, Operand(R11)); // Does not affect carry flag.
944 __ str(R4, Address(R8, Bigint::kBytesPerDigit, Address::PostIndex)); 944 __ str(R4, Address(R8, Bigint::kBytesPerDigit, Address::PostIndex));
945 __ b(&sub_loop, NE); 945 __ b(&sub_loop, NE);
946 946
947 Label done; 947 Label done;
948 __ teq(R1, Operand(R6)); // Does not affect carry flag. 948 __ teq(R1, Operand(R6)); // Does not affect carry flag.
949 __ b(&done, EQ); // If used - a_used == 0. 949 __ b(&done, EQ); // If used - a_used == 0.
950 950
951 Label carry_loop; 951 Label carry_loop;
952 __ Bind(&carry_loop); 952 __ Bind(&carry_loop);
953 // Loop used - a_used times, used - a_used > 0. 953 // Loop used - a_used times, used - a_used > 0.
(...skipping 142 matching lines...)
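The absAdd and absSub hunks above are the textbook multi-precision add/subtract: one adcs/sbcs loop over the shorter operand, then a loop that only propagates the carry (or borrow) through the remaining digits. A sketch of the add case with an explicit 64-bit carry (illustrative name; the subtract case just swaps the sum for a difference and a borrow):

    #include <cstdint>

    // Sketch only: |r| = |x| + |a| over uint32_t digits, with a_used <= used
    // and r providing used + 1 digits for the final carry.
    static void BigintAbsAdd(const uint32_t* x, int used,
                             const uint32_t* a, int a_used, uint32_t* r) {
      uint64_t carry = 0;
      int i = 0;
      for (; i < a_used; ++i) {               // the adcs loop
        uint64_t t = (uint64_t)x[i] + a[i] + carry;
        r[i] = (uint32_t)t;
        carry = t >> 32;
      }
      for (; i < used; ++i) {                 // carry propagation only
        uint64_t t = (uint64_t)x[i] + carry;
        r[i] = (uint32_t)t;
        carry = t >> 32;
      }
      r[used] = (uint32_t)carry;              // the "last carry" digit
    }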
1096 __ ldrd(R2, R3, SP, 2 * kWordSize); // R2 = i as Smi, R3 = x_digits 1096 __ ldrd(R2, R3, SP, 2 * kWordSize); // R2 = i as Smi, R3 = x_digits
1097 __ add(R3, R3, Operand(R2, LSL, 1)); 1097 __ add(R3, R3, Operand(R2, LSL, 1));
1098 __ add(R4, R3, Operand(TypedData::data_offset() - kHeapObjectTag)); 1098 __ add(R4, R3, Operand(TypedData::data_offset() - kHeapObjectTag));
1099 1099
1100 // R3 = x = *xip++, return if x == 0 1100 // R3 = x = *xip++, return if x == 0
1101 Label x_zero; 1101 Label x_zero;
1102 __ ldr(R3, Address(R4, Bigint::kBytesPerDigit, Address::PostIndex)); 1102 __ ldr(R3, Address(R4, Bigint::kBytesPerDigit, Address::PostIndex));
1103 __ tst(R3, Operand(R3)); 1103 __ tst(R3, Operand(R3));
1104 __ b(&x_zero, EQ); 1104 __ b(&x_zero, EQ);
1105 1105
1106 // R7 = ajp = &a_digits[i] 1106 // R11 = ajp = &a_digits[i]
1107 __ ldr(R1, Address(SP, 1 * kWordSize)); // a_digits 1107 __ ldr(R1, Address(SP, 1 * kWordSize)); // a_digits
1108 __ add(R1, R1, Operand(R2, LSL, 2)); // j == 2*i, i is Smi. 1108 __ add(R1, R1, Operand(R2, LSL, 2)); // j == 2*i, i is Smi.
1109 __ add(R7, R1, Operand(TypedData::data_offset() - kHeapObjectTag)); 1109 __ add(R11, R1, Operand(TypedData::data_offset() - kHeapObjectTag));
1110 1110
1111 // R8:R0 = t = x*x + *ajp 1111 // R8:R0 = t = x*x + *ajp
1112 __ ldr(R0, Address(R7, 0)); 1112 __ ldr(R0, Address(R11, 0));
1113 __ mov(R8, Operand(0)); 1113 __ mov(R8, Operand(0));
1114 __ umaal(R0, R8, R3, R3); // R8:R0 = R3*R3 + R8 + R0. 1114 __ umaal(R0, R8, R3, R3); // R8:R0 = R3*R3 + R8 + R0.
1115 1115
1116 // *ajp++ = low32(t) = R0 1116 // *ajp++ = low32(t) = R0
1117 __ str(R0, Address(R7, Bigint::kBytesPerDigit, Address::PostIndex)); 1117 __ str(R0, Address(R11, Bigint::kBytesPerDigit, Address::PostIndex));
1118 1118
1119 // R8 = low32(c) = high32(t) 1119 // R8 = low32(c) = high32(t)
1120 // R9 = high32(c) = 0 1120 // R9 = high32(c) = 0
1121 __ mov(R9, Operand(0)); 1121 __ mov(R9, Operand(0));
1122 1122
1123 // int n = used - i - 1; while (--n >= 0) ... 1123 // int n = used - i - 1; while (--n >= 0) ...
1124 __ ldr(R0, Address(SP, 0 * kWordSize)); // used is Smi 1124 __ ldr(R0, Address(SP, 0 * kWordSize)); // used is Smi
1125 __ sub(R6, R0, Operand(R2)); 1125 __ sub(R6, R0, Operand(R2));
1126 __ mov(R0, Operand(2)); // n = used - i - 2; if (n >= 0) ... while (--n >= 0) 1126 __ mov(R0, Operand(2)); // n = used - i - 2; if (n >= 0) ... while (--n >= 0)
1127 __ rsbs(R6, R0, Operand(R6, ASR, kSmiTagSize)); 1127 __ rsbs(R6, R0, Operand(R6, ASR, kSmiTagSize));
1128 1128
1129 Label loop, done; 1129 Label loop, done;
1130 __ b(&done, MI); 1130 __ b(&done, MI);
1131 1131
1132 __ Bind(&loop); 1132 __ Bind(&loop);
1133 // x: R3 1133 // x: R3
1134 // xip: R4 1134 // xip: R4
1135 // ajp: R7 1135 // ajp: R11
1136 // c: R9:R8 1136 // c: R9:R8
1137 // t: R2:R1:R0 (not live at loop entry) 1137 // t: R2:R1:R0 (not live at loop entry)
1138 // n: R6 1138 // n: R6
1139 1139
1140 // uint32_t xi = *xip++ 1140 // uint32_t xi = *xip++
1141 __ ldr(R2, Address(R4, Bigint::kBytesPerDigit, Address::PostIndex)); 1141 __ ldr(R2, Address(R4, Bigint::kBytesPerDigit, Address::PostIndex));
1142 1142
1143 // uint96_t t = R9:R8:R0 = 2*x*xi + aj + c 1143 // uint96_t t = R9:R8:R0 = 2*x*xi + aj + c
1144 __ umull(R0, R1, R2, R3); // R1:R0 = R2*R3. 1144 __ umull(R0, R1, R2, R3); // R1:R0 = R2*R3.
1145 __ adds(R0, R0, Operand(R0)); 1145 __ adds(R0, R0, Operand(R0));
1146 __ adcs(R1, R1, Operand(R1)); 1146 __ adcs(R1, R1, Operand(R1));
1147 __ mov(R2, Operand(0)); 1147 __ mov(R2, Operand(0));
1148 __ adc(R2, R2, Operand(0)); // R2:R1:R0 = 2*x*xi. 1148 __ adc(R2, R2, Operand(0)); // R2:R1:R0 = 2*x*xi.
1149 __ adds(R0, R0, Operand(R8)); 1149 __ adds(R0, R0, Operand(R8));
1150 __ adcs(R1, R1, Operand(R9)); 1150 __ adcs(R1, R1, Operand(R9));
1151 __ adc(R2, R2, Operand(0)); // R2:R1:R0 = 2*x*xi + c. 1151 __ adc(R2, R2, Operand(0)); // R2:R1:R0 = 2*x*xi + c.
1152 __ ldr(R8, Address(R7, 0)); // R8 = aj = *ajp. 1152 __ ldr(R8, Address(R11, 0)); // R8 = aj = *ajp.
1153 __ adds(R0, R0, Operand(R8)); 1153 __ adds(R0, R0, Operand(R8));
1154 __ adcs(R8, R1, Operand(0)); 1154 __ adcs(R8, R1, Operand(0));
1155 __ adc(R9, R2, Operand(0)); // R9:R8:R0 = 2*x*xi + c + aj. 1155 __ adc(R9, R2, Operand(0)); // R9:R8:R0 = 2*x*xi + c + aj.
1156 1156
1157 // *ajp++ = low32(t) = R0 1157 // *ajp++ = low32(t) = R0
1158 __ str(R0, Address(R7, Bigint::kBytesPerDigit, Address::PostIndex)); 1158 __ str(R0, Address(R11, Bigint::kBytesPerDigit, Address::PostIndex));
1159 1159
1160 // while (--n >= 0) 1160 // while (--n >= 0)
1161 __ subs(R6, R6, Operand(1)); // --n 1161 __ subs(R6, R6, Operand(1)); // --n
1162 __ b(&loop, PL); 1162 __ b(&loop, PL);
1163 1163
1164 __ Bind(&done); 1164 __ Bind(&done);
1165 // uint32_t aj = *ajp 1165 // uint32_t aj = *ajp
1166 __ ldr(R0, Address(R7, 0)); 1166 __ ldr(R0, Address(R11, 0));
1167 1167
1168 // uint64_t t = aj + c 1168 // uint64_t t = aj + c
1169 __ adds(R8, R8, Operand(R0)); 1169 __ adds(R8, R8, Operand(R0));
1170 __ adc(R9, R9, Operand(0)); 1170 __ adc(R9, R9, Operand(0));
1171 1171
1172 // *ajp = low32(t) = R8 1172 // *ajp = low32(t) = R8
1173 // *(ajp + 1) = high32(t) = R9 1173 // *(ajp + 1) = high32(t) = R9
1174 __ strd(R8, R9, R7, 0); 1174 __ strd(R8, R9, R11, 0);
1175 1175
1176 __ Bind(&x_zero); 1176 __ Bind(&x_zero);
1177 __ mov(R0, Operand(Smi::RawValue(1))); // One digit processed. 1177 __ mov(R0, Operand(Smi::RawValue(1))); // One digit processed.
1178 __ Ret(); 1178 __ Ret();
1179 } 1179 }
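Bigint_sqrAdd above is the inner step of schoolbook squaring: after adding x*x at position 2*i, every later digit receives 2*x*x[k] plus the previous carry, which needs the 96-bit accumulator kept in R9:R8:R0. A compact sketch, assuming a compiler with unsigned __int128 (GCC/Clang) and an accumulator array with room for the two trailing carry digits (function name is illustrative):

    #include <cstdint>

    // Sketch only: one _sqrAdd step over uint32_t digits. Returns the number
    // of x digits consumed (always 1 here), as the intrinsic does.
    static int BigintSqrAdd(const uint32_t* x_digits, int i,
                            uint32_t* a_digits, int used) {
      const uint32_t x = x_digits[i];
      if (x == 0) return 1;                          // matches the x == 0 early out
      int j = 2 * i;
      unsigned __int128 t = (unsigned __int128)x * x + a_digits[j];
      a_digits[j++] = (uint32_t)t;
      unsigned __int128 c = t >> 32;                 // running carry (fits in 64 bits)
      for (int k = i + 1; k < used; ++k) {
        t = (unsigned __int128)2 * x * x_digits[k] + a_digits[j] + c;
        a_digits[j++] = (uint32_t)t;
        c = t >> 32;
      }
      t = a_digits[j] + c;                           // final aj + c
      a_digits[j] = (uint32_t)t;
      a_digits[j + 1] = (uint32_t)(t >> 32);
      return 1;
    }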
1180 1180
1181 1181
1182 void Intrinsifier::Bigint_estQuotientDigit(Assembler* assembler) { 1182 void Intrinsifier::Bigint_estQuotientDigit(Assembler* assembler) {
1183 // No unsigned 64-bit / 32-bit divide instruction. 1183 // No unsigned 64-bit / 32-bit divide instruction.
1184 } 1184 }
(...skipping 485 matching lines...)
1670 // R8: String data. 1670 // R8: String data.
1671 // R0: Hash code, untagged integer. 1671 // R0: Hash code, untagged integer.
1672 1672
1673 Label loop; 1673 Label loop;
1674 // Add to hash code: (hash_ is uint32) 1674 // Add to hash code: (hash_ is uint32)
1675 // hash_ += ch; 1675 // hash_ += ch;
1676 // hash_ += hash_ << 10; 1676 // hash_ += hash_ << 10;
1677 // hash_ ^= hash_ >> 6; 1677 // hash_ ^= hash_ >> 6;
1678 // Get one characters (ch). 1678 // Get one characters (ch).
1679 __ Bind(&loop); 1679 __ Bind(&loop);
1680 __ ldrb(R7, Address(R8, 0)); 1680 __ ldrb(R11, Address(R8, 0));
1681 // R7: ch. 1681 // R11: ch.
1682 __ add(R3, R3, Operand(1)); 1682 __ add(R3, R3, Operand(1));
1683 __ add(R8, R8, Operand(1)); 1683 __ add(R8, R8, Operand(1));
1684 __ add(R0, R0, Operand(R7)); 1684 __ add(R0, R0, Operand(R11));
1685 __ add(R0, R0, Operand(R0, LSL, 10)); 1685 __ add(R0, R0, Operand(R0, LSL, 10));
1686 __ eor(R0, R0, Operand(R0, LSR, 6)); 1686 __ eor(R0, R0, Operand(R0, LSR, 6));
1687 __ cmp(R3, Operand(R2)); 1687 __ cmp(R3, Operand(R2));
1688 __ b(&loop, NE); 1688 __ b(&loop, NE);
1689 1689
1690 // Finalize. 1690 // Finalize.
1691 // hash_ += hash_ << 3; 1691 // hash_ += hash_ << 3;
1692 // hash_ ^= hash_ >> 11; 1692 // hash_ ^= hash_ >> 11;
1693 // hash_ += hash_ << 15; 1693 // hash_ += hash_ << 15;
1694 __ add(R0, R0, Operand(R0, LSL, 3)); 1694 __ add(R0, R0, Operand(R0, LSL, 3));
(...skipping 37 matching lines...)
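The character loop and the finalization in the hash hunk above follow the Jenkins one-at-a-time pattern. A sketch over one-byte characters (the VM additionally seeds and masks the value so it fits in a Smi, which is outside the lines shown; the function name is illustrative):

    #include <cstddef>
    #include <cstdint>

    // Sketch only: one-at-a-time hash as mirrored by the intrinsic's loop.
    static uint32_t OneAtATimeHash(const uint8_t* data, size_t length) {
      uint32_t hash = 0;
      for (size_t i = 0; i < length; ++i) {
        hash += data[i];          // hash_ += ch
        hash += hash << 10;       // hash_ += hash_ << 10
        hash ^= hash >> 6;        // hash_ ^= hash_ >> 6
      }
      hash += hash << 3;          // finalization
      hash ^= hash >> 11;
      hash += hash << 15;
      return hash;
    }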
1732 1732
1733 // length_reg: allocation size. 1733 // length_reg: allocation size.
1734 __ adds(R1, R0, Operand(length_reg)); 1734 __ adds(R1, R0, Operand(length_reg));
1735 __ b(&fail, CS); // Fail on unsigned overflow. 1735 __ b(&fail, CS); // Fail on unsigned overflow.
1736 1736
1737 // Check if the allocation fits into the remaining space. 1737 // Check if the allocation fits into the remaining space.
1738 // R0: potential new object start. 1738 // R0: potential new object start.
1739 // R1: potential next object start. 1739 // R1: potential next object start.
1740 // R2: allocation size. 1740 // R2: allocation size.
1741 // R3: heap. 1741 // R3: heap.
1742 __ ldr(R7, Address(R3, Heap::EndOffset(space))); 1742 __ ldr(R11, Address(R3, Heap::EndOffset(space)));
1743 __ cmp(R1, Operand(R7)); 1743 __ cmp(R1, Operand(R11));
1744 __ b(&fail, CS); 1744 __ b(&fail, CS);
1745 1745
1746 // Successfully allocated the object(s), now update top to point to 1746 // Successfully allocated the object(s), now update top to point to
1747 // next object start and initialize the object. 1747 // next object start and initialize the object.
1748 __ LoadAllocationStatsAddress(R4, cid, /* inline_isolate = */ false); 1748 __ LoadAllocationStatsAddress(R4, cid, /* inline_isolate = */ false);
1749 __ str(R1, Address(R3, Heap::TopOffset(space))); 1749 __ str(R1, Address(R3, Heap::TopOffset(space)));
1750 __ AddImmediate(R0, kHeapObjectTag); 1750 __ AddImmediate(R0, kHeapObjectTag);
1751 1751
1752 // Initialize the tags. 1752 // Initialize the tags.
1753 // R0: new object start as a tagged pointer. 1753 // R0: new object start as a tagged pointer.
(...skipping 63 matching lines...)
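The allocation hunk above is a new-space bump-allocation attempt: compute top + size, bail out to the runtime on unsigned overflow or if the result would pass the space's end, otherwise publish the new top and tag the object pointer. A hedged sketch of that check (names are illustrative; the allocation-stats update from the real code is omitted):

    #include <cstdint>

    // Sketch only: try to bump-allocate `size` bytes in [*top, end).
    static bool TryBumpAllocate(uintptr_t* top, uintptr_t end, uintptr_t size,
                                uintptr_t* tagged_object) {
      const uintptr_t start = *top;
      const uintptr_t next = start + size;
      if (next < start) return false;          // unsigned overflow -> fall through
      if (next >= end) return false;           // does not fit in remaining space
      *top = next;                             // publish the new top
      *tagged_object = start + 1;              // kHeapObjectTag is 1 in the Dart VM
      return true;
    }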
1817 // R3: Start address to copy from (untagged). 1817 // R3: Start address to copy from (untagged).
1818 // R1: Untagged start index. 1818 // R1: Untagged start index.
1819 __ ldr(R2, Address(SP, kEndIndexOffset)); 1819 __ ldr(R2, Address(SP, kEndIndexOffset));
1820 __ SmiUntag(R2); 1820 __ SmiUntag(R2);
1821 __ sub(R2, R2, Operand(R1)); 1821 __ sub(R2, R2, Operand(R1));
1822 1822
1823 // R3: Start address to copy from (untagged). 1823 // R3: Start address to copy from (untagged).
1824 // R2: Untagged number of bytes to copy. 1824 // R2: Untagged number of bytes to copy.
1825 // R0: Tagged result string. 1825 // R0: Tagged result string.
1826 // R8: Pointer into R3. 1826 // R8: Pointer into R3.
1827 // R7: Pointer into R0. 1827 // R11: Pointer into R0.
1828 // R1: Scratch register. 1828 // R1: Scratch register.
1829 Label loop, done; 1829 Label loop, done;
1830 __ cmp(R2, Operand(0)); 1830 __ cmp(R2, Operand(0));
1831 __ b(&done, LE); 1831 __ b(&done, LE);
1832 __ mov(R8, Operand(R3)); 1832 __ mov(R8, Operand(R3));
1833 __ mov(R7, Operand(R0)); 1833 __ mov(R11, Operand(R0));
1834 __ Bind(&loop); 1834 __ Bind(&loop);
1835 __ ldrb(R1, Address(R8, 0)); 1835 __ ldrb(R1, Address(R8, 0));
1836 __ AddImmediate(R8, 1); 1836 __ AddImmediate(R8, 1);
1837 __ sub(R2, R2, Operand(1)); 1837 __ sub(R2, R2, Operand(1));
1838 __ cmp(R2, Operand(0)); 1838 __ cmp(R2, Operand(0));
1839 __ strb(R1, FieldAddress(R7, OneByteString::data_offset())); 1839 __ strb(R1, FieldAddress(R11, OneByteString::data_offset()));
1840 __ AddImmediate(R7, 1); 1840 __ AddImmediate(R11, 1);
1841 __ b(&loop, GT); 1841 __ b(&loop, GT);
1842 1842
1843 __ Bind(&done); 1843 __ Bind(&done);
1844 __ Ret(); 1844 __ Ret();
1845 __ Bind(&fall_through); 1845 __ Bind(&fall_through);
1846 } 1846 }
1847 1847
1848 1848
1849 void Intrinsifier::OneByteStringSetAt(Assembler* assembler) { 1849 void Intrinsifier::OneByteStringSetAt(Assembler* assembler) {
1850 __ ldr(R2, Address(SP, 0 * kWordSize)); // Value. 1850 __ ldr(R2, Address(SP, 0 * kWordSize)); // Value.
(...skipping 152 matching lines...)
2003 2003
2004 void Intrinsifier::Profiler_getCurrentTag(Assembler* assembler) { 2004 void Intrinsifier::Profiler_getCurrentTag(Assembler* assembler) {
2005 __ LoadIsolate(R0); 2005 __ LoadIsolate(R0);
2006 __ ldr(R0, Address(R0, Isolate::current_tag_offset())); 2006 __ ldr(R0, Address(R0, Isolate::current_tag_offset()));
2007 __ Ret(); 2007 __ Ret();
2008 } 2008 }
2009 2009
2010 } // namespace dart 2010 } // namespace dart
2011 2011
2012 #endif // defined TARGET_ARCH_ARM 2012 #endif // defined TARGET_ARCH_ARM
