Chromium Code Reviews

Unified Diff: runtime/vm/intrinsifier_arm.cc

Issue 1343373003: Revert "VM: New calling convention for generated code." (Closed)
Base URL: git@github.com:dart-lang/sdk.git@master
Patch Set: Created 5 years, 3 months ago
 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
 // for details. All rights reserved. Use of this source code is governed by a
 // BSD-style license that can be found in the LICENSE file.
 
 #include "vm/globals.h"  // Needed here to get TARGET_ARCH_ARM.
 #if defined(TARGET_ARCH_ARM)
 
 #include "vm/intrinsifier.h"
 
 #include "vm/assembler.h"
(...skipping 531 matching lines...)
 
 void Intrinsifier::Integer_bitXor(Assembler* assembler) {
   Integer_bitXorFromInteger(assembler);
 }
 
 
 void Intrinsifier::Integer_shl(Assembler* assembler) {
   ASSERT(kSmiTagShift == 1);
   ASSERT(kSmiTag == 0);
   Label fall_through;
-  __ Push(R10);
+
   TestBothArgumentsSmis(assembler, &fall_through);
   __ CompareImmediate(R0, Smi::RawValue(Smi::kBits));
   __ b(&fall_through, HI);
 
   __ SmiUntag(R0);
 
   // Check for overflow by shifting left and shifting back arithmetically.
   // If the result is different from the original, there was overflow.
   __ mov(IP, Operand(R1, LSL, R0));
   __ cmp(R1, Operand(IP, ASR, R0));
 
   // No overflow, result in R0.
   __ mov(R0, Operand(R1, LSL, R0), EQ);
   __ bx(LR, EQ);
 
   // Arguments are Smi but the shift produced an overflow to Mint.
   __ CompareImmediate(R1, 0);
   __ b(&fall_through, LT);
   __ SmiUntag(R1);
 
   // Pull off high bits that will be shifted off of R1 by making a mask
   // ((1 << R0) - 1), shifting it to the left, masking R1, then shifting back.
   // high bits = (((1 << R0) - 1) << (32 - R0)) & R1) >> (32 - R0)
   // lo bits = R1 << R0
   __ LoadImmediate(R7, 1);
   __ mov(R7, Operand(R7, LSL, R0));  // R7 <- 1 << R0
   __ sub(R7, R7, Operand(1));  // R7 <- R7 - 1
-  __ rsb(R10, R0, Operand(32));  // R10 <- 32 - R0
-  __ mov(R7, Operand(R7, LSL, R10));  // R7 <- R7 << R10
+  __ rsb(R9, R0, Operand(32));  // R9 <- 32 - R0
+  __ mov(R7, Operand(R7, LSL, R9));  // R7 <- R7 << R9
   __ and_(R7, R1, Operand(R7));  // R7 <- R7 & R1
-  __ mov(R7, Operand(R7, LSR, R10));  // R7 <- R7 >> R10
+  __ mov(R7, Operand(R7, LSR, R9));  // R7 <- R7 >> R9
   // Now R7 has the bits that fall off of R1 on a left shift.
   __ mov(R1, Operand(R1, LSL, R0));  // R1 gets the low bits.
 
   const Class& mint_class = Class::Handle(
       Isolate::Current()->object_store()->mint_class());
   __ TryAllocate(mint_class, &fall_through, R0, R2);
 
 
   __ str(R1, FieldAddress(R0, Mint::value_offset()));
   __ str(R7, FieldAddress(R0, Mint::value_offset() + kWordSize));
-  __ Pop(R10);
   __ Ret();
   __ Bind(&fall_through);
-  ASSERT(CODE_REG == R10);
-  __ Pop(R10);
 }
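Throughout this file the revert swaps R10 (reserved as CODE_REG by the reverted calling-convention change) back to R9 as a scratch register; the arithmetic itself is unchanged. That arithmetic is easier to follow outside assembly. Below is a minimal C++ sketch of what Integer_shl computes (my own model, not VM code: Smi tagging is ignored, and the Mint path assumes value >= 0 and 1 <= n < 32, mirroring the fall-through conditions above):

```cpp
#include <cstdint>

// Sketch of Integer_shl's arithmetic: returns true and fills *smi if the
// shift fits in 32 bits; otherwise fills the (hi:lo) pair that the
// intrinsic stores into a freshly allocated Mint.
bool ShlOrMint(int32_t value, uint32_t n,
               int32_t* smi, uint32_t* lo, uint32_t* hi) {
  // Shift left, shift back arithmetically; a mismatch means overflow.
  int32_t shifted = (int32_t)((uint32_t)value << n);
  if ((shifted >> n) == value) {
    *smi = shifted;
    return true;
  }
  // Overflow: recover the bits shifted off the top with the mask trick
  // ((1 << n) - 1) << (32 - n), then shift them back down.
  uint32_t mask = ((uint32_t)1 << n) - 1;             // R7 <- (1 << R0) - 1
  *hi = ((mask << (32 - n)) & (uint32_t)value) >> (32 - n);
  *lo = (uint32_t)value << n;                         // R1 gets the low bits.
  return false;
}
```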
 
 
 static void Get64SmiOrMint(Assembler* assembler,
                            Register res_hi,
                            Register res_lo,
                            Register reg,
                            Label* not_smi_or_mint) {
   Label not_smi, done;
   __ tst(reg, Operand(kSmiTagMask));
(...skipping 205 matching lines...)
   __ ldrd(R2, SP, 2 * kWordSize);
   // R4 = r_digits, R5 = n, n is Smi, n % _DIGIT_BITS != 0.
   __ ldrd(R4, SP, 0 * kWordSize);
   __ SmiUntag(R5);
   // R0 = n ~/ _DIGIT_BITS
   __ Asr(R0, R5, Operand(5));
   // R6 = &x_digits[0]
   __ add(R6, R3, Operand(TypedData::data_offset() - kHeapObjectTag));
   // R7 = &x_digits[x_used]
   __ add(R7, R6, Operand(R2, LSL, 1));
-  // R10 = &r_digits[1]
-  __ add(R10, R4, Operand(TypedData::data_offset() - kHeapObjectTag +
+  // R9 = &r_digits[1]
+  __ add(R9, R4, Operand(TypedData::data_offset() - kHeapObjectTag +
       Bigint::kBytesPerDigit));
-  // R10 = &r_digits[x_used + n ~/ _DIGIT_BITS + 1]
+  // R9 = &r_digits[x_used + n ~/ _DIGIT_BITS + 1]
   __ add(R0, R0, Operand(R2, ASR, 1));
-  __ add(R10, R10, Operand(R0, LSL, 2));
+  __ add(R9, R9, Operand(R0, LSL, 2));
   // R3 = n % _DIGIT_BITS
   __ and_(R3, R5, Operand(31));
   // R2 = 32 - R3
   __ rsb(R2, R3, Operand(32));
   __ mov(R1, Operand(0));
   Label loop;
   __ Bind(&loop);
   __ ldr(R0, Address(R7, -Bigint::kBytesPerDigit, Address::PreIndex));
   __ orr(R1, R1, Operand(R0, LSR, R2));
-  __ str(R1, Address(R10, -Bigint::kBytesPerDigit, Address::PreIndex));
+  __ str(R1, Address(R9, -Bigint::kBytesPerDigit, Address::PreIndex));
   __ mov(R1, Operand(R0, LSL, R3));
   __ teq(R7, Operand(R6));
   __ b(&loop, NE);
-  __ str(R1, Address(R10, -Bigint::kBytesPerDigit, Address::PreIndex));
+  __ str(R1, Address(R9, -Bigint::kBytesPerDigit, Address::PreIndex));
   // Returning Object::null() is not required, since this method is private.
   __ Ret();
 }
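In C terms, the _lsh loop above walks the source digits from most significant to least, carrying n % 32 bits between stores. A sketch under the same caveats (my model, not VM code; the entries of r_digits below n/32 are never written here, so they are presumably zero-initialized by the caller):

```cpp
#include <cstdint>

// Sketch of the _lsh digit loop: shift x_digits[0..x_used) left by n bits
// (n % 32 != 0) into r_digits, writing from the top down as the
// pre-indexed stores above do.
void BigintLsh(const uint32_t* x_digits, int x_used, int n,
               uint32_t* r_digits) {
  const int digit_shift = n / 32;  // n ~/ _DIGIT_BITS
  const int bit_shift = n % 32;    // nonzero by precondition
  const uint32_t* rp = x_digits + x_used;              // R7
  uint32_t* wp = r_digits + x_used + digit_shift + 1;  // R9
  uint32_t carry = 0;                                  // R1
  while (rp != x_digits) {                             // teq R7, R6
    uint32_t d = *--rp;                       // ldr with pre-decrement
    *--wp = carry | (d >> (32 - bit_shift));  // orr + str, pre-decrement
    carry = d << bit_shift;
  }
  *--wp = carry;  // Lowest written digit: r_digits[digit_shift].
}
```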
 
 
 void Intrinsifier::Bigint_rsh(Assembler* assembler) {
   // static void _lsh(Uint32List x_digits, int x_used, int n,
   //                  Uint32List r_digits)
 
   // R2 = x_used, R3 = x_digits, x_used > 0, x_used is Smi.
   __ ldrd(R2, SP, 2 * kWordSize);
   // R4 = r_digits, R5 = n, n is Smi, n % _DIGIT_BITS != 0.
   __ ldrd(R4, SP, 0 * kWordSize);
   __ SmiUntag(R5);
   // R0 = n ~/ _DIGIT_BITS
   __ Asr(R0, R5, Operand(5));
-  // R10 = &r_digits[0]
-  __ add(R10, R4, Operand(TypedData::data_offset() - kHeapObjectTag));
+  // R9 = &r_digits[0]
+  __ add(R9, R4, Operand(TypedData::data_offset() - kHeapObjectTag));
   // R7 = &x_digits[n ~/ _DIGIT_BITS]
   __ add(R7, R3, Operand(TypedData::data_offset() - kHeapObjectTag));
   __ add(R7, R7, Operand(R0, LSL, 2));
   // R6 = &r_digits[x_used - n ~/ _DIGIT_BITS - 1]
   __ add(R0, R0, Operand(1));
   __ rsb(R0, R0, Operand(R2, ASR, 1));
-  __ add(R6, R10, Operand(R0, LSL, 2));
+  __ add(R6, R9, Operand(R0, LSL, 2));
   // R3 = n % _DIGIT_BITS
   __ and_(R3, R5, Operand(31));
   // R2 = 32 - R3
   __ rsb(R2, R3, Operand(32));
   // R1 = x_digits[n ~/ _DIGIT_BITS] >> (n % _DIGIT_BITS)
   __ ldr(R1, Address(R7, Bigint::kBytesPerDigit, Address::PostIndex));
   __ mov(R1, Operand(R1, LSR, R3));
   Label loop_entry;
   __ b(&loop_entry);
   Label loop;
   __ Bind(&loop);
   __ ldr(R0, Address(R7, Bigint::kBytesPerDigit, Address::PostIndex));
   __ orr(R1, R1, Operand(R0, LSL, R2));
-  __ str(R1, Address(R10, Bigint::kBytesPerDigit, Address::PostIndex));
+  __ str(R1, Address(R9, Bigint::kBytesPerDigit, Address::PostIndex));
   __ mov(R1, Operand(R0, LSR, R3));
   __ Bind(&loop_entry);
-  __ teq(R10, Operand(R6));
+  __ teq(R9, Operand(R6));
   __ b(&loop, NE);
-  __ str(R1, Address(R10, 0));
+  __ str(R1, Address(R9, 0));
   // Returning Object::null() is not required, since this method is private.
   __ Ret();
 }
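Bigint_rsh mirrors it: the pointers advance with post-increment, the first partial digit is formed before entering the loop, and the top digit is stored after it. The same loop in C++ (again my model, not VM code):

```cpp
#include <cstdint>

// Sketch of the right-shift digit loop: shift x_digits[0..x_used) right by
// n bits (n % 32 != 0) into r_digits[0..x_used - n/32).
void BigintRsh(const uint32_t* x_digits, int x_used, int n,
               uint32_t* r_digits) {
  const int digit_shift = n / 32;  // n ~/ _DIGIT_BITS
  const int bit_shift = n % 32;    // nonzero by precondition
  const uint32_t* rp = x_digits + digit_shift;                // R7
  uint32_t* wp = r_digits;                                    // R9
  uint32_t* const end = r_digits + x_used - digit_shift - 1;  // R6
  uint32_t carry = *rp++ >> bit_shift;                        // R1
  while (wp != end) {                                         // teq R9, R6
    uint32_t d = *rp++;                       // ldr, post-indexed
    *wp++ = carry | (d << (32 - bit_shift));  // orr + str, post-indexed
    carry = d >> bit_shift;
  }
  *wp = carry;  // Top digit receives no incoming bits.
}
```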
 
 
 void Intrinsifier::Bigint_absAdd(Assembler* assembler) {
   // static void _absAdd(Uint32List digits, int used,
   //                     Uint32List a_digits, int a_used,
   //                     Uint32List r_digits)
 
   // R2 = used, R3 = digits
   __ ldrd(R2, SP, 3 * kWordSize);
   // R3 = &digits[0]
   __ add(R3, R3, Operand(TypedData::data_offset() - kHeapObjectTag));
 
   // R4 = a_used, R5 = a_digits
   __ ldrd(R4, SP, 1 * kWordSize);
   // R5 = &a_digits[0]
   __ add(R5, R5, Operand(TypedData::data_offset() - kHeapObjectTag));
 
   // R6 = r_digits
   __ ldr(R6, Address(SP, 0 * kWordSize));
   // R6 = &r_digits[0]
   __ add(R6, R6, Operand(TypedData::data_offset() - kHeapObjectTag));
 
   // R7 = &digits[a_used >> 1], a_used is Smi.
   __ add(R7, R3, Operand(R4, LSL, 1));
 
-  // R10 = &digits[used >> 1], used is Smi.
-  __ add(R10, R3, Operand(R2, LSL, 1));
+  // R9 = &digits[used >> 1], used is Smi.
+  __ add(R9, R3, Operand(R2, LSL, 1));
 
   __ adds(R0, R0, Operand(0));  // carry flag = 0
   Label add_loop;
   __ Bind(&add_loop);
   // Loop a_used times, a_used > 0.
   __ ldr(R0, Address(R3, Bigint::kBytesPerDigit, Address::PostIndex));
   __ ldr(R1, Address(R5, Bigint::kBytesPerDigit, Address::PostIndex));
   __ adcs(R0, R0, Operand(R1));
   __ teq(R3, Operand(R7));  // Does not affect carry flag.
   __ str(R0, Address(R6, Bigint::kBytesPerDigit, Address::PostIndex));
   __ b(&add_loop, NE);
 
   Label last_carry;
-  __ teq(R3, Operand(R10));  // Does not affect carry flag.
+  __ teq(R3, Operand(R9));  // Does not affect carry flag.
   __ b(&last_carry, EQ);  // If used - a_used == 0.
 
   Label carry_loop;
   __ Bind(&carry_loop);
   // Loop used - a_used times, used - a_used > 0.
   __ ldr(R0, Address(R3, Bigint::kBytesPerDigit, Address::PostIndex));
   __ adcs(R0, R0, Operand(0));
-  __ teq(R3, Operand(R10));  // Does not affect carry flag.
+  __ teq(R3, Operand(R9));  // Does not affect carry flag.
   __ str(R0, Address(R6, Bigint::kBytesPerDigit, Address::PostIndex));
   __ b(&carry_loop, NE);
 
   __ Bind(&last_carry);
   __ mov(R0, Operand(0));
   __ adc(R0, R0, Operand(0));
   __ str(R0, Address(R6, 0));
 
   // Returning Object::null() is not required, since this method is private.
   __ Ret();
(...skipping 16 matching lines...)
   __ add(R5, R5, Operand(TypedData::data_offset() - kHeapObjectTag));
 
   // R6 = r_digits
   __ ldr(R6, Address(SP, 0 * kWordSize));
   // R6 = &r_digits[0]
   __ add(R6, R6, Operand(TypedData::data_offset() - kHeapObjectTag));
 
   // R7 = &digits[a_used >> 1], a_used is Smi.
   __ add(R7, R3, Operand(R4, LSL, 1));
 
-  // R10 = &digits[used >> 1], used is Smi.
-  __ add(R10, R3, Operand(R2, LSL, 1));
+  // R9 = &digits[used >> 1], used is Smi.
+  __ add(R9, R3, Operand(R2, LSL, 1));
 
   __ subs(R0, R0, Operand(0));  // carry flag = 1
   Label sub_loop;
   __ Bind(&sub_loop);
   // Loop a_used times, a_used > 0.
   __ ldr(R0, Address(R3, Bigint::kBytesPerDigit, Address::PostIndex));
   __ ldr(R1, Address(R5, Bigint::kBytesPerDigit, Address::PostIndex));
   __ sbcs(R0, R0, Operand(R1));
   __ teq(R3, Operand(R7));  // Does not affect carry flag.
   __ str(R0, Address(R6, Bigint::kBytesPerDigit, Address::PostIndex));
   __ b(&sub_loop, NE);
 
   Label done;
-  __ teq(R3, Operand(R10));  // Does not affect carry flag.
+  __ teq(R3, Operand(R9));  // Does not affect carry flag.
   __ b(&done, EQ);  // If used - a_used == 0.
 
   Label carry_loop;
   __ Bind(&carry_loop);
   // Loop used - a_used times, used - a_used > 0.
   __ ldr(R0, Address(R3, Bigint::kBytesPerDigit, Address::PostIndex));
   __ sbcs(R0, R0, Operand(0));
-  __ teq(R3, Operand(R10));  // Does not affect carry flag.
+  __ teq(R3, Operand(R9));  // Does not affect carry flag.
   __ str(R0, Address(R6, Bigint::kBytesPerDigit, Address::PostIndex));
   __ b(&carry_loop, NE);
 
   __ Bind(&done);
   // Returning Object::null() is not required, since this method is private.
   __ Ret();
 }
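Both absAdd and absSub above are schoolbook carry propagation: adcs/sbcs ripple the ARM carry flag across the digit arrays, and the loop test uses teq precisely because it leaves that flag untouched. The equivalent C++, with an explicit carry/borrow variable standing in for the flag (my sketch, not VM code; r_digits needs used + 1 slots for the add):

```cpp
#include <cstdint>

// Sketch of _absAdd: r_digits = digits + a_digits, with a_used <= used;
// r_digits[used] receives the final carry.
void AbsAdd(const uint32_t* digits, int used,
            const uint32_t* a_digits, int a_used, uint32_t* r_digits) {
  uint64_t carry = 0;
  for (int i = 0; i < a_used; i++) {     // add_loop: adcs over both arrays
    carry += (uint64_t)digits[i] + a_digits[i];
    r_digits[i] = (uint32_t)carry;
    carry >>= 32;
  }
  for (int i = a_used; i < used; i++) {  // carry_loop: propagate carry only
    carry += digits[i];
    r_digits[i] = (uint32_t)carry;
    carry >>= 32;
  }
  r_digits[used] = (uint32_t)carry;      // last_carry
}

// Sketch of _absSub: r_digits = digits - a_digits, assuming digits >=
// a_digits in magnitude so no borrow survives the last iteration.
void AbsSub(const uint32_t* digits, int used,
            const uint32_t* a_digits, int a_used, uint32_t* r_digits) {
  uint32_t borrow = 0;  // 0 or 1; the ARM code keeps this in the carry flag
  for (int i = 0; i < used; i++) {       // sub_loop + carry_loop folded
    uint64_t a = (i < a_used) ? a_digits[i] : 0;
    uint64_t d = (uint64_t)digits[i] - a - borrow;
    r_digits[i] = (uint32_t)d;
    borrow = (uint32_t)(d >> 63);        // 1 iff the subtraction wrapped
  }
}
```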
 
 
 void Intrinsifier::Bigint_mulAdd(Assembler* assembler) {
(...skipping 148 matching lines...)
 
   // *ajp++ = low32(t) = R0
   __ str(R0, Address(R5, Bigint::kBytesPerDigit, Address::PostIndex));
 
   // R6 = low32(c) = high32(t)
   // R7 = high32(c) = 0
   __ mov(R7, Operand(0));
 
   // int n = used - i - 1; while (--n >= 0) ...
   __ ldr(R0, Address(SP, 0 * kWordSize));  // used is Smi
-  __ sub(R10, R0, Operand(R2));
+  __ sub(R9, R0, Operand(R2));
   __ mov(R0, Operand(2));  // n = used - i - 2; if (n >= 0) ... while (--n >= 0)
-  __ rsbs(R10, R0, Operand(R10, ASR, kSmiTagSize));
+  __ rsbs(R9, R0, Operand(R9, ASR, kSmiTagSize));
 
   Label loop, done;
   __ b(&done, MI);
 
   __ Bind(&loop);
   // x:   R3
   // xip: R4
   // ajp: R5
   // c:   R7:R6
   // t:   R2:R1:R0 (not live at loop entry)
-  // n:   R10
+  // n:   R9
 
   // uint32_t xi = *xip++
   __ ldr(R2, Address(R4, Bigint::kBytesPerDigit, Address::PostIndex));
 
   // uint96_t t = R7:R6:R0 = 2*x*xi + aj + c
   __ umull(R0, R1, R2, R3);  // R1:R0 = R2*R3.
   __ adds(R0, R0, Operand(R0));
   __ adcs(R1, R1, Operand(R1));
   __ mov(R2, Operand(0));
   __ adc(R2, R2, Operand(0));  // R2:R1:R0 = 2*x*xi.
   __ adds(R0, R0, Operand(R6));
   __ adcs(R1, R1, Operand(R7));
   __ adc(R2, R2, Operand(0));  // R2:R1:R0 = 2*x*xi + c.
   __ ldr(R6, Address(R5, 0));  // R6 = aj = *ajp.
   __ adds(R0, R0, Operand(R6));
   __ adcs(R6, R1, Operand(0));
   __ adc(R7, R2, Operand(0));  // R7:R6:R0 = 2*x*xi + c + aj.
 
   // *ajp++ = low32(t) = R0
   __ str(R0, Address(R5, Bigint::kBytesPerDigit, Address::PostIndex));
 
   // while (--n >= 0)
-  __ subs(R10, R10, Operand(1));  // --n
+  __ subs(R9, R9, Operand(1));  // --n
   __ b(&loop, PL);
 
   __ Bind(&done);
   // uint32_t aj = *ajp
   __ ldr(R0, Address(R5, 0));
 
   // uint64_t t = aj + c
   __ adds(R6, R6, Operand(R0));
   __ adc(R7, R7, Operand(0));
 
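Judging by the 2*x*xi term, this fragment is the inner loop of the Bigint squaring helper: each iteration folds a doubled cross product into a 96-bit running value kept in R2:R1:R0. One iteration in C++, with unsigned __int128 (a GCC/Clang extension) standing in for the register triple; my sketch, not VM code:

```cpp
#include <cstdint>

// Sketch of one iteration of the loop above: t = 2*x*xi + aj + c, where t
// needs up to 96 bits and the carry c up to 64 bits (R7:R6 above).
static inline void SqrAddStep(uint32_t x, uint32_t xi,
                              uint32_t* ajp, uint64_t* c) {
  unsigned __int128 t = (unsigned __int128)x * xi;  // umull: R1:R0 = x * xi
  t += t;                    // double the cross product: 2*x*xi
  t += *c;                   // add the running carry c
  t += *ajp;                 // add aj = *ajp
  *ajp = (uint32_t)t;        // *ajp++ = low32(t)
  *c = (uint64_t)(t >> 32);  // c = high64(t), at most about 2^34 here
}
```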
(...skipping 794 matching lines...)
   __ LoadClassId(R1, R1);
   __ AddImmediate(R1, R1, -kOneByteStringCid);
   __ add(R1, R2, Operand(R1, LSL, kWordSizeLog2));
   __ ldr(R0, FieldAddress(R1, JSRegExp::function_offset(kOneByteStringCid)));
 
   // Registers are now set up for the lazy compile stub. It expects the function
   // in R0, the argument descriptor in R4, and IC-Data in R5.
   __ eor(R5, R5, Operand(R5));
 
   // Tail-call the function.
-  __ ldr(CODE_REG, FieldAddress(R0, Function::code_offset()));
   __ ldr(R1, FieldAddress(R0, Function::entry_point_offset()));
   __ bx(R1);
 }
 
 
 // On stack: user tag (+0).
 void Intrinsifier::UserTag_makeCurrent(Assembler* assembler) {
   // R1: Isolate.
   __ LoadIsolate(R1);
   // R0: Current user tag.
(...skipping 19 matching lines...)
 
 void Intrinsifier::Profiler_getCurrentTag(Assembler* assembler) {
   __ LoadIsolate(R0);
   __ ldr(R0, Address(R0, Isolate::current_tag_offset()));
   __ Ret();
 }
 
 }  // namespace dart
 
 #endif  // defined TARGET_ARCH_ARM
