Chromium Code Reviews

Unified Diff: runtime/vm/intrinsifier_arm.cc

Issue 1192103004: VM: New calling convention for generated code. (Closed)
Base URL: git@github.com:dart-lang/sdk.git@master
Patch Set: fixed comments. Created 5 years, 3 months ago.
 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
 // for details. All rights reserved. Use of this source code is governed by a
 // BSD-style license that can be found in the LICENSE file.
 
 #include "vm/globals.h"  // Needed here to get TARGET_ARCH_ARM.
 #if defined(TARGET_ARCH_ARM)
 
 #include "vm/intrinsifier.h"
 
 #include "vm/assembler.h"
(...skipping 531 matching lines...)
 
 void Intrinsifier::Integer_bitXor(Assembler* assembler) {
   Integer_bitXorFromInteger(assembler);
 }
 
 
 void Intrinsifier::Integer_shl(Assembler* assembler) {
   ASSERT(kSmiTagShift == 1);
   ASSERT(kSmiTag == 0);
   Label fall_through;
-
+  __ Push(R10);
   TestBothArgumentsSmis(assembler, &fall_through);
   __ CompareImmediate(R0, Smi::RawValue(Smi::kBits));
   __ b(&fall_through, HI);
 
   __ SmiUntag(R0);
 
   // Check for overflow by shifting left and shifting back arithmetically.
   // If the result is different from the original, there was overflow.
   __ mov(IP, Operand(R1, LSL, R0));
   __ cmp(R1, Operand(IP, ASR, R0));
 
   // No overflow, result in R0.
   __ mov(R0, Operand(R1, LSL, R0), EQ);
   __ bx(LR, EQ);
 
   // Arguments are Smi but the shift produced an overflow to Mint.
   __ CompareImmediate(R1, 0);
   __ b(&fall_through, LT);
   __ SmiUntag(R1);
 
   // Pull off high bits that will be shifted off of R1 by making a mask
   // ((1 << R0) - 1), shifting it to the left, masking R1, then shifting back.
   // high bits = ((((1 << R0) - 1) << (32 - R0)) & R1) >> (32 - R0)
   // lo bits = R1 << R0
   __ LoadImmediate(R7, 1);
   __ mov(R7, Operand(R7, LSL, R0));  // R7 <- 1 << R0
   __ sub(R7, R7, Operand(1));  // R7 <- R7 - 1
-  __ rsb(R9, R0, Operand(32));  // R9 <- 32 - R0
-  __ mov(R7, Operand(R7, LSL, R9));  // R7 <- R7 << R9
+  __ rsb(R10, R0, Operand(32));  // R10 <- 32 - R0
+  __ mov(R7, Operand(R7, LSL, R10));  // R7 <- R7 << R10
   __ and_(R7, R1, Operand(R7));  // R7 <- R7 & R1
-  __ mov(R7, Operand(R7, LSR, R9));  // R7 <- R7 >> R9
+  __ mov(R7, Operand(R7, LSR, R10));  // R7 <- R7 >> R10
   // Now R7 has the bits that fall off of R1 on a left shift.
   __ mov(R1, Operand(R1, LSL, R0));  // R1 gets the low bits.
 
   const Class& mint_class = Class::Handle(
       Isolate::Current()->object_store()->mint_class());
   __ TryAllocate(mint_class, &fall_through, R0, R2);
 
 
   __ str(R1, FieldAddress(R0, Mint::value_offset()));
   __ str(R7, FieldAddress(R0, Mint::value_offset() + kWordSize));
+  __ Pop(R10);
   __ Ret();
   __ Bind(&fall_through);
+  ASSERT(CODE_REG == R10);
+  __ Pop(R10);
 }
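
The Push/Pop pair added above exists because this intrinsic now borrows R10, which the new calling convention reserves as CODE_REG, as a scratch register; both the Mint slow path and the fall-through path restore it before leaving. The overflow test itself is the classic shift-and-shift-back trick. A minimal C++ model of the two paths, assuming 32-bit words and arithmetic right shift as on ARM (names and helpers are illustrative, not VM API):

#include <cstdint>
#include <optional>

// Smi fast path: shift left, shift back arithmetically, compare. If the
// round trip loses bits, the result does not fit in a word. shift is
// expected in [0, 31]; the intrinsic bounds it with the Smi::kBits check.
std::optional<int32_t> SmiShlFastPath(int32_t value, uint32_t shift) {
  int32_t shifted =
      static_cast<int32_t>(static_cast<uint32_t>(value) << shift);
  if ((shifted >> shift) != value) return std::nullopt;  // overflowed
  return shifted;
}

// Slow path for non-negative values: split the 64-bit result into the low
// word (R1 << R0) and the bits shifted off the top (R7 in the assembly).
// Assumes 1 <= shift <= 31, matching the overflow-only use above.
uint64_t SmiShlToMint(uint32_t value, uint32_t shift) {
  uint32_t lo = value << shift;
  uint32_t hi = value >> (32 - shift);
  return (static_cast<uint64_t>(hi) << 32) | lo;
}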
 
 
 static void Get64SmiOrMint(Assembler* assembler,
                            Register res_hi,
                            Register res_lo,
                            Register reg,
                            Label* not_smi_or_mint) {
   Label not_smi, done;
   __ tst(reg, Operand(kSmiTagMask));
(...skipping 205 matching lines...)
   __ ldrd(R2, SP, 2 * kWordSize);
   // R4 = r_digits, R5 = n, n is Smi, n % _DIGIT_BITS != 0.
   __ ldrd(R4, SP, 0 * kWordSize);
   __ SmiUntag(R5);
   // R0 = n ~/ _DIGIT_BITS
   __ Asr(R0, R5, Operand(5));
   // R6 = &x_digits[0]
   __ add(R6, R3, Operand(TypedData::data_offset() - kHeapObjectTag));
   // R7 = &x_digits[x_used]
   __ add(R7, R6, Operand(R2, LSL, 1));
-  // R9 = &r_digits[1]
-  __ add(R9, R4, Operand(TypedData::data_offset() - kHeapObjectTag +
-                         Bigint::kBytesPerDigit));
-  // R9 = &r_digits[x_used + n ~/ _DIGIT_BITS + 1]
+  // R10 = &r_digits[1]
+  __ add(R10, R4, Operand(TypedData::data_offset() - kHeapObjectTag +
+                          Bigint::kBytesPerDigit));
+  // R10 = &r_digits[x_used + n ~/ _DIGIT_BITS + 1]
   __ add(R0, R0, Operand(R2, ASR, 1));
-  __ add(R9, R9, Operand(R0, LSL, 2));
+  __ add(R10, R10, Operand(R0, LSL, 2));
   // R3 = n % _DIGIT_BITS
   __ and_(R3, R5, Operand(31));
   // R2 = 32 - R3
   __ rsb(R2, R3, Operand(32));
   __ mov(R1, Operand(0));
   Label loop;
   __ Bind(&loop);
   __ ldr(R0, Address(R7, -Bigint::kBytesPerDigit, Address::PreIndex));
   __ orr(R1, R1, Operand(R0, LSR, R2));
-  __ str(R1, Address(R9, -Bigint::kBytesPerDigit, Address::PreIndex));
+  __ str(R1, Address(R10, -Bigint::kBytesPerDigit, Address::PreIndex));
   __ mov(R1, Operand(R0, LSL, R3));
   __ teq(R7, Operand(R6));
   __ b(&loop, NE);
-  __ str(R1, Address(R9, -Bigint::kBytesPerDigit, Address::PreIndex));
+  __ str(R1, Address(R10, -Bigint::kBytesPerDigit, Address::PreIndex));
   // Returning Object::null() is not required, since this method is private.
   __ Ret();
 }
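
R10 here is pure address arithmetic, which is why the R9 to R10 swap is mechanical. For readers following the pre-indexed loop, a rough C++ equivalent of the digit walk (illustrative only; note the intrinsic's stated precondition n % _DIGIT_BITS != 0, which keeps every shift amount in range):

#include <cstdint>

void BigintLsh(const uint32_t* x_digits, int x_used, int n,
               uint32_t* r_digits) {
  const int digit_shift = n / 32;  // n ~/ _DIGIT_BITS (the Asr by 5)
  const int bit_shift = n % 32;    // n % _DIGIT_BITS (the and_ with 31), != 0
  uint32_t carry = 0;              // R1 in the assembly
  for (int i = x_used - 1; i >= 0; --i) {  // pre-indexed walk, high to low
    uint32_t d = x_digits[i];              // R0
    r_digits[i + digit_shift + 1] = carry | (d >> (32 - bit_shift));
    carry = d << bit_shift;
  }
  r_digits[digit_shift] = carry;   // the final str after the loop
}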
 
 
 void Intrinsifier::Bigint_rsh(Assembler* assembler) {
   // static void _rsh(Uint32List x_digits, int x_used, int n,
   //                  Uint32List r_digits)
 
   // R2 = x_used, R3 = x_digits, x_used > 0, x_used is Smi.
   __ ldrd(R2, SP, 2 * kWordSize);
   // R4 = r_digits, R5 = n, n is Smi, n % _DIGIT_BITS != 0.
   __ ldrd(R4, SP, 0 * kWordSize);
   __ SmiUntag(R5);
   // R0 = n ~/ _DIGIT_BITS
   __ Asr(R0, R5, Operand(5));
-  // R9 = &r_digits[0]
-  __ add(R9, R4, Operand(TypedData::data_offset() - kHeapObjectTag));
+  // R10 = &r_digits[0]
+  __ add(R10, R4, Operand(TypedData::data_offset() - kHeapObjectTag));
   // R7 = &x_digits[n ~/ _DIGIT_BITS]
   __ add(R7, R3, Operand(TypedData::data_offset() - kHeapObjectTag));
   __ add(R7, R7, Operand(R0, LSL, 2));
   // R6 = &r_digits[x_used - n ~/ _DIGIT_BITS - 1]
   __ add(R0, R0, Operand(1));
   __ rsb(R0, R0, Operand(R2, ASR, 1));
-  __ add(R6, R9, Operand(R0, LSL, 2));
+  __ add(R6, R10, Operand(R0, LSL, 2));
   // R3 = n % _DIGIT_BITS
   __ and_(R3, R5, Operand(31));
   // R2 = 32 - R3
   __ rsb(R2, R3, Operand(32));
   // R1 = x_digits[n ~/ _DIGIT_BITS] >> (n % _DIGIT_BITS)
   __ ldr(R1, Address(R7, Bigint::kBytesPerDigit, Address::PostIndex));
   __ mov(R1, Operand(R1, LSR, R3));
   Label loop_entry;
   __ b(&loop_entry);
   Label loop;
   __ Bind(&loop);
   __ ldr(R0, Address(R7, Bigint::kBytesPerDigit, Address::PostIndex));
   __ orr(R1, R1, Operand(R0, LSL, R2));
-  __ str(R1, Address(R9, Bigint::kBytesPerDigit, Address::PostIndex));
+  __ str(R1, Address(R10, Bigint::kBytesPerDigit, Address::PostIndex));
   __ mov(R1, Operand(R0, LSR, R3));
   __ Bind(&loop_entry);
-  __ teq(R9, Operand(R6));
+  __ teq(R10, Operand(R6));
   __ b(&loop, NE);
-  __ str(R1, Address(R9, 0));
+  __ str(R1, Address(R10, 0));
   // Returning Object::null() is not required, since this method is private.
   __ Ret();
 }
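
The right shift mirrors the left shift with post-indexed loads walking upward from x_digits[n ~/ 32]; again the register swap only renames the r_digits cursor. A matching C++ sketch, same caveats as above (illustrative only; assumes n % 32 != 0 and at least one result digit):

#include <cstdint>

void BigintRsh(const uint32_t* x_digits, int x_used, int n,
               uint32_t* r_digits) {
  const int digit_shift = n / 32;
  const int bit_shift = n % 32;             // nonzero by precondition
  const int r_used = x_used - digit_shift;  // assumed >= 1
  uint32_t carry = x_digits[digit_shift] >> bit_shift;  // initial R1
  for (int i = 1; i < r_used; ++i) {
    uint32_t d = x_digits[digit_shift + i];             // R0
    r_digits[i - 1] = carry | (d << (32 - bit_shift));
    carry = d >> bit_shift;
  }
  r_digits[r_used - 1] = carry;   // final str at R10
}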
 
 
 void Intrinsifier::Bigint_absAdd(Assembler* assembler) {
   // static void _absAdd(Uint32List digits, int used,
   //                     Uint32List a_digits, int a_used,
   //                     Uint32List r_digits)
 
   // R2 = used, R3 = digits
   __ ldrd(R2, SP, 3 * kWordSize);
   // R3 = &digits[0]
   __ add(R3, R3, Operand(TypedData::data_offset() - kHeapObjectTag));
 
   // R4 = a_used, R5 = a_digits
   __ ldrd(R4, SP, 1 * kWordSize);
   // R5 = &a_digits[0]
   __ add(R5, R5, Operand(TypedData::data_offset() - kHeapObjectTag));
 
   // R6 = r_digits
   __ ldr(R6, Address(SP, 0 * kWordSize));
   // R6 = &r_digits[0]
   __ add(R6, R6, Operand(TypedData::data_offset() - kHeapObjectTag));
 
   // R7 = &digits[a_used >> 1], a_used is Smi.
   __ add(R7, R3, Operand(R4, LSL, 1));
 
-  // R9 = &digits[used >> 1], used is Smi.
-  __ add(R9, R3, Operand(R2, LSL, 1));
+  // R10 = &digits[used >> 1], used is Smi.
+  __ add(R10, R3, Operand(R2, LSL, 1));
 
   __ adds(R0, R0, Operand(0));  // carry flag = 0
   Label add_loop;
   __ Bind(&add_loop);
   // Loop a_used times, a_used > 0.
   __ ldr(R0, Address(R3, Bigint::kBytesPerDigit, Address::PostIndex));
   __ ldr(R1, Address(R5, Bigint::kBytesPerDigit, Address::PostIndex));
   __ adcs(R0, R0, Operand(R1));
   __ teq(R3, Operand(R7));  // Does not affect carry flag.
   __ str(R0, Address(R6, Bigint::kBytesPerDigit, Address::PostIndex));
   __ b(&add_loop, NE);
 
   Label last_carry;
-  __ teq(R3, Operand(R9));  // Does not affect carry flag.
+  __ teq(R3, Operand(R10));  // Does not affect carry flag.
   __ b(&last_carry, EQ);  // If used - a_used == 0.
 
   Label carry_loop;
   __ Bind(&carry_loop);
   // Loop used - a_used times, used - a_used > 0.
   __ ldr(R0, Address(R3, Bigint::kBytesPerDigit, Address::PostIndex));
   __ adcs(R0, R0, Operand(0));
-  __ teq(R3, Operand(R9));  // Does not affect carry flag.
+  __ teq(R3, Operand(R10));  // Does not affect carry flag.
   __ str(R0, Address(R6, Bigint::kBytesPerDigit, Address::PostIndex));
   __ b(&carry_loop, NE);
 
   __ Bind(&last_carry);
   __ mov(R0, Operand(0));
   __ adc(R0, R0, Operand(0));
   __ str(R0, Address(R6, 0));
 
   // Returning Object::null() is not required, since this method is private.
   __ Ret();
(...skipping 16 matching lines...)
   __ add(R5, R5, Operand(TypedData::data_offset() - kHeapObjectTag));
 
   // R6 = r_digits
   __ ldr(R6, Address(SP, 0 * kWordSize));
   // R6 = &r_digits[0]
   __ add(R6, R6, Operand(TypedData::data_offset() - kHeapObjectTag));
 
   // R7 = &digits[a_used >> 1], a_used is Smi.
   __ add(R7, R3, Operand(R4, LSL, 1));
 
-  // R9 = &digits[used >> 1], used is Smi.
-  __ add(R9, R3, Operand(R2, LSL, 1));
+  // R10 = &digits[used >> 1], used is Smi.
+  __ add(R10, R3, Operand(R2, LSL, 1));
 
   __ subs(R0, R0, Operand(0));  // carry flag = 1
   Label sub_loop;
   __ Bind(&sub_loop);
   // Loop a_used times, a_used > 0.
   __ ldr(R0, Address(R3, Bigint::kBytesPerDigit, Address::PostIndex));
   __ ldr(R1, Address(R5, Bigint::kBytesPerDigit, Address::PostIndex));
   __ sbcs(R0, R0, Operand(R1));
   __ teq(R3, Operand(R7));  // Does not affect carry flag.
   __ str(R0, Address(R6, Bigint::kBytesPerDigit, Address::PostIndex));
   __ b(&sub_loop, NE);
 
   Label done;
-  __ teq(R3, Operand(R9));  // Does not affect carry flag.
+  __ teq(R3, Operand(R10));  // Does not affect carry flag.
   __ b(&done, EQ);  // If used - a_used == 0.
 
   Label carry_loop;
   __ Bind(&carry_loop);
   // Loop used - a_used times, used - a_used > 0.
   __ ldr(R0, Address(R3, Bigint::kBytesPerDigit, Address::PostIndex));
   __ sbcs(R0, R0, Operand(0));
-  __ teq(R3, Operand(R9));  // Does not affect carry flag.
+  __ teq(R3, Operand(R10));  // Does not affect carry flag.
   __ str(R0, Address(R6, Bigint::kBytesPerDigit, Address::PostIndex));
   __ b(&carry_loop, NE);
 
   __ Bind(&done);
   // Returning Object::null() is not required, since this method is private.
   __ Ret();
 }
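
Both loops above lean on ARM's sticky carry flag: adcs/sbcs consume and produce it, and teq is used for the exit test precisely because it leaves the flag alone. For subtraction, ARM's carry means "no borrow", so subs R0, R0, #0 seeds it to 1. The same chain in portable C++, shown for _absAdd (illustrative only):

#include <cstdint>

void BigintAbsAdd(const uint32_t* digits, int used,
                  const uint32_t* a_digits, int a_used,
                  uint32_t* r_digits) {
  uint32_t carry = 0;  // adds(R0, R0, 0) clears the carry flag
  for (int i = 0; i < a_used; ++i) {      // the add_loop
    uint64_t t = (uint64_t)digits[i] + a_digits[i] + carry;
    r_digits[i] = (uint32_t)t;
    carry = (uint32_t)(t >> 32);
  }
  for (int i = a_used; i < used; ++i) {   // the carry_loop
    uint64_t t = (uint64_t)digits[i] + carry;
    r_digits[i] = (uint32_t)t;
    carry = (uint32_t)(t >> 32);
  }
  r_digits[used] = carry;                 // the last_carry store
}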
 
 
 void Intrinsifier::Bigint_mulAdd(Assembler* assembler) {
(...skipping 148 matching lines...)
 
   // *ajp++ = low32(t) = R0
   __ str(R0, Address(R5, Bigint::kBytesPerDigit, Address::PostIndex));
 
   // R6 = low32(c) = high32(t)
   // R7 = high32(c) = 0
   __ mov(R7, Operand(0));
 
   // int n = used - i - 1; while (--n >= 0) ...
   __ ldr(R0, Address(SP, 0 * kWordSize));  // used is Smi
-  __ sub(R9, R0, Operand(R2));
+  __ sub(R10, R0, Operand(R2));
   __ mov(R0, Operand(2));  // n = used - i - 2; if (n >= 0) ... while (--n >= 0)
-  __ rsbs(R9, R0, Operand(R9, ASR, kSmiTagSize));
+  __ rsbs(R10, R0, Operand(R10, ASR, kSmiTagSize));
 
   Label loop, done;
   __ b(&done, MI);
 
   __ Bind(&loop);
   // x:   R3
   // xip: R4
   // ajp: R5
   // c:   R7:R6
   // t:   R2:R1:R0 (not live at loop entry)
-  // n:   R9
+  // n:   R10
 
   // uint32_t xi = *xip++
   __ ldr(R2, Address(R4, Bigint::kBytesPerDigit, Address::PostIndex));
 
   // uint96_t t = R7:R6:R0 = 2*x*xi + aj + c
   __ umull(R0, R1, R2, R3);  // R1:R0 = R2*R3.
   __ adds(R0, R0, Operand(R0));
   __ adcs(R1, R1, Operand(R1));
   __ mov(R2, Operand(0));
   __ adc(R2, R2, Operand(0));  // R2:R1:R0 = 2*x*xi.
   __ adds(R0, R0, Operand(R6));
   __ adcs(R1, R1, Operand(R7));
   __ adc(R2, R2, Operand(0));  // R2:R1:R0 = 2*x*xi + c.
   __ ldr(R6, Address(R5, 0));  // R6 = aj = *ajp.
   __ adds(R0, R0, Operand(R6));
   __ adcs(R6, R1, Operand(0));
   __ adc(R7, R2, Operand(0));  // R7:R6:R0 = 2*x*xi + c + aj.
 
   // *ajp++ = low32(t) = R0
   __ str(R0, Address(R5, Bigint::kBytesPerDigit, Address::PostIndex));
 
   // while (--n >= 0)
-  __ subs(R9, R9, Operand(1));  // --n
+  __ subs(R10, R10, Operand(1));  // --n
   __ b(&loop, PL);
 
   __ Bind(&done);
   // uint32_t aj = *ajp
   __ ldr(R0, Address(R5, 0));
 
   // uint64_t t = aj + c
   __ adds(R6, R6, Operand(R0));
   __ adc(R7, R7, Operand(0));
 
(...skipping 794 matching lines...)
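
The hunk above is evidently the inner step of the bignum squaring helper: the doubled cross term 2*x*xi is the giveaway. Each iteration folds t = 2*x*xi + aj + c through a 96-bit intermediate held in R2:R1:R0. A compact C++ model of one step (illustrative; unsigned __int128 is a GCC/Clang extension standing in for the register triple):

#include <cstdint>

void SqrAddStep(uint32_t x, uint32_t xi, uint32_t* ajp, uint64_t* c) {
  unsigned __int128 t = (unsigned __int128)2 * x * xi  // umull, then doubled
                        + *ajp                         // aj = *ajp
                        + *c;                          // carry in (R7:R6)
  *ajp = (uint32_t)t;        // *ajp++ = low32(t)
  *c = (uint64_t)(t >> 32);  // carry out, back into R7:R6
}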
   __ LoadClassId(R1, R1);
   __ AddImmediate(R1, R1, -kOneByteStringCid);
   __ add(R1, R2, Operand(R1, LSL, kWordSizeLog2));
   __ ldr(R0, FieldAddress(R1, JSRegExp::function_offset(kOneByteStringCid)));
 
   // Registers are now set up for the lazy compile stub. It expects the function
   // in R0, the argument descriptor in R4, and IC-Data in R5.
   __ eor(R5, R5, Operand(R5));
 
   // Tail-call the function.
+  __ ldr(CODE_REG, FieldAddress(R0, Function::code_offset()));
   __ ldr(R1, FieldAddress(R0, Function::entry_point_offset()));
   __ bx(R1);
 }
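
The one added load above is the heart of this CL as it touches intrinsics: under the new calling convention a callee expects its own Code object in CODE_REG on entry, so a tail call must materialize Function::code_offset() alongside the entry point before branching. A self-contained C++ analogy of that protocol (the types and helpers are hypothetical stand-ins, not the VM's):

#include <cstdio>

struct Code { const char* name; };
struct Function {
  Code* code;                        // what CODE_REG receives
  void (*entry_point)(Code* self);   // what bx jumps to
};

// The callee can locate its own Code object because the caller passed it.
void Entry(Code* self) { std::printf("running %s\n", self->name); }

void TailCall(Function* f) {
  Code* code_reg = f->code;   // ldr CODE_REG, FieldAddress(R0, code_offset())
  f->entry_point(code_reg);   // ldr R1, ...entry_point_offset(); bx R1
}

int main() {
  Code c{"regexp matcher"};
  Function f{&c, &Entry};
  TailCall(&f);
  return 0;
}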
 
 
 // On stack: user tag (+0).
 void Intrinsifier::UserTag_makeCurrent(Assembler* assembler) {
   // R1: Isolate.
   __ LoadIsolate(R1);
   // R0: Current user tag.
(...skipping 19 matching lines...)
 
 void Intrinsifier::Profiler_getCurrentTag(Assembler* assembler) {
   __ LoadIsolate(R0);
   __ ldr(R0, Address(R0, Isolate::current_tag_offset()));
   __ Ret();
 }
 
 }  // namespace dart
 
 #endif  // defined TARGET_ARCH_ARM
