Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(78)

Side by Side Diff: runtime/vm/intrinsifier_arm.cc

Issue 1156593002: Cache current thread in a reserved register and use it in LoadIsolate (Closed) Base URL: https://github.com/dart-lang/sdk.git@master
Patch Set: Added more comments. Created 5 years, 7 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
2 // for details. All rights reserved. Use of this source code is governed by a 2 // for details. All rights reserved. Use of this source code is governed by a
3 // BSD-style license that can be found in the LICENSE file. 3 // BSD-style license that can be found in the LICENSE file.
4 4
5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_ARM. 5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_ARM.
6 #if defined(TARGET_ARCH_ARM) 6 #if defined(TARGET_ARCH_ARM)
7 7
8 #include "vm/intrinsifier.h" 8 #include "vm/intrinsifier.h"
9 9
10 #include "vm/assembler.h" 10 #include "vm/assembler.h"
11 #include "vm/cpu.h" 11 #include "vm/cpu.h"
12 #include "vm/dart_entry.h" 12 #include "vm/dart_entry.h"
13 #include "vm/flow_graph_compiler.h" 13 #include "vm/flow_graph_compiler.h"
14 #include "vm/object.h" 14 #include "vm/object.h"
15 #include "vm/object_store.h" 15 #include "vm/object_store.h"
16 #include "vm/regexp_assembler.h" 16 #include "vm/regexp_assembler.h"
17 #include "vm/symbols.h" 17 #include "vm/symbols.h"
18 18
19 namespace dart { 19 namespace dart {
20 20
21 // When entering intrinsics code: 21 // When entering intrinsics code:
22 // R5: IC Data 22 // R5: IC Data
23 // R4: Arguments descriptor 23 // R4: Arguments descriptor
24 // LR: Return address 24 // LR: Return address
25 // The R5, R4 registers can be destroyed only if there is no slow-path, i.e. 25 // The R5, R4 registers can be destroyed only if there is no slow-path, i.e.
26 // if the intrinsified method always executes a return. 26 // if the intrinsified method always executes a return.
27 // The FP register should not be modified, because it is used by the profiler. 27 // The FP register should not be modified, because it is used by the profiler.
srdjan 2015/05/26 20:10:44 Mention THR(R8) as not allowed to be modified.
koda 2015/05/26 20:29:12 Done.
28 28
29 #define __ assembler-> 29 #define __ assembler->
30 30
31 31
32 intptr_t Intrinsifier::ParameterSlotFromSp() { return -1; } 32 intptr_t Intrinsifier::ParameterSlotFromSp() { return -1; }
33 33
34 34
35 static intptr_t ComputeObjectArrayTypeArgumentsOffset() { 35 static intptr_t ComputeObjectArrayTypeArgumentsOffset() {
36 const Library& core_lib = Library::Handle(Library::CoreLibrary()); 36 const Library& core_lib = Library::Handle(Library::CoreLibrary());
37 const Class& cls = Class::Handle( 37 const Class& cls = Class::Handle(
(...skipping 531 matching lines...) Expand 10 before | Expand all | Expand 10 after
569 __ b(&fall_through, LT); 569 __ b(&fall_through, LT);
570 __ SmiUntag(R1); 570 __ SmiUntag(R1);
571 571
572 // Pull off high bits that will be shifted off of R1 by making a mask 572 // Pull off high bits that will be shifted off of R1 by making a mask
573 // ((1 << R0) - 1), shifting it to the left, masking R1, then shifting back. 573 // ((1 << R0) - 1), shifting it to the left, masking R1, then shifting back.
574 // high bits = ((((1 << R0) - 1) << (32 - R0)) & R1) >> (32 - R0) 574 // high bits = ((((1 << R0) - 1) << (32 - R0)) & R1) >> (32 - R0)
575 // lo bits = R1 << R0 575 // lo bits = R1 << R0
576 __ LoadImmediate(R7, 1); 576 __ LoadImmediate(R7, 1);
577 __ mov(R7, Operand(R7, LSL, R0)); // R7 <- 1 << R0 577 __ mov(R7, Operand(R7, LSL, R0)); // R7 <- 1 << R0
578 __ sub(R7, R7, Operand(1)); // R7 <- R7 - 1 578 __ sub(R7, R7, Operand(1)); // R7 <- R7 - 1
579 __ rsb(R8, R0, Operand(32)); // R8 <- 32 - R0 579 __ rsb(R9, R0, Operand(32)); // R9 <- 32 - R0
580 __ mov(R7, Operand(R7, LSL, R8)); // R7 <- R7 << R8 580 __ mov(R7, Operand(R7, LSL, R9)); // R7 <- R7 << R9
581 __ and_(R7, R1, Operand(R7)); // R7 <- R7 & R1 581 __ and_(R7, R1, Operand(R7)); // R7 <- R7 & R1
582 __ mov(R7, Operand(R7, LSR, R8)); // R7 <- R7 >> R8 582 __ mov(R7, Operand(R7, LSR, R9)); // R7 <- R7 >> R9
583 // Now R7 has the bits that fall off of R1 on a left shift. 583 // Now R7 has the bits that fall off of R1 on a left shift.
584 __ mov(R1, Operand(R1, LSL, R0)); // R1 gets the low bits. 584 __ mov(R1, Operand(R1, LSL, R0)); // R1 gets the low bits.
585 585
586 const Class& mint_class = Class::Handle( 586 const Class& mint_class = Class::Handle(
587 Isolate::Current()->object_store()->mint_class()); 587 Isolate::Current()->object_store()->mint_class());
588 __ TryAllocate(mint_class, &fall_through, R0, R2); 588 __ TryAllocate(mint_class, &fall_through, R0, R2);
589 589
590 590
591 __ str(R1, FieldAddress(R0, Mint::value_offset())); 591 __ str(R1, FieldAddress(R0, Mint::value_offset()));
592 __ str(R7, FieldAddress(R0, Mint::value_offset() + kWordSize)); 592 __ str(R7, FieldAddress(R0, Mint::value_offset() + kWordSize));
(...skipping 217 matching lines...) Expand 10 before | Expand all | Expand 10 after
810 __ ldrd(R2, SP, 2 * kWordSize); 810 __ ldrd(R2, SP, 2 * kWordSize);
811 // R4 = r_digits, R5 = n, n is Smi, n % _DIGIT_BITS != 0. 811 // R4 = r_digits, R5 = n, n is Smi, n % _DIGIT_BITS != 0.
812 __ ldrd(R4, SP, 0 * kWordSize); 812 __ ldrd(R4, SP, 0 * kWordSize);
813 __ SmiUntag(R5); 813 __ SmiUntag(R5);
814 // R0 = n ~/ _DIGIT_BITS 814 // R0 = n ~/ _DIGIT_BITS
815 __ Asr(R0, R5, Operand(5)); 815 __ Asr(R0, R5, Operand(5));
816 // R6 = &x_digits[0] 816 // R6 = &x_digits[0]
817 __ add(R6, R3, Operand(TypedData::data_offset() - kHeapObjectTag)); 817 __ add(R6, R3, Operand(TypedData::data_offset() - kHeapObjectTag));
818 // R7 = &x_digits[x_used] 818 // R7 = &x_digits[x_used]
819 __ add(R7, R6, Operand(R2, LSL, 1)); 819 __ add(R7, R6, Operand(R2, LSL, 1));
820 // R8 = &r_digits[1] 820 // R9 = &r_digits[1]
821 __ add(R8, R4, Operand(TypedData::data_offset() - kHeapObjectTag + 821 __ add(R9, R4, Operand(TypedData::data_offset() - kHeapObjectTag +
822 Bigint::kBytesPerDigit)); 822 Bigint::kBytesPerDigit));
823 // R8 = &r_digits[x_used + n ~/ _DIGIT_BITS + 1] 823 // R9 = &r_digits[x_used + n ~/ _DIGIT_BITS + 1]
824 __ add(R0, R0, Operand(R2, ASR, 1)); 824 __ add(R0, R0, Operand(R2, ASR, 1));
825 __ add(R8, R8, Operand(R0, LSL, 2)); 825 __ add(R9, R9, Operand(R0, LSL, 2));
826 // R3 = n % _DIGIT_BITS 826 // R3 = n % _DIGIT_BITS
827 __ and_(R3, R5, Operand(31)); 827 __ and_(R3, R5, Operand(31));
828 // R2 = 32 - R3 828 // R2 = 32 - R3
829 __ rsb(R2, R3, Operand(32)); 829 __ rsb(R2, R3, Operand(32));
830 __ mov(R1, Operand(0)); 830 __ mov(R1, Operand(0));
831 Label loop; 831 Label loop;
832 __ Bind(&loop); 832 __ Bind(&loop);
833 __ ldr(R0, Address(R7, -Bigint::kBytesPerDigit, Address::PreIndex)); 833 __ ldr(R0, Address(R7, -Bigint::kBytesPerDigit, Address::PreIndex));
834 __ orr(R1, R1, Operand(R0, LSR, R2)); 834 __ orr(R1, R1, Operand(R0, LSR, R2));
835 __ str(R1, Address(R8, -Bigint::kBytesPerDigit, Address::PreIndex)); 835 __ str(R1, Address(R9, -Bigint::kBytesPerDigit, Address::PreIndex));
836 __ mov(R1, Operand(R0, LSL, R3)); 836 __ mov(R1, Operand(R0, LSL, R3));
837 __ teq(R7, Operand(R6)); 837 __ teq(R7, Operand(R6));
838 __ b(&loop, NE); 838 __ b(&loop, NE);
839 __ str(R1, Address(R8, -Bigint::kBytesPerDigit, Address::PreIndex)); 839 __ str(R1, Address(R9, -Bigint::kBytesPerDigit, Address::PreIndex));
840 // Returning Object::null() is not required, since this method is private. 840 // Returning Object::null() is not required, since this method is private.
841 __ Ret(); 841 __ Ret();
842 } 842 }
843 843
844 844
845 void Intrinsifier::Bigint_rsh(Assembler* assembler) { 845 void Intrinsifier::Bigint_rsh(Assembler* assembler) {
846 // static void _rsh(Uint32List x_digits, int x_used, int n, 846 // static void _rsh(Uint32List x_digits, int x_used, int n,
847 // Uint32List r_digits) 847 // Uint32List r_digits)
848 848
849 // R2 = x_used, R3 = x_digits, x_used > 0, x_used is Smi. 849 // R2 = x_used, R3 = x_digits, x_used > 0, x_used is Smi.
850 __ ldrd(R2, SP, 2 * kWordSize); 850 __ ldrd(R2, SP, 2 * kWordSize);
851 // R4 = r_digits, R5 = n, n is Smi, n % _DIGIT_BITS != 0. 851 // R4 = r_digits, R5 = n, n is Smi, n % _DIGIT_BITS != 0.
852 __ ldrd(R4, SP, 0 * kWordSize); 852 __ ldrd(R4, SP, 0 * kWordSize);
853 __ SmiUntag(R5); 853 __ SmiUntag(R5);
854 // R0 = n ~/ _DIGIT_BITS 854 // R0 = n ~/ _DIGIT_BITS
855 __ Asr(R0, R5, Operand(5)); 855 __ Asr(R0, R5, Operand(5));
856 // R8 = &r_digits[0] 856 // R9 = &r_digits[0]
857 __ add(R8, R4, Operand(TypedData::data_offset() - kHeapObjectTag)); 857 __ add(R9, R4, Operand(TypedData::data_offset() - kHeapObjectTag));
858 // R7 = &x_digits[n ~/ _DIGIT_BITS] 858 // R7 = &x_digits[n ~/ _DIGIT_BITS]
859 __ add(R7, R3, Operand(TypedData::data_offset() - kHeapObjectTag)); 859 __ add(R7, R3, Operand(TypedData::data_offset() - kHeapObjectTag));
860 __ add(R7, R7, Operand(R0, LSL, 2)); 860 __ add(R7, R7, Operand(R0, LSL, 2));
861 // R6 = &r_digits[x_used - n ~/ _DIGIT_BITS - 1] 861 // R6 = &r_digits[x_used - n ~/ _DIGIT_BITS - 1]
862 __ add(R0, R0, Operand(1)); 862 __ add(R0, R0, Operand(1));
863 __ rsb(R0, R0, Operand(R2, ASR, 1)); 863 __ rsb(R0, R0, Operand(R2, ASR, 1));
864 __ add(R6, R8, Operand(R0, LSL, 2)); 864 __ add(R6, R9, Operand(R0, LSL, 2));
865 // R3 = n % _DIGIT_BITS 865 // R3 = n % _DIGIT_BITS
866 __ and_(R3, R5, Operand(31)); 866 __ and_(R3, R5, Operand(31));
867 // R2 = 32 - R3 867 // R2 = 32 - R3
868 __ rsb(R2, R3, Operand(32)); 868 __ rsb(R2, R3, Operand(32));
869 // R1 = x_digits[n ~/ _DIGIT_BITS] >> (n % _DIGIT_BITS) 869 // R1 = x_digits[n ~/ _DIGIT_BITS] >> (n % _DIGIT_BITS)
870 __ ldr(R1, Address(R7, Bigint::kBytesPerDigit, Address::PostIndex)); 870 __ ldr(R1, Address(R7, Bigint::kBytesPerDigit, Address::PostIndex));
871 __ mov(R1, Operand(R1, LSR, R3)); 871 __ mov(R1, Operand(R1, LSR, R3));
872 Label loop_entry; 872 Label loop_entry;
873 __ b(&loop_entry); 873 __ b(&loop_entry);
874 Label loop; 874 Label loop;
875 __ Bind(&loop); 875 __ Bind(&loop);
876 __ ldr(R0, Address(R7, Bigint::kBytesPerDigit, Address::PostIndex)); 876 __ ldr(R0, Address(R7, Bigint::kBytesPerDigit, Address::PostIndex));
877 __ orr(R1, R1, Operand(R0, LSL, R2)); 877 __ orr(R1, R1, Operand(R0, LSL, R2));
878 __ str(R1, Address(R8, Bigint::kBytesPerDigit, Address::PostIndex)); 878 __ str(R1, Address(R9, Bigint::kBytesPerDigit, Address::PostIndex));
879 __ mov(R1, Operand(R0, LSR, R3)); 879 __ mov(R1, Operand(R0, LSR, R3));
880 __ Bind(&loop_entry); 880 __ Bind(&loop_entry);
881 __ teq(R8, Operand(R6)); 881 __ teq(R9, Operand(R6));
882 __ b(&loop, NE); 882 __ b(&loop, NE);
883 __ str(R1, Address(R8, 0)); 883 __ str(R1, Address(R9, 0));
884 // Returning Object::null() is not required, since this method is private. 884 // Returning Object::null() is not required, since this method is private.
885 __ Ret(); 885 __ Ret();
886 } 886 }
887 887
888 888
889 void Intrinsifier::Bigint_absAdd(Assembler* assembler) { 889 void Intrinsifier::Bigint_absAdd(Assembler* assembler) {
890 // static void _absAdd(Uint32List digits, int used, 890 // static void _absAdd(Uint32List digits, int used,
891 // Uint32List a_digits, int a_used, 891 // Uint32List a_digits, int a_used,
892 // Uint32List r_digits) 892 // Uint32List r_digits)
893 893
894 // R2 = used, R3 = digits 894 // R2 = used, R3 = digits
895 __ ldrd(R2, SP, 3 * kWordSize); 895 __ ldrd(R2, SP, 3 * kWordSize);
896 // R3 = &digits[0] 896 // R3 = &digits[0]
897 __ add(R3, R3, Operand(TypedData::data_offset() - kHeapObjectTag)); 897 __ add(R3, R3, Operand(TypedData::data_offset() - kHeapObjectTag));
898 898
899 // R4 = a_used, R5 = a_digits 899 // R4 = a_used, R5 = a_digits
900 __ ldrd(R4, SP, 1 * kWordSize); 900 __ ldrd(R4, SP, 1 * kWordSize);
901 // R5 = &a_digits[0] 901 // R5 = &a_digits[0]
902 __ add(R5, R5, Operand(TypedData::data_offset() - kHeapObjectTag)); 902 __ add(R5, R5, Operand(TypedData::data_offset() - kHeapObjectTag));
903 903
904 // R6 = r_digits 904 // R6 = r_digits
905 __ ldr(R6, Address(SP, 0 * kWordSize)); 905 __ ldr(R6, Address(SP, 0 * kWordSize));
906 // R6 = &r_digits[0] 906 // R6 = &r_digits[0]
907 __ add(R6, R6, Operand(TypedData::data_offset() - kHeapObjectTag)); 907 __ add(R6, R6, Operand(TypedData::data_offset() - kHeapObjectTag));
908 908
909 // R7 = &digits[a_used >> 1], a_used is Smi. 909 // R7 = &digits[a_used >> 1], a_used is Smi.
910 __ add(R7, R3, Operand(R4, LSL, 1)); 910 __ add(R7, R3, Operand(R4, LSL, 1));
911 911
912 // R8 = &digits[used >> 1], used is Smi. 912 // R9 = &digits[used >> 1], used is Smi.
913 __ add(R8, R3, Operand(R2, LSL, 1)); 913 __ add(R9, R3, Operand(R2, LSL, 1));
914 914
915 __ adds(R0, R0, Operand(0)); // carry flag = 0 915 __ adds(R0, R0, Operand(0)); // carry flag = 0
916 Label add_loop; 916 Label add_loop;
917 __ Bind(&add_loop); 917 __ Bind(&add_loop);
918 // Loop a_used times, a_used > 0. 918 // Loop a_used times, a_used > 0.
919 __ ldr(R0, Address(R3, Bigint::kBytesPerDigit, Address::PostIndex)); 919 __ ldr(R0, Address(R3, Bigint::kBytesPerDigit, Address::PostIndex));
920 __ ldr(R1, Address(R5, Bigint::kBytesPerDigit, Address::PostIndex)); 920 __ ldr(R1, Address(R5, Bigint::kBytesPerDigit, Address::PostIndex));
921 __ adcs(R0, R0, Operand(R1)); 921 __ adcs(R0, R0, Operand(R1));
922 __ teq(R3, Operand(R7)); // Does not affect carry flag. 922 __ teq(R3, Operand(R7)); // Does not affect carry flag.
923 __ str(R0, Address(R6, Bigint::kBytesPerDigit, Address::PostIndex)); 923 __ str(R0, Address(R6, Bigint::kBytesPerDigit, Address::PostIndex));
924 __ b(&add_loop, NE); 924 __ b(&add_loop, NE);
925 925
926 Label last_carry; 926 Label last_carry;
927 __ teq(R3, Operand(R8)); // Does not affect carry flag. 927 __ teq(R3, Operand(R9)); // Does not affect carry flag.
928 __ b(&last_carry, EQ); // If used - a_used == 0. 928 __ b(&last_carry, EQ); // If used - a_used == 0.
929 929
930 Label carry_loop; 930 Label carry_loop;
931 __ Bind(&carry_loop); 931 __ Bind(&carry_loop);
932 // Loop used - a_used times, used - a_used > 0. 932 // Loop used - a_used times, used - a_used > 0.
933 __ ldr(R0, Address(R3, Bigint::kBytesPerDigit, Address::PostIndex)); 933 __ ldr(R0, Address(R3, Bigint::kBytesPerDigit, Address::PostIndex));
934 __ adcs(R0, R0, Operand(0)); 934 __ adcs(R0, R0, Operand(0));
935 __ teq(R3, Operand(R8)); // Does not affect carry flag. 935 __ teq(R3, Operand(R9)); // Does not affect carry flag.
936 __ str(R0, Address(R6, Bigint::kBytesPerDigit, Address::PostIndex)); 936 __ str(R0, Address(R6, Bigint::kBytesPerDigit, Address::PostIndex));
937 __ b(&carry_loop, NE); 937 __ b(&carry_loop, NE);
938 938
939 __ Bind(&last_carry); 939 __ Bind(&last_carry);
940 __ mov(R0, Operand(0)); 940 __ mov(R0, Operand(0));
941 __ adc(R0, R0, Operand(0)); 941 __ adc(R0, R0, Operand(0));
942 __ str(R0, Address(R6, 0)); 942 __ str(R0, Address(R6, 0));
943 943
944 // Returning Object::null() is not required, since this method is private. 944 // Returning Object::null() is not required, since this method is private.
945 __ Ret(); 945 __ Ret();
(...skipping 16 matching lines...) Expand all
962 __ add(R5, R5, Operand(TypedData::data_offset() - kHeapObjectTag)); 962 __ add(R5, R5, Operand(TypedData::data_offset() - kHeapObjectTag));
963 963
964 // R6 = r_digits 964 // R6 = r_digits
965 __ ldr(R6, Address(SP, 0 * kWordSize)); 965 __ ldr(R6, Address(SP, 0 * kWordSize));
966 // R6 = &r_digits[0] 966 // R6 = &r_digits[0]
967 __ add(R6, R6, Operand(TypedData::data_offset() - kHeapObjectTag)); 967 __ add(R6, R6, Operand(TypedData::data_offset() - kHeapObjectTag));
968 968
969 // R7 = &digits[a_used >> 1], a_used is Smi. 969 // R7 = &digits[a_used >> 1], a_used is Smi.
970 __ add(R7, R3, Operand(R4, LSL, 1)); 970 __ add(R7, R3, Operand(R4, LSL, 1));
971 971
972 // R8 = &digits[used >> 1], used is Smi. 972 // R9 = &digits[used >> 1], used is Smi.
973 __ add(R8, R3, Operand(R2, LSL, 1)); 973 __ add(R9, R3, Operand(R2, LSL, 1));
974 974
975 __ subs(R0, R0, Operand(0)); // carry flag = 1 975 __ subs(R0, R0, Operand(0)); // carry flag = 1
976 Label sub_loop; 976 Label sub_loop;
977 __ Bind(&sub_loop); 977 __ Bind(&sub_loop);
978 // Loop a_used times, a_used > 0. 978 // Loop a_used times, a_used > 0.
979 __ ldr(R0, Address(R3, Bigint::kBytesPerDigit, Address::PostIndex)); 979 __ ldr(R0, Address(R3, Bigint::kBytesPerDigit, Address::PostIndex));
980 __ ldr(R1, Address(R5, Bigint::kBytesPerDigit, Address::PostIndex)); 980 __ ldr(R1, Address(R5, Bigint::kBytesPerDigit, Address::PostIndex));
981 __ sbcs(R0, R0, Operand(R1)); 981 __ sbcs(R0, R0, Operand(R1));
982 __ teq(R3, Operand(R7)); // Does not affect carry flag. 982 __ teq(R3, Operand(R7)); // Does not affect carry flag.
983 __ str(R0, Address(R6, Bigint::kBytesPerDigit, Address::PostIndex)); 983 __ str(R0, Address(R6, Bigint::kBytesPerDigit, Address::PostIndex));
984 __ b(&sub_loop, NE); 984 __ b(&sub_loop, NE);
985 985
986 Label done; 986 Label done;
987 __ teq(R3, Operand(R8)); // Does not affect carry flag. 987 __ teq(R3, Operand(R9)); // Does not affect carry flag.
988 __ b(&done, EQ); // If used - a_used == 0. 988 __ b(&done, EQ); // If used - a_used == 0.
989 989
990 Label carry_loop; 990 Label carry_loop;
991 __ Bind(&carry_loop); 991 __ Bind(&carry_loop);
992 // Loop used - a_used times, used - a_used > 0. 992 // Loop used - a_used times, used - a_used > 0.
993 __ ldr(R0, Address(R3, Bigint::kBytesPerDigit, Address::PostIndex)); 993 __ ldr(R0, Address(R3, Bigint::kBytesPerDigit, Address::PostIndex));
994 __ sbcs(R0, R0, Operand(0)); 994 __ sbcs(R0, R0, Operand(0));
995 __ teq(R3, Operand(R8)); // Does not affect carry flag. 995 __ teq(R3, Operand(R9)); // Does not affect carry flag.
996 __ str(R0, Address(R6, Bigint::kBytesPerDigit, Address::PostIndex)); 996 __ str(R0, Address(R6, Bigint::kBytesPerDigit, Address::PostIndex));
997 __ b(&carry_loop, NE); 997 __ b(&carry_loop, NE);
998 998
999 __ Bind(&done); 999 __ Bind(&done);
1000 // Returning Object::null() is not required, since this method is private. 1000 // Returning Object::null() is not required, since this method is private.
1001 __ Ret(); 1001 __ Ret();
1002 } 1002 }
1003 1003
1004 1004
1005 void Intrinsifier::Bigint_mulAdd(Assembler* assembler) { 1005 void Intrinsifier::Bigint_mulAdd(Assembler* assembler) {
(...skipping 148 matching lines...) Expand 10 before | Expand all | Expand 10 after
1154 1154
1155 // *ajp++ = low32(t) = R0 1155 // *ajp++ = low32(t) = R0
1156 __ str(R0, Address(R5, Bigint::kBytesPerDigit, Address::PostIndex)); 1156 __ str(R0, Address(R5, Bigint::kBytesPerDigit, Address::PostIndex));
1157 1157
1158 // R6 = low32(c) = high32(t) 1158 // R6 = low32(c) = high32(t)
1159 // R7 = high32(c) = 0 1159 // R7 = high32(c) = 0
1160 __ mov(R7, Operand(0)); 1160 __ mov(R7, Operand(0));
1161 1161
1162 // int n = used - i - 1; while (--n >= 0) ... 1162 // int n = used - i - 1; while (--n >= 0) ...
1163 __ ldr(R0, Address(SP, 0 * kWordSize)); // used is Smi 1163 __ ldr(R0, Address(SP, 0 * kWordSize)); // used is Smi
1164 __ sub(R8, R0, Operand(R2)); 1164 __ sub(R9, R0, Operand(R2));
1165 __ mov(R0, Operand(2)); // n = used - i - 2; if (n >= 0) ... while (--n >= 0) 1165 __ mov(R0, Operand(2)); // n = used - i - 2; if (n >= 0) ... while (--n >= 0)
1166 __ rsbs(R8, R0, Operand(R8, ASR, kSmiTagSize)); 1166 __ rsbs(R9, R0, Operand(R9, ASR, kSmiTagSize));
1167 1167
1168 Label loop, done; 1168 Label loop, done;
1169 __ b(&done, MI); 1169 __ b(&done, MI);
1170 1170
1171 __ Bind(&loop); 1171 __ Bind(&loop);
1172 // x: R3 1172 // x: R3
1173 // xip: R4 1173 // xip: R4
1174 // ajp: R5 1174 // ajp: R5
1175 // c: R7:R6 1175 // c: R7:R6
1176 // t: R2:R1:R0 (not live at loop entry) 1176 // t: R2:R1:R0 (not live at loop entry)
1177 // n: R8 1177 // n: R9
1178 1178
1179 // uint32_t xi = *xip++ 1179 // uint32_t xi = *xip++
1180 __ ldr(R2, Address(R4, Bigint::kBytesPerDigit, Address::PostIndex)); 1180 __ ldr(R2, Address(R4, Bigint::kBytesPerDigit, Address::PostIndex));
1181 1181
1182 // uint96_t t = R7:R6:R0 = 2*x*xi + aj + c 1182 // uint96_t t = R7:R6:R0 = 2*x*xi + aj + c
1183 __ umull(R0, R1, R2, R3); // R1:R0 = R2*R3. 1183 __ umull(R0, R1, R2, R3); // R1:R0 = R2*R3.
1184 __ adds(R0, R0, Operand(R0)); 1184 __ adds(R0, R0, Operand(R0));
1185 __ adcs(R1, R1, Operand(R1)); 1185 __ adcs(R1, R1, Operand(R1));
1186 __ mov(R2, Operand(0)); 1186 __ mov(R2, Operand(0));
1187 __ adc(R2, R2, Operand(0)); // R2:R1:R0 = 2*x*xi. 1187 __ adc(R2, R2, Operand(0)); // R2:R1:R0 = 2*x*xi.
1188 __ adds(R0, R0, Operand(R6)); 1188 __ adds(R0, R0, Operand(R6));
1189 __ adcs(R1, R1, Operand(R7)); 1189 __ adcs(R1, R1, Operand(R7));
1190 __ adc(R2, R2, Operand(0)); // R2:R1:R0 = 2*x*xi + c. 1190 __ adc(R2, R2, Operand(0)); // R2:R1:R0 = 2*x*xi + c.
1191 __ ldr(R6, Address(R5, 0)); // R6 = aj = *ajp. 1191 __ ldr(R6, Address(R5, 0)); // R6 = aj = *ajp.
1192 __ adds(R0, R0, Operand(R6)); 1192 __ adds(R0, R0, Operand(R6));
1193 __ adcs(R6, R1, Operand(0)); 1193 __ adcs(R6, R1, Operand(0));
1194 __ adc(R7, R2, Operand(0)); // R7:R6:R0 = 2*x*xi + c + aj. 1194 __ adc(R7, R2, Operand(0)); // R7:R6:R0 = 2*x*xi + c + aj.
1195 1195
1196 // *ajp++ = low32(t) = R0 1196 // *ajp++ = low32(t) = R0
1197 __ str(R0, Address(R5, Bigint::kBytesPerDigit, Address::PostIndex)); 1197 __ str(R0, Address(R5, Bigint::kBytesPerDigit, Address::PostIndex));
1198 1198
1199 // while (--n >= 0) 1199 // while (--n >= 0)
1200 __ subs(R8, R8, Operand(1)); // --n 1200 __ subs(R9, R9, Operand(1)); // --n
1201 __ b(&loop, PL); 1201 __ b(&loop, PL);
1202 1202
1203 __ Bind(&done); 1203 __ Bind(&done);
1204 // uint32_t aj = *ajp 1204 // uint32_t aj = *ajp
1205 __ ldr(R0, Address(R5, 0)); 1205 __ ldr(R0, Address(R5, 0));
1206 1206
1207 // uint64_t t = aj + c 1207 // uint64_t t = aj + c
1208 __ adds(R6, R6, Operand(R0)); 1208 __ adds(R6, R6, Operand(R0));
1209 __ adc(R7, R7, Operand(0)); 1209 __ adc(R7, R7, Operand(0));
1210 1210
(...skipping 815 matching lines...) Expand 10 before | Expand all | Expand 10 after
2026 Isolate* isolate = Isolate::Current(); 2026 Isolate* isolate = Isolate::Current();
2027 __ LoadImmediate(R1, reinterpret_cast<uword>(isolate)); 2027 __ LoadImmediate(R1, reinterpret_cast<uword>(isolate));
2028 // Set return value to Isolate::current_tag_. 2028 // Set return value to Isolate::current_tag_.
2029 __ ldr(R0, Address(R1, Isolate::current_tag_offset())); 2029 __ ldr(R0, Address(R1, Isolate::current_tag_offset()));
2030 __ Ret(); 2030 __ Ret();
2031 } 2031 }
2032 2032
2033 } // namespace dart 2033 } // namespace dart
2034 2034
2035 #endif // defined TARGET_ARCH_ARM 2035 #endif // defined TARGET_ARCH_ARM
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698