Chromium Code Reviews

Unified Diff: runtime/vm/intrinsifier_arm.cc

Issue 1419223003: Re-assign registers on ARM so PP and CODE_REG are below R7 (FP on iOS). (Closed) Base URL: git@github.com:dart-lang/sdk.git@master
Patch Set: Created 5 years, 1 month ago
// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#include "vm/globals.h"  // Needed here to get TARGET_ARCH_ARM.
#if defined(TARGET_ARCH_ARM)

#include "vm/intrinsifier.h"

#include "vm/assembler.h"
#include "vm/cpu.h"
#include "vm/dart_entry.h"
#include "vm/flow_graph_compiler.h"
#include "vm/object.h"
#include "vm/object_store.h"
#include "vm/regexp_assembler.h"
#include "vm/symbols.h"

namespace dart {

DECLARE_FLAG(bool, interpret_irregexp);

// When entering intrinsics code:
-// R5: IC Data
// R4: Arguments descriptor
// LR: Return address
-// The R5, R4 registers can be destroyed only if there is no slow-path, i.e.
+// The R4 register can be destroyed only if there is no slow-path, i.e.
// if the intrinsified method always executes a return.
// The FP register should not be modified, because it is used by the profiler.
-// The THR register (see constants_arm.h) must be preserved.
+// The PP and THR registers (see constants_arm.h) must be preserved.

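The hunks below carry out the renaming announced in the CL title. Read together, three patterns recur; this summary is inferred from this file alone, and the authoritative assignments live in constants_arm.h:

    // R9 takes over R5's old role as the IC data register (see the
    // JSRegExp_ExecuteMatch comment near the end of this diff).
    // R8 replaces R6 as a general-purpose scratch register.
    // R6 becomes CODE_REG, formerly R10 (see the ASSERT(CODE_REG == R6)
    // in Integer_shl below).

Functions that consumed many scratch registers, such as the Bigint intrinsics, are renumbered wholesale rather than one-for-one. The net effect is that PP and CODE_REG now sit below R7, which iOS uses as the frame pointer.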
#define __ assembler->


intptr_t Intrinsifier::ParameterSlotFromSp() { return -1; }


// Intrinsify only for Smi value and index. Non-smi values need a store buffer
// update. Array length is always a Smi.
void Intrinsifier::ObjectArraySetIndexed(Assembler* assembler) {
(...skipping 168 matching lines...)
  __ ldr(R3, Address(SP, kArrayLengthStackOffset)); /* Array length. */ \
  __ InitializeFieldNoBarrier(R0, \
                              FieldAddress(R0, type_name::length_offset()), \
                              R3); \
  /* Initialize all array elements to 0. */ \
  /* R0: new object start as a tagged pointer. */ \
  /* R1: new object end address. */ \
  /* R2: allocation size. */ \
  /* R3: iterator which initially points to the start of the variable */ \
  /* R4: allocation stats address */ \
-  /* R6, R7: zero. */ \
+  /* R8, R9: zero. */ \
  /* data area to be initialized. */ \
-  __ LoadImmediate(R6, 0); \
-  __ mov(R7, Operand(R6)); \
+  __ LoadImmediate(R8, 0); \
+  __ mov(R9, Operand(R8)); \
  __ AddImmediate(R3, R0, sizeof(Raw##type_name) - 1); \
  Label init_loop; \
  __ Bind(&init_loop); \
  __ AddImmediate(R3, 2 * kWordSize); \
  __ cmp(R3, Operand(R1)); \
-  __ strd(R6, R7, R3, -2 * kWordSize, LS); \
+  __ strd(R8, R9, R3, -2 * kWordSize, LS); \
  __ b(&init_loop, CC); \
-  __ str(R6, Address(R3, -2 * kWordSize), HI); \
+  __ str(R8, Address(R3, -2 * kWordSize), HI); \
  \
  __ IncrementAllocationStatsWithSize(R4, R2, space); \
  __ Ret(); \
  __ Bind(&fall_through); \


static int GetScaleFactor(intptr_t size) {
  switch (size) {
    case 1: return 0;
    case 2: return 1;
(...skipping 66 matching lines...)
  __ subs(R0, R1, Operand(R0));  // Subtract.
  __ bx(LR, VC);  // Return if no overflow.
  // Otherwise fall through.
  __ Bind(&fall_through);
}


void Intrinsifier::Integer_mulFromInteger(Assembler* assembler) {
  Label fall_through;
  TestBothArgumentsSmis(assembler, &fall_through);  // checks two smis
-  __ SmiUntag(R0);  // Untags R6. We only want result shifted by one.
+  __ SmiUntag(R0);  // Untags R0. We only want result shifted by one.
  __ smull(R0, IP, R0, R1);  // IP:R0 <- R0 * R1.
  __ cmp(IP, Operand(R0, ASR, 31));
  __ bx(LR, EQ);
  __ Bind(&fall_through);  // Fall through on overflow.
}
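The overflow check in Integer_mulFromInteger relies on a two's-complement identity: a 64-bit product fits in 32 bits exactly when the high word equals the sign extension of the low word. A minimal C++ sketch of the same test (function name invented for illustration; not VM code):

    #include <cstdint>

    // Sketch only. Returns true and stores the product iff a * b fits
    // in 32 bits.
    bool MulFitsIn32Bits(int32_t a, int32_t b, int32_t* out) {
      int64_t product = static_cast<int64_t>(a) * b;     // smull: IP:R0 = R0*R1
      int32_t lo = static_cast<int32_t>(product);        // R0
      int32_t hi = static_cast<int32_t>(product >> 32);  // IP
      if (hi != (lo >> 31)) return false;                // cmp IP, (R0 ASR 31)
      *out = lo;
      return true;
    }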


void Intrinsifier::Integer_mul(Assembler* assembler) {
  Integer_mulFromInteger(assembler);
}
(...skipping 172 matching lines...)

void Intrinsifier::Integer_bitXor(Assembler* assembler) {
  Integer_bitXorFromInteger(assembler);
}


void Intrinsifier::Integer_shl(Assembler* assembler) {
  ASSERT(kSmiTagShift == 1);
  ASSERT(kSmiTag == 0);
  Label fall_through;
-  __ Push(R10);
+  __ Push(R6);
  TestBothArgumentsSmis(assembler, &fall_through);
  __ CompareImmediate(R0, Smi::RawValue(Smi::kBits));
  __ b(&fall_through, HI);

  __ SmiUntag(R0);

  // Check for overflow by shifting left and shifting back arithmetically.
  // If the result is different from the original, there was overflow.
  __ mov(IP, Operand(R1, LSL, R0));
  __ cmp(R1, Operand(IP, ASR, R0));

  // No overflow, result in R0.
  __ mov(R0, Operand(R1, LSL, R0), EQ);
  __ bx(LR, EQ);

  // Arguments are Smi but the shift produced an overflow to Mint.
  __ CompareImmediate(R1, 0);
  __ b(&fall_through, LT);
  __ SmiUntag(R1);

  // Pull off high bits that will be shifted off of R1 by making a mask
  // ((1 << R0) - 1), shifting it to the left, masking R1, then shifting back.
  // high bits = (((1 << R0) - 1) << (32 - R0)) & R1) >> (32 - R0)
  // lo bits = R1 << R0
  __ LoadImmediate(R7, 1);
  __ mov(R7, Operand(R7, LSL, R0));  // R7 <- 1 << R0
  __ sub(R7, R7, Operand(1));  // R7 <- R7 - 1
-  __ rsb(R10, R0, Operand(32));  // R10 <- 32 - R0
-  __ mov(R7, Operand(R7, LSL, R10));  // R7 <- R7 << R10
+  __ rsb(R6, R0, Operand(32));  // R6 <- 32 - R0
+  __ mov(R7, Operand(R7, LSL, R6));  // R7 <- R7 << R6
  __ and_(R7, R1, Operand(R7));  // R7 <- R7 & R1
-  __ mov(R7, Operand(R7, LSR, R10));  // R7 <- R7 >> R10
+  __ mov(R7, Operand(R7, LSR, R6));  // R7 <- R7 >> R6
  // Now R7 has the bits that fall off of R1 on a left shift.
  __ mov(R1, Operand(R1, LSL, R0));  // R1 gets the low bits.

  const Class& mint_class = Class::Handle(
      Isolate::Current()->object_store()->mint_class());
  __ TryAllocate(mint_class, &fall_through, R0, R2);


  __ str(R1, FieldAddress(R0, Mint::value_offset()));
  __ str(R7, FieldAddress(R0, Mint::value_offset() + kWordSize));
-  __ Pop(R10);
+  __ Pop(R6);
  __ Ret();
  __ Bind(&fall_through);
-  ASSERT(CODE_REG == R10);
-  __ Pop(R10);
+  ASSERT(CODE_REG == R6);
+  __ Pop(R6);
}
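When the shifted value no longer fits in a Smi, Integer_shl materializes a Mint from the low word plus the bits shifted out of the top. A C++ sketch of the bit manipulation (function name invented for illustration; the Mint path only runs for shift counts in 1..31):

    #include <cstdint>

    // Sketch only. Splits (value << shift) into the 32 low bits and the
    // bits shifted out of the top.
    void ShlToMintParts(uint32_t value, int shift,  // 1 <= shift <= 31
                        uint32_t* lo, uint32_t* hi) {
      uint32_t mask = (1u << shift) - 1u;                      // (1 << R0) - 1
      *hi = ((mask << (32 - shift)) & value) >> (32 - shift);  // high bits
      *lo = value << shift;                                    // low bits
    }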


static void Get64SmiOrMint(Assembler* assembler,
                           Register res_hi,
                           Register res_lo,
                           Register reg,
                           Label* not_smi_or_mint) {
  Label not_smi, done;
  __ tst(reg, Operand(kSmiTagMask));
(...skipping 48 matching lines...)
      break;
    default:
      UNREACHABLE();
      hi_true_cond = hi_false_cond = lo_false_cond = VS;
  }

  __ Bind(&try_mint_smi);
  // Get left as 64 bit integer.
  Get64SmiOrMint(assembler, R3, R2, R1, &fall_through);
  // Get right as 64 bit integer.
-  Get64SmiOrMint(assembler, R7, R6, R0, &fall_through);
+  Get64SmiOrMint(assembler, R7, R8, R0, &fall_through);
  // R3: left high.
  // R2: left low.
  // R7: right high.
-  // R6: right low.
+  // R8: right low.

  __ cmp(R3, Operand(R7));  // Compare left hi, right high.
  __ b(&is_false, hi_false_cond);
  __ b(&is_true, hi_true_cond);
-  __ cmp(R2, Operand(R6));  // Compare left lo, right lo.
+  __ cmp(R2, Operand(R8));  // Compare left lo, right lo.
  __ b(&is_false, lo_false_cond);
  // Else is true.
  __ b(&is_true);

  __ Bind(&fall_through);
}
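The two-word comparison decides the result from the high words and consults the low words only on a tie; the high halves compare signed, the low halves unsigned. A C++ sketch for less-than (function name invented for illustration):

    #include <cstdint>

    // Sketch only. Signed 64-bit a < b using 32-bit halves, as in
    // CompareIntegers above.
    bool LessThan64(int32_t a_hi, uint32_t a_lo, int32_t b_hi, uint32_t b_lo) {
      if (a_hi != b_hi) return a_hi < b_hi;  // high words decide (signed)
      return a_lo < b_lo;                    // tie: low words decide (unsigned)
    }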


void Intrinsifier::Integer_greaterThanFromInt(Assembler* assembler) {
  CompareIntegers(assembler, LT);
(...skipping 116 matching lines...)
  __ rsb(R0, R0, Operand(32));
  __ SmiTag(R0);
  __ Ret();
}


void Intrinsifier::Bigint_lsh(Assembler* assembler) {
  // static void _lsh(Uint32List x_digits, int x_used, int n,
  //                  Uint32List r_digits)

-  // R2 = x_used, R3 = x_digits, x_used > 0, x_used is Smi.
-  __ ldrd(R2, R3, SP, 2 * kWordSize);
-  // R4 = r_digits, R5 = n, n is Smi, n % _DIGIT_BITS != 0.
-  __ ldrd(R4, R5, SP, 0 * kWordSize);
-  __ SmiUntag(R5);
-  // R0 = n ~/ _DIGIT_BITS
-  __ Asr(R0, R5, Operand(5));
-  // R6 = &x_digits[0]
-  __ add(R6, R3, Operand(TypedData::data_offset() - kHeapObjectTag));
+  // R0 = x_used, R1 = x_digits, x_used > 0, x_used is Smi.
+  __ ldrd(R0, R1, SP, 2 * kWordSize);
+  // R2 = r_digits, R3 = n, n is Smi, n % _DIGIT_BITS != 0.
+  __ ldrd(R2, R3, SP, 0 * kWordSize);
+  __ SmiUntag(R3);
+  // R4 = n ~/ _DIGIT_BITS
+  __ Asr(R4, R3, Operand(5));
+  // R8 = &x_digits[0]
+  __ add(R8, R1, Operand(TypedData::data_offset() - kHeapObjectTag));
  // R7 = &x_digits[x_used]
-  __ add(R7, R6, Operand(R2, LSL, 1));
-  // R10 = &r_digits[1]
-  __ add(R10, R4, Operand(TypedData::data_offset() - kHeapObjectTag +
-                          Bigint::kBytesPerDigit));
-  // R10 = &r_digits[x_used + n ~/ _DIGIT_BITS + 1]
-  __ add(R0, R0, Operand(R2, ASR, 1));
-  __ add(R10, R10, Operand(R0, LSL, 2));
-  // R3 = n % _DIGIT_BITS
-  __ and_(R3, R5, Operand(31));
-  // R2 = 32 - R3
-  __ rsb(R2, R3, Operand(32));
-  __ mov(R1, Operand(0));
+  __ add(R7, R8, Operand(R0, LSL, 1));
+  // R6 = &r_digits[1]
+  __ add(R6, R2, Operand(TypedData::data_offset() - kHeapObjectTag +
+                         Bigint::kBytesPerDigit));
+  // R6 = &r_digits[x_used + n ~/ _DIGIT_BITS + 1]
+  __ add(R4, R4, Operand(R0, ASR, 1));
+  __ add(R6, R6, Operand(R4, LSL, 2));
+  // R1 = n % _DIGIT_BITS
+  __ and_(R1, R3, Operand(31));
+  // R0 = 32 - R1
+  __ rsb(R0, R1, Operand(32));
+  __ mov(R9, Operand(0));
  Label loop;
  __ Bind(&loop);
-  __ ldr(R0, Address(R7, -Bigint::kBytesPerDigit, Address::PreIndex));
-  __ orr(R1, R1, Operand(R0, LSR, R2));
-  __ str(R1, Address(R10, -Bigint::kBytesPerDigit, Address::PreIndex));
-  __ mov(R1, Operand(R0, LSL, R3));
-  __ teq(R7, Operand(R6));
+  __ ldr(R4, Address(R7, -Bigint::kBytesPerDigit, Address::PreIndex));
+  __ orr(R9, R9, Operand(R4, LSR, R0));
+  __ str(R9, Address(R6, -Bigint::kBytesPerDigit, Address::PreIndex));
+  __ mov(R9, Operand(R4, LSL, R1));
+  __ teq(R7, Operand(R8));
  __ b(&loop, NE);
-  __ str(R1, Address(R10, -Bigint::kBytesPerDigit, Address::PreIndex));
+  __ str(R9, Address(R6, -Bigint::kBytesPerDigit, Address::PreIndex));
  // Returning Object::null() is not required, since this method is private.
  __ Ret();
}
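Each Bigint digit is an unsigned 32-bit word, so a shift by n splits into a digit offset (n ~/ 32) and a bit offset (n % 32), with each source digit contributing to two result digits. A C++ sketch of the left-shift loop (function name invented for illustration; like the assembly, it walks the digits downward):

    #include <cstdint>

    // Sketch only. r_digits = x_digits << n, with x_used > 0 and
    // n % 32 != 0 (both guaranteed by the Dart caller).
    void BigintLsh(const uint32_t* x_digits, int x_used, int n,
                   uint32_t* r_digits) {
      int digit_shift = n / 32;  // n ~/ _DIGIT_BITS
      int bit_shift = n % 32;
      uint32_t carry = 0;
      for (int i = x_used - 1; i >= 0; i--) {
        uint32_t d = x_digits[i];
        r_digits[i + digit_shift + 1] = carry | (d >> (32 - bit_shift));
        carry = d << bit_shift;
      }
      r_digits[digit_shift] = carry;
    }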


void Intrinsifier::Bigint_rsh(Assembler* assembler) {
  // static void _lsh(Uint32List x_digits, int x_used, int n,
  //                  Uint32List r_digits)

-  // R2 = x_used, R3 = x_digits, x_used > 0, x_used is Smi.
-  __ ldrd(R2, R3, SP, 2 * kWordSize);
-  // R4 = r_digits, R5 = n, n is Smi, n % _DIGIT_BITS != 0.
-  __ ldrd(R4, R5, SP, 0 * kWordSize);
-  __ SmiUntag(R5);
-  // R0 = n ~/ _DIGIT_BITS
-  __ Asr(R0, R5, Operand(5));
-  // R10 = &r_digits[0]
-  __ add(R10, R4, Operand(TypedData::data_offset() - kHeapObjectTag));
+  // R0 = x_used, R1 = x_digits, x_used > 0, x_used is Smi.
+  __ ldrd(R0, R1, SP, 2 * kWordSize);
+  // R2 = r_digits, R3 = n, n is Smi, n % _DIGIT_BITS != 0.
+  __ ldrd(R2, R3, SP, 0 * kWordSize);
+  __ SmiUntag(R3);
+  // R4 = n ~/ _DIGIT_BITS
+  __ Asr(R4, R3, Operand(5));
+  // R6 = &r_digits[0]
+  __ add(R6, R2, Operand(TypedData::data_offset() - kHeapObjectTag));
  // R7 = &x_digits[n ~/ _DIGIT_BITS]
-  __ add(R7, R3, Operand(TypedData::data_offset() - kHeapObjectTag));
-  __ add(R7, R7, Operand(R0, LSL, 2));
-  // R6 = &r_digits[x_used - n ~/ _DIGIT_BITS - 1]
-  __ add(R0, R0, Operand(1));
-  __ rsb(R0, R0, Operand(R2, ASR, 1));
-  __ add(R6, R10, Operand(R0, LSL, 2));
-  // R3 = n % _DIGIT_BITS
-  __ and_(R3, R5, Operand(31));
-  // R2 = 32 - R3
-  __ rsb(R2, R3, Operand(32));
-  // R1 = x_digits[n ~/ _DIGIT_BITS] >> (n % _DIGIT_BITS)
-  __ ldr(R1, Address(R7, Bigint::kBytesPerDigit, Address::PostIndex));
-  __ mov(R1, Operand(R1, LSR, R3));
+  __ add(R7, R1, Operand(TypedData::data_offset() - kHeapObjectTag));
+  __ add(R7, R7, Operand(R4, LSL, 2));
+  // R8 = &r_digits[x_used - n ~/ _DIGIT_BITS - 1]
+  __ add(R4, R4, Operand(1));
+  __ rsb(R4, R4, Operand(R0, ASR, 1));
+  __ add(R8, R6, Operand(R4, LSL, 2));
+  // R1 = n % _DIGIT_BITS
+  __ and_(R1, R3, Operand(31));
+  // R0 = 32 - R1
+  __ rsb(R0, R1, Operand(32));
+  // R9 = x_digits[n ~/ _DIGIT_BITS] >> (n % _DIGIT_BITS)
+  __ ldr(R9, Address(R7, Bigint::kBytesPerDigit, Address::PostIndex));
+  __ mov(R9, Operand(R9, LSR, R1));
  Label loop_entry;
  __ b(&loop_entry);
  Label loop;
  __ Bind(&loop);
-  __ ldr(R0, Address(R7, Bigint::kBytesPerDigit, Address::PostIndex));
-  __ orr(R1, R1, Operand(R0, LSL, R2));
-  __ str(R1, Address(R10, Bigint::kBytesPerDigit, Address::PostIndex));
-  __ mov(R1, Operand(R0, LSR, R3));
+  __ ldr(R4, Address(R7, Bigint::kBytesPerDigit, Address::PostIndex));
+  __ orr(R9, R9, Operand(R4, LSL, R0));
+  __ str(R9, Address(R6, Bigint::kBytesPerDigit, Address::PostIndex));
+  __ mov(R9, Operand(R4, LSR, R1));
  __ Bind(&loop_entry);
-  __ teq(R10, Operand(R6));
+  __ teq(R6, Operand(R8));
  __ b(&loop, NE);
-  __ str(R1, Address(R10, 0));
+  __ str(R9, Address(R6, 0));
  // Returning Object::null() is not required, since this method is private.
  __ Ret();
}
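The right shift mirrors the loop above, walking upward and carrying bits toward lower digits. A C++ sketch (function name invented for illustration):

    #include <cstdint>

    // Sketch only. r_digits = x_digits >> n, n % 32 != 0
    // (guaranteed by the Dart caller).
    void BigintRsh(const uint32_t* x_digits, int x_used, int n,
                   uint32_t* r_digits) {
      int digit_shift = n / 32;  // n ~/ _DIGIT_BITS
      int bit_shift = n % 32;
      int r_used = x_used - digit_shift;
      uint32_t carry = x_digits[digit_shift] >> bit_shift;
      for (int i = 1; i < r_used; i++) {
        uint32_t d = x_digits[digit_shift + i];
        r_digits[i - 1] = carry | (d << (32 - bit_shift));
        carry = d >> bit_shift;
      }
      r_digits[r_used - 1] = carry;
    }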


void Intrinsifier::Bigint_absAdd(Assembler* assembler) {
  // static void _absAdd(Uint32List digits, int used,
  //                     Uint32List a_digits, int a_used,
  //                     Uint32List r_digits)

-  // R2 = used, R3 = digits
-  __ ldrd(R2, R3, SP, 3 * kWordSize);
-  // R3 = &digits[0]
+  // R0 = used, R1 = digits
+  __ ldrd(R0, R1, SP, 3 * kWordSize);
+  // R1 = &digits[0]
+  __ add(R1, R1, Operand(TypedData::data_offset() - kHeapObjectTag));
+
+  // R2 = a_used, R3 = a_digits
+  __ ldrd(R2, R3, SP, 1 * kWordSize);
+  // R3 = &a_digits[0]
  __ add(R3, R3, Operand(TypedData::data_offset() - kHeapObjectTag));

-  // R4 = a_used, R5 = a_digits
-  __ ldrd(R4, R5, SP, 1 * kWordSize);
-  // R5 = &a_digits[0]
-  __ add(R5, R5, Operand(TypedData::data_offset() - kHeapObjectTag));
-
-  // R6 = r_digits
-  __ ldr(R6, Address(SP, 0 * kWordSize));
-  // R6 = &r_digits[0]
-  __ add(R6, R6, Operand(TypedData::data_offset() - kHeapObjectTag));
+  // R8 = r_digits
+  __ ldr(R8, Address(SP, 0 * kWordSize));
+  // R8 = &r_digits[0]
+  __ add(R8, R8, Operand(TypedData::data_offset() - kHeapObjectTag));

  // R7 = &digits[a_used >> 1], a_used is Smi.
-  __ add(R7, R3, Operand(R4, LSL, 1));
+  __ add(R7, R1, Operand(R2, LSL, 1));

-  // R10 = &digits[used >> 1], used is Smi.
-  __ add(R10, R3, Operand(R2, LSL, 1));
+  // R6 = &digits[used >> 1], used is Smi.
+  __ add(R6, R1, Operand(R0, LSL, 1));

-  __ adds(R0, R0, Operand(0));  // carry flag = 0
+  __ adds(R4, R4, Operand(0));  // carry flag = 0
  Label add_loop;
  __ Bind(&add_loop);
  // Loop a_used times, a_used > 0.
-  __ ldr(R0, Address(R3, Bigint::kBytesPerDigit, Address::PostIndex));
-  __ ldr(R1, Address(R5, Bigint::kBytesPerDigit, Address::PostIndex));
-  __ adcs(R0, R0, Operand(R1));
-  __ teq(R3, Operand(R7));  // Does not affect carry flag.
-  __ str(R0, Address(R6, Bigint::kBytesPerDigit, Address::PostIndex));
+  __ ldr(R4, Address(R1, Bigint::kBytesPerDigit, Address::PostIndex));
+  __ ldr(R9, Address(R3, Bigint::kBytesPerDigit, Address::PostIndex));
+  __ adcs(R4, R4, Operand(R9));
+  __ teq(R1, Operand(R7));  // Does not affect carry flag.
+  __ str(R4, Address(R8, Bigint::kBytesPerDigit, Address::PostIndex));
  __ b(&add_loop, NE);

  Label last_carry;
-  __ teq(R3, Operand(R10));  // Does not affect carry flag.
+  __ teq(R1, Operand(R6));  // Does not affect carry flag.
  __ b(&last_carry, EQ);  // If used - a_used == 0.

  Label carry_loop;
  __ Bind(&carry_loop);
  // Loop used - a_used times, used - a_used > 0.
-  __ ldr(R0, Address(R3, Bigint::kBytesPerDigit, Address::PostIndex));
-  __ adcs(R0, R0, Operand(0));
-  __ teq(R3, Operand(R10));  // Does not affect carry flag.
-  __ str(R0, Address(R6, Bigint::kBytesPerDigit, Address::PostIndex));
+  __ ldr(R4, Address(R1, Bigint::kBytesPerDigit, Address::PostIndex));
+  __ adcs(R4, R4, Operand(0));
+  __ teq(R1, Operand(R6));  // Does not affect carry flag.
+  __ str(R4, Address(R8, Bigint::kBytesPerDigit, Address::PostIndex));
  __ b(&carry_loop, NE);

  __ Bind(&last_carry);
-  __ mov(R0, Operand(0));
-  __ adc(R0, R0, Operand(0));
-  __ str(R0, Address(R6, 0));
+  __ mov(R4, Operand(0));
+  __ adc(R4, R4, Operand(0));
+  __ str(R4, Address(R8, 0));

  // Returning Object::null() is not required, since this method is private.
  __ Ret();
}
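adcs adds with the incoming carry, and teq is used for the loop bound precisely because it leaves the carry flag untouched between iterations. A C++ sketch of the whole routine (function name invented for illustration):

    #include <cstdint>

    // Sketch only. r_digits = digits + a_digits, with a_used <= used
    // (counts of 32-bit digits).
    void BigintAbsAdd(const uint32_t* digits, int used,
                      const uint32_t* a_digits, int a_used,
                      uint32_t* r_digits) {
      uint64_t carry = 0;
      for (int i = 0; i < a_used; i++) {  // add_loop
        uint64_t t = (uint64_t)digits[i] + a_digits[i] + carry;
        r_digits[i] = (uint32_t)t;
        carry = t >> 32;
      }
      for (int i = a_used; i < used; i++) {  // carry_loop
        uint64_t t = (uint64_t)digits[i] + carry;
        r_digits[i] = (uint32_t)t;
        carry = t >> 32;
      }
      r_digits[used] = (uint32_t)carry;  // last_carry
    }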


void Intrinsifier::Bigint_absSub(Assembler* assembler) {
  // static void _absSub(Uint32List digits, int used,
  //                     Uint32List a_digits, int a_used,
  //                     Uint32List r_digits)

-  // R2 = used, R3 = digits
-  __ ldrd(R2, R3, SP, 3 * kWordSize);
-  // R3 = &digits[0]
+  // R0 = used, R1 = digits
+  __ ldrd(R0, R1, SP, 3 * kWordSize);
+  // R1 = &digits[0]
+  __ add(R1, R1, Operand(TypedData::data_offset() - kHeapObjectTag));
+
+  // R2 = a_used, R3 = a_digits
+  __ ldrd(R2, R3, SP, 1 * kWordSize);
+  // R3 = &a_digits[0]
  __ add(R3, R3, Operand(TypedData::data_offset() - kHeapObjectTag));

-  // R4 = a_used, R5 = a_digits
-  __ ldrd(R4, R5, SP, 1 * kWordSize);
-  // R5 = &a_digits[0]
-  __ add(R5, R5, Operand(TypedData::data_offset() - kHeapObjectTag));
-
-  // R6 = r_digits
-  __ ldr(R6, Address(SP, 0 * kWordSize));
-  // R6 = &r_digits[0]
-  __ add(R6, R6, Operand(TypedData::data_offset() - kHeapObjectTag));
+  // R8 = r_digits
+  __ ldr(R8, Address(SP, 0 * kWordSize));
+  // R8 = &r_digits[0]
+  __ add(R8, R8, Operand(TypedData::data_offset() - kHeapObjectTag));

  // R7 = &digits[a_used >> 1], a_used is Smi.
-  __ add(R7, R3, Operand(R4, LSL, 1));
+  __ add(R7, R1, Operand(R2, LSL, 1));

-  // R10 = &digits[used >> 1], used is Smi.
-  __ add(R10, R3, Operand(R2, LSL, 1));
+  // R6 = &digits[used >> 1], used is Smi.
+  __ add(R6, R1, Operand(R0, LSL, 1));

-  __ subs(R0, R0, Operand(0));  // carry flag = 1
+  __ subs(R4, R4, Operand(0));  // carry flag = 1
  Label sub_loop;
  __ Bind(&sub_loop);
  // Loop a_used times, a_used > 0.
-  __ ldr(R0, Address(R3, Bigint::kBytesPerDigit, Address::PostIndex));
-  __ ldr(R1, Address(R5, Bigint::kBytesPerDigit, Address::PostIndex));
-  __ sbcs(R0, R0, Operand(R1));
-  __ teq(R3, Operand(R7));  // Does not affect carry flag.
-  __ str(R0, Address(R6, Bigint::kBytesPerDigit, Address::PostIndex));
+  __ ldr(R4, Address(R1, Bigint::kBytesPerDigit, Address::PostIndex));
+  __ ldr(R9, Address(R3, Bigint::kBytesPerDigit, Address::PostIndex));
+  __ sbcs(R4, R4, Operand(R9));
+  __ teq(R1, Operand(R7));  // Does not affect carry flag.
+  __ str(R4, Address(R8, Bigint::kBytesPerDigit, Address::PostIndex));
  __ b(&sub_loop, NE);

  Label done;
-  __ teq(R3, Operand(R10));  // Does not affect carry flag.
+  __ teq(R1, Operand(R6));  // Does not affect carry flag.
  __ b(&done, EQ);  // If used - a_used == 0.

  Label carry_loop;
  __ Bind(&carry_loop);
  // Loop used - a_used times, used - a_used > 0.
-  __ ldr(R0, Address(R3, Bigint::kBytesPerDigit, Address::PostIndex));
-  __ sbcs(R0, R0, Operand(0));
-  __ teq(R3, Operand(R10));  // Does not affect carry flag.
-  __ str(R0, Address(R6, Bigint::kBytesPerDigit, Address::PostIndex));
+  __ ldr(R4, Address(R1, Bigint::kBytesPerDigit, Address::PostIndex));
+  __ sbcs(R4, R4, Operand(0));
+  __ teq(R1, Operand(R6));  // Does not affect carry flag.
+  __ str(R4, Address(R8, Bigint::kBytesPerDigit, Address::PostIndex));
  __ b(&carry_loop, NE);

  __ Bind(&done);
  // Returning Object::null() is not required, since this method is private.
  __ Ret();
}
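Subtraction is the same loop with sbcs; on ARM a set carry flag means "no borrow", which is why subs R4, R4, #0 primes the flag before the first digit. A C++ sketch (function name invented for illustration):

    #include <cstdint>

    // Sketch only. r_digits = digits - a_digits, assuming
    // digits >= a_digits and a_used <= used.
    void BigintAbsSub(const uint32_t* digits, int used,
                      const uint32_t* a_digits, int a_used,
                      uint32_t* r_digits) {
      uint32_t borrow = 0;  // 0 = no borrow (ARM carry flag set)
      for (int i = 0; i < used; i++) {
        uint64_t rhs = (uint64_t)(i < a_used ? a_digits[i] : 0) + borrow;
        uint64_t t = (uint64_t)digits[i] - rhs;  // may wrap
        r_digits[i] = (uint32_t)t;
        borrow = (uint32_t)(t >> 63);  // 1 iff the subtraction wrapped
      }
    }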


void Intrinsifier::Bigint_mulAdd(Assembler* assembler) {
  // Pseudo code:
(...skipping 24 matching lines...)
  // }

  Label done;
  // R3 = x, no_op if x == 0
  __ ldrd(R0, R1, SP, 5 * kWordSize);  // R0 = xi as Smi, R1 = x_digits.
  __ add(R1, R1, Operand(R0, LSL, 1));
  __ ldr(R3, FieldAddress(R1, TypedData::data_offset()));
  __ tst(R3, Operand(R3));
  __ b(&done, EQ);

-  // R6 = SmiUntag(n), no_op if n == 0
-  __ ldr(R6, Address(SP, 0 * kWordSize));
-  __ Asrs(R6, R6, Operand(kSmiTagSize));
+  // R8 = SmiUntag(n), no_op if n == 0
+  __ ldr(R8, Address(SP, 0 * kWordSize));
+  __ Asrs(R8, R8, Operand(kSmiTagSize));
  __ b(&done, EQ);

  // R4 = mip = &m_digits[i >> 1]
  __ ldrd(R0, R1, SP, 3 * kWordSize);  // R0 = i as Smi, R1 = m_digits.
  __ add(R1, R1, Operand(R0, LSL, 1));
  __ add(R4, R1, Operand(TypedData::data_offset() - kHeapObjectTag));

-  // R5 = ajp = &a_digits[j >> 1]
+  // R9 = ajp = &a_digits[j >> 1]
  __ ldrd(R0, R1, SP, 1 * kWordSize);  // R0 = j as Smi, R1 = a_digits.
  __ add(R1, R1, Operand(R0, LSL, 1));
-  __ add(R5, R1, Operand(TypedData::data_offset() - kHeapObjectTag));
+  __ add(R9, R1, Operand(TypedData::data_offset() - kHeapObjectTag));

  // R1 = c = 0
  __ mov(R1, Operand(0));

  Label muladd_loop;
  __ Bind(&muladd_loop);
  // x:   R3
  // mip: R4
-  // ajp: R5
+  // ajp: R9
  // c:   R1
-  // n:   R6
+  // n:   R8

  // uint32_t mi = *mip++
  __ ldr(R2, Address(R4, Bigint::kBytesPerDigit, Address::PostIndex));

  // uint32_t aj = *ajp
-  __ ldr(R0, Address(R5, 0));
+  __ ldr(R0, Address(R9, 0));

  // uint64_t t = x*mi + aj + c
  __ umaal(R0, R1, R2, R3);  // R1:R0 = R2*R3 + R1 + R0.

  // *ajp++ = low32(t) = R0
-  __ str(R0, Address(R5, Bigint::kBytesPerDigit, Address::PostIndex));
+  __ str(R0, Address(R9, Bigint::kBytesPerDigit, Address::PostIndex));

  // c = high32(t) = R1

  // while (--n > 0)
-  __ subs(R6, R6, Operand(1));  // --n
+  __ subs(R8, R8, Operand(1));  // --n
  __ b(&muladd_loop, NE);

  __ tst(R1, Operand(R1));
  __ b(&done, EQ);

  // *ajp++ += c
-  __ ldr(R0, Address(R5, 0));
+  __ ldr(R0, Address(R9, 0));
  __ adds(R0, R0, Operand(R1));
-  __ str(R0, Address(R5, Bigint::kBytesPerDigit, Address::PostIndex));
+  __ str(R0, Address(R9, Bigint::kBytesPerDigit, Address::PostIndex));
  __ b(&done, CC);

  Label propagate_carry_loop;
  __ Bind(&propagate_carry_loop);
-  __ ldr(R0, Address(R5, 0));
+  __ ldr(R0, Address(R9, 0));
  __ adds(R0, R0, Operand(1));
-  __ str(R0, Address(R5, Bigint::kBytesPerDigit, Address::PostIndex));
+  __ str(R0, Address(R9, Bigint::kBytesPerDigit, Address::PostIndex));
  __ b(&propagate_carry_loop, CS);

  __ Bind(&done);
  __ mov(R0, Operand(Smi::RawValue(1)));  // One digit processed.
  __ Ret();
}
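umaal computes exactly hi:lo = a*b + hi + lo; since (2^32-1)^2 + 2*(2^32-1) = 2^64-1, that sum can never overflow 64 bits, so no carry is lost. A C++ sketch of the loop (function name invented for illustration):

    #include <cstdint>

    // Sketch only. One call of _mulAdd:
    // a_digits[j..] += x * m_digits[i..i+n-1], n > 0.
    void BigintMulAdd(uint32_t x, const uint32_t* mip, uint32_t* ajp, int n) {
      uint32_t c = 0;
      while (n-- > 0) {
        uint64_t t = (uint64_t)x * *mip++ + *ajp + c;  // umaal R0, R1, R2, R3
        *ajp++ = (uint32_t)t;                          // *ajp++ = low32(t)
        c = (uint32_t)(t >> 32);                       // c = high32(t)
      }
      while (c != 0) {  // propagate the final carry
        uint64_t t = (uint64_t)*ajp + c;
        *ajp++ = (uint32_t)t;
        c = (uint32_t)(t >> 32);
      }
    }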


void Intrinsifier::Bigint_sqrAdd(Assembler* assembler) {
  // Pseudo code:
(...skipping 26 matching lines...)
  __ ldrd(R2, R3, SP, 2 * kWordSize);  // R2 = i as Smi, R3 = x_digits
  __ add(R3, R3, Operand(R2, LSL, 1));
  __ add(R4, R3, Operand(TypedData::data_offset() - kHeapObjectTag));

  // R3 = x = *xip++, return if x == 0
  Label x_zero;
  __ ldr(R3, Address(R4, Bigint::kBytesPerDigit, Address::PostIndex));
  __ tst(R3, Operand(R3));
  __ b(&x_zero, EQ);

-  // R5 = ajp = &a_digits[i]
+  // R7 = ajp = &a_digits[i]
  __ ldr(R1, Address(SP, 1 * kWordSize));  // a_digits
  __ add(R1, R1, Operand(R2, LSL, 2));  // j == 2*i, i is Smi.
-  __ add(R5, R1, Operand(TypedData::data_offset() - kHeapObjectTag));
+  __ add(R7, R1, Operand(TypedData::data_offset() - kHeapObjectTag));

-  // R6:R0 = t = x*x + *ajp
-  __ ldr(R0, Address(R5, 0));
-  __ mov(R6, Operand(0));
-  __ umaal(R0, R6, R3, R3);  // R6:R0 = R3*R3 + R6 + R0.
+  // R8:R0 = t = x*x + *ajp
+  __ ldr(R0, Address(R7, 0));
+  __ mov(R8, Operand(0));
+  __ umaal(R0, R8, R3, R3);  // R8:R0 = R3*R3 + R8 + R0.

  // *ajp++ = low32(t) = R0
-  __ str(R0, Address(R5, Bigint::kBytesPerDigit, Address::PostIndex));
+  __ str(R0, Address(R7, Bigint::kBytesPerDigit, Address::PostIndex));

-  // R6 = low32(c) = high32(t)
-  // R7 = high32(c) = 0
-  __ mov(R7, Operand(0));
+  // R8 = low32(c) = high32(t)
+  // R9 = high32(c) = 0
+  __ mov(R9, Operand(0));

  // int n = used - i - 1; while (--n >= 0) ...
  __ ldr(R0, Address(SP, 0 * kWordSize));  // used is Smi
-  __ sub(R10, R0, Operand(R2));
+  __ sub(R6, R0, Operand(R2));
  __ mov(R0, Operand(2));  // n = used - i - 2; if (n >= 0) ... while (--n >= 0)
-  __ rsbs(R10, R0, Operand(R10, ASR, kSmiTagSize));
+  __ rsbs(R6, R0, Operand(R6, ASR, kSmiTagSize));

  Label loop, done;
  __ b(&done, MI);

  __ Bind(&loop);
  // x:   R3
  // xip: R4
-  // ajp: R5
-  // c:   R7:R6
+  // ajp: R7
+  // c:   R9:R8
  // t:   R2:R1:R0 (not live at loop entry)
-  // n:   R10
+  // n:   R6

  // uint32_t xi = *xip++
  __ ldr(R2, Address(R4, Bigint::kBytesPerDigit, Address::PostIndex));

-  // uint96_t t = R7:R6:R0 = 2*x*xi + aj + c
+  // uint96_t t = R9:R8:R0 = 2*x*xi + aj + c
  __ umull(R0, R1, R2, R3);  // R1:R0 = R2*R3.
  __ adds(R0, R0, Operand(R0));
  __ adcs(R1, R1, Operand(R1));
  __ mov(R2, Operand(0));
  __ adc(R2, R2, Operand(0));  // R2:R1:R0 = 2*x*xi.
-  __ adds(R0, R0, Operand(R6));
-  __ adcs(R1, R1, Operand(R7));
+  __ adds(R0, R0, Operand(R8));
+  __ adcs(R1, R1, Operand(R9));
  __ adc(R2, R2, Operand(0));  // R2:R1:R0 = 2*x*xi + c.
-  __ ldr(R6, Address(R5, 0));  // R6 = aj = *ajp.
-  __ adds(R0, R0, Operand(R6));
-  __ adcs(R6, R1, Operand(0));
-  __ adc(R7, R2, Operand(0));  // R7:R6:R0 = 2*x*xi + c + aj.
+  __ ldr(R8, Address(R7, 0));  // R8 = aj = *ajp.
+  __ adds(R0, R0, Operand(R8));
+  __ adcs(R8, R1, Operand(0));
+  __ adc(R9, R2, Operand(0));  // R9:R8:R0 = 2*x*xi + c + aj.

  // *ajp++ = low32(t) = R0
-  __ str(R0, Address(R5, Bigint::kBytesPerDigit, Address::PostIndex));
+  __ str(R0, Address(R7, Bigint::kBytesPerDigit, Address::PostIndex));

  // while (--n >= 0)
-  __ subs(R10, R10, Operand(1));  // --n
+  __ subs(R6, R6, Operand(1));  // --n
  __ b(&loop, PL);

  __ Bind(&done);
  // uint32_t aj = *ajp
-  __ ldr(R0, Address(R5, 0));
+  __ ldr(R0, Address(R7, 0));

  // uint64_t t = aj + c
-  __ adds(R6, R6, Operand(R0));
-  __ adc(R7, R7, Operand(0));
+  __ adds(R8, R8, Operand(R0));
+  __ adc(R9, R9, Operand(0));

-  // *ajp = low32(t) = R6
-  // *(ajp + 1) = high32(t) = R7
-  __ strd(R6, R7, R5, 0);
+  // *ajp = low32(t) = R8
+  // *(ajp + 1) = high32(t) = R9
+  __ strd(R8, R9, R7, 0);

  __ Bind(&x_zero);
  __ mov(R0, Operand(Smi::RawValue(1)));  // One digit processed.
  __ Ret();
}
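Here t = 2*x*xi + aj + c can exceed 64 bits, and the running carry c itself needs two words, hence the 96-bit accumulator in R2:R1:R0. A C++ sketch of one iteration, using the GCC/Clang __uint128_t extension on a 64-bit host purely for exposition (function name invented for illustration):

    #include <cstdint>

    // Sketch only. One loop iteration of _sqrAdd; c is the 64-bit
    // carry held in R9:R8 above.
    void SqrAddStep(uint32_t x, uint32_t xi, uint32_t* ajp, uint64_t* c) {
      __uint128_t t = (__uint128_t)2 * x * xi + *ajp + *c;  // 2*x*xi + aj + c
      *ajp = (uint32_t)t;        // *ajp++ = low32(t)
      *c = (uint64_t)(t >> 32);  // carry keeps the high 64 bits
    }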


void Intrinsifier::Bigint_estQuotientDigit(Assembler* assembler) {
  // No unsigned 64-bit / 32-bit divide instruction.
}
(...skipping 321 matching lines...)
  __ ldr(R1, FieldAddress(R0, state_field.Offset()));
  // Addresses of _state[0] and _state[1].

  const int64_t disp_0 = Instance::DataOffsetFor(kTypedDataUint32ArrayCid);
  const int64_t disp_1 = disp_0 +
      Instance::ElementSizeFor(kTypedDataUint32ArrayCid);

  __ LoadImmediate(R0, a_int32_value);
  __ LoadFromOffset(kWord, R2, R1, disp_0 - kHeapObjectTag);
  __ LoadFromOffset(kWord, R3, R1, disp_1 - kHeapObjectTag);
-  __ mov(R6, Operand(0));  // Zero extend unsigned _state[kSTATE_HI].
-  // Unsigned 32-bit multiply and 64-bit accumulate into R6:R3.
-  __ umlal(R3, R6, R0, R2);  // R6:R3 <- R6:R3 + R0 * R2.
+  __ mov(R8, Operand(0));  // Zero extend unsigned _state[kSTATE_HI].
+  // Unsigned 32-bit multiply and 64-bit accumulate into R8:R3.
+  __ umlal(R3, R8, R0, R2);  // R8:R3 <- R8:R3 + R0 * R2.
  __ StoreToOffset(kWord, R3, R1, disp_0 - kHeapObjectTag);
-  __ StoreToOffset(kWord, R6, R1, disp_1 - kHeapObjectTag);
+  __ StoreToOffset(kWord, R8, R1, disp_1 - kHeapObjectTag);
  __ Ret();
}
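This hunk is the multiply-with-carry step of the random number generator intrinsic: the 64-bit state is updated as A * lo32(state) + hi32(state), with the low word written back to _state[0] and the high word to _state[1]. A C++ sketch (function name invented for illustration; a_int32_value holds the constant A):

    #include <cstdint>

    // Sketch only. Multiply-with-carry: next = A * lo32(state) + hi32(state).
    uint64_t NextState(uint64_t state, uint32_t a) {
      uint32_t lo = (uint32_t)state;          // _state[kSTATE_LO]
      uint32_t hi = (uint32_t)(state >> 32);  // _state[kSTATE_HI]
      return (uint64_t)a * lo + hi;           // umlal R3, R8, R0, R2
    }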


void Intrinsifier::ObjectEquals(Assembler* assembler) {
  __ ldr(R0, Address(SP, 0 * kWordSize));
  __ ldr(R1, Address(SP, 1 * kWordSize));
  __ cmp(R0, Operand(R1));
  __ LoadObject(R0, Bool::False(), NE);
  __ LoadObject(R0, Bool::True(), EQ);
(...skipping 125 matching lines...)

  __ ldr(R2, FieldAddress(R1, String::length_offset()));

  Label done;
  // If the string is empty, set the hash to 1, and return.
  __ cmp(R2, Operand(Smi::RawValue(0)));
  __ b(&done, EQ);

  __ SmiUntag(R2);
  __ mov(R3, Operand(0));
-  __ AddImmediate(R6, R1, OneByteString::data_offset() - kHeapObjectTag);
+  __ AddImmediate(R8, R1, OneByteString::data_offset() - kHeapObjectTag);
  // R1: Instance of OneByteString.
  // R2: String length, untagged integer.
  // R3: Loop counter, untagged integer.
-  // R6: String data.
+  // R8: String data.
  // R0: Hash code, untagged integer.

  Label loop;
  // Add to hash code: (hash_ is uint32)
  // hash_ += ch;
  // hash_ += hash_ << 10;
  // hash_ ^= hash_ >> 6;
  // Get one characters (ch).
  __ Bind(&loop);
-  __ ldrb(R7, Address(R6, 0));
+  __ ldrb(R7, Address(R8, 0));
  // R7: ch.
  __ add(R3, R3, Operand(1));
-  __ add(R6, R6, Operand(1));
+  __ add(R8, R8, Operand(1));
  __ add(R0, R0, Operand(R7));
  __ add(R0, R0, Operand(R0, LSL, 10));
  __ eor(R0, R0, Operand(R0, LSR, 6));
  __ cmp(R3, Operand(R2));
  __ b(&loop, NE);

  // Finalize.
  // hash_ += hash_ << 3;
  // hash_ ^= hash_ >> 11;
  // hash_ += hash_ << 15;
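The comments above spell out a Jenkins-style one-at-a-time hash. A C++ sketch (function name invented for illustration; the seed value and the final truncation to a Smi-sized hash live in lines elided from this diff):

    #include <cstdint>

    // Sketch only; mirrors the loop and finalize steps above.
    uint32_t OneByteStringHash(const uint8_t* data, intptr_t length,
                               uint32_t hash /* seed from elided code */) {
      for (intptr_t i = 0; i < length; i++) {
        hash += data[i];     // hash_ += ch
        hash += hash << 10;  // hash_ += hash_ << 10
        hash ^= hash >> 6;   // hash_ ^= hash_ >> 6
      }
      hash += hash << 3;     // finalize
      hash ^= hash >> 11;
      hash += hash << 15;
      return hash;
    }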
(...skipping 17 matching lines...)
// initialized.
// 'length-reg' (R2) contains tagged length.
// Returns new string as tagged pointer in R0.
static void TryAllocateOnebyteString(Assembler* assembler,
                                     Label* ok,
                                     Label* failure) {
  const Register length_reg = R2;
  Label fail;
  __ MaybeTraceAllocation(kOneByteStringCid, R0, failure,
                          /* inline_isolate = */ false);
-  __ mov(R6, Operand(length_reg));  // Save the length register.
+  __ mov(R8, Operand(length_reg));  // Save the length register.
  // TODO(koda): Protect against negative length and overflow here.
  __ SmiUntag(length_reg);
  const intptr_t fixed_size = sizeof(RawString) + kObjectAlignment - 1;
  __ AddImmediate(length_reg, fixed_size);
  __ bic(length_reg, length_reg, Operand(kObjectAlignment - 1));

  const intptr_t cid = kOneByteStringCid;
  Heap::Space space = Heap::SpaceForAllocation(cid);
  __ ldr(R3, Address(THR, Thread::heap_offset()));
  __ ldr(R0, Address(R3, Heap::TopOffset(space)));
(...skipping 29 matching lines...)
    __ mov(R3, Operand(R2, LSL, shift), LS);
    __ mov(R3, Operand(0), HI);

    // Get the class index and insert it into the tags.
    // R3: size and bit tags.
    __ LoadImmediate(TMP, RawObject::ClassIdTag::encode(cid));
    __ orr(R3, R3, Operand(TMP));
    __ str(R3, FieldAddress(R0, String::tags_offset()));  // Store tags.
  }

-  // Set the length field using the saved length (R6).
+  // Set the length field using the saved length (R8).
  __ InitializeFieldNoBarrier(R0,
                              FieldAddress(R0, String::length_offset()),
-                              R6);
+                              R8);
  // Clear hash.
  __ LoadImmediate(TMP, 0);
  __ InitializeFieldNoBarrier(R0,
                              FieldAddress(R0, String::hash_offset()),
                              TMP);

  __ IncrementAllocationStatsWithSize(R4, R2, space);
  __ b(ok);

  __ Bind(&fail);
(...skipping 31 matching lines...)

  // R3: Start address to copy from (untagged).
  // R1: Untagged start index.
  __ ldr(R2, Address(SP, kEndIndexOffset));
  __ SmiUntag(R2);
  __ sub(R2, R2, Operand(R1));

  // R3: Start address to copy from (untagged).
  // R2: Untagged number of bytes to copy.
  // R0: Tagged result string.
-  // R6: Pointer into R3.
+  // R8: Pointer into R3.
  // R7: Pointer into R0.
  // R1: Scratch register.
  Label loop, done;
  __ cmp(R2, Operand(0));
  __ b(&done, LE);
-  __ mov(R6, Operand(R3));
+  __ mov(R8, Operand(R3));
  __ mov(R7, Operand(R0));
  __ Bind(&loop);
-  __ ldrb(R1, Address(R6, 0));
-  __ AddImmediate(R6, 1);
+  __ ldrb(R1, Address(R8, 0));
+  __ AddImmediate(R8, 1);
  __ sub(R2, R2, Operand(1));
  __ cmp(R2, Operand(0));
  __ strb(R1, FieldAddress(R7, OneByteString::data_offset()));
  __ AddImmediate(R7, 1);
  __ b(&loop, GT);

  __ Bind(&done);
  __ Ret();
  __ Bind(&fall_through);
}
(...skipping 100 matching lines...)
void Intrinsifier::JSRegExp_ExecuteMatch(Assembler* assembler) {
  if (FLAG_interpret_irregexp) return;

  static const intptr_t kRegExpParamOffset = 2 * kWordSize;
  static const intptr_t kStringParamOffset = 1 * kWordSize;
  // start_index smi is located at offset 0.

  // Incoming registers:
  // R0: Function. (Will be reloaded with the specialized matcher function.)
  // R4: Arguments descriptor. (Will be preserved.)
-  // R5: Unknown. (Must be GC safe on tail call.)
+  // R9: Unknown. (Must be GC safe on tail call.)

  // Load the specialized function pointer into R0. Leverage the fact the
  // string CIDs as well as stored function pointers are in sequence.
  __ ldr(R2, Address(SP, kRegExpParamOffset));
  __ ldr(R1, Address(SP, kStringParamOffset));
  __ LoadClassId(R1, R1);
  __ AddImmediate(R1, R1, -kOneByteStringCid);
  __ add(R1, R2, Operand(R1, LSL, kWordSizeLog2));
  __ ldr(R0, FieldAddress(R1, JSRegExp::function_offset(kOneByteStringCid)));

  // Registers are now set up for the lazy compile stub. It expects the function
-  // in R0, the argument descriptor in R4, and IC-Data in R5.
-  __ eor(R5, R5, Operand(R5));
+  // in R0, the argument descriptor in R4, and IC-Data in R9.
+  __ eor(R9, R9, Operand(R9));

  // Tail-call the function.
  __ ldr(CODE_REG, FieldAddress(R0, Function::code_offset()));
  __ ldr(R1, FieldAddress(R0, Function::entry_point_offset()));
  __ bx(R1);
}
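The lookup leans on two layout facts stated in the comment: string class ids are consecutive, and JSRegExp stores one specialized matcher function per string CID at consecutive word offsets. A hypothetical C++ sketch of the address arithmetic (helper name and exact layout are assumptions for illustration):

    // Sketch only. R2 = regexp, R1 = subject string's class id after
    // LoadClassId; FieldAddress subtracts the heap tag.
    uword MatcherFieldAddress(uword regexp, intptr_t string_cid) {
      intptr_t index = string_cid - kOneByteStringCid;  // 0, 1, 2, ...
      return regexp + index * kWordSize +
             JSRegExp::function_offset(kOneByteStringCid) - kHeapObjectTag;
    }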


// On stack: user tag (+0).
void Intrinsifier::UserTag_makeCurrent(Assembler* assembler) {
(...skipping 22 matching lines...)

void Intrinsifier::Profiler_getCurrentTag(Assembler* assembler) {
  __ LoadIsolate(R0);
  __ ldr(R0, Address(R0, Isolate::current_tag_offset()));
  __ Ret();
}

}  // namespace dart

#endif  // defined TARGET_ARCH_ARM