| OLD | NEW |
| 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file |
| 2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
| 3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
| 4 | 4 |
| 5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_ARM. | 5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_ARM. |
| 6 #if defined(TARGET_ARCH_ARM) | 6 #if defined(TARGET_ARCH_ARM) |
| 7 | 7 |
| 8 #include "vm/intrinsifier.h" | 8 #include "vm/intrinsifier.h" |
| 9 | 9 |
| 10 #include "vm/assembler.h" | 10 #include "vm/assembler.h" |
| (...skipping 458 matching lines...) |
| 469 ExternalTypedData::data_offset() - kHeapObjectTag); | 469 ExternalTypedData::data_offset() - kHeapObjectTag); |
| 470 __ SmiUntag(R0); | 470 __ SmiUntag(R0); |
| 471 __ ldrb(R0, Address(R1, R0)); | 471 __ ldrb(R0, Address(R1, R0)); |
| 472 __ SmiTag(R0); | 472 __ SmiTag(R0); |
| 473 __ Ret(); | 473 __ Ret(); |
| 474 __ Bind(&fall_through); | 474 __ Bind(&fall_through); |
| 475 } | 475 } |
| 476 | 476 |
| 477 | 477 |
| 478 void Intrinsifier::Float64Array_getIndexed(Assembler* assembler) { | 478 void Intrinsifier::Float64Array_getIndexed(Assembler* assembler) { |
| 479 if (!TargetCPUFeatures::vfp_supported()) { |
| 480 return; |
| 481 } |
| 479 Label fall_through; | 482 Label fall_through; |
| 480 __ ldr(R0, Address(SP, + 0 * kWordSize)); // Index. | 483 __ ldr(R0, Address(SP, + 0 * kWordSize)); // Index. |
| 481 __ ldr(R1, Address(SP, + 1 * kWordSize)); // Array. | 484 __ ldr(R1, Address(SP, + 1 * kWordSize)); // Array. |
| 482 __ tst(R0, Operand(kSmiTagMask)); | 485 __ tst(R0, Operand(kSmiTagMask)); |
| 483 __ b(&fall_through, NE); // Index is not a smi, fall through. | 486 __ b(&fall_through, NE); // Index is not a smi, fall through. |
| 484 | 487 |
| 485 // Range check. | 488 // Range check. |
| 486 __ ldr(R6, FieldAddress(R1, TypedData::length_offset())); | 489 __ ldr(R6, FieldAddress(R1, TypedData::length_offset())); |
| 487 __ cmp(R0, Operand(R6)); | 490 __ cmp(R0, Operand(R6)); |
| 488 __ b(&fall_through, CS); | 491 __ b(&fall_through, CS); |
| (...skipping 15 matching lines...) |
| 504 &fall_through, | 507 &fall_through, |
| 505 R0, // Result register. | 508 R0, // Result register. |
| 506 R1); | 509 R1); |
| 507 __ StoreDToOffset(D0, R0, Double::value_offset() - kHeapObjectTag); | 510 __ StoreDToOffset(D0, R0, Double::value_offset() - kHeapObjectTag); |
| 508 __ Ret(); | 511 __ Ret(); |
| 509 __ Bind(&fall_through); | 512 __ Bind(&fall_through); |
| 510 } | 513 } |
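The guard added at the top of Float64Array_getIndexed (and to Float64Array_setIndexed just below) means that on a target without VFP the intrinsifier emits no code at all, so the call keeps using the regular, non-intrinsified implementation instead of reaching VFP load/store instructions the CPU cannot execute. Below is a minimal standalone C++ sketch of that early-return pattern; FakeAssembler, vfp_available, and MaybeEmitFloat64Fastpath are made-up stand-ins for illustration, not Dart VM APIs.

    #include <cstdio>

    // Illustrative stand-ins only; none of these names are real Dart VM APIs.
    struct FakeAssembler { int instructions_emitted = 0; };

    static bool vfp_available = false;  // pretend result of a CPU feature probe

    // Mirrors the guard added to Float64Array_getIndexed: without the feature,
    // return before emitting anything, so the generic (non-intrinsified) code
    // remains the only entry point for the call.
    void MaybeEmitFloat64Fastpath(FakeAssembler* assembler) {
      if (!vfp_available) {
        return;  // nothing emitted -> caller keeps using the slow path
      }
      assembler->instructions_emitted++;  // stands in for the VFP sequence
    }

    int main() {
      FakeAssembler assembler;
      MaybeEmitFloat64Fastpath(&assembler);
      std::printf("emitted without VFP: %d\n", assembler.instructions_emitted);  // 0
      vfp_available = true;
      MaybeEmitFloat64Fastpath(&assembler);
      std::printf("emitted with VFP: %d\n", assembler.instructions_emitted);     // 1
      return 0;
    }
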
| 511 | 514 |
| 512 | 515 |
| 513 void Intrinsifier::Float64Array_setIndexed(Assembler* assembler) { | 516 void Intrinsifier::Float64Array_setIndexed(Assembler* assembler) { |
| 517 if (!TargetCPUFeatures::vfp_supported()) { |
| 518 return; |
| 519 } |
| 514 Label fall_through; | 520 Label fall_through; |
| 515 __ ldr(R0, Address(SP, + 1 * kWordSize)); // Index. | 521 __ ldr(R0, Address(SP, + 1 * kWordSize)); // Index. |
| 516 __ ldr(R1, Address(SP, + 2 * kWordSize)); // Array. | 522 __ ldr(R1, Address(SP, + 2 * kWordSize)); // Array. |
| 517 __ tst(R0, Operand(kSmiTagMask)); | 523 __ tst(R0, Operand(kSmiTagMask)); |
| 518 __ b(&fall_through, NE); // Index is not a smi, fall through. | 524 __ b(&fall_through, NE); // Index is not a smi, fall through. |
| 519 | 525 |
| 520 // Range check. | 526 // Range check. |
| 521 __ ldr(R6, FieldAddress(R1, TypedData::length_offset())); | 527 __ ldr(R6, FieldAddress(R1, TypedData::length_offset())); |
| 522 __ cmp(R0, Operand(R6)); | 528 __ cmp(R0, Operand(R6)); |
| 523 __ b(&fall_through, CS); | 529 __ b(&fall_through, CS); |
| (...skipping 92 matching lines...) |
| 616 Label fall_through; | 622 Label fall_through; |
| 617 TestBothArgumentsSmis(assembler, &fall_through); | 623 TestBothArgumentsSmis(assembler, &fall_through); |
| 618 __ subs(R0, R1, Operand(R0)); // Subtract. | 624 __ subs(R0, R1, Operand(R0)); // Subtract. |
| 619 __ bx(LR, VC); // Return if no overflow. | 625 __ bx(LR, VC); // Return if no overflow. |
| 620 // Otherwise fall through. | 626 // Otherwise fall through. |
| 621 __ Bind(&fall_through); | 627 __ Bind(&fall_through); |
| 622 } | 628 } |
| 623 | 629 |
| 624 | 630 |
| 625 void Intrinsifier::Integer_mulFromInteger(Assembler* assembler) { | 631 void Intrinsifier::Integer_mulFromInteger(Assembler* assembler) { |
| 626 Label fall_through; | |
| 627 | |
| 628 TestBothArgumentsSmis(assembler, &fall_through); // checks two smis | |
| 629 __ SmiUntag(R0); // Untags R0. We only want result shifted by one. | |
| 630 | |
| 631 if (TargetCPUFeatures::arm_version() == ARMv7) { | 632 if (TargetCPUFeatures::arm_version() == ARMv7) { |
| 633 Label fall_through; |
| 634 TestBothArgumentsSmis(assembler, &fall_through); // checks two smis |
| | 635 __ SmiUntag(R0); // Untags R0. We only want result shifted by one. |
| 632 __ smull(R0, IP, R0, R1); // IP:R0 <- R0 * R1. | 636 __ smull(R0, IP, R0, R1); // IP:R0 <- R0 * R1. |
| 633 __ cmp(IP, Operand(R0, ASR, 31)); | 637 __ cmp(IP, Operand(R0, ASR, 31)); |
| 634 __ bx(LR, EQ); | 638 __ bx(LR, EQ); |
| 635 } else { | 639 __ Bind(&fall_through); // Fall through on overflow. |
| 640 } else if (TargetCPUFeatures::can_divide()) { |
| 641 Label fall_through; |
| 642 TestBothArgumentsSmis(assembler, &fall_through); // checks two smis |
| | 643 __ SmiUntag(R0); // Untags R0. We only want result shifted by one. |
| 636 __ CheckMultSignedOverflow(R0, R1, IP, D0, D1, &fall_through); | 644 __ CheckMultSignedOverflow(R0, R1, IP, D0, D1, &fall_through); |
| 637 __ mul(R0, R0, R1); | 645 __ mul(R0, R0, R1); |
| 638 __ Ret(); | 646 __ Ret(); |
| 647 __ Bind(&fall_through); // Fall through on overflow. |
| 639 } | 648 } |
| 640 | |
| 641 __ Bind(&fall_through); // Fall through on overflow. | |
| 642 } | 649 } |
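On the ARMv7 path above, smull leaves the full 64-bit product in IP:R0, and the cmp of IP against R0 ASR #31 tests whether that product fits in 32 bits: it does exactly when the high word equals the sign extension of the low word. (Only one operand is untagged beforehand; the other stays Smi-tagged, so a fitting 32-bit product is already the tagged result, which is what the "result shifted by one" comment refers to.) Below is a standalone C++ sketch of just that overflow test, assuming two's-complement narrowing and arithmetic right shifts as on ARM; MulFitsIn32Bits is an illustrative name, not VM code.

    #include <cassert>
    #include <cstdint>

    // The test behind "smull R0, IP, R0, R1" followed by
    // "cmp IP, Operand(R0, ASR, 31)": a signed 64-bit product fits in 32 bits
    // exactly when its high word equals the sign extension of its low word.
    bool MulFitsIn32Bits(int32_t a, int32_t b) {
      int64_t product = static_cast<int64_t>(a) * b;
      int32_t lo = static_cast<int32_t>(product);        // R0 after smull
      int32_t hi = static_cast<int32_t>(product >> 32);  // IP after smull
      return hi == (lo >> 31);                           // EQ -> no overflow
    }

    int main() {
      assert(MulFitsIn32Bits(46340, 46340));    // 2147395600 fits in int32
      assert(!MulFitsIn32Bits(46341, 46341));   // 2147488281 overflows
      assert(MulFitsIn32Bits(-65536, 32768));   // exactly INT32_MIN still fits
      assert(!MulFitsIn32Bits(-65536, 32769));  // just past INT32_MIN overflows
      return 0;
    }
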
| 643 | 650 |
| 644 | 651 |
| 645 void Intrinsifier::Integer_mul(Assembler* assembler) { | 652 void Intrinsifier::Integer_mul(Assembler* assembler) { |
| 646 Integer_mulFromInteger(assembler); | 653 Integer_mulFromInteger(assembler); |
| 647 } | 654 } |
| 648 | 655 |
| 649 | 656 |
| 650 // Optimizations: | 657 // Optimizations: |
| 651 // - result is 0 if: | 658 // - result is 0 if: |
| (...skipping 45 matching lines...) |
| 697 // Implementation: | 704 // Implementation: |
| 698 // res = left % right; | 705 // res = left % right; |
| 699 // if (res < 0) { | 706 // if (res < 0) { |
| 700 // if (right < 0) { | 707 // if (right < 0) { |
| 701 // res = res - right; | 708 // res = res - right; |
| 702 // } else { | 709 // } else { |
| 703 // res = res + right; | 710 // res = res + right; |
| 704 // } | 711 // } |
| 705 // } | 712 // } |
| 706 void Intrinsifier::Integer_moduloFromInteger(Assembler* assembler) { | 713 void Intrinsifier::Integer_moduloFromInteger(Assembler* assembler) { |
| 714 if (!TargetCPUFeatures::can_divide()) { |
| 715 return; |
| 716 } |
| 707 // Check to see if we have integer division | 717 // Check to see if we have integer division |
| 708 Label fall_through; | 718 Label fall_through; |
| 709 __ ldr(R1, Address(SP, + 0 * kWordSize)); | 719 __ ldr(R1, Address(SP, + 0 * kWordSize)); |
| 710 __ ldr(R0, Address(SP, + 1 * kWordSize)); | 720 __ ldr(R0, Address(SP, + 1 * kWordSize)); |
| 711 __ orr(TMP, R0, Operand(R1)); | 721 __ orr(TMP, R0, Operand(R1)); |
| 712 __ tst(TMP, Operand(kSmiTagMask)); | 722 __ tst(TMP, Operand(kSmiTagMask)); |
| 713 __ b(&fall_through, NE); | 723 __ b(&fall_through, NE); |
| 714 // R1: Tagged left (dividend). | 724 // R1: Tagged left (dividend). |
| 715 // R0: Tagged right (divisor). | 725 // R0: Tagged right (divisor). |
| 716 // Check if modulo by zero -> exception thrown in main function. | 726 // Check if modulo by zero -> exception thrown in main function. |
| (...skipping 11 matching lines...) |
| 728 __ sub(R0, R1, Operand(R0), LT); | 738 __ sub(R0, R1, Operand(R0), LT); |
| 729 __ add(R0, R1, Operand(R0), GE); | 739 __ add(R0, R1, Operand(R0), GE); |
| 730 __ SmiTag(R0); | 740 __ SmiTag(R0); |
| 731 __ Ret(); | 741 __ Ret(); |
| 732 | 742 |
| 733 __ Bind(&fall_through); | 743 __ Bind(&fall_through); |
| 734 } | 744 } |
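The Implementation comment above Integer_moduloFromInteger spells out the fixup applied to the truncating remainder: if res comes out negative, add the divisor's magnitude (subtract right when right < 0, otherwise add right) so the result lands in [0, |right|), matching Dart's % semantics. Below is a minimal standalone C++ sketch of that fixup, ignoring Smi tagging and the divisor-of-zero case (which falls through so the main function can throw); DartStyleModulo is an illustrative name, not VM code.

    #include <cassert>

    // Plain-integer version of the fixup in the Implementation comment above:
    // start from the truncating remainder and push a negative remainder back
    // into [0, |right|) by adding the divisor's magnitude.
    int DartStyleModulo(int left, int right) {  // assumes right != 0
      int res = left % right;  // truncating remainder (C's %)
      if (res < 0) {
        if (right < 0) {
          res = res - right;   // right < 0, so subtracting adds |right|
        } else {
          res = res + right;   // right > 0, so adding adds |right|
        }
      }
      return res;
    }

    int main() {
      assert(DartStyleModulo(7, 3) == 1);
      assert(DartStyleModulo(-7, 3) == 2);    // C remainder is -1; fixup gives 2
      assert(DartStyleModulo(7, -3) == 1);
      assert(DartStyleModulo(-7, -3) == 2);   // C remainder is -1; fixup gives 2
      return 0;
    }
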
| 735 | 745 |
| 736 | 746 |
| 737 void Intrinsifier::Integer_truncDivide(Assembler* assembler) { | 747 void Intrinsifier::Integer_truncDivide(Assembler* assembler) { |
| 748 if (!TargetCPUFeatures::can_divide()) { |
| 749 return; |
| 750 } |
| 738 // Check to see if we have integer division | 751 // Check to see if we have integer division |
| 739 Label fall_through; | 752 Label fall_through; |
| 740 | 753 |
| 741 TestBothArgumentsSmis(assembler, &fall_through); | 754 TestBothArgumentsSmis(assembler, &fall_through); |
| 742 __ cmp(R0, Operand(0)); | 755 __ cmp(R0, Operand(0)); |
| 743 __ b(&fall_through, EQ); // If b is 0, fall through. | 756 __ b(&fall_through, EQ); // If b is 0, fall through. |
| 744 | 757 |
| 745 __ SmiUntag(R0); | 758 __ SmiUntag(R0); |
| 746 __ SmiUntag(R1); | 759 __ SmiUntag(R1); |
| 747 | 760 |
| (...skipping 1019 matching lines...) |
| 1767 Isolate* isolate = Isolate::Current(); | 1780 Isolate* isolate = Isolate::Current(); |
| 1768 __ LoadImmediate(R1, reinterpret_cast<uword>(isolate)); | 1781 __ LoadImmediate(R1, reinterpret_cast<uword>(isolate)); |
| 1769 // Set return value to Isolate::current_tag_. | 1782 // Set return value to Isolate::current_tag_. |
| 1770 __ ldr(R0, Address(R1, Isolate::current_tag_offset())); | 1783 __ ldr(R0, Address(R1, Isolate::current_tag_offset())); |
| 1771 __ Ret(); | 1784 __ Ret(); |
| 1772 } | 1785 } |
| 1773 | 1786 |
| 1774 } // namespace dart | 1787 } // namespace dart |
| 1775 | 1788 |
| 1776 #endif // defined TARGET_ARCH_ARM | 1789 #endif // defined TARGET_ARCH_ARM |