| OLD | NEW |
| 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file |
| 2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
| 3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
| 4 | 4 |
| 5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_ARM. | 5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_ARM. |
| 6 #if defined(TARGET_ARCH_ARM) | 6 #if defined(TARGET_ARCH_ARM) |
| 7 | 7 |
| 8 #include "vm/intrinsifier.h" | 8 #include "vm/intrinsifier.h" |
| 9 | 9 |
| 10 #include "vm/assembler.h" | 10 #include "vm/assembler.h" |
| (...skipping 12 matching lines...) |
| 23 | 23 |
| 24 | 24 |
| 25 void Intrinsifier::Array_getLength(Assembler* assembler) { | 25 void Intrinsifier::Array_getLength(Assembler* assembler) { |
| 26 __ ldr(R0, Address(SP, 0 * kWordSize)); | 26 __ ldr(R0, Address(SP, 0 * kWordSize)); |
| 27 __ ldr(R0, FieldAddress(R0, Array::length_offset())); | 27 __ ldr(R0, FieldAddress(R0, Array::length_offset())); |
| 28 __ Ret(); | 28 __ Ret(); |
| 29 } | 29 } |
| 30 | 30 |
| 31 | 31 |
| 32 void Intrinsifier::ImmutableList_getLength(Assembler* assembler) { | 32 void Intrinsifier::ImmutableList_getLength(Assembler* assembler) { |
| 33 return Array_getLength(assembler); | 33 Array_getLength(assembler); |
| 34 } | 34 } |
| 35 | 35 |
| 36 | 36 |
| 37 void Intrinsifier::Array_getIndexed(Assembler* assembler) { | 37 void Intrinsifier::Array_getIndexed(Assembler* assembler) { |
| 38 Label fall_through; | 38 Label fall_through; |
| 39 | 39 |
| 40 __ ldr(R0, Address(SP, + 0 * kWordSize)); // Index | 40 __ ldr(R0, Address(SP, + 0 * kWordSize)); // Index |
| 41 __ ldr(R1, Address(SP, + 1 * kWordSize)); // Array | 41 __ ldr(R1, Address(SP, + 1 * kWordSize)); // Array |
| 42 | 42 |
| 43 __ tst(R0, ShifterOperand(kSmiTagMask)); | 43 __ tst(R0, ShifterOperand(kSmiTagMask)); |
| 44 __ b(&fall_through, NE); // Index is not an smi, fall through | 44 __ b(&fall_through, NE); // Index is not an smi, fall through |
| 45 | 45 |
| 46 // range check | 46 // range check |
| 47 __ ldr(R6, FieldAddress(R1, Array::length_offset())); | 47 __ ldr(R6, FieldAddress(R1, Array::length_offset())); |
| 48 __ cmp(R0, ShifterOperand(R6)); | 48 __ cmp(R0, ShifterOperand(R6)); |
| 49 | 49 |
| 50 ASSERT(kSmiTagShift == 1); | 50 ASSERT(kSmiTagShift == 1); |
| 51 // array element at R1 + R0*2 + Array::data_offset - 1 | 51 // array element at R1 + R0*2 + Array::data_offset - 1 |
| 52 __ add(R6, R1, ShifterOperand(R0, LSL, 1), CC); | 52 __ add(R6, R1, ShifterOperand(R0, LSL, 1), CC); |
| 53 __ ldr(R0, FieldAddress(R6, Array::data_offset()), CC); | 53 __ ldr(R0, FieldAddress(R6, Array::data_offset()), CC); |
| 54 __ bx(LR, CC); | 54 __ bx(LR, CC); |
| 55 __ Bind(&fall_through); | 55 __ Bind(&fall_through); |
| 56 } | 56 } |
| 57 | 57 |
| 58 | 58 |
| 59 void Intrinsifier::ImmutableList_getIndexed(Assembler* assembler) { | 59 void Intrinsifier::ImmutableList_getIndexed(Assembler* assembler) { |
| 60 return Array_getIndexed(assembler); | 60 Array_getIndexed(assembler); |
| 61 } | 61 } |
| 62 | 62 |
| 63 | 63 |
| 64 static intptr_t ComputeObjectArrayTypeArgumentsOffset() { | 64 static intptr_t ComputeObjectArrayTypeArgumentsOffset() { |
| 65 const Library& core_lib = Library::Handle(Library::CoreLibrary()); | 65 const Library& core_lib = Library::Handle(Library::CoreLibrary()); |
| 66 const Class& cls = Class::Handle( | 66 const Class& cls = Class::Handle( |
| 67 core_lib.LookupClassAllowPrivate(Symbols::_List())); | 67 core_lib.LookupClassAllowPrivate(Symbols::_List())); |
| 68 ASSERT(!cls.IsNull()); | 68 ASSERT(!cls.IsNull()); |
| 69 ASSERT(cls.NumTypeArguments() == 1); | 69 ASSERT(cls.NumTypeArguments() == 1); |
| 70 const intptr_t field_offset = cls.type_arguments_field_offset(); | 70 const intptr_t field_offset = cls.type_arguments_field_offset(); |
| (...skipping 235 matching lines...) |
| 306 __ Ret(); | 306 __ Ret(); |
| 307 __ Bind(&fall_through); | 307 __ Bind(&fall_through); |
| 308 } | 308 } |
| 309 | 309 |
| 310 | 310 |
| 311 // Add an element to growable array if it doesn't need to grow, otherwise | 311 // Add an element to growable array if it doesn't need to grow, otherwise |
| 312 // call into regular code. | 312 // call into regular code. |
| 313 // On stack: growable array (+1), value (+0). | 313 // On stack: growable array (+1), value (+0). |
| 314 void Intrinsifier::GrowableList_add(Assembler* assembler) { | 314 void Intrinsifier::GrowableList_add(Assembler* assembler) { |
| 315 // In checked mode we need to type-check the incoming argument. | 315 // In checked mode we need to type-check the incoming argument. |
| 316 if (FLAG_enable_type_checks) return; | 316 if (FLAG_enable_type_checks) { |
| | 317 return; |
| | 318 } |
| 317 Label fall_through; | 319 Label fall_through; |
| 318 // R0: Array. | 320 // R0: Array. |
| 319 __ ldr(R0, Address(SP, 1 * kWordSize)); | 321 __ ldr(R0, Address(SP, 1 * kWordSize)); |
| 320 // R1: length. | 322 // R1: length. |
| 321 __ ldr(R1, FieldAddress(R0, GrowableObjectArray::length_offset())); | 323 __ ldr(R1, FieldAddress(R0, GrowableObjectArray::length_offset())); |
| 322 // R2: data. | 324 // R2: data. |
| 323 __ ldr(R2, FieldAddress(R0, GrowableObjectArray::data_offset())); | 325 __ ldr(R2, FieldAddress(R0, GrowableObjectArray::data_offset())); |
| 324 // R3: capacity. | 326 // R3: capacity. |
| 325 __ ldr(R3, FieldAddress(R2, Array::length_offset())); | 327 __ ldr(R3, FieldAddress(R2, Array::length_offset())); |
| 326 // Compare length with capacity. | 328 // Compare length with capacity. |
| (...skipping 154 matching lines...) |
| 481 Label fall_through; | 483 Label fall_through; |
| 482 TestBothArgumentsSmis(assembler, &fall_through); // Checks two smis. | 484 TestBothArgumentsSmis(assembler, &fall_through); // Checks two smis. |
| 483 __ adds(R0, R0, ShifterOperand(R1)); // Adds. | 485 __ adds(R0, R0, ShifterOperand(R1)); // Adds. |
| 484 __ bx(LR, VC); // Return if no overflow. | 486 __ bx(LR, VC); // Return if no overflow. |
| 485 // Otherwise fall through. | 487 // Otherwise fall through. |
| 486 __ Bind(&fall_through); | 488 __ Bind(&fall_through); |
| 487 } | 489 } |
| 488 | 490 |
| 489 | 491 |
| 490 void Intrinsifier::Integer_add(Assembler* assembler) { | 492 void Intrinsifier::Integer_add(Assembler* assembler) { |
| 491 return Integer_addFromInteger(assembler); | 493 Integer_addFromInteger(assembler); |
| 492 } | 494 } |
| 493 | 495 |
| 494 | 496 |
| 495 void Intrinsifier::Integer_subFromInteger(Assembler* assembler) { | 497 void Intrinsifier::Integer_subFromInteger(Assembler* assembler) { |
| 496 Label fall_through; | 498 Label fall_through; |
| 497 TestBothArgumentsSmis(assembler, &fall_through); | 499 TestBothArgumentsSmis(assembler, &fall_through); |
| 498 __ subs(R0, R0, ShifterOperand(R1)); // Subtract. | 500 __ subs(R0, R0, ShifterOperand(R1)); // Subtract. |
| 499 __ bx(LR, VC); // Return if no overflow. | 501 __ bx(LR, VC); // Return if no overflow. |
| 500 // Otherwise fall through. | 502 // Otherwise fall through. |
| 501 __ Bind(&fall_through); | 503 __ Bind(&fall_through); |
| 502 } | 504 } |
| 503 | 505 |
| 504 | 506 |
| 505 void Intrinsifier::Integer_sub(Assembler* assembler) { | 507 void Intrinsifier::Integer_sub(Assembler* assembler) { |
| 506 Label fall_through; | 508 Label fall_through; |
| 507 TestBothArgumentsSmis(assembler, &fall_through); | 509 TestBothArgumentsSmis(assembler, &fall_through); |
| 508 __ subs(R0, R1, ShifterOperand(R0)); // Subtract. | 510 __ subs(R0, R1, ShifterOperand(R0)); // Subtract. |
| 509 __ bx(LR, VC); // Return if no overflow. | 511 __ bx(LR, VC); // Return if no overflow. |
| 510 // Otherwise fall through. | 512 // Otherwise fall through. |
| 511 __ Bind(&fall_through); | 513 __ Bind(&fall_through); |
| 512 } | 514 } |
| 513 | 515 |
| 514 | 516 |
| 515 void Intrinsifier::Integer_mulFromInteger(Assembler* assembler) { | 517 void Intrinsifier::Integer_mulFromInteger(Assembler* assembler) { |
| 516 Label fall_through; | 518 Label fall_through; |
| 517 | 519 |
| 518 TestBothArgumentsSmis(assembler, &fall_through); // checks two smis | 520 TestBothArgumentsSmis(assembler, &fall_through); // checks two smis |
| 519 __ SmiUntag(R0); // untags R6. only want result shifted by one | 521 __ SmiUntag(R0); // Untags R6. We only want result shifted by one. |
| 520 | 522 |
| 521 if (TargetCPUFeatures::arm_version() == ARMv7) { | 523 if (TargetCPUFeatures::arm_version() == ARMv7) { |
| 522 __ smull(R0, IP, R0, R1); // IP:R0 <- R0 * R1. | 524 __ smull(R0, IP, R0, R1); // IP:R0 <- R0 * R1. |
| 523 __ cmp(IP, ShifterOperand(R0, ASR, 31)); | 525 __ cmp(IP, ShifterOperand(R0, ASR, 31)); |
| 524 __ bx(LR, EQ); | 526 __ bx(LR, EQ); |
| 525 } else { | 527 } else { |
| 526 __ CheckMultSignedOverflow(R0, R1, IP, D0, D1, &fall_through); | 528 __ CheckMultSignedOverflow(R0, R1, IP, D0, D1, &fall_through); |
| 527 __ mul(R0, R0, R1); | 529 __ mul(R0, R0, R1); |
| 528 __ Ret(); | 530 __ Ret(); |
| 529 } | 531 } |
| 530 | 532 |
| 531 __ Bind(&fall_through); // Fall through on overflow. | 533 __ Bind(&fall_through); // Fall through on overflow. |
| 532 } | 534 } |
| 533 | 535 |
| 534 | 536 |
| 535 void Intrinsifier::Integer_mul(Assembler* assembler) { | 537 void Intrinsifier::Integer_mul(Assembler* assembler) { |
| 536 return Integer_mulFromInteger(assembler); | 538 Integer_mulFromInteger(assembler); |
| 537 } | 539 } |
| 538 | 540 |
| 539 | 541 |
| 540 // Optimizations: | 542 // Optimizations: |
| 541 // - result is 0 if: | 543 // - result is 0 if: |
| 542 // - left is 0 | 544 // - left is 0 |
| 543 // - left equals right | 545 // - left equals right |
| 544 // - result is left if | 546 // - result is left if |
| 545 // - left > 0 && left < right | 547 // - left > 0 && left < right |
| 546 // R1: Tagged left (dividend). | 548 // R1: Tagged left (dividend). |
| (...skipping 41 matching lines...) |
| 588 // res = left % right; | 590 // res = left % right; |
| 589 // if (res < 0) { | 591 // if (res < 0) { |
| 590 // if (right < 0) { | 592 // if (right < 0) { |
| 591 // res = res - right; | 593 // res = res - right; |
| 592 // } else { | 594 // } else { |
| 593 // res = res + right; | 595 // res = res + right; |
| 594 // } | 596 // } |
| 595 // } | 597 // } |
| 596 void Intrinsifier::Integer_moduloFromInteger(Assembler* assembler) { | 598 void Intrinsifier::Integer_moduloFromInteger(Assembler* assembler) { |
| 597 // Check to see if we have integer division | 599 // Check to see if we have integer division |
| 598 Label fall_through, subtract; | 600 Label fall_through; |
| 599 __ ldr(R1, Address(SP, + 0 * kWordSize)); | 601 __ ldr(R1, Address(SP, + 0 * kWordSize)); |
| 600 __ ldr(R0, Address(SP, + 1 * kWordSize)); | 602 __ ldr(R0, Address(SP, + 1 * kWordSize)); |
| 601 __ orr(TMP, R0, ShifterOperand(R1)); | 603 __ orr(TMP, R0, ShifterOperand(R1)); |
| 602 __ tst(TMP, ShifterOperand(kSmiTagMask)); | 604 __ tst(TMP, ShifterOperand(kSmiTagMask)); |
| 603 __ b(&fall_through, NE); | 605 __ b(&fall_through, NE); |
| 604 // R1: Tagged left (dividend). | 606 // R1: Tagged left (dividend). |
| 605 // R0: Tagged right (divisor). | 607 // R0: Tagged right (divisor). |
| 606 // Check if modulo by zero -> exception thrown in main function. | 608 // Check if modulo by zero -> exception thrown in main function. |
| 607 __ cmp(R0, ShifterOperand(0)); | 609 __ cmp(R0, ShifterOperand(0)); |
| 608 __ b(&fall_through, EQ); | 610 __ b(&fall_through, EQ); |
| (...skipping 54 matching lines...) |
| 663 | 665 |
| 664 TestBothArgumentsSmis(assembler, &fall_through); // checks two smis | 666 TestBothArgumentsSmis(assembler, &fall_through); // checks two smis |
| 665 __ and_(R0, R0, ShifterOperand(R1)); | 667 __ and_(R0, R0, ShifterOperand(R1)); |
| 666 | 668 |
| 667 __ Ret(); | 669 __ Ret(); |
| 668 __ Bind(&fall_through); | 670 __ Bind(&fall_through); |
| 669 } | 671 } |
| 670 | 672 |
| 671 | 673 |
| 672 void Intrinsifier::Integer_bitAnd(Assembler* assembler) { | 674 void Intrinsifier::Integer_bitAnd(Assembler* assembler) { |
| 673 return Integer_bitAndFromInteger(assembler); | 675 Integer_bitAndFromInteger(assembler); |
| 674 } | 676 } |
| 675 | 677 |
| 676 | 678 |
| 677 void Intrinsifier::Integer_bitOrFromInteger(Assembler* assembler) { | 679 void Intrinsifier::Integer_bitOrFromInteger(Assembler* assembler) { |
| 678 Label fall_through; | 680 Label fall_through; |
| 679 | 681 |
| 680 TestBothArgumentsSmis(assembler, &fall_through); // checks two smis | 682 TestBothArgumentsSmis(assembler, &fall_through); // checks two smis |
| 681 __ orr(R0, R0, ShifterOperand(R1)); | 683 __ orr(R0, R0, ShifterOperand(R1)); |
| 682 | 684 |
| 683 __ Ret(); | 685 __ Ret(); |
| 684 __ Bind(&fall_through); | 686 __ Bind(&fall_through); |
| 685 } | 687 } |
| 686 | 688 |
| 687 | 689 |
| 688 void Intrinsifier::Integer_bitOr(Assembler* assembler) { | 690 void Intrinsifier::Integer_bitOr(Assembler* assembler) { |
| 689 return Integer_bitOrFromInteger(assembler); | 691 Integer_bitOrFromInteger(assembler); |
| 690 } | 692 } |
| 691 | 693 |
| 692 | 694 |
| 693 void Intrinsifier::Integer_bitXorFromInteger(Assembler* assembler) { | 695 void Intrinsifier::Integer_bitXorFromInteger(Assembler* assembler) { |
| 694 Label fall_through; | 696 Label fall_through; |
| 695 | 697 |
| 696 TestBothArgumentsSmis(assembler, &fall_through); // checks two smis | 698 TestBothArgumentsSmis(assembler, &fall_through); // checks two smis |
| 697 __ eor(R0, R0, ShifterOperand(R1)); | 699 __ eor(R0, R0, ShifterOperand(R1)); |
| 698 | 700 |
| 699 __ Ret(); | 701 __ Ret(); |
| 700 __ Bind(&fall_through); | 702 __ Bind(&fall_through); |
| 701 } | 703 } |
| 702 | 704 |
| 703 | 705 |
| 704 void Intrinsifier::Integer_bitXor(Assembler* assembler) { | 706 void Intrinsifier::Integer_bitXor(Assembler* assembler) { |
| 705 return Integer_bitXorFromInteger(assembler); | 707 Integer_bitXorFromInteger(assembler); |
| 706 } | 708 } |
| 707 | 709 |
| 708 | 710 |
| 709 void Intrinsifier::Integer_shl(Assembler* assembler) { | 711 void Intrinsifier::Integer_shl(Assembler* assembler) { |
| 710 ASSERT(kSmiTagShift == 1); | 712 ASSERT(kSmiTagShift == 1); |
| 711 ASSERT(kSmiTag == 0); | 713 ASSERT(kSmiTag == 0); |
| 712 Label fall_through; | 714 Label fall_through; |
| 713 | 715 |
| 714 TestBothArgumentsSmis(assembler, &fall_through); | 716 TestBothArgumentsSmis(assembler, &fall_through); |
| 715 __ CompareImmediate(R0, Smi::RawValue(Smi::kBits)); | 717 __ CompareImmediate(R0, Smi::RawValue(Smi::kBits)); |
| (...skipping 118 matching lines...) |
| 834 __ cmp(R2, ShifterOperand(R6)); // Compare left lo, right lo. | 836 __ cmp(R2, ShifterOperand(R6)); // Compare left lo, right lo. |
| 835 __ b(&is_false, lo_false_cond); | 837 __ b(&is_false, lo_false_cond); |
| 836 // Else is true. | 838 // Else is true. |
| 837 __ b(&is_true); | 839 __ b(&is_true); |
| 838 | 840 |
| 839 __ Bind(&fall_through); | 841 __ Bind(&fall_through); |
| 840 } | 842 } |
| 841 | 843 |
| 842 | 844 |
| 843 void Intrinsifier::Integer_greaterThanFromInt(Assembler* assembler) { | 845 void Intrinsifier::Integer_greaterThanFromInt(Assembler* assembler) { |
| 844 return CompareIntegers(assembler, LT); | 846 CompareIntegers(assembler, LT); |
| 845 } | 847 } |
| 846 | 848 |
| 847 | 849 |
| 848 void Intrinsifier::Integer_lessThan(Assembler* assembler) { | 850 void Intrinsifier::Integer_lessThan(Assembler* assembler) { |
| 849 return Integer_greaterThanFromInt(assembler); | 851 Integer_greaterThanFromInt(assembler); |
| 850 } | 852 } |
| 851 | 853 |
| 852 | 854 |
| 853 void Intrinsifier::Integer_greaterThan(Assembler* assembler) { | 855 void Intrinsifier::Integer_greaterThan(Assembler* assembler) { |
| 854 return CompareIntegers(assembler, GT); | 856 CompareIntegers(assembler, GT); |
| 855 } | 857 } |
| 856 | 858 |
| 857 | 859 |
| 858 void Intrinsifier::Integer_lessEqualThan(Assembler* assembler) { | 860 void Intrinsifier::Integer_lessEqualThan(Assembler* assembler) { |
| 859 return CompareIntegers(assembler, LE); | 861 CompareIntegers(assembler, LE); |
| 860 } | 862 } |
| 861 | 863 |
| 862 | 864 |
| 863 void Intrinsifier::Integer_greaterEqualThan(Assembler* assembler) { | 865 void Intrinsifier::Integer_greaterEqualThan(Assembler* assembler) { |
| 864 return CompareIntegers(assembler, GE); | 866 CompareIntegers(assembler, GE); |
| 865 } | 867 } |
| 866 | 868 |
| 867 | 869 |
| 868 // This is called for Smi, Mint and Bigint receivers. The right argument | 870 // This is called for Smi, Mint and Bigint receivers. The right argument |
| 869 // can be Smi, Mint, Bigint or double. | 871 // can be Smi, Mint, Bigint or double. |
| 870 void Intrinsifier::Integer_equalToInteger(Assembler* assembler) { | 872 void Intrinsifier::Integer_equalToInteger(Assembler* assembler) { |
| 871 Label fall_through, true_label, check_for_mint; | 873 Label fall_through, true_label, check_for_mint; |
| 872 // For integer receiver '===' check first. | 874 // For integer receiver '===' check first. |
| 873 __ ldr(R0, Address(SP, 0 * kWordSize)); | 875 __ ldr(R0, Address(SP, 0 * kWordSize)); |
| 874 __ ldr(R1, Address(SP, 1 * kWordSize)); | 876 __ ldr(R1, Address(SP, 1 * kWordSize)); |
| (...skipping 37 matching lines...) |
| 912 __ b(&fall_through, NE); | 914 __ b(&fall_through, NE); |
| 913 __ LoadObject(R0, Bool::False()); | 915 __ LoadObject(R0, Bool::False()); |
| 914 __ Ret(); | 916 __ Ret(); |
| 915 // TODO(srdjan): Implement Mint == Mint comparison. | 917 // TODO(srdjan): Implement Mint == Mint comparison. |
| 916 | 918 |
| 917 __ Bind(&fall_through); | 919 __ Bind(&fall_through); |
| 918 } | 920 } |
| 919 | 921 |
| 920 | 922 |
| 921 void Intrinsifier::Integer_equal(Assembler* assembler) { | 923 void Intrinsifier::Integer_equal(Assembler* assembler) { |
| 922 return Integer_equalToInteger(assembler); | 924 Integer_equalToInteger(assembler); |
| 923 } | 925 } |
| 924 | 926 |
| 925 | 927 |
| 926 void Intrinsifier::Integer_sar(Assembler* assembler) { | 928 void Intrinsifier::Integer_sar(Assembler* assembler) { |
| 927 Label fall_through; | 929 Label fall_through; |
| 928 | 930 |
| 929 TestBothArgumentsSmis(assembler, &fall_through); | 931 TestBothArgumentsSmis(assembler, &fall_through); |
| 930 // Shift amount in R0. Value to shift in R1. | 932 // Shift amount in R0. Value to shift in R1. |
| 931 | 933 |
| 932 // Fall through if shift amount is negative. | 934 // Fall through if shift amount is negative. |
| (...skipping 205 matching lines...) |
| 1138 __ Ret(); | 1140 __ Ret(); |
| 1139 } | 1141 } |
| 1140 } | 1142 } |
| 1141 | 1143 |
| 1142 | 1144 |
| 1143 void Intrinsifier::Double_getIsNegative(Assembler* assembler) { | 1145 void Intrinsifier::Double_getIsNegative(Assembler* assembler) { |
| 1144 if (TargetCPUFeatures::vfp_supported()) { | 1146 if (TargetCPUFeatures::vfp_supported()) { |
| 1145 Label is_false, is_true, is_zero; | 1147 Label is_false, is_true, is_zero; |
| 1146 __ ldr(R0, Address(SP, 0 * kWordSize)); | 1148 __ ldr(R0, Address(SP, 0 * kWordSize)); |
| 1147 __ LoadDFromOffset(D0, R0, Double::value_offset() - kHeapObjectTag); | 1149 __ LoadDFromOffset(D0, R0, Double::value_offset() - kHeapObjectTag); |
| 1148 __ LoadDImmediate(D1, 0.0, R1); | 1150 __ vcmpdz(D0); |
| 1149 __ vcmpd(D0, D1); | |
| 1150 __ vmstat(); | 1151 __ vmstat(); |
| 1151 __ b(&is_false, VS); // NaN -> false. | 1152 __ b(&is_false, VS); // NaN -> false. |
| 1152 __ b(&is_zero, EQ); // Check for negative zero. | 1153 __ b(&is_zero, EQ); // Check for negative zero. |
| 1153 __ b(&is_false, CS); // >= 0 -> false. | 1154 __ b(&is_false, CS); // >= 0 -> false. |
| 1154 | 1155 |
| 1155 __ Bind(&is_true); | 1156 __ Bind(&is_true); |
| 1156 __ LoadObject(R0, Bool::True()); | 1157 __ LoadObject(R0, Bool::True()); |
| 1157 __ Ret(); | 1158 __ Ret(); |
| 1158 | 1159 |
| 1159 __ Bind(&is_false); | 1160 __ Bind(&is_false); |
| (...skipping 503 matching lines...) |
| 1663 Isolate* isolate = Isolate::Current(); | 1664 Isolate* isolate = Isolate::Current(); |
| 1664 __ LoadImmediate(R1, reinterpret_cast<uword>(isolate)); | 1665 __ LoadImmediate(R1, reinterpret_cast<uword>(isolate)); |
| 1665 // Set return value to Isolate::current_tag_. | 1666 // Set return value to Isolate::current_tag_. |
| 1666 __ ldr(R0, Address(R1, Isolate::current_tag_offset())); | 1667 __ ldr(R0, Address(R1, Isolate::current_tag_offset())); |
| 1667 __ Ret(); | 1668 __ Ret(); |
| 1668 } | 1669 } |
| 1669 | 1670 |
| 1670 } // namespace dart | 1671 } // namespace dart |
| 1671 | 1672 |
| 1672 #endif // defined TARGET_ARCH_ARM | 1673 #endif // defined TARGET_ARCH_ARM |
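
The bulk of this CL is a mechanical style cleanup: void intrinsic wrappers such as Integer_add and Integer_bitOr now call their shared helper instead of writing "return Helper(assembler);", and the single-line early-out in GrowableList_add gains braces. A minimal stand-alone sketch of the before/after pattern, using hypothetical names (Helper, Wrapper, enable_checks) rather than Dart VM identifiers:

#include <cstdio>

static void Helper(int* calls) {
  ++*calls;  // Stand-in for a shared intrinsic body such as Array_getLength.
}

// Old style: "if (enable_checks) return;" on one line, and callers written as
// "return Helper(calls);". Both forms compile (returning a void expression
// from a void function is legal C++), but the updated style uses a braced
// early return followed by a plain call.
static void Wrapper(int* calls, bool enable_checks) {
  if (enable_checks) {
    return;
  }
  Helper(calls);
}

int main() {
  int calls = 0;
  Wrapper(&calls, false);
  std::printf("calls = %d\n", calls);  // Prints "calls = 1".
  return 0;
}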
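
Separately, Double_getIsNegative now compares D0 against zero with vcmpdz instead of materializing 0.0 in D1 via LoadDImmediate, which frees the R1 scratch register. The predicate the intrinsic computes (NaN is not negative, -0.0 is negative, anything below zero is negative) can be written in portable C++ as the following sketch of the intended semantics; it is illustrative only, not VM code:

#include <cmath>
#include <cstdio>

// Mirrors the branch structure in the hunk above: unordered (NaN) -> false,
// equal to zero -> inspect the sign bit to catch -0.0, otherwise negative
// exactly when the value is below zero.
static bool DoubleIsNegative(double value) {
  if (std::isnan(value)) return false;
  if (value == 0.0) return std::signbit(value);  // Distinguish -0.0 from +0.0.
  return value < 0.0;
}

int main() {
  std::printf("%d %d %d %d\n",
              DoubleIsNegative(-1.5),            // 1
              DoubleIsNegative(-0.0),            // 1
              DoubleIsNegative(0.0),             // 0
              DoubleIsNegative(std::nan("")));   // 0
  return 0;
}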