Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 380 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 391 // either r0 or r1 is not a number (not smi and not heap number object) the | 391 // either r0 or r1 is not a number (not smi and not heap number object) the |
| 392 // not_number label is jumped to with r0 and r1 intact. | 392 // not_number label is jumped to with r0 and r1 intact. |
| 393 static void LoadOperands(MacroAssembler* masm, | 393 static void LoadOperands(MacroAssembler* masm, |
| 394 FloatingPointHelper::Destination destination, | 394 FloatingPointHelper::Destination destination, |
| 395 Register heap_number_map, | 395 Register heap_number_map, |
| 396 Register scratch1, | 396 Register scratch1, |
| 397 Register scratch2, | 397 Register scratch2, |
| 398 Label* not_number); | 398 Label* not_number); |
| 399 | 399 |
| 400 // Loads the number from object into dst as a 32-bit integer if possible. If | 400 // Loads the number from object into dst as a 32-bit integer if possible. If |
| 401 // the object is not a 32-bit integer control continues at the label | 401 // the object cannot be converted to a 32-bit integer control continues at |
| 402 // not_int32. If VFP is supported double_scratch is used but not scratch2. | 402 // the label not_int32. If VFP is supported double_scratch is used |
| 403 // but not scratch2. | |
| 404 // Floating point values in the 32bit integer range will be rounded | |
| 405 // to integers. | |
| 403 static void LoadNumberAsInteger(MacroAssembler* masm, | 406 static void LoadNumberAsInteger(MacroAssembler* masm, |
| 404 Register object, | 407 Register object, |
| 405 Register dst, | 408 Register dst, |
| 406 Register heap_number_map, | 409 Register heap_number_map, |
| 407 Register scratch1, | 410 Register scratch1, |
| 408 Register scratch2, | 411 Register scratch2, |
| 409 DwVfpRegister double_scratch, | 412 DwVfpRegister double_scratch, |
| 410 Label* not_int32); | 413 Label* not_int32); |
| 411 | 414 |
| 415 // Load the number from object into double_dst in the double format. | |
| 416 // Control will jump to not_int32 if the value cannot be exactly represented | |
| 417 // by a 32bit integer. | |
| 418 // Floating point values in the 32bit integer range that are not exact | |
| 419 // integers won't be loaded. | |
| 420 static void LoadNumberAsInt32Double(MacroAssembler* masm, | |
| 421 Register object, | |
| 422 Destination destination, | |
| 423 DwVfpRegister double_dst, | |
| 424 Register dst1, | |
| 425 Register dst2, | |
| 426 Register heap_number_map, | |
| 427 Register scratch1, | |
| 428 Register scratch2, | |
| 429 SwVfpRegister single_scratch, | |
| 430 Label* not_int32); | |
| 431 | |
| 432 // Loads the number from object into dst as a 32bit integer. | |
| 433 // Control will jump to not_int32 if the object cannot be exactly represented | |
| 434 // by a 32bit integer. | |
| 435 // Floating point values in the 32bit integer range that are not exact | |
| 436 // integers won't be converted. | |
| 437 // scratch3 is not used when VFP3 is supported. | |
| 438 static void LoadNumberAsInt32(MacroAssembler* masm, | |
| 439 Register object, | |
| 440 Register dst, | |
| 441 Register heap_number_map, | |
| 442 Register scratch1, | |
| 443 Register scratch2, | |
| 444 Register scratch3, | |
| 445 DwVfpRegister double_scratch, | |
| 446 Label* not_int32); | |
| 447 | |
| 448 // Generate non VFP3 code to check if a double can be exactly represented by a | |
| 449 // 32bit integer. This does not check for 0 or -0, which need | |
| 450 // to be checked for separately. | |
| 451 // Control jumps to not_int32 if the value is not a 32bit integer, and falls | |
| 452 // through otherwise. | |
| 453 // src1 and src2 will be clobbered. | |
| 454 // | |
| 455 // Expected input: | |
| 456 // - src1: higher (exponent) part of the double value. | |
| 457 // - src2: lower (mantissa) part of the double value. | |
| 458 // Output status: | |
| 459 // - dst: 32 higher bits of the mantissa. (mantissa[51:20]) | |
| 460 // - src2: contains 1. | |
| 461 // - other registers are clobbered. | |
| 462 static void DoubleIs32BitInteger(MacroAssembler* masm, | |
| 463 Register src1, | |
| 464 Register src2, | |
| 465 Register dst, | |
| 466 Register scratch, | |
| 467 Label* not_int32); | |
| 468 | |
| 469 // Generates code to call a C function to do a double operation using core | |
| 470 // registers. (Used when VFP3 is not supported.) | |
| 471 // This code never falls through, but returns with a heap number containing | |
| 472 // the result in r0. | |
| 473 // Register heapnumber_result must be a heap number in which the | |
| 474 // result of the operation will be stored. | |
| 475 // Requires the following layout on entry: | |
| 476 // r0: Left value (least significant part of mantissa). | |
| 477 // r1: Left value (sign, exponent, top of mantissa). | |
| 478 // r2: Right value (least significant part of mantissa). | |
| 479 // r3: Right value (sign, exponent, top of mantissa). | |
| 480 static void CallCCodeForDoubleOperation(MacroAssembler* masm, | |
| 481 Token::Value op, | |
| 482 Register heap_number_result, | |
| 483 Register scratch); | |
| 484 | |
| 412 private: | 485 private: |
| 413 static void LoadNumber(MacroAssembler* masm, | 486 static void LoadNumber(MacroAssembler* masm, |
| 414 FloatingPointHelper::Destination destination, | 487 FloatingPointHelper::Destination destination, |
| 415 Register object, | 488 Register object, |
| 416 DwVfpRegister dst, | 489 DwVfpRegister dst, |
| 417 Register dst1, | 490 Register dst1, |
| 418 Register dst2, | 491 Register dst2, |
| 419 Register heap_number_map, | 492 Register heap_number_map, |
| 420 Register scratch1, | 493 Register scratch1, |
| 421 Register scratch2, | 494 Register scratch2, |
| (...skipping 131 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 553 __ b(ne, not_int32); | 626 __ b(ne, not_int32); |
| 554 __ ConvertToInt32( | 627 __ ConvertToInt32( |
| 555 object, dst, scratch1, scratch2, double_scratch, not_int32); | 628 object, dst, scratch1, scratch2, double_scratch, not_int32); |
| 556 __ jmp(&done); | 629 __ jmp(&done); |
| 557 __ bind(&is_smi); | 630 __ bind(&is_smi); |
| 558 __ SmiUntag(dst, object); | 631 __ SmiUntag(dst, object); |
| 559 __ bind(&done); | 632 __ bind(&done); |
| 560 } | 633 } |
| 561 | 634 |
| 562 | 635 |
| 636 void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm, | |
| 637 Register object, | |
| 638 Destination destination, | |
| 639 DwVfpRegister double_dst, | |
| 640 Register dst1, | |
| 641 Register dst2, | |
| 642 Register heap_number_map, | |
| 643 Register scratch1, | |
| 644 Register scratch2, | |
| 645 SwVfpRegister single_scratch, | |
| 646 Label* not_int32) { | |
| 647 ASSERT(!scratch1.is(object) && !scratch2.is(object)); | |
| 648 ASSERT(!scratch1.is(scratch2)); | |
| 649 ASSERT(!heap_number_map.is(object) && | |
| 650 !heap_number_map.is(scratch1) && | |
| 651 !heap_number_map.is(scratch2)); | |
| 652 | |
| 653 Label done, obj_is_heap_number; | |
| 654 | |
| 655 __ JumpIfNotSmi(object, &obj_is_heap_number); | |
| 656 __ SmiUntag(scratch1, object); | |
| 657 if (CpuFeatures::IsSupported(VFP3)) { | |
| 658 CpuFeatures::Scope scope(VFP3); | |
| 659 __ vmov(single_scratch, scratch1); | |
| 660 __ vcvt_f64_s32(double_dst, single_scratch); | |
| 661 if (destination == kCoreRegisters) { | |
| 662 __ vmov(dst1, dst2, double_dst); | |
| 663 } | |
| 664 } else { | |
| 665 Label fewer_than_20_useful_bits; | |
| 666 // Expected output: | |
| 667 // | dst1 | dst2 | | |
| 668 // | s | exp | mantissa | | |
| 669 | |
| 670 // Check for zero. | |
| 671 __ cmp(scratch1, Operand(0)); | |
| 672 __ mov(dst1, scratch1); | |
| 673 __ mov(dst2, scratch1); | |
| 674 __ b(eq, &done); | |
| 675 | |
| 676 // Preload the sign of the value. | |
| 677 __ and_(dst1, scratch1, Operand(HeapNumber::kSignMask), SetCC); | |
| 678 // Get the absolute value of the object (as an unsigned integer). | |
| 679 __ rsb(scratch1, scratch1, Operand(0), SetCC, mi); | |
| 680 | |
| 681 // Get mantissa[51:20]. | |
| 682 | |
| 683 // Get the position of the first set bit. | |
| 684 __ CountLeadingZeros(dst2, scratch1, scratch2); | |
| 685 __ rsb(dst2, dst2, Operand(31)); | |
| 686 | |
| 687 // Set the exponent. | |
| 688 __ add(scratch2, dst2, Operand(HeapNumber::kExponentBias)); | |
| 689 __ Bfi(dst1, scratch2, scratch2, | |
| 690 HeapNumber::kExponentShift, HeapNumber::kExponentBits); | |
| 691 | |
| 692 // Clear the first non null bit. | |
| 693 __ mov(scratch2, Operand(1)); | |
| 694 __ bic(scratch1, scratch1, Operand(scratch2, LSL, dst2)); | |
| 695 | |
| 696 __ cmp(dst2, Operand(HeapNumber::kMantissaBitsInTopWord)); | |
| 697 // Get the number of bits to set in the lower part of the mantissa. | |
| 698 __ sub(scratch2, dst2, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC); | |
| 699 __ b(mi, &fewer_than_20_useful_bits); | |
| 700 // Set the higher 20 bits of the mantissa. | |
| 701 __ orr(dst1, dst1, Operand(scratch1, LSR, scratch2)); | |
| 702 __ rsb(scratch2, scratch2, Operand(32)); | |
| 703 __ mov(dst2, Operand(scratch1, LSL, scratch2)); | |
| 704 __ b(&done); | |
| 705 | |
| 706 __ bind(&fewer_than_20_useful_bits); | |
| 707 __ rsb(scratch2, dst2, Operand(HeapNumber::kMantissaBitsInTopWord)); | |
| 708 __ mov(scratch2, Operand(scratch1, LSL, scratch2)); | |
| 709 __ orr(dst1, dst1, scratch2); | |
| 710 // Set dst2 to 0. | |
| 711 __ mov(dst2, Operand(0)); | |
| 712 } | |
| 713 | |
| 714 __ b(&done); | |
| 715 | |
| 716 __ bind(&obj_is_heap_number); | |
|
Søren Thygesen Gjesse
2011/02/28 09:54:32
Please change this label to obj_is_not_smi.
Søren Thygesen Gjesse
2011/03/02 09:33:08
Done.
| |
| 717 if (FLAG_debug_code) { | |
| 718 __ AbortIfNotRootValue(heap_number_map, | |
| 719 Heap::kHeapNumberMapRootIndex, | |
| 720 "HeapNumberMap register clobbered."); | |
| 721 } | |
| 722 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32); | |
| 723 | |
| 724 // Load the number. | |
| 725 if (CpuFeatures::IsSupported(VFP3)) { | |
| 726 CpuFeatures::Scope scope(VFP3); | |
| 727 // Load the double value. | |
| 728 __ sub(scratch1, object, Operand(kHeapObjectTag)); | |
| 729 __ vldr(double_dst, scratch1, HeapNumber::kValueOffset); | |
| 730 | |
| 731 __ EmitVFPTruncate(kRoundToZero, | |
| 732 single_scratch, | |
| 733 double_dst, | |
| 734 scratch1, | |
| 735 scratch2, | |
| 736 kCheckForInexactConversion); | |
| 737 | |
| 738 // Jump to not_int32 if the operation did not succeed. | |
| 739 __ b(ne, not_int32); | |
| 740 | |
| 741 if (destination == kCoreRegisters) { | |
| 742 __ vmov(dst1, dst2, double_dst); | |
| 743 } | |
| 744 | |
| 745 } else { | |
| 746 ASSERT(!scratch1.is(object) && !scratch2.is(object)); | |
| 747 // Load the double value in the destination registers. | |
| 748 __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset)); | |
| 749 | |
| 750 // Check for 0 and -0. | |
| 751 __ bic(scratch1, dst1, Operand(HeapNumber::kSignMask)); | |
| 752 __ orr(scratch1, scratch1, Operand(dst2)); | |
| 753 __ cmp(scratch1, Operand(0)); | |
| 754 __ b(eq, &done); | |
| 755 | |
| 756 // Check that the value can be exactly represented by a 32bit integer. | |
| 757 // Jump to not_int32 if that's not the case. | |
| 758 DoubleIs32BitInteger(masm, dst1, dst2, scratch1, scratch2, not_int32); | |
| 759 | |
| 760 // dst1 and dst2 were trashed. Reload the double value. | |
| 761 __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset)); | |
| 762 } | |
| 763 | |
| 764 __ bind(&done); | |
| 765 } | |
| 766 | |
| 767 | |
| 768 void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm, | |
| 769 Register object, | |
| 770 Register dst, | |
| 771 Register heap_number_map, | |
| 772 Register scratch1, | |
| 773 Register scratch2, | |
| 774 Register scratch3, | |
| 775 DwVfpRegister double_scratch, | |
| 776 Label* not_int32) { | |
| 777 ASSERT(!dst.is(object)); | |
| 778 ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object)); | |
| 779 ASSERT(!scratch1.is(scratch2) && | |
| 780 !scratch1.is(scratch3) && | |
| 781 !scratch2.is(scratch3)); | |
| 782 | |
| 783 Label done; | |
| 784 | |
| 785 // Untag the object in the destination register. | |
|
Søren Thygesen Gjesse
2011/02/28 09:54:32
in -> into
Søren Thygesen Gjesse
2011/03/02 09:33:08
Done.
| |
| 786 __ SmiUntag(dst, object); | |
| 787 // Just return if the object is a smi. | |
| 788 __ JumpIfSmi(object, &done); | |
| 789 | |
| 790 if (FLAG_debug_code) { | |
| 791 __ AbortIfNotRootValue(heap_number_map, | |
| 792 Heap::kHeapNumberMapRootIndex, | |
| 793 "HeapNumberMap register clobbered."); | |
| 794 } | |
| 795 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32); | |
| 796 | |
| 797 // Object is a heap number. | |
| 798 // Convert the floating point value to a 32bit integer. | |
| 799 if (CpuFeatures::IsSupported(VFP3)) { | |
| 800 CpuFeatures::Scope scope(VFP3); | |
| 801 SwVfpRegister single_scratch = double_scratch.low(); | |
| 802 // Load the double value. | |
| 803 __ sub(scratch1, object, Operand(kHeapObjectTag)); | |
| 804 __ vldr(double_scratch, scratch1, HeapNumber::kValueOffset); | |
| 805 | |
| 806 __ EmitVFPTruncate(kRoundToZero, | |
| 807 single_scratch, | |
| 808 double_scratch, | |
| 809 scratch1, | |
| 810 scratch2, | |
| 811 kCheckForInexactConversion); | |
| 812 | |
| 813 // Jump to not_int32 if the operation did not succeed. | |
| 814 __ b(ne, not_int32); | |
| 815 // Get the result in the destination register. | |
| 816 __ vmov(dst, single_scratch); | |
| 817 | |
| 818 } else { | |
| 819 // Load the double value in the destination registers.. | |
|
Søren Thygesen Gjesse
2011/02/28 09:54:32
.. -> .
Søren Thygesen Gjesse
2011/03/02 09:33:08
Done.
| |
| 820 __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset)); | |
| 821 __ ldr(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset)); | |
| 822 | |
| 823 // Check for 0 and -0. | |
| 824 __ bic(dst, scratch1, Operand(HeapNumber::kSignMask)); | |
| 825 __ orr(dst, scratch2, Operand(dst)); | |
| 826 __ cmp(dst, Operand(0)); | |
| 827 __ b(eq, &done); | |
| 828 | |
| 829 DoubleIs32BitInteger(masm, scratch1, scratch2, dst, scratch3, not_int32); | |
| 830 | |
| 831 // Registers state after DoubleIs32BitInteger. | |
| 832 // dst: mantissa[51:20]. | |
| 833 // scratch2: 1 | |
| 834 | |
| 835 // Shift back the higher bits of the mantissa. | |
| 836 __ mov(dst, Operand(dst, LSR, scratch3)); | |
| 837 // Set the implicit first bit. | |
| 838 __ rsb(scratch3, scratch3, Operand(32)); | |
| 839 __ orr(dst, dst, Operand(scratch2, LSL, scratch3)); | |
| 840 // Set the sign. | |
| 841 __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset)); | |
| 842 __ tst(scratch1, Operand(HeapNumber::kSignMask)); | |
| 843 __ rsb(dst, dst, Operand(0), LeaveCC, mi); | |
| 844 } | |
| 845 | |
| 846 __ bind(&done); | |
| 847 } | |
| 848 | |
| 849 | |
| 850 void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm, | |
| 851 Register src1, | |
| 852 Register src2, | |
| 853 Register dst, | |
| 854 Register scratch, | |
| 855 Label* not_int32) { | |
| 856 // Get exponent alone in scratch. | |
| 857 __ Ubfx(scratch, | |
| 858 src1, | |
| 859 HeapNumber::kExponentShift, | |
| 860 HeapNumber::kExponentBits); | |
| 861 | |
| 862 // Subtract the bias from the exponent. | |
| 863 __ sub(scratch, scratch, Operand(HeapNumber::kExponentBias), SetCC); | |
| 864 | |
| 865 // src1: higher (exponent) part of the double value. | |
| 866 // src2: lower (mantissa) part of the double value. | |
| 867 // scratch: unbiased exponent. | |
| 868 | |
| 869 // Fast cases. Check for obvious non 32bit integer values. | |
| 870 // Negative exponent cannot yield 32bit integers. | |
| 871 __ b(mi, not_int32); | |
| 872 // Exponent greater than 31 cannot yield 32bit integers. | |
| 873 // Also, a positive value with an exponent equal to 31 is outside of the | |
| 874 // signed 32bit integer range. | |
| 875 __ tst(src1, Operand(HeapNumber::kSignMask)); | |
|
Søren Thygesen Gjesse
2011/02/28 09:54:32
Maybe add a bit more commenting here would be nice
Søren Thygesen Gjesse
2011/03/02 09:33:08
Done.
| |
| 876 __ cmp(scratch, Operand(30), eq); | |
| 877 __ cmp(scratch, Operand(31), ne); | |
| 878 __ b(gt, not_int32); | |
| 879 // - Bits [21:0] in the mantissa are not null. | |
| 880 __ tst(src2, Operand(0x3fffff)); | |
| 881 __ b(ne, not_int32); | |
| 882 | |
| 883 // Otherwise the exponent needs to be big enough to shift left all the | |
| 884 // non zero bits left. So we need the (30 - exponent) last bits of the | |
| 885 // 31 higher bits of the mantissa to be null. | |
| 886 // Because bits [21:20] are null, we can check instead that the | |
|
Søren Thygesen Gjesse
2011/02/28 09:54:32
20 -> 0?
Søren Thygesen Gjesse
2011/03/02 09:33:08
Done.
| |
| 887 // (32 - exponent) last bits of the 32 higher bits of the mantissa are null. | |
| 888 | |
| 889 // Get the 32 higher bits of the mantissa in dst. | |
| 890 __ Ubfx(dst, src2, HeapNumber::kMantissaBitsInTopWord, 12); | |
|
Søren Thygesen Gjesse
2011/02/28 09:54:32
12 -> 32 - HeapNumber::kMantissaBitsInTopWord
Søren Thygesen Gjesse
2011/03/02 09:33:08
Done.
| |
| 891 __ orr(dst, | |
| 892 dst, | |
| 893 Operand(src1, LSL, HeapNumber::kNonMantissaBitsInTopWord)); | |
| 894 | |
| 895 // Create the mask and test the lower bits (of the higher bits). | |
| 896 __ rsb(scratch, scratch, Operand(32)); | |
| 897 __ mov(src2, Operand(1)); | |
| 898 __ mov(src1, Operand(src2, LSL, scratch)); | |
| 899 __ sub(src1, src1, Operand(1)); | |
| 900 __ tst(dst, src1); | |
| 901 __ b(ne, not_int32); | |
| 902 } | |
| 903 | |
| 904 | |
| 905 void FloatingPointHelper::CallCCodeForDoubleOperation( | |
| 906 MacroAssembler* masm, | |
| 907 Token::Value op, | |
| 908 Register heap_number_result, | |
| 909 Register scratch) { | |
| 910 // Using core registers: | |
| 911 // r0: Left value (least significant part of mantissa). | |
| 912 // r1: Left value (sign, exponent, top of mantissa). | |
| 913 // r2: Right value (least significant part of mantissa). | |
| 914 // r3: Right value (sign, exponent, top of mantissa). | |
| 915 | |
| 916 // Assert that heap_number_result is callee-saved. | |
| 917 // We currently always use r5 to pass it. | |
| 918 ASSERT(heap_number_result.is(r5)); | |
| 919 | |
| 920 // Push the current return address before the C call. Return will be | |
| 921 // through pop(pc) below. | |
| 922 __ push(lr); | |
| 923 __ PrepareCallCFunction(4, scratch); // Two doubles are 4 arguments. | |
| 924 // Call C routine that may not cause GC or other trouble. | |
| 925 __ CallCFunction(ExternalReference::double_fp_operation(op), 4); | |
| 926 // Store answer in the overwritable heap number. | |
| 927 #if !defined(USE_ARM_EABI) | |
| 928 // Double returned in fp coprocessor register 0 and 1, encoded as | |
| 929 // register cr8. Offsets must be divisible by 4 for coprocessor so we | |
| 930 // need to subtract the tag from heap_number_result. | |
| 931 __ sub(scratch, heap_number_result, Operand(kHeapObjectTag)); | |
| 932 __ stc(p1, cr8, MemOperand(scratch, HeapNumber::kValueOffset)); | |
| 933 #else | |
| 934 // Double returned in registers 0 and 1. | |
| 935 __ Strd(r0, r1, FieldMemOperand(heap_number_result, | |
| 936 HeapNumber::kValueOffset)); | |
| 937 #endif | |
| 938 // Place heap_number_result in r0 and return to the pushed return address. | |
| 939 __ mov(r0, Operand(heap_number_result)); | |
| 940 __ pop(pc); | |
| 941 } | |
| 942 | |
| 563 | 943 |
| 564 // See comment for class. | 944 // See comment for class. |
| 565 void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) { | 945 void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) { |
| 566 Label max_negative_int; | 946 Label max_negative_int; |
| 567 // the_int_ has the answer which is a signed int32 but not a Smi. | 947 // the_int_ has the answer which is a signed int32 but not a Smi. |
| 568 // We test for the special value that has a different exponent. This test | 948 // We test for the special value that has a different exponent. This test |
| 569 // has the neat side effect of setting the flags according to the sign. | 949 // has the neat side effect of setting the flags according to the sign. |
| 570 STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u); | 950 STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u); |
| 571 __ cmp(the_int_, Operand(0x80000000u)); | 951 __ cmp(the_int_, Operand(0x80000000u)); |
| 572 __ b(eq, &max_negative_int); | 952 __ b(eq, &max_negative_int); |
| (...skipping 2124 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 2697 break; | 3077 break; |
| 2698 default: | 3078 default: |
| 2699 UNREACHABLE(); | 3079 UNREACHABLE(); |
| 2700 } | 3080 } |
| 2701 | 3081 |
| 2702 __ sub(r0, result, Operand(kHeapObjectTag)); | 3082 __ sub(r0, result, Operand(kHeapObjectTag)); |
| 2703 __ vstr(d5, r0, HeapNumber::kValueOffset); | 3083 __ vstr(d5, r0, HeapNumber::kValueOffset); |
| 2704 __ add(r0, r0, Operand(kHeapObjectTag)); | 3084 __ add(r0, r0, Operand(kHeapObjectTag)); |
| 2705 __ Ret(); | 3085 __ Ret(); |
| 2706 } else { | 3086 } else { |
| 2707 // Using core registers: | 3087 // Call the C function to handle the double operation. |
| 2708 // r0: Left value (least significant part of mantissa). | 3088 FloatingPointHelper::CallCCodeForDoubleOperation(masm, |
| 2709 // r1: Left value (sign, exponent, top of mantissa). | 3089 op_, |
| 2710 // r2: Right value (least significant part of mantissa). | 3090 result, |
| 2711 // r3: Right value (sign, exponent, top of mantissa). | 3091 scratch1); |
| 2712 | |
| 2713 // Push the current return address before the C call. Return will be | |
| 2714 // through pop(pc) below. | |
| 2715 __ push(lr); | |
| 2716 __ PrepareCallCFunction(4, scratch1); // Two doubles are 4 arguments. | |
| 2717 // Call C routine that may not cause GC or other trouble. r5 is callee | |
| 2718 // save. | |
| 2719 __ CallCFunction(ExternalReference::double_fp_operation(op_), 4); | |
| 2720 // Store answer in the overwritable heap number. | |
| 2721 #if !defined(USE_ARM_EABI) | |
| 2722 // Double returned in fp coprocessor register 0 and 1, encoded as | |
| 2723 // register cr8. Offsets must be divisible by 4 for coprocessor so we | |
| 2724 // need to substract the tag from r5. | |
| 2725 __ sub(scratch1, result, Operand(kHeapObjectTag)); | |
| 2726 __ stc(p1, cr8, MemOperand(scratch1, HeapNumber::kValueOffset)); | |
| 2727 #else | |
| 2728 // Double returned in registers 0 and 1. | |
| 2729 __ Strd(r0, r1, FieldMemOperand(result, HeapNumber::kValueOffset)); | |
| 2730 #endif | |
| 2731 // Plase result in r0 and return to the pushed return address. | |
| 2732 __ mov(r0, Operand(result)); | |
| 2733 __ pop(pc); | |
| 2734 } | 3092 } |
| 2735 break; | 3093 break; |
| 2736 } | 3094 } |
| 2737 case Token::BIT_OR: | 3095 case Token::BIT_OR: |
| 2738 case Token::BIT_XOR: | 3096 case Token::BIT_XOR: |
| 2739 case Token::BIT_AND: | 3097 case Token::BIT_AND: |
| 2740 case Token::SAR: | 3098 case Token::SAR: |
| 2741 case Token::SHR: | 3099 case Token::SHR: |
| 2742 case Token::SHL: { | 3100 case Token::SHL: { |
| 2743 if (smi_operands) { | 3101 if (smi_operands) { |
| (...skipping 25 matching lines...) Expand all Loading... | |
| 2769 __ orr(r2, r3, Operand(r2)); | 3127 __ orr(r2, r3, Operand(r2)); |
| 2770 break; | 3128 break; |
| 2771 case Token::BIT_XOR: | 3129 case Token::BIT_XOR: |
| 2772 __ eor(r2, r3, Operand(r2)); | 3130 __ eor(r2, r3, Operand(r2)); |
| 2773 break; | 3131 break; |
| 2774 case Token::BIT_AND: | 3132 case Token::BIT_AND: |
| 2775 __ and_(r2, r3, Operand(r2)); | 3133 __ and_(r2, r3, Operand(r2)); |
| 2776 break; | 3134 break; |
| 2777 case Token::SAR: | 3135 case Token::SAR: |
| 2778 // Use only the 5 least significant bits of the shift count. | 3136 // Use only the 5 least significant bits of the shift count. |
| 2779 __ and_(r2, r2, Operand(0x1f)); | |
| 2780 __ GetLeastBitsFromInt32(r2, r2, 5); | 3137 __ GetLeastBitsFromInt32(r2, r2, 5); |
| 2781 __ mov(r2, Operand(r3, ASR, r2)); | 3138 __ mov(r2, Operand(r3, ASR, r2)); |
| 2782 break; | 3139 break; |
| 2783 case Token::SHR: | 3140 case Token::SHR: |
| 2784 // Use only the 5 least significant bits of the shift count. | 3141 // Use only the 5 least significant bits of the shift count. |
| 2785 __ GetLeastBitsFromInt32(r2, r2, 5); | 3142 __ GetLeastBitsFromInt32(r2, r2, 5); |
| 2786 __ mov(r2, Operand(r3, LSR, r2), SetCC); | 3143 __ mov(r2, Operand(r3, LSR, r2), SetCC); |
| 2787 // SHR is special because it is required to produce a positive answer. | 3144 // SHR is special because it is required to produce a positive answer. |
| 2788 // The code below for writing into heap numbers isn't capable of | 3145 // The code below for writing into heap numbers isn't capable of |
| 2789 // writing the register as an unsigned int so we go to slow case if we | 3146 // writing the register as an unsigned int so we go to slow case if we |
| (...skipping 124 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 2914 // Try to add arguments as strings, otherwise, transition to the generic | 3271 // Try to add arguments as strings, otherwise, transition to the generic |
| 2915 // TRBinaryOpIC type. | 3272 // TRBinaryOpIC type. |
| 2916 GenerateAddStrings(masm); | 3273 GenerateAddStrings(masm); |
| 2917 GenerateTypeTransition(masm); | 3274 GenerateTypeTransition(masm); |
| 2918 } | 3275 } |
| 2919 | 3276 |
| 2920 | 3277 |
| 2921 void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { | 3278 void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { |
| 2922 ASSERT(operands_type_ == TRBinaryOpIC::INT32); | 3279 ASSERT(operands_type_ == TRBinaryOpIC::INT32); |
| 2923 | 3280 |
| 3281 Register left = r1; | |
| 3282 Register right = r0; | |
| 3283 Register scratch1 = r7; | |
| 3284 Register scratch2 = r9; | |
| 3285 DwVfpRegister double_scratch = d0; | |
| 3286 SwVfpRegister single_scratch = s3; | |
| 3287 | |
| 3288 Register heap_number_result = no_reg; | |
| 3289 Register heap_number_map = r6; | |
| 3290 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | |
| 3291 | |
| 3292 Label call_runtime, restore_left_and_call_runtime; | |
| 3293 // Labels for type transition, used for wrong input or output types. | |
| 3294 // Both labels are currently actually bound to the same position. We use two | |
| 3295 // different labels to differentiate the cause leading to type transition. | |
| 3296 Label input_transition, output_transition; | |
| 3297 | |
| 3298 // Smi-smi fast case. | |
| 3299 Label skip; | |
| 3300 __ orr(scratch1, left, right); | |
| 3301 __ JumpIfNotSmi(scratch1, &skip); | |
| 3302 GenerateSmiSmiOperation(masm); | |
| 3303 // Fall through if the result is not a smi. | |
| 3304 __ bind(&skip); | |
| 3305 | |
| 3306 switch (op_) { | |
| 3307 case Token::ADD: | |
| 3308 case Token::SUB: | |
| 3309 case Token::MUL: | |
| 3310 case Token::DIV: | |
| 3311 case Token::MOD: { | |
| 3312 // Load both operands and check that they are 32bit integers. | |
| 3313 // Jump to type transition if they are not. | |
| 3314 FloatingPointHelper::Destination destination = | |
| 3315 CpuFeatures::IsSupported(VFP3) && op_ != Token::MOD ? | |
| 3316 FloatingPointHelper::kVFPRegisters : | |
| 3317 FloatingPointHelper::kCoreRegisters; | |
| 3318 | |
|
Søren Thygesen Gjesse
2011/02/28 09:54:32
Maybe add a comment here that r0 and r1 are preser
Søren Thygesen Gjesse
2011/03/02 09:33:08
Done.
| |
| 3319 FloatingPointHelper::LoadNumberAsInt32Double(masm, | |
| 3320 right, | |
| 3321 destination, | |
| 3322 d7, | |
| 3323 r2, | |
| 3324 r3, | |
| 3325 heap_number_map, | |
| 3326 scratch1, | |
| 3327 scratch2, | |
| 3328 s0, | |
| 3329 &input_transition); | |
| 3330 FloatingPointHelper::LoadNumberAsInt32Double(masm, | |
| 3331 left, | |
| 3332 destination, | |
| 3333 d6, | |
| 3334 r4, | |
| 3335 r5, | |
| 3336 heap_number_map, | |
| 3337 scratch1, | |
| 3338 scratch2, | |
| 3339 s0, | |
| 3340 &input_transition); | |
| 3341 | |
| 3342 if (destination == FloatingPointHelper::kVFPRegisters) { | |
| 3343 CpuFeatures::Scope scope(VFP3); | |
| 3344 Label return_heap_number; | |
| 3345 switch (op_) { | |
| 3346 case Token::ADD: | |
| 3347 __ vadd(d5, d6, d7); | |
| 3348 break; | |
| 3349 case Token::SUB: | |
| 3350 __ vsub(d5, d6, d7); | |
| 3351 break; | |
| 3352 case Token::MUL: | |
| 3353 __ vmul(d5, d6, d7); | |
| 3354 break; | |
| 3355 case Token::DIV: | |
| 3356 __ vdiv(d5, d6, d7); | |
| 3357 break; | |
| 3358 default: | |
| 3359 UNREACHABLE(); | |
| 3360 } | |
| 3361 | |
| 3362 if (op_ != Token::DIV) { | |
| 3363 // These operations produce an integer result. | |
| 3364 // Try to return a smi if we can. | |
| 3365 // Otherwise return a heap number if allowed, or jump to type | |
| 3366 // transition. | |
| 3367 | |
| 3368 __ EmitVFPTruncate(kRoundToZero, | |
| 3369 single_scratch, | |
| 3370 d5, | |
| 3371 scratch1, | |
| 3372 scratch2); | |
| 3373 | |
| 3374 if (result_type_ <= TRBinaryOpIC::INT32) { | |
| 3375 // If the ne condition is set, result does | |
| 3376 // not fit in a 32bit integer. | |
| 3377 __ b(ne, &output_transition); | |
| 3378 } | |
| 3379 | |
| 3380 // Check if the result fits in a smi. | |
| 3381 __ vmov(scratch1, single_scratch); | |
| 3382 __ add(scratch2, scratch1, Operand(0x40000000), SetCC); | |
| 3383 // If not try to return a heap number. | |
| 3384 __ b(mi, &return_heap_number); | |
| 3385 // Tag the result and return. | |
| 3386 __ SmiTag(r0, scratch1); | |
| 3387 __ Ret(); | |
| 3388 } | |
| 3389 | |
| 3390 if (result_type_ >= (op_ == Token::DIV) ? TRBinaryOpIC::HEAP_NUMBER | |
| 3391 : TRBinaryOpIC::INT32) { | |
| 3392 __ bind(&return_heap_number); | |
| 3393 // We are using vfp registers so r5 is available. | |
| 3394 heap_number_result = r5; | |
| 3395 GenerateHeapResultAllocation(masm, | |
| 3396 heap_number_result, | |
| 3397 heap_number_map, | |
| 3398 scratch1, | |
| 3399 scratch2, | |
| 3400 &call_runtime); | |
| 3401 __ sub(r0, heap_number_result, Operand(kHeapObjectTag)); | |
| 3402 __ vstr(d5, r0, HeapNumber::kValueOffset); | |
| 3403 __ mov(r0, heap_number_result); | |
| 3404 __ Ret(); | |
| 3405 } | |
| 3406 | |
| 3407 // A DIV operation expecting an integer result falls through | |
| 3408 // to type transition. | |
| 3409 | |
| 3410 } else { | |
| 3411 // We preserved r0 and r1 to be able to call runtime. | |
| 3412 // Save the left value on the stack. | |
| 3413 __ Push(r5, r4); | |
| 3414 | |
| 3415 // Allocate a heap number to store the result. | |
| 3416 heap_number_result = r5; | |
| 3417 GenerateHeapResultAllocation(masm, | |
| 3418 heap_number_result, | |
| 3419 heap_number_map, | |
| 3420 scratch1, | |
| 3421 scratch2, | |
| 3422 &call_runtime); | |
| 3423 | |
| 3424 // Load the left value from the value saved on the stack. | |
| 3425 __ Pop(r1, r0); | |
| 3426 | |
| 3427 // Call the C function to handle the double operation. | |
| 3428 FloatingPointHelper::CallCCodeForDoubleOperation( | |
| 3429 masm, op_, heap_number_result, scratch1); | |
| 3430 } | |
| 3431 | |
| 3432 break; | |
| 3433 } | |
| 3434 | |
| 3435 case Token::BIT_OR: | |
| 3436 case Token::BIT_XOR: | |
| 3437 case Token::BIT_AND: | |
| 3438 case Token::SAR: | |
| 3439 case Token::SHR: | |
| 3440 case Token::SHL: { | |
| 3441 Label return_heap_number; | |
| 3442 Register scratch3 = r5; | |
| 3443 // Convert operands to 32-bit integers. Right in r2 and left in r3. | |
|
Søren Thygesen Gjesse
2011/02/28 09:54:32
Add "Preserve r0 and r1 for the runtime call to th
Søren Thygesen Gjesse
2011/03/02 09:33:08
Done.
| |
| 3444 FloatingPointHelper::LoadNumberAsInt32(masm, | |
| 3445 left, | |
| 3446 r3, | |
| 3447 heap_number_map, | |
| 3448 scratch1, | |
| 3449 scratch2, | |
| 3450 scratch3, | |
| 3451 d0, | |
| 3452 &input_transition); | |
| 3453 FloatingPointHelper::LoadNumberAsInt32(masm, | |
| 3454 right, | |
| 3455 r2, | |
| 3456 heap_number_map, | |
| 3457 scratch1, | |
| 3458 scratch2, | |
| 3459 scratch3, | |
| 3460 d0, | |
| 3461 &input_transition); | |
| 3462 | |
| 3463 // The ECMA-262 standard specifies that, for shift operations, only the | |
| 3464 // 5 least significant bits of the shift value should be used. | |
| 3465 switch (op_) { | |
| 3466 case Token::BIT_OR: | |
| 3467 __ orr(r2, r3, Operand(r2)); | |
| 3468 break; | |
| 3469 case Token::BIT_XOR: | |
| 3470 __ eor(r2, r3, Operand(r2)); | |
| 3471 break; | |
| 3472 case Token::BIT_AND: | |
| 3473 __ and_(r2, r3, Operand(r2)); | |
| 3474 break; | |
| 3475 case Token::SAR: | |
| 3476 __ and_(r2, r2, Operand(0x1f)); | |
| 3477 __ mov(r2, Operand(r3, ASR, r2)); | |
| 3478 break; | |
| 3479 case Token::SHR: | |
| 3480 __ and_(r2, r2, Operand(0x1f)); | |
| 3481 __ mov(r2, Operand(r3, LSR, r2), SetCC); | |
| 3482 // SHR is special because it is required to produce a positive answer. | |
| 3483 // We only get a negative result if the shift value (r2) is 0. | |
| 3484 // This result cannot be represented as a signed 32bit integer, try | |
| 3485 // to return a heap number if we can. | |
| 3486 // The non vfp3 code does not support this special case, so jump to | |
|
Søren Thygesen Gjesse
2011/02/28 09:54:32
Can we fix WriteInt32ToHeapNumberStub to avoid a r
Søren Thygesen Gjesse
2011/03/02 09:33:08
Postponed.
| |
| 3487 // runtime if we don't support it. | |
| 3488 if (CpuFeatures::IsSupported(VFP3)) { | |
|
Søren Thygesen Gjesse
2011/02/28 09:54:32
No need to actually enter VFP3 scope here, as the
Søren Thygesen Gjesse
2011/03/02 09:33:08
Done.
| |
| 3489 CpuFeatures::Scope scope(VFP3); | |
| 3490 __ b(mi, | |
| 3491 (result_type_ <= TRBinaryOpIC::INT32) ? &output_transition | |
| 3492 : &return_heap_number); | |
| 3493 } else { | |
| 3494 __ b(mi, (result_type_ <= TRBinaryOpIC::INT32) ? &output_transition | |
| 3495 : &call_runtime); | |
| 3496 } | |
| 3497 break; | |
| 3498 case Token::SHL: | |
| 3499 __ and_(r2, r2, Operand(0x1f)); | |
| 3500 __ mov(r2, Operand(r3, LSL, r2)); | |
| 3501 break; | |
| 3502 default: | |
| 3503 UNREACHABLE(); | |
| 3504 } | |
| 3505 | |
| 3506 // Check if the result fits in a smi. | |
| 3507 __ add(scratch1, r2, Operand(0x40000000), SetCC); | |
| 3508 // If not, try to return a heap number. (We know the result is an int32.) | |
| 3509 __ b(mi, &return_heap_number); | |
| 3510 // Tag the result and return. | |
| 3511 __ SmiTag(r0, r2); | |
| 3512 __ Ret(); | |
| 3513 | |
| 3514 __ bind(&return_heap_number); | |
| 3515 if (CpuFeatures::IsSupported(VFP3)) { | |
| 3516 CpuFeatures::Scope scope(VFP3); | |
| 3517 heap_number_result = r5; | |
| 3518 GenerateHeapResultAllocation(masm, | |
| 3519 heap_number_result, | |
| 3520 heap_number_map, | |
| 3521 scratch1, | |
| 3522 scratch2, | |
| 3523 &call_runtime); | |
| 3524 | |
| 3525 if (op_ != Token::SHR) { | |
| 3526 // Convert the result to a floating point value. | |
| 3527 __ vmov(double_scratch.low(), r2); | |
| 3528 __ vcvt_f64_s32(double_scratch, double_scratch.low()); | |
| 3529 } else { | |
| 3530 // The result must be interpreted as an unsigned 32bit integer. | |
| 3531 __ vmov(double_scratch.low(), r2); | |
| 3532 __ vcvt_f64_u32(double_scratch, double_scratch.low()); | |
| 3533 } | |
| 3534 | |
| 3535 // Store the result. | |
| 3536 __ sub(r0, heap_number_result, Operand(kHeapObjectTag)); | |
| 3537 __ vstr(double_scratch, r0, HeapNumber::kValueOffset); | |
| 3538 __ mov(r0, heap_number_result); | |
| 3539 __ Ret(); | |
| 3540 } else { | |
| 3541 // Tail call that writes the int32 in r2 to the heap number in r0, using | |
| 3542 // r3 as scratch. r0 is preserved and returned. | |
| 3543 WriteInt32ToHeapNumberStub stub(r2, r0, r3); | |
| 3544 __ TailCallStub(&stub); | |
| 3545 } | |
| 3546 | |
| 3547 break; | |
| 3548 } | |
| 3549 | |
| 3550 default: | |
| 3551 UNREACHABLE(); | |
| 3552 } | |
| 3553 | |
| 3554 __ bind(&input_transition); | |
| 3555 __ bind(&output_transition); | |
| 2924 GenerateTypeTransition(masm); | 3556 GenerateTypeTransition(masm); |
| 3557 | |
|
Søren Thygesen Gjesse
2011/02/28 09:54:32
I see no jump to this label - dead code?
Søren Thygesen Gjesse
2011/03/02 09:33:08
Removed.
| |
| 3558 __ bind(&restore_left_and_call_runtime); | |
| 3559 __ Pop(r1, r0); | |
| 3560 __ bind(&call_runtime); | |
| 3561 GenerateCallRuntime(masm); | |
| 2925 } | 3562 } |
| 2926 | 3563 |
| 2927 | 3564 |
| 2928 void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { | 3565 void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { |
| 2929 Label not_numbers, call_runtime; | 3566 Label not_numbers, call_runtime; |
| 2930 ASSERT(operands_type_ == TRBinaryOpIC::HEAP_NUMBER); | 3567 ASSERT(operands_type_ == TRBinaryOpIC::HEAP_NUMBER); |
| 2931 | 3568 |
| 2932 GenerateFPOperation(masm, false, ¬_numbers, &call_runtime); | 3569 GenerateFPOperation(masm, false, ¬_numbers, &call_runtime); |
| 2933 | 3570 |
| 2934 __ bind(¬_numbers); | 3571 __ bind(¬_numbers); |
| (...skipping 3192 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 6127 __ strb(untagged_value, MemOperand(external_pointer, untagged_key)); | 6764 __ strb(untagged_value, MemOperand(external_pointer, untagged_key)); |
| 6128 __ Ret(); | 6765 __ Ret(); |
| 6129 } | 6766 } |
| 6130 | 6767 |
| 6131 | 6768 |
| 6132 #undef __ | 6769 #undef __ |
| 6133 | 6770 |
| 6134 } } // namespace v8::internal | 6771 } } // namespace v8::internal |
| 6135 | 6772 |
| 6136 #endif // V8_TARGET_ARCH_ARM | 6773 #endif // V8_TARGET_ARCH_ARM |
| OLD | NEW |