Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(80)

Side by Side Diff: src/arm/ic-arm.cc

Issue 2024002: Pass key and receiver in registers for keyed load IC on ARM... (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: '' Created 10 years, 7 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « src/arm/full-codegen-arm.cc ('k') | src/arm/macro-assembler-arm.h » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2006-2008 the V8 project authors. All rights reserved. 1 // Copyright 2006-2008 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 665 matching lines...) Expand 10 before | Expand all | Expand 10 after
676 } 676 }
677 677
678 678
679 Object* KeyedLoadIC_Miss(Arguments args); 679 Object* KeyedLoadIC_Miss(Arguments args);
680 680
681 681
682 void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) { 682 void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
683 // ---------- S t a t e -------------- 683 // ---------- S t a t e --------------
684 // -- lr : return address 684 // -- lr : return address
685 // -- r0 : key 685 // -- r0 : key
686 // -- sp[0] : key 686 // -- r1 : receiver
687 // -- sp[4] : receiver
688 // ----------------------------------- 687 // -----------------------------------
689 688
690 __ ldr(r1, MemOperand(sp, kPointerSize));
691 __ Push(r1, r0); 689 __ Push(r1, r0);
692 690
693 ExternalReference ref = ExternalReference(IC_Utility(kKeyedLoadIC_Miss)); 691 ExternalReference ref = ExternalReference(IC_Utility(kKeyedLoadIC_Miss));
694 __ TailCallExternalReference(ref, 2, 1); 692 __ TailCallExternalReference(ref, 2, 1);
695 } 693 }
696 694
697 695
698 void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) { 696 void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
699 // ---------- S t a t e -------------- 697 // ---------- S t a t e --------------
700 // -- lr : return address 698 // -- lr : return address
701 // -- r0 : key 699 // -- r0 : key
702 // -- sp[0] : key 700 // -- r1 : receiver
703 // -- sp[4] : receiver
704 // ----------------------------------- 701 // -----------------------------------
705 702
706 __ ldr(r1, MemOperand(sp, kPointerSize));
707 __ Push(r1, r0); 703 __ Push(r1, r0);
708 704
709 __ TailCallRuntime(Runtime::kGetProperty, 2, 1); 705 __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
710 } 706 }
711 707
712 708
713 void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { 709 void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
714 // ---------- S t a t e -------------- 710 // ---------- S t a t e --------------
715 // -- lr : return address 711 // -- lr : return address
716 // -- r0 : key 712 // -- r0 : key
717 // -- sp[0] : key 713 // -- r1 : receiver
718 // -- sp[4] : receiver
719 // ----------------------------------- 714 // -----------------------------------
720 Label slow, fast, check_pixel_array, check_number_dictionary; 715 Label slow, fast, check_pixel_array, check_number_dictionary;
721 716
722 // Get the object from the stack. 717 Register key = r0;
723 __ ldr(r1, MemOperand(sp, kPointerSize)); 718 Register receiver = r1;
724 719
725 // Check that the object isn't a smi. 720 // Check that the object isn't a smi.
726 __ BranchOnSmi(r1, &slow); 721 __ BranchOnSmi(receiver, &slow);
727 // Get the map of the receiver. 722 // Get the map of the receiver.
728 __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset)); 723 __ ldr(r2, FieldMemOperand(receiver, HeapObject::kMapOffset));
729 // Check bit field. 724 // Check bit field.
730 __ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset)); 725 __ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset));
731 __ tst(r3, Operand(kSlowCaseBitFieldMask)); 726 __ tst(r3, Operand(kSlowCaseBitFieldMask));
732 __ b(ne, &slow); 727 __ b(ne, &slow);
733 // Check that the object is some kind of JS object EXCEPT JS Value type. 728 // Check that the object is some kind of JS object EXCEPT JS Value type.
734 // In the case that the object is a value-wrapper object, 729 // In the case that the object is a value-wrapper object,
735 // we enter the runtime system to make sure that indexing into string 730 // we enter the runtime system to make sure that indexing into string
736 // objects works as intended. 731 // objects works as intended.
737 ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE); 732 ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
738 __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset)); 733 __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
739 __ cmp(r2, Operand(JS_OBJECT_TYPE)); 734 __ cmp(r2, Operand(JS_OBJECT_TYPE));
740 __ b(lt, &slow); 735 __ b(lt, &slow);
741 736
742 // Check that the key is a smi. 737 // Check that the key is a smi.
743 __ BranchOnNotSmi(r0, &slow); 738 __ BranchOnNotSmi(key, &slow);
745 // Save key in r2 in case we want it for the number dictionary case. 739 // Untag key into r2.
745 __ mov(r2, r0); 740 __ mov(r2, Operand(key, ASR, kSmiTagSize));
746 __ mov(r0, Operand(r0, ASR, kSmiTagSize));
747 741
748 // Get the elements array of the object. 742 // Get the elements array of the object.
749 __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset)); 743 __ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset));
750 // Check that the object is in fast mode (not dictionary). 744 // Check that the object is in fast mode (not dictionary).
751 __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset)); 745 __ ldr(r3, FieldMemOperand(r4, HeapObject::kMapOffset));
752 __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex); 746 __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
753 __ cmp(r3, ip); 747 __ cmp(r3, ip);
754 __ b(ne, &check_pixel_array); 748 __ b(ne, &check_pixel_array);
755 // Check that the key (index) is within bounds. 749 // Check that the key (index) is within bounds.
756 __ ldr(r3, FieldMemOperand(r1, Array::kLengthOffset)); 750 __ ldr(r3, FieldMemOperand(r4, Array::kLengthOffset));
757 __ cmp(r0, r3); 751 __ cmp(r2, r3);
758 __ b(hs, &slow); 752 __ b(hs, &slow);
759 // Fast case: Do the load. 753 // Fast case: Do the load.
760 __ add(r3, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); 754 __ add(r3, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
761 __ ldr(r0, MemOperand(r3, r0, LSL, kPointerSizeLog2)); 755 __ ldr(r2, MemOperand(r3, r2, LSL, kPointerSizeLog2));
762 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); 756 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
763 __ cmp(r0, ip); 757 __ cmp(r2, ip);
764 // In case the loaded value is the_hole we have to consult GetProperty 758 // In case the loaded value is the_hole we have to consult GetProperty
765 // to ensure the prototype chain is searched. 759 // to ensure the prototype chain is searched.
766 __ b(eq, &slow); 760 __ b(eq, &slow);
761 __ mov(r0, r2);
767 __ Ret(); 762 __ Ret();
768 763
769 // Check whether the elements is a pixel array. 764 // Check whether the elements is a pixel array.
765 // r0: key
766 // r2: untagged index
767 // r3: elements map
768 // r4: elements
770 __ bind(&check_pixel_array); 769 __ bind(&check_pixel_array);
771 __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex); 770 __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex);
772 __ cmp(r3, ip); 771 __ cmp(r3, ip);
773 __ b(ne, &check_number_dictionary); 772 __ b(ne, &check_number_dictionary);
774 __ ldr(ip, FieldMemOperand(r1, PixelArray::kLengthOffset)); 773 __ ldr(ip, FieldMemOperand(r4, PixelArray::kLengthOffset));
775 __ cmp(r0, ip); 774 __ cmp(r2, ip);
776 __ b(hs, &slow); 775 __ b(hs, &slow);
777 __ ldr(ip, FieldMemOperand(r1, PixelArray::kExternalPointerOffset)); 776 __ ldr(ip, FieldMemOperand(r4, PixelArray::kExternalPointerOffset));
778 __ ldrb(r0, MemOperand(ip, r0)); 777 __ ldrb(r2, MemOperand(ip, r2));
779 __ mov(r0, Operand(r0, LSL, kSmiTagSize)); // Tag result as smi. 778 __ mov(r0, Operand(r2, LSL, kSmiTagSize)); // Tag result as smi.
780 __ Ret(); 779 __ Ret();
781 780
782 __ bind(&check_number_dictionary); 781 __ bind(&check_number_dictionary);
783 // Check whether the elements is a number dictionary. 782 // Check whether the elements is a number dictionary.
784 // r0: untagged index 783 // r0: key
785 // r1: elements 784 // r2: untagged index
786 // r2: key 785 // r3: elements map
786 // r4: elements
787 __ LoadRoot(ip, Heap::kHashTableMapRootIndex); 787 __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
788 __ cmp(r3, ip); 788 __ cmp(r3, ip);
789 __ b(ne, &slow); 789 __ b(ne, &slow);
790 GenerateNumberDictionaryLoad(masm, &slow, r1, r2, r0, r3, r4); 790 GenerateNumberDictionaryLoad(masm, &slow, r4, r0, r2, r3, r5);
791 __ mov(r0, r2);
791 __ Ret(); 792 __ Ret();
792 793
793 // Slow case: Push extra copies of the arguments (2). 794 // Slow case, key and receiver still in r0 and r1.
794 __ bind(&slow); 795 __ bind(&slow);
795 __ IncrementCounter(&Counters::keyed_load_generic_slow, 1, r0, r1); 796 __ IncrementCounter(&Counters::keyed_load_generic_slow, 1, r2, r3);
796 __ ldr(r0, MemOperand(sp, 0));
797 GenerateRuntimeGetProperty(masm); 797 GenerateRuntimeGetProperty(masm);
798 } 798 }
799 799
800 800
801 void KeyedLoadIC::GenerateString(MacroAssembler* masm) { 801 void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
802 // ---------- S t a t e -------------- 802 // ---------- S t a t e --------------
803 // -- lr : return address 803 // -- lr : return address
804 // -- r0 : key 804 // -- r0 : key
805 // -- sp[0] : key 805 // -- r1 : receiver
806 // -- sp[4] : receiver
807 // ----------------------------------- 806 // -----------------------------------
808 Label miss; 807 Label miss;
809 Label index_not_smi; 808 Label index_not_smi;
810 Label index_out_of_range; 809 Label index_out_of_range;
811 Label slow_char_code; 810 Label slow_char_code;
812 Label got_char_code; 811 Label got_char_code;
813 812
814 // Get the object from the stack.
815 __ ldr(r1, MemOperand(sp, kPointerSize));
816
817 Register object = r1; 813 Register object = r1;
818 Register index = r0; 814 Register index = r0;
819 Register code = r2; 815 Register code = r2;
820 Register scratch = r3; 816 Register scratch = r3;
821 817
822 StringHelper::GenerateFastCharCodeAt(masm, 818 StringHelper::GenerateFastCharCodeAt(masm,
823 object, 819 object,
824 index, 820 index,
825 scratch, 821 scratch,
826 code, 822 code,
(...skipping 11 matching lines...) Expand all
838 #endif 834 #endif
839 835
840 // Check if key is a heap number. 836 // Check if key is a heap number.
841 __ bind(&index_not_smi); 837 __ bind(&index_not_smi);
842 __ CheckMap(index, scratch, Factory::heap_number_map(), &miss, true); 838 __ CheckMap(index, scratch, Factory::heap_number_map(), &miss, true);
843 839
844 // Push receiver and key on the stack (now that we know they are a 840 // Push receiver and key on the stack (now that we know they are a
845 // string and a number), and call runtime. 841 // string and a number), and call runtime.
846 __ bind(&slow_char_code); 842 __ bind(&slow_char_code);
847 __ EnterInternalFrame(); 843 __ EnterInternalFrame();
844 ASSERT(object.code() > index.code());
848 __ Push(object, index); 845 __ Push(object, index);
849 __ CallRuntime(Runtime::kStringCharCodeAt, 2); 846 __ CallRuntime(Runtime::kStringCharCodeAt, 2);
850 ASSERT(!code.is(r0)); 847 ASSERT(!code.is(r0));
851 __ mov(code, r0); 848 __ mov(code, r0);
852 __ LeaveInternalFrame(); 849 __ LeaveInternalFrame();
853 850
854 // Check if the runtime call returned NaN char code. If yes, return 851 // Check if the runtime call returned NaN char code. If yes, return
855 // undefined. Otherwise, we can continue. 852 // undefined. Otherwise, we can continue.
856 if (FLAG_debug_code) { 853 if (FLAG_debug_code) {
857 __ BranchOnSmi(code, &got_char_code); 854 __ BranchOnSmi(code, &got_char_code);
(...skipping 48 matching lines...) Expand 10 before | Expand all | Expand 10 after
906 __ bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift)); 903 __ bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift));
907 } 904 }
908 } 905 }
909 906
910 907
911 void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm, 908 void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
912 ExternalArrayType array_type) { 909 ExternalArrayType array_type) {
913 // ---------- S t a t e -------------- 910 // ---------- S t a t e --------------
914 // -- lr : return address 911 // -- lr : return address
915 // -- r0 : key 912 // -- r0 : key
916 // -- sp[0] : key 913 // -- r1 : receiver
917 // -- sp[4] : receiver
918 // ----------------------------------- 914 // -----------------------------------
919 Label slow, failed_allocation; 915 Label slow, failed_allocation;
920 916
921 // Get the object from the stack. 917 Register key = r0;
922 __ ldr(r1, MemOperand(sp, kPointerSize)); 918 Register receiver = r1;
923
924 // r0: key
925 // r1: receiver object
926 919
927 // Check that the object isn't a smi 920 // Check that the object isn't a smi
928 __ BranchOnSmi(r1, &slow); 921 __ BranchOnSmi(receiver, &slow);
929 922
930 // Check that the key is a smi. 923 // Check that the key is a smi.
931 __ BranchOnNotSmi(r0, &slow); 924 __ BranchOnNotSmi(key, &slow);
932 925
933 // Check that the object is a JS object. Load map into r2. 926 // Check that the object is a JS object. Load map into r2.
934 __ CompareObjectType(r1, r2, r3, FIRST_JS_OBJECT_TYPE); 927 __ CompareObjectType(receiver, r2, r3, FIRST_JS_OBJECT_TYPE);
935 __ b(lt, &slow); 928 __ b(lt, &slow);
936 929
937 // Check that the receiver does not require access checks. We need 930 // Check that the receiver does not require access checks. We need
938 // to check this explicitly since this generic stub does not perform 931 // to check this explicitly since this generic stub does not perform
939 // map checks. 932 // map checks.
940 __ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset)); 933 __ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset));
941 __ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded)); 934 __ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded));
942 __ b(ne, &slow); 935 __ b(ne, &slow);
943 936
944 // Check that the elements array is the appropriate type of 937 // Check that the elements array is the appropriate type of
945 // ExternalArray. 938 // ExternalArray.
946 // r0: index (as a smi) 939 __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
947 // r1: JSObject 940 __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
948 __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));
949 __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
950 __ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type)); 941 __ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type));
951 __ cmp(r2, ip); 942 __ cmp(r2, ip);
952 __ b(ne, &slow); 943 __ b(ne, &slow);
953 944
954 // Check that the index is in range. 945 // Check that the index is in range.
955 __ ldr(ip, FieldMemOperand(r1, ExternalArray::kLengthOffset)); 946 __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
956 __ cmp(r1, Operand(r0, ASR, kSmiTagSize)); 947 __ cmp(ip, Operand(key, ASR, kSmiTagSize));
957 // Unsigned comparison catches both negative and too-large values. 948 // Unsigned comparison catches both negative and too-large values.
958 __ b(lo, &slow); 949 __ b(lo, &slow);
959 950
960 // r0: index (smi) 951 // r3: elements array
961 // r1: elements array 952 __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
962 __ ldr(r1, FieldMemOperand(r1, ExternalArray::kExternalPointerOffset)); 953 // r3: base pointer of external storage
963 // r1: base pointer of external storage
964 954
965 // We are not untagging the smi key and instead work with it 955 // We are not untagging the smi key and instead work with it
966 // as if it was premultiplied by 2. 956 // as if it was premultiplied by 2.
967 ASSERT((kSmiTag == 0) && (kSmiTagSize == 1)); 957 ASSERT((kSmiTag == 0) && (kSmiTagSize == 1));
968 958
959 Register value = r2;
969 switch (array_type) { 960 switch (array_type) {
970 case kExternalByteArray: 961 case kExternalByteArray:
971 __ ldrsb(r0, MemOperand(r1, r0, LSR, 1)); 962 __ ldrsb(value, MemOperand(r3, key, LSR, 1));
972 break; 963 break;
973 case kExternalUnsignedByteArray: 964 case kExternalUnsignedByteArray:
974 __ ldrb(r0, MemOperand(r1, r0, LSR, 1)); 965 __ ldrb(value, MemOperand(r3, key, LSR, 1));
975 break; 966 break;
976 case kExternalShortArray: 967 case kExternalShortArray:
977 __ ldrsh(r0, MemOperand(r1, r0, LSL, 0)); 968 __ ldrsh(value, MemOperand(r3, key, LSL, 0));
978 break; 969 break;
979 case kExternalUnsignedShortArray: 970 case kExternalUnsignedShortArray:
980 __ ldrh(r0, MemOperand(r1, r0, LSL, 0)); 971 __ ldrh(value, MemOperand(r3, key, LSL, 0));
981 break; 972 break;
982 case kExternalIntArray: 973 case kExternalIntArray:
983 case kExternalUnsignedIntArray: 974 case kExternalUnsignedIntArray:
984 __ ldr(r0, MemOperand(r1, r0, LSL, 1)); 975 __ ldr(value, MemOperand(r3, key, LSL, 1));
985 break; 976 break;
986 case kExternalFloatArray: 977 case kExternalFloatArray:
987 if (CpuFeatures::IsSupported(VFP3)) { 978 if (CpuFeatures::IsSupported(VFP3)) {
988 CpuFeatures::Scope scope(VFP3); 979 CpuFeatures::Scope scope(VFP3);
989 __ add(r0, r1, Operand(r0, LSL, 1)); 980 __ add(r2, r3, Operand(key, LSL, 1));
990 __ vldr(s0, r0, 0); 981 __ vldr(s0, r2, 0);
991 } else { 982 } else {
992 __ ldr(r0, MemOperand(r1, r0, LSL, 1)); 983 __ ldr(value, MemOperand(r3, key, LSL, 1));
993 } 984 }
994 break; 985 break;
995 default: 986 default:
996 UNREACHABLE(); 987 UNREACHABLE();
997 break; 988 break;
998 } 989 }
999 990
1000 // For integer array types: 991 // For integer array types:
1001 // r0: value 992 // r2: value
1002 // For floating-point array type 993 // For floating-point array type
1003 // s0: value (if VFP3 is supported) 994 // s0: value (if VFP3 is supported)
1004 // r0: value (if VFP3 is not supported) 995 // r2: value (if VFP3 is not supported)
1005 996
1006 if (array_type == kExternalIntArray) { 997 if (array_type == kExternalIntArray) {
1007 // For the Int and UnsignedInt array types, we need to see whether 998 // For the Int and UnsignedInt array types, we need to see whether
1008 // the value can be represented in a Smi. If not, we need to convert 999 // the value can be represented in a Smi. If not, we need to convert
1009 // it to a HeapNumber. 1000 // it to a HeapNumber.
1010 Label box_int; 1001 Label box_int;
1011 __ cmp(r0, Operand(0xC0000000)); 1002 __ cmp(value, Operand(0xC0000000));
1012 __ b(mi, &box_int); 1003 __ b(mi, &box_int);
1013 __ mov(r0, Operand(r0, LSL, kSmiTagSize)); 1004 // Tag integer as smi and return it.
1005 __ mov(r0, Operand(value, LSL, kSmiTagSize));
1014 __ Ret(); 1006 __ Ret();
1015 1007
1016 __ bind(&box_int); 1008 __ bind(&box_int);
1017 1009 // Allocate a HeapNumber for the result and perform int-to-double
1018 __ mov(r1, r0); 1010 // conversion. Use r0 for result as key is not needed any more.
1019 // Allocate a HeapNumber for the int and perform int-to-double
1020 // conversion.
1021 __ AllocateHeapNumber(r0, r3, r4, &slow); 1011 __ AllocateHeapNumber(r0, r3, r4, &slow);
1022 1012
1023 if (CpuFeatures::IsSupported(VFP3)) { 1013 if (CpuFeatures::IsSupported(VFP3)) {
1024 CpuFeatures::Scope scope(VFP3); 1014 CpuFeatures::Scope scope(VFP3);
1025 __ vmov(s0, r1); 1015 __ vmov(s0, value);
1026 __ vcvt_f64_s32(d0, s0); 1016 __ vcvt_f64_s32(d0, s0);
1027 __ sub(r1, r0, Operand(kHeapObjectTag)); 1017 __ sub(r3, r0, Operand(kHeapObjectTag));
1028 __ vstr(d0, r1, HeapNumber::kValueOffset); 1018 __ vstr(d0, r3, HeapNumber::kValueOffset);
1029 __ Ret(); 1019 __ Ret();
1030 } else { 1020 } else {
1031 WriteInt32ToHeapNumberStub stub(r1, r0, r3); 1021 WriteInt32ToHeapNumberStub stub(value, r0, r3);
1032 __ TailCallStub(&stub); 1022 __ TailCallStub(&stub);
1033 } 1023 }
1034 } else if (array_type == kExternalUnsignedIntArray) { 1024 } else if (array_type == kExternalUnsignedIntArray) {
1035 // The test is different for unsigned int values. Since we need 1025 // The test is different for unsigned int values. Since we need
1036 // the value to be in the range of a positive smi, we can't 1026 // the value to be in the range of a positive smi, we can't
1037 // handle either of the top two bits being set in the value. 1027 // handle either of the top two bits being set in the value.
1038 if (CpuFeatures::IsSupported(VFP3)) { 1028 if (CpuFeatures::IsSupported(VFP3)) {
1039 CpuFeatures::Scope scope(VFP3); 1029 CpuFeatures::Scope scope(VFP3);
1040 Label box_int, done; 1030 Label box_int, done;
1041 __ tst(r0, Operand(0xC0000000)); 1031 __ tst(value, Operand(0xC0000000));
1042 __ b(ne, &box_int); 1032 __ b(ne, &box_int);
1043 1033 // Tag integer as smi and return it.
1044 __ mov(r0, Operand(r0, LSL, kSmiTagSize)); 1034 __ mov(r0, Operand(value, LSL, kSmiTagSize));
1045 __ Ret(); 1035 __ Ret();
1046 1036
1047 __ bind(&box_int); 1037 __ bind(&box_int);
1048 __ vmov(s0, r0); 1038 __ vmov(s0, value);
1049 __ AllocateHeapNumber(r0, r1, r2, &slow); 1039 // Allocate a HeapNumber for the result and perform int-to-double
1040 // conversion. Don't use r0 and r1 as AllocateHeapNumber clobbers all
1041 // registers - also when jumping due to exhausted young space.
1042 __ AllocateHeapNumber(r2, r3, r4, &slow);
1050 1043
1051 __ vcvt_f64_u32(d0, s0); 1044 __ vcvt_f64_u32(d0, s0);
1052 __ sub(r1, r0, Operand(kHeapObjectTag)); 1045 __ sub(r1, r2, Operand(kHeapObjectTag));
1053 __ vstr(d0, r1, HeapNumber::kValueOffset); 1046 __ vstr(d0, r1, HeapNumber::kValueOffset);
1047
1048 __ mov(r0, r2);
1054 __ Ret(); 1049 __ Ret();
1055 } else { 1050 } else {
1056 // Check whether unsigned integer fits into smi. 1051 // Check whether unsigned integer fits into smi.
1057 Label box_int_0, box_int_1, done; 1052 Label box_int_0, box_int_1, done;
1058 __ tst(r0, Operand(0x80000000)); 1053 __ tst(value, Operand(0x80000000));
1059 __ b(ne, &box_int_0); 1054 __ b(ne, &box_int_0);
1060 __ tst(r0, Operand(0x40000000)); 1055 __ tst(value, Operand(0x40000000));
1061 __ b(ne, &box_int_1); 1056 __ b(ne, &box_int_1);
1057 // Tag integer as smi and return it.
1058 __ mov(r0, Operand(value, LSL, kSmiTagSize));
1059 __ Ret();
1062 1060
1063 // Tag integer as smi and return it. 1061 Register hiword = value; // r2.
1064 __ mov(r0, Operand(r0, LSL, kSmiTagSize)); 1062 Register loword = r3;
1065 __ Ret();
1066 1063
1067 __ bind(&box_int_0); 1064 __ bind(&box_int_0);
1068 // Integer does not have leading zeros. 1065 // Integer does not have leading zeros.
1069 GenerateUInt2Double(masm, r0, r1, r2, 0); 1066 GenerateUInt2Double(masm, hiword, loword, r4, 0);
1070 __ b(&done); 1067 __ b(&done);
1071 1068
1072 __ bind(&box_int_1); 1069 __ bind(&box_int_1);
1073 // Integer has one leading zero. 1070 // Integer has one leading zero.
1074 GenerateUInt2Double(masm, r0, r1, r2, 1); 1071 GenerateUInt2Double(masm, hiword, loword, r4, 1);
1072
1075 1073
1076 __ bind(&done); 1074 __ bind(&done);
1077 // Integer was converted to double in registers r0:r1. 1075 // Integer was converted to double in registers hiword:loword.
1078 // Wrap it into a HeapNumber. 1076 // Wrap it into a HeapNumber. Don't use r0 and r1 as AllocateHeapNumber
1079 __ AllocateHeapNumber(r2, r3, r5, &slow); 1077 // clobbers all registers - also when jumping due to exhausted young
1078 // space.
1079 __ AllocateHeapNumber(r4, r5, r6, &slow);
1080 1080
1081 __ str(r0, FieldMemOperand(r2, HeapNumber::kExponentOffset)); 1081 __ str(hiword, FieldMemOperand(r4, HeapNumber::kExponentOffset));
1082 __ str(r1, FieldMemOperand(r2, HeapNumber::kMantissaOffset)); 1082 __ str(loword, FieldMemOperand(r4, HeapNumber::kMantissaOffset));
1083 1083
1084 __ mov(r0, r2); 1084 __ mov(r0, r4);
1085
1086 __ Ret(); 1085 __ Ret();
1087 } 1086 }
1088 } else if (array_type == kExternalFloatArray) { 1087 } else if (array_type == kExternalFloatArray) {
1089 // For the floating-point array type, we need to always allocate a 1088 // For the floating-point array type, we need to always allocate a
1090 // HeapNumber. 1089 // HeapNumber.
1091 if (CpuFeatures::IsSupported(VFP3)) { 1090 if (CpuFeatures::IsSupported(VFP3)) {
1092 CpuFeatures::Scope scope(VFP3); 1091 CpuFeatures::Scope scope(VFP3);
1093 __ AllocateHeapNumber(r0, r1, r2, &slow); 1092 // Allocate a HeapNumber for the result. Don't use r0 and r1 as
1093 // AllocateHeapNumber clobbers all registers - also when jumping due to
1094 // exhausted young space.
1095 __ AllocateHeapNumber(r2, r3, r4, &slow);
1094 __ vcvt_f64_f32(d0, s0); 1096 __ vcvt_f64_f32(d0, s0);
1095 __ sub(r1, r0, Operand(kHeapObjectTag)); 1097 __ sub(r1, r2, Operand(kHeapObjectTag));
1096 __ vstr(d0, r1, HeapNumber::kValueOffset); 1098 __ vstr(d0, r1, HeapNumber::kValueOffset);
1099
1100 __ mov(r0, r2);
1097 __ Ret(); 1101 __ Ret();
1098 } else { 1102 } else {
1099 __ AllocateHeapNumber(r3, r1, r2, &slow); 1103 // Allocate a HeapNumber for the result. Don't use r0 and r1 as
1104 // AllocateHeapNumber clobbers all registers - also when jumping due to
1105 // exhausted young space.
1106 __ AllocateHeapNumber(r3, r4, r5, &slow);
1100 // VFP is not available, do manual single to double conversion. 1107 // VFP is not available, do manual single to double conversion.
1101 1108
1102 // r0: floating point value (binary32) 1109 // r2: floating point value (binary32)
1110 // r3: heap number for result
1103 1111
1104 // Extract mantissa to r1. 1112 // Extract mantissa to r0. OK to clobber r0 now as there are no jumps to
1105 __ and_(r1, r0, Operand(kBinary32MantissaMask)); 1113 // the slow case from here.
1114 __ and_(r0, value, Operand(kBinary32MantissaMask));
1106 1115
1107 // Extract exponent to r2. 1116 // Extract exponent to r1. OK to clobber r1 now as there are no jumps to
1108 __ mov(r2, Operand(r0, LSR, kBinary32MantissaBits)); 1117 // the slow case from here.
1109 __ and_(r2, r2, Operand(kBinary32ExponentMask >> kBinary32MantissaBits)); 1118 __ mov(r1, Operand(value, LSR, kBinary32MantissaBits));
1119 __ and_(r1, r1, Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
1110 1120
1111 Label exponent_rebiased; 1121 Label exponent_rebiased;
1112 __ teq(r2, Operand(0x00)); 1122 __ teq(r1, Operand(0x00));
1113 __ b(eq, &exponent_rebiased); 1123 __ b(eq, &exponent_rebiased);
1114 1124
1115 __ teq(r2, Operand(0xff)); 1125 __ teq(r1, Operand(0xff));
1116 __ mov(r2, Operand(0x7ff), LeaveCC, eq); 1126 __ mov(r1, Operand(0x7ff), LeaveCC, eq);
1117 __ b(eq, &exponent_rebiased); 1127 __ b(eq, &exponent_rebiased);
1118 1128
1119 // Rebias exponent. 1129 // Rebias exponent.
1120 __ add(r2, 1130 __ add(r1,
1121 r2, 1131 r1,
1122 Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias)); 1132 Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));
1123 1133
1124 __ bind(&exponent_rebiased); 1134 __ bind(&exponent_rebiased);
1125 __ and_(r0, r0, Operand(kBinary32SignMask)); 1135 __ and_(r2, value, Operand(kBinary32SignMask));
1126 __ orr(r0, r0, Operand(r2, LSL, HeapNumber::kMantissaBitsInTopWord)); 1136 value = no_reg;
1137 __ orr(r2, r2, Operand(r1, LSL, HeapNumber::kMantissaBitsInTopWord));
1127 1138
1128 // Shift mantissa. 1139 // Shift mantissa.
1129 static const int kMantissaShiftForHiWord = 1140 static const int kMantissaShiftForHiWord =
1130 kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord; 1141 kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
1131 1142
1132 static const int kMantissaShiftForLoWord = 1143 static const int kMantissaShiftForLoWord =
1133 kBitsPerInt - kMantissaShiftForHiWord; 1144 kBitsPerInt - kMantissaShiftForHiWord;
1134 1145
1135 __ orr(r0, r0, Operand(r1, LSR, kMantissaShiftForHiWord)); 1146 __ orr(r2, r2, Operand(r0, LSR, kMantissaShiftForHiWord));
1136 __ mov(r1, Operand(r1, LSL, kMantissaShiftForLoWord)); 1147 __ mov(r0, Operand(r0, LSL, kMantissaShiftForLoWord));
1137 1148
1138 __ str(r0, FieldMemOperand(r3, HeapNumber::kExponentOffset)); 1149 __ str(r2, FieldMemOperand(r3, HeapNumber::kExponentOffset));
1139 __ str(r1, FieldMemOperand(r3, HeapNumber::kMantissaOffset)); 1150 __ str(r0, FieldMemOperand(r3, HeapNumber::kMantissaOffset));
1151
1140 __ mov(r0, r3); 1152 __ mov(r0, r3);
1141 __ Ret(); 1153 __ Ret();
1142 } 1154 }
1143 1155
1144 } else { 1156 } else {
1145 __ mov(r0, Operand(r0, LSL, kSmiTagSize)); 1157 // Tag integer as smi and return it.
1158 __ mov(r0, Operand(value, LSL, kSmiTagSize));
1146 __ Ret(); 1159 __ Ret();
1147 } 1160 }
1148 1161
1149 // Slow case: Load name and receiver from stack and jump to runtime. 1162 // Slow case, key and receiver still in r0 and r1.
1150 __ bind(&slow); 1163 __ bind(&slow);
1151 __ IncrementCounter(&Counters::keyed_load_external_array_slow, 1, r0, r1); 1164 __ IncrementCounter(&Counters::keyed_load_external_array_slow, 1, r2, r3);
1152 __ ldr(r0, MemOperand(sp, 0));
1153 GenerateRuntimeGetProperty(masm); 1165 GenerateRuntimeGetProperty(masm);
1154 } 1166 }
1155 1167
1156 1168
1157 void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) { 1169 void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
1158 // ---------- S t a t e -------------- 1170 // ---------- S t a t e --------------
1159 // -- lr : return address 1171 // -- lr : return address
1160 // -- r0 : key 1172 // -- r0 : key
1161 // -- sp[0] : key 1173 // -- r1 : receiver
1162 // -- sp[4] : receiver
1163 // ----------------------------------- 1174 // -----------------------------------
1164 Label slow; 1175 Label slow;
1165 1176
1166 // Get the object from the stack.
1167 __ ldr(r1, MemOperand(sp, kPointerSize));
1168
1169 // Check that the receiver isn't a smi. 1177 // Check that the receiver isn't a smi.
1170 __ BranchOnSmi(r1, &slow); 1178 __ BranchOnSmi(r1, &slow);
1171 1179
1172 // Check that the key is a smi. 1180 // Check that the key is a smi.
1173 __ BranchOnNotSmi(r0, &slow); 1181 __ BranchOnNotSmi(r0, &slow);
1174 1182
1175 // Get the map of the receiver. 1183 // Get the map of the receiver.
1176 __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset)); 1184 __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
1177 1185
1178 // Check that it has indexed interceptor and access checks 1186 // Check that it has indexed interceptor and access checks
(...skipping 645 matching lines...) Expand 10 before | Expand all | Expand 10 after
1824 __ bind(&miss); 1832 __ bind(&miss);
1825 1833
1826 GenerateMiss(masm); 1834 GenerateMiss(masm);
1827 } 1835 }
1828 1836
1829 1837
1830 #undef __ 1838 #undef __
1831 1839
1832 1840
1833 } } // namespace v8::internal 1841 } } // namespace v8::internal
OLDNEW
« no previous file with comments | « src/arm/full-codegen-arm.cc ('k') | src/arm/macro-assembler-arm.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698