Chromium Code Reviews

Side by Side Diff: src/arm/codegen-arm.cc

Issue 155047: ARM improvements to constant div, mod and mul.... (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: Created 11 years, 5 months ago
1 // Copyright 2006-2009 the V8 project authors. All rights reserved. 1 // Copyright 2006-2009 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 32 matching lines...)
43 43
44 static void EmitIdenticalObjectComparison(MacroAssembler* masm, 44 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
45 Label* slow, 45 Label* slow,
46 Condition cc); 46 Condition cc);
47 static void EmitSmiNonsmiComparison(MacroAssembler* masm, 47 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
48 Label* rhs_not_nan, 48 Label* rhs_not_nan,
49 Label* slow, 49 Label* slow,
50 bool strict); 50 bool strict);
51 static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc); 51 static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc);
52 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm); 52 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm);
53 static void MultiplyByKnownInt(MacroAssembler* masm,
54 Register source,
55 Register destination,
56 int known_int);
57 static bool IsEasyToMultiplyBy(int x);
53 58
54 59
55 60
56 // ------------------------------------------------------------------------- 61 // -------------------------------------------------------------------------
57 // Platform-specific DeferredCode functions. 62 // Platform-specific DeferredCode functions.
58 63
59 void DeferredCode::SaveRegisters() { 64 void DeferredCode::SaveRegisters() {
60 for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) { 65 for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
61 int action = registers_[i]; 66 int action = registers_[i];
62 if (action == kPush) { 67 if (action == kPush) {
(...skipping 625 matching lines...)
688 // Convert the result (r0) to a condition code. 693 // Convert the result (r0) to a condition code.
689 __ cmp(r0, Operand(Factory::false_value())); 694 __ cmp(r0, Operand(Factory::false_value()));
690 695
691 cc_reg_ = ne; 696 cc_reg_ = ne;
692 } 697 }
693 698
694 699
695 class GenericBinaryOpStub : public CodeStub { 700 class GenericBinaryOpStub : public CodeStub {
696 public: 701 public:
697 GenericBinaryOpStub(Token::Value op, 702 GenericBinaryOpStub(Token::Value op,
698 OverwriteMode mode) 703 OverwriteMode mode,
699 : op_(op), mode_(mode) { } 704 int known_rhs = CodeGenerator::kUnknownIntValue)
705 : op_(op),
706 mode_(mode),
707 known_rhs_(known_rhs),
708 specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op, known_rhs)) { }
700 709
701 private: 710 private:
702 Token::Value op_; 711 Token::Value op_;
703 OverwriteMode mode_; 712 OverwriteMode mode_;
713 int known_rhs_;
714 bool specialized_on_rhs_;
715
716 static const int kMaxKnownRhs = 0x40000000;
704 717
705 // Minor key encoding in 16 bits. 718 // Minor key encoding in 16 bits.
706 class ModeBits: public BitField<OverwriteMode, 0, 2> {}; 719 class ModeBits: public BitField<OverwriteMode, 0, 2> {};
707 class OpBits: public BitField<Token::Value, 2, 14> {}; 720 class OpBits: public BitField<Token::Value, 2, 6> {};
721 class KnownIntBits: public BitField<int, 8, 8> {};
708 722
709 Major MajorKey() { return GenericBinaryOp; } 723 Major MajorKey() { return GenericBinaryOp; }
710 int MinorKey() { 724 int MinorKey() {
711 // Encode the parameters in a unique 16 bit value. 725 // Encode the parameters in a unique 16 bit value.
712 return OpBits::encode(op_) 726 return OpBits::encode(op_)
713 | ModeBits::encode(mode_); 727 | ModeBits::encode(mode_)
728 | KnownIntBits::encode(MinorKeyForKnownInt());
714 } 729 }
715 730
716 void Generate(MacroAssembler* masm); 731 void Generate(MacroAssembler* masm);
717 void HandleNonSmiBitwiseOp(MacroAssembler* masm); 732 void HandleNonSmiBitwiseOp(MacroAssembler* masm);
718 733
734 static bool RhsIsOneWeWantToOptimizeFor(Token::Value op, int known_rhs) {
735 if (known_rhs == CodeGenerator::kUnknownIntValue) return false;
736 if (op == Token::DIV) return known_rhs >= 2 && known_rhs <= 3;
737 if (op == Token::MOD) {
738 if (known_rhs <= 1) return false;
739 if (known_rhs <= 10) return true;
740 if (known_rhs <= kMaxKnownRhs && IsPowerOf2(known_rhs)) return true;
741 return false;
742 }
743 return false;
744 }
745
746 int MinorKeyForKnownInt() {
747 if (!specialized_on_rhs_) return 0;
748 if (known_rhs_ <= 10) return known_rhs_ + 1;
749 ASSERT(IsPowerOf2(known_rhs_));
750 int key = 12;
751 int d = known_rhs_;
752 while ((d & 1) == 0) {
753 key++;
754 d >>= 1;
755 }
756 return key;
757 }
758
719 const char* GetName() { 759 const char* GetName() {
720 switch (op_) { 760 switch (op_) {
721 case Token::ADD: return "GenericBinaryOpStub_ADD"; 761 case Token::ADD: return "GenericBinaryOpStub_ADD";
722 case Token::SUB: return "GenericBinaryOpStub_SUB"; 762 case Token::SUB: return "GenericBinaryOpStub_SUB";
723 case Token::MUL: return "GenericBinaryOpStub_MUL"; 763 case Token::MUL: return "GenericBinaryOpStub_MUL";
724 case Token::DIV: return "GenericBinaryOpStub_DIV"; 764 case Token::DIV: return "GenericBinaryOpStub_DIV";
765 case Token::MOD: return "GenericBinaryOpStub_MOD";
725 case Token::BIT_OR: return "GenericBinaryOpStub_BIT_OR"; 766 case Token::BIT_OR: return "GenericBinaryOpStub_BIT_OR";
726 case Token::BIT_AND: return "GenericBinaryOpStub_BIT_AND"; 767 case Token::BIT_AND: return "GenericBinaryOpStub_BIT_AND";
727 case Token::BIT_XOR: return "GenericBinaryOpStub_BIT_XOR"; 768 case Token::BIT_XOR: return "GenericBinaryOpStub_BIT_XOR";
728 case Token::SAR: return "GenericBinaryOpStub_SAR"; 769 case Token::SAR: return "GenericBinaryOpStub_SAR";
729 case Token::SHL: return "GenericBinaryOpStub_SHL"; 770 case Token::SHL: return "GenericBinaryOpStub_SHL";
730 case Token::SHR: return "GenericBinaryOpStub_SHR"; 771 case Token::SHR: return "GenericBinaryOpStub_SHR";
731 default: return "GenericBinaryOpStub"; 772 default: return "GenericBinaryOpStub";
732 } 773 }
733 } 774 }
734 775
735 #ifdef DEBUG 776 #ifdef DEBUG
736 void Print() { PrintF("GenericBinaryOpStub (%s)\n", Token::String(op_)); } 777 void Print() {
778 if (specialized_on_rhs_) {
779 PrintF("GenericBinaryOpStub (%s)\n", Token::String(op_));
William Hesse 2009/07/03 11:27:01 Why not make this message more informative, by including the known right hand side in it?
Erik Corry 2009/07/03 17:43:38 Oops, that is what I was trying to do, but I got the two cases swapped.
780 } else {
781 PrintF("GenericBinaryOpStub (%s by %d)\n",
782 Token::String(op_),
783 known_rhs_);
784 }
785 }
737 #endif 786 #endif
738 }; 787 };
739 788
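A worked example of the minor key encoding above: an unspecialized stub encodes 0 in KnownIntBits; a known_rhs_ of 2 through 10 encodes as known_rhs_ + 1 (3 through 11); and a power of two above 10 encodes as 12 plus its number of trailing zero bits, so 16 -> 16, 32 -> 17, and the largest allowed value 0x40000000 -> 42. Everything fits comfortably in the 8-bit KnownIntBits field.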
740 789
741 void CodeGenerator::GenericBinaryOperation(Token::Value op, 790 void CodeGenerator::GenericBinaryOperation(Token::Value op,
742 OverwriteMode overwrite_mode) { 791 OverwriteMode overwrite_mode,
792 int known_rhs) {
William Hesse 2009/07/03 11:27:01 Would "constant_rhs" be a better name than "known_rhs"?
Erik Corry 2009/07/03 17:43:38 Probably
743 VirtualFrame::SpilledScope spilled_scope; 793 VirtualFrame::SpilledScope spilled_scope;
744 // sp[0] : y 794 // sp[0] : y
745 // sp[1] : x 795 // sp[1] : x
746 // result : r0 796 // result : r0
747 797
748 // Stub is entered with a call: 'return address' is in lr. 798 // Stub is entered with a call: 'return address' is in lr.
749 switch (op) { 799 switch (op) {
750 case Token::ADD: // fall through. 800 case Token::ADD: // fall through.
751 case Token::SUB: // fall through. 801 case Token::SUB: // fall through.
752 case Token::MUL: 802 case Token::MUL:
803 case Token::DIV:
804 case Token::MOD:
753 case Token::BIT_OR: 805 case Token::BIT_OR:
754 case Token::BIT_AND: 806 case Token::BIT_AND:
755 case Token::BIT_XOR: 807 case Token::BIT_XOR:
756 case Token::SHL: 808 case Token::SHL:
757 case Token::SHR: 809 case Token::SHR:
758 case Token::SAR: { 810 case Token::SAR: {
759 frame_->EmitPop(r0); // r0 : y 811 frame_->EmitPop(r0); // r0 : y
760 frame_->EmitPop(r1); // r1 : x 812 frame_->EmitPop(r1); // r1 : x
761 GenericBinaryOpStub stub(op, overwrite_mode); 813 GenericBinaryOpStub stub(op, overwrite_mode, known_rhs);
762 frame_->CallStub(&stub, 0); 814 frame_->CallStub(&stub, 0);
763 break; 815 break;
764 } 816 }
765 817
766 case Token::DIV: {
767 Result arg_count = allocator_->Allocate(r0);
768 ASSERT(arg_count.is_valid());
769 __ mov(arg_count.reg(), Operand(1));
770 frame_->InvokeBuiltin(Builtins::DIV, CALL_JS, &arg_count, 2);
771 break;
772 }
773
774 case Token::MOD: {
775 Result arg_count = allocator_->Allocate(r0);
776 ASSERT(arg_count.is_valid());
777 __ mov(arg_count.reg(), Operand(1));
778 frame_->InvokeBuiltin(Builtins::MOD, CALL_JS, &arg_count, 2);
779 break;
780 }
781
782 case Token::COMMA: 818 case Token::COMMA:
783 frame_->EmitPop(r0); 819 frame_->EmitPop(r0);
784 // simply discard left value 820 // simply discard left value
785 frame_->Drop(); 821 frame_->Drop();
786 break; 822 break;
787 823
788 default: 824 default:
789 // Other cases should have been handled before this point. 825 // Other cases should have been handled before this point.
790 UNREACHABLE(); 826 UNREACHABLE();
791 break; 827 break;
(...skipping 43 matching lines...)
835 if (reversed_) { 871 if (reversed_) {
836 __ rsb(r0, r0, Operand(Smi::FromInt(value_))); 872 __ rsb(r0, r0, Operand(Smi::FromInt(value_)));
837 __ mov(r1, Operand(Smi::FromInt(value_))); 873 __ mov(r1, Operand(Smi::FromInt(value_)));
838 } else { 874 } else {
839 __ add(r1, r0, Operand(Smi::FromInt(value_))); 875 __ add(r1, r0, Operand(Smi::FromInt(value_)));
840 __ mov(r0, Operand(Smi::FromInt(value_))); 876 __ mov(r0, Operand(Smi::FromInt(value_)));
841 } 877 }
842 break; 878 break;
843 } 879 }
844 880
881 // For these operations there is no optimistic operation that needs to be
882 // reverted.
883 case Token::MUL:
884 case Token::MOD:
845 case Token::BIT_OR: 885 case Token::BIT_OR:
846 case Token::BIT_XOR: 886 case Token::BIT_XOR:
847 case Token::BIT_AND: { 887 case Token::BIT_AND: {
848 if (reversed_) { 888 if (reversed_) {
849 __ mov(r1, Operand(Smi::FromInt(value_))); 889 __ mov(r1, Operand(Smi::FromInt(value_)));
850 } else { 890 } else {
851 __ mov(r1, Operand(r0)); 891 __ mov(r1, Operand(r0));
852 __ mov(r0, Operand(Smi::FromInt(value_))); 892 __ mov(r0, Operand(Smi::FromInt(value_)));
853 } 893 }
854 break; 894 break;
(...skipping 10 matching lines...)
865 } 905 }
866 break; 906 break;
867 } 907 }
868 908
869 default: 909 default:
870 // Other cases should have been handled before this point. 910 // Other cases should have been handled before this point.
871 UNREACHABLE(); 911 UNREACHABLE();
872 break; 912 break;
873 } 913 }
874 914
875 GenericBinaryOpStub stub(op_, overwrite_mode_); 915 GenericBinaryOpStub stub(op_, overwrite_mode_, value_);
876 __ CallStub(&stub); 916 __ CallStub(&stub);
877 } 917 }
878 918
879 919
920 static bool PopCountLessThanEqual2(unsigned int x) {
921 int popcnt = 0;
922 while (x != 0) {
923 if ((x & 1) != 0) {
924 popcnt++;
925 if (popcnt > 2) return false;
William Hesse 2009/07/03 11:27:01 The whole function can be replaced by: x &= x - 1; x &= x - 1; return x == 0;
Erik Corry 2009/07/03 17:43:38 Cool
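For reference, a minimal standalone sketch of the suggested rewrite (illustrative, not part of the patch): x &= x - 1 clears the lowest set bit, so doing it twice leaves zero exactly when the input had at most two bits set.

    static bool PopCountLessThanEqual2(unsigned int x) {
      x &= x - 1;     // Clear the lowest set bit (leaves zero unchanged).
      x &= x - 1;     // Clear the next lowest set bit.
      return x == 0;  // Zero iff at most two bits were set originally.
    }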
926 }
927 x >>= 1;
928 }
929 return true;
930 }
931
932
933 // Return an int that is <= x that can be encoded as an 8 bit constant shifted
934 // left by an even number of bits.
935 static int ArmEncodableLessThan(unsigned int x) {
936 int shift_distance = 0;
937 while (x > 0xffff) {
938 x >>= 8;
939 shift_distance += 8;
940 }
941 while (x > 0xff) {
942 x >>= 2;
943 shift_distance += 2;
944 }
945 return x << shift_distance;
946 }
947
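Worked example: for x = 0x12345 the first loop runs once (x becomes 0x123, shift 8) and the second loop runs once (x becomes 0x48, shift 10), giving 0x48 << 10 = 0x12000: no larger than the input, and an 8-bit value shifted left by an even amount, which is what ARM's immediate-operand encoding can represent.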
948
949 // Returns the index of the lowest bit set.
950 static int BitPosition(unsigned x) {
951 int bit_posn = 0;
952 while ((x & 0xf) == 0) {
953 bit_posn += 4;
954 x >>= 4;
955 }
956 while ((x & 1) == 0) {
957 bit_posn++;
958 x >>= 1;
959 }
960 return bit_posn;
961 }
962
963
880 void CodeGenerator::SmiOperation(Token::Value op, 964 void CodeGenerator::SmiOperation(Token::Value op,
881 Handle<Object> value, 965 Handle<Object> value,
882 bool reversed, 966 bool reversed,
883 OverwriteMode mode) { 967 OverwriteMode mode) {
884 VirtualFrame::SpilledScope spilled_scope; 968 VirtualFrame::SpilledScope spilled_scope;
885 // NOTE: This is an attempt to inline (a bit) more of the code for 969 // NOTE: This is an attempt to inline (a bit) more of the code for
886 // some possible smi operations (like + and -) when (at least) one 970 // some possible smi operations (like + and -) when (at least) one
887 // of the operands is a literal smi. With this optimization, the 971 // of the operands is a literal smi. With this optimization, the
888 // performance of the system is increased by ~15%, and the generated 972 // performance of the system is increased by ~15%, and the generated
889 // code size is increased by ~1% (measured on a combination of 973 // code size is increased by ~1% (measured on a combination of
890 // different benchmarks). 974 // different benchmarks).
891 975
892 // sp[0] : operand 976 // sp[0] : operand
893 977
894 int int_value = Smi::cast(*value)->value(); 978 int int_value = Smi::cast(*value)->value();
895 979
896 JumpTarget exit; 980 JumpTarget exit;
897 frame_->EmitPop(r0); 981 frame_->EmitPop(r0);
898 982
983 bool something_to_inline = true;
899 switch (op) { 984 switch (op) {
900 case Token::ADD: { 985 case Token::ADD: {
901 DeferredCode* deferred = 986 DeferredCode* deferred =
902 new DeferredInlineSmiOperation(op, int_value, reversed, mode); 987 new DeferredInlineSmiOperation(op, int_value, reversed, mode);
903 988
904 __ add(r0, r0, Operand(value), SetCC); 989 __ add(r0, r0, Operand(value), SetCC);
905 deferred->Branch(vs); 990 deferred->Branch(vs);
906 __ tst(r0, Operand(kSmiTagMask)); 991 __ tst(r0, Operand(kSmiTagMask));
907 deferred->Branch(ne); 992 deferred->Branch(ne);
908 deferred->BindExit(); 993 deferred->BindExit();
909 break; 994 break;
910 } 995 }
911 996
912 case Token::SUB: { 997 case Token::SUB: {
913 DeferredCode* deferred = 998 DeferredCode* deferred =
914 new DeferredInlineSmiOperation(op, int_value, reversed, mode); 999 new DeferredInlineSmiOperation(op, int_value, reversed, mode);
915 1000
916 if (reversed) { 1001 if (reversed) {
917 __ rsb(r0, r0, Operand(value), SetCC); 1002 __ rsb(r0, r0, Operand(value), SetCC);
918 } else { 1003 } else {
919 __ sub(r0, r0, Operand(value), SetCC); 1004 __ sub(r0, r0, Operand(value), SetCC);
920 } 1005 }
921 deferred->Branch(vs); 1006 deferred->Branch(vs);
922 __ tst(r0, Operand(kSmiTagMask)); 1007 __ tst(r0, Operand(kSmiTagMask));
923 deferred->Branch(ne); 1008 deferred->Branch(ne);
924 deferred->BindExit(); 1009 deferred->BindExit();
925 break; 1010 break;
926 } 1011 }
927 1012
1013
928 case Token::BIT_OR: 1014 case Token::BIT_OR:
929 case Token::BIT_XOR: 1015 case Token::BIT_XOR:
930 case Token::BIT_AND: { 1016 case Token::BIT_AND: {
931 DeferredCode* deferred = 1017 DeferredCode* deferred =
932 new DeferredInlineSmiOperation(op, int_value, reversed, mode); 1018 new DeferredInlineSmiOperation(op, int_value, reversed, mode);
933 __ tst(r0, Operand(kSmiTagMask)); 1019 __ tst(r0, Operand(kSmiTagMask));
934 deferred->Branch(ne); 1020 deferred->Branch(ne);
935 switch (op) { 1021 switch (op) {
936 case Token::BIT_OR: __ orr(r0, r0, Operand(value)); break; 1022 case Token::BIT_OR: __ orr(r0, r0, Operand(value)); break;
937 case Token::BIT_XOR: __ eor(r0, r0, Operand(value)); break; 1023 case Token::BIT_XOR: __ eor(r0, r0, Operand(value)); break;
938 case Token::BIT_AND: __ and_(r0, r0, Operand(value)); break; 1024 case Token::BIT_AND: __ and_(r0, r0, Operand(value)); break;
939 default: UNREACHABLE(); 1025 default: UNREACHABLE();
940 } 1026 }
941 deferred->BindExit(); 1027 deferred->BindExit();
942 break; 1028 break;
943 } 1029 }
944 1030
945 case Token::SHL: 1031 case Token::SHL:
946 case Token::SHR: 1032 case Token::SHR:
947 case Token::SAR: { 1033 case Token::SAR: {
948 if (reversed) { 1034 if (reversed) {
949 __ mov(ip, Operand(value)); 1035 something_to_inline = false;
950 frame_->EmitPush(ip); 1036 break;
951 frame_->EmitPush(r0); 1037 }
952 GenericBinaryOperation(op, mode); 1038 int shift_value = int_value & 0x1f; // least significant 5 bits
1039 DeferredCode* deferred =
1040 new DeferredInlineSmiOperation(op, shift_value, false, mode);
1041 __ tst(r0, Operand(kSmiTagMask));
1042 deferred->Branch(ne);
1043 __ mov(r2, Operand(r0, ASR, kSmiTagSize)); // remove tags
1044 switch (op) {
1045 case Token::SHL: {
William Hesse 2009/07/03 11:27:01 Why no check of shift_value with 0 for SHL? If it is zero, this just emits a useless mov.
Erik Corry 2009/07/03 17:43:38 Shift left by zero is the way reg-reg mov is encoded on ARM, so it is harmless; LSR and ASR by immediate zero mean a shift by 32, which is why they need the check.
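Background for the thread above: ARM's 5-bit immediate shift field cannot encode 32 directly, so LSR and ASR reuse an immediate of 0 to mean a 32-bit shift, while LSL #0 is simply how a plain register move is encoded. A tiny sketch of the decode rule (illustrative only, not V8 code):

    // How ARM interprets an immediate shift amount, by shift type.
    static unsigned DecodeImmShiftAmount(bool is_lsl, unsigned imm5) {
      if (is_lsl) return imm5;       // LSL #0 is just a register move.
      return imm5 == 0 ? 32 : imm5;  // LSR/ASR #0 encodes a 32-bit shift.
    }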
1046 __ mov(r2, Operand(r2, LSL, shift_value));
1047 // check that the *unsigned* result fits in a smi
1048 __ add(r3, r2, Operand(0x40000000), SetCC);
1049 deferred->Branch(mi);
1050 break;
1051 }
1052 case Token::SHR: {
1053 // LSR by immediate 0 means shifting 32 bits.
1054 if (shift_value != 0) {
1055 __ mov(r2, Operand(r2, LSR, shift_value));
1056 }
1057 // check that the *unsigned* result fits in a smi
1058 // neither of the two high-order bits can be set:
1059 // - 0x80000000: high bit would be lost when smi tagging
1060 // - 0x40000000: this number would convert to negative when
1061 // smi tagging. These two cases can only happen with shifts
1062 // by 0 or 1 when handed a valid smi.
1063 __ and_(r3, r2, Operand(0xc0000000), SetCC);
1064 deferred->Branch(ne);
1065 break;
1066 }
1067 case Token::SAR: {
1068 if (shift_value != 0) {
1069 // ASR by immediate 0 means shifting 32 bits.
1070 __ mov(r2, Operand(r2, ASR, shift_value));
1071 }
1072 break;
1073 }
1074 default: UNREACHABLE();
1075 }
1076 __ mov(r0, Operand(r2, LSL, kSmiTagSize));
1077 deferred->BindExit();
1078 break;
1079 }
953 1080
954 } else { 1081 case Token::MOD: {
955 int shift_value = int_value & 0x1f; // least significant 5 bits 1082 if (reversed || int_value < 2 || !IsPowerOf2(int_value)) {
956 DeferredCode* deferred = 1083 something_to_inline = false;
957 new DeferredInlineSmiOperation(op, shift_value, false, mode); 1084 break;
958 __ tst(r0, Operand(kSmiTagMask));
959 deferred->Branch(ne);
960 __ mov(r2, Operand(r0, ASR, kSmiTagSize)); // remove tags
961 switch (op) {
962 case Token::SHL: {
963 __ mov(r2, Operand(r2, LSL, shift_value));
964 // check that the *unsigned* result fits in a smi
965 __ add(r3, r2, Operand(0x40000000), SetCC);
966 deferred->Branch(mi);
967 break;
968 }
969 case Token::SHR: {
970 // LSR by immediate 0 means shifting 32 bits.
971 if (shift_value != 0) {
972 __ mov(r2, Operand(r2, LSR, shift_value));
973 }
974 // check that the *unsigned* result fits in a smi
975 // neither of the two high-order bits can be set:
976 // - 0x80000000: high bit would be lost when smi tagging
977 // - 0x40000000: this number would convert to negative when
978 // smi tagging. These two cases can only happen with shifts
979 // by 0 or 1 when handed a valid smi.
980 __ and_(r3, r2, Operand(0xc0000000), SetCC);
981 deferred->Branch(ne);
982 break;
983 }
984 case Token::SAR: {
985 if (shift_value != 0) {
986 // ASR by immediate 0 means shifting 32 bits.
987 __ mov(r2, Operand(r2, ASR, shift_value));
988 }
989 break;
990 }
991 default: UNREACHABLE();
992 }
993 __ mov(r0, Operand(r2, LSL, kSmiTagSize));
994 deferred->BindExit();
995 } 1085 }
1086 DeferredCode* deferred =
1087 new DeferredInlineSmiOperation(op, int_value, reversed, mode);
1088 unsigned mask = (0x80000000u | kSmiTagMask);
1089 __ tst(r0, Operand(mask));
1090 deferred->Branch(ne); // Go to deferred code on non-Smis and negative.
1091 mask = (int_value << kSmiTagSize) - 1;
1092 __ and_(r0, r0, Operand(mask));
1093 deferred->BindExit();
1094 break;
1095 }
1096
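A worked example of the inlined power-of-two modulus, assuming kSmiTagSize == 1 and kSmiTag == 0: for int_value = 8 the first mask is 0x80000001, which diverts non-Smis and negative Smis to the deferred code; the second mask is (8 << 1) - 1 = 0xf, and since a Smi stores x as x << 1, Smi(x) & 0xf is exactly Smi(x % 8) for non-negative x.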
1097 case Token::MUL: {
1098 if (!IsEasyToMultiplyBy(int_value)) {
1099 something_to_inline = false;
1100 break;
1101 }
1102 DeferredCode* deferred =
1103 new DeferredInlineSmiOperation(op, int_value, reversed, mode);
1104 unsigned max_smi_that_wont_overflow =
1105 ArmEncodableLessThan(Smi::kMaxValue / int_value);
1106 max_smi_that_wont_overflow <<= kSmiTagSize;
1107 unsigned mask = 0x80000000u;
1108 while ((mask & max_smi_that_wont_overflow) == 0) {
1109 mask |= mask >> 1;
1110 }
1111 mask |= kSmiTagMask;
1112 // This does a single mask that checks for a too high value in a
1113 // conservative way and for a non-Smi. It also filters out negative
1114 // numbers, unfortunately, but since this code is inline we prefer
1115 // brevity to comprehensiveness.
1116 __ tst(r0, Operand(mask));
1117 deferred->Branch(ne);
1118 MultiplyByKnownInt(masm_, r0, r0, int_value);
1119 deferred->BindExit();
996 break; 1120 break;
997 } 1121 }
998 1122
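Working the multiply mask through for int_value = 3: Smi::kMaxValue / 3 = 0x15555555, which ArmEncodableLessThan rounds down to 0x15400000; Smi-tagging doubles it to 0x2a800000. Growing the mask down from 0x80000000 until it overlaps that value gives 0xe0000000, so the tst uses 0xe0000001: it rejects non-Smis, negatives, and any Smi of about 2^28 or more -- conservative, as the code comment says, since anything up to 0x15555555 would really be safe.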
999 default: 1123 default:
1000 if (!reversed) { 1124 something_to_inline = false;
1001 frame_->EmitPush(r0);
1002 __ mov(r0, Operand(value));
1003 frame_->EmitPush(r0);
1004 } else {
1005 __ mov(ip, Operand(value));
1006 frame_->EmitPush(ip);
1007 frame_->EmitPush(r0);
1008 }
1009 GenericBinaryOperation(op, mode);
1010 break; 1125 break;
1011 } 1126 }
1012 1127
1128 if (!something_to_inline) {
1129 if (!reversed) {
1130 frame_->EmitPush(r0);
1131 __ mov(r0, Operand(value));
1132 frame_->EmitPush(r0);
1133 GenericBinaryOperation(op, mode, int_value);
1134 } else {
1135 __ mov(ip, Operand(value));
1136 frame_->EmitPush(ip);
1137 frame_->EmitPush(r0);
1138 GenericBinaryOperation(op, mode, kUnknownIntValue);
1139 }
1140 }
1141
1013 exit.Bind(); 1142 exit.Bind();
1014 } 1143 }
1015 1144
1016 1145
1017 void CodeGenerator::Comparison(Condition cc, 1146 void CodeGenerator::Comparison(Condition cc,
1018 Expression* left, 1147 Expression* left,
1019 Expression* right, 1148 Expression* right,
1020 bool strict) { 1149 bool strict) {
1021 if (left != NULL) LoadAndSpill(left); 1150 if (left != NULL) LoadAndSpill(left);
1022 if (right != NULL) LoadAndSpill(right); 1151 if (right != NULL) LoadAndSpill(right);
(...skipping 2278 matching lines...)
3301 __ mov(r0, Operand(Factory::undefined_value())); 3430 __ mov(r0, Operand(Factory::undefined_value()));
3302 frame_->EmitPush(r0); 3431 frame_->EmitPush(r0);
3303 } 3432 }
3304 3433
3305 3434
3306 void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) { 3435 void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
3307 VirtualFrame::SpilledScope spilled_scope; 3436 VirtualFrame::SpilledScope spilled_scope;
3308 ASSERT(args->length() == 1); 3437 ASSERT(args->length() == 1);
3309 LoadAndSpill(args->at(0)); 3438 LoadAndSpill(args->at(0));
3310 frame_->EmitPop(r0); 3439 frame_->EmitPop(r0);
3311 __ tst(r0, Operand(kSmiTagMask | 0x80000000)); 3440 __ tst(r0, Operand(kSmiTagMask | 0x80000000u));
3312 cc_reg_ = eq; 3441 cc_reg_ = eq;
3313 } 3442 }
3314 3443
3315 3444
3316 // This should generate code that performs a charCodeAt() call or returns 3445 // This should generate code that performs a charCodeAt() call or returns
3317 // undefined in order to trigger the slow case, Runtime_StringCharCodeAt. 3446 // undefined in order to trigger the slow case, Runtime_StringCharCodeAt.
3318 // It is not yet implemented on ARM, so it always goes to the slow case. 3447 // It is not yet implemented on ARM, so it always goes to the slow case.
3319 void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) { 3448 void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
3320 VirtualFrame::SpilledScope spilled_scope; 3449 VirtualFrame::SpilledScope spilled_scope;
3321 ASSERT(args->length() == 2); 3450 ASSERT(args->length() == 2);
(...skipping 1025 matching lines...)
4347 __ mov(scratch, Operand(scratch, LSL, 8), LeaveCC, eq); 4476 __ mov(scratch, Operand(scratch, LSL, 8), LeaveCC, eq);
4348 // Top 4. 4477 // Top 4.
4349 __ tst(scratch, Operand(0xf0000000)); 4478 __ tst(scratch, Operand(0xf0000000));
4350 __ add(zeros, zeros, Operand(4), LeaveCC, eq); 4479 __ add(zeros, zeros, Operand(4), LeaveCC, eq);
4351 __ mov(scratch, Operand(scratch, LSL, 4), LeaveCC, eq); 4480 __ mov(scratch, Operand(scratch, LSL, 4), LeaveCC, eq);
4352 // Top 2. 4481 // Top 2.
4353 __ tst(scratch, Operand(0xc0000000)); 4482 __ tst(scratch, Operand(0xc0000000));
4354 __ add(zeros, zeros, Operand(2), LeaveCC, eq); 4483 __ add(zeros, zeros, Operand(2), LeaveCC, eq);
4355 __ mov(scratch, Operand(scratch, LSL, 2), LeaveCC, eq); 4484 __ mov(scratch, Operand(scratch, LSL, 2), LeaveCC, eq);
4356 // Top bit. 4485 // Top bit.
4357 __ tst(scratch, Operand(0x80000000)); 4486 __ tst(scratch, Operand(0x80000000u));
4358 __ add(zeros, zeros, Operand(1), LeaveCC, eq); 4487 __ add(zeros, zeros, Operand(1), LeaveCC, eq);
4359 #endif 4488 #endif
4360 } 4489 }
4361 4490
4362 4491
4363 // Takes a Smi and converts to an IEEE 64 bit floating point value in two 4492 // Takes a Smi and converts to an IEEE 64 bit floating point value in two
4364 // registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and 4493 // registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
4365 // 52 fraction bits (20 in the first word, 32 in the second). Zeros is a 4494 // 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
4366 // scratch register. Destroys the source register. No GC occurs during this 4495 // scratch register. Destroys the source register. No GC occurs during this
4367 // stub so you don't have to set up the frame. 4496 // stub so you don't have to set up the frame.
(...skipping 131 matching lines...)
4499 }; 4628 };
4500 4629
4501 4630
4502 // See comment for class. 4631 // See comment for class.
4503 void WriteInt32ToHeapNumberStub::Generate(MacroAssembler *masm) { 4632 void WriteInt32ToHeapNumberStub::Generate(MacroAssembler *masm) {
4504 Label max_negative_int; 4633 Label max_negative_int;
4505 // the_int_ has the answer which is a signed int32 but not a Smi. 4634 // the_int_ has the answer which is a signed int32 but not a Smi.
4506 // We test for the special value that has a different exponent. This test 4635 // We test for the special value that has a different exponent. This test
4507 // has the neat side effect of setting the flags according to the sign. 4636 // has the neat side effect of setting the flags according to the sign.
4508 ASSERT(HeapNumber::kSignMask == 0x80000000u); 4637 ASSERT(HeapNumber::kSignMask == 0x80000000u);
4509 __ cmp(the_int_, Operand(0x80000000)); 4638 __ cmp(the_int_, Operand(0x80000000u));
4510 __ b(eq, &max_negative_int); 4639 __ b(eq, &max_negative_int);
4511 // Set up the correct exponent in scratch_. All non-Smi int32s have the same. 4640 // Set up the correct exponent in scratch_. All non-Smi int32s have the same.
4512 // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). 4641 // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
4513 uint32_t non_smi_exponent = 4642 uint32_t non_smi_exponent =
4514 (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift; 4643 (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
4515 __ mov(scratch_, Operand(non_smi_exponent)); 4644 __ mov(scratch_, Operand(non_smi_exponent));
4516 // Set the sign bit in scratch_ if the value was negative. 4645 // Set the sign bit in scratch_ if the value was negative.
4517 __ orr(scratch_, scratch_, Operand(HeapNumber::kSignMask), LeaveCC, cs); 4646 __ orr(scratch_, scratch_, Operand(HeapNumber::kSignMask), LeaveCC, cs);
4518 // Subtract from 0 if the value was negative. 4647 // Subtract from 0 if the value was negative.
4519 __ rsb(the_int_, the_int_, Operand(0), LeaveCC, cs); 4648 __ rsb(the_int_, the_int_, Operand(0), LeaveCC, cs);
(...skipping 793 matching lines...)
5313 break; 5442 break;
5314 case Token::SHL: 5443 case Token::SHL:
5315 __ InvokeBuiltin(Builtins::SHL, JUMP_JS); 5444 __ InvokeBuiltin(Builtins::SHL, JUMP_JS);
5316 break; 5445 break;
5317 default: 5446 default:
5318 UNREACHABLE(); 5447 UNREACHABLE();
5319 } 5448 }
5320 } 5449 }
5321 5450
5322 5451
5452 // Can we multiply by x with at most two shifts and an add?
5453 // This answers yes to all integers from 2 to 10.
5454 static bool IsEasyToMultiplyBy(int x) {
5455 if (x < 2) return false; // Avoid special cases.
5456 if (x > (Smi::kMaxValue + 1) >> 2) return false; // Almost always overflows.
5457 if (IsPowerOf2(x)) return true; // Simple shift.
5458 if (PopCountLessThanEqual2(x)) return true; // Shift and add and shift.
5459 if (IsPowerOf2(x + 1)) return true; // Patterns like 11111.
5460 return false;
5461 }
5462
5463
5464 // Can multiply by anything that IsEasyToMultiplyBy returns true for.
5465 // Source and destination may be the same register.
5466 static void MultiplyByKnownInt(MacroAssembler* masm,
5467 Register source,
5468 Register destination,
5469 int known_int) {
5470 if (IsPowerOf2(known_int)) {
5471 __ mov(destination, Operand(source, LSL, BitPosition(known_int)));
5472 } else if (PopCountLessThanEqual2(known_int)) {
5473 int first_bit = BitPosition(known_int);
5474 int second_bit = BitPosition(known_int ^ (1 << first_bit));
5475 __ add(destination, source, Operand(source, LSL, second_bit - first_bit));
5476 if (first_bit != 0) {
5477 __ mov(destination, Operand(destination, LSL, first_bit));
William Hesse 2009/07/03 11:27:01 Are you going to find out if the value overflowed?
Erik Corry 2009/07/03 17:43:38 No, the flags are not set by this routine. Comment added.
5478 }
5479 } else {
5480 ASSERT(IsPowerOf2(known_int + 1)); // Patterns like 1111.
5481 int the_bit = BitPosition(known_int + 1);
5482 __ rsb(destination, source, Operand(source, LSL, the_bit));
5483 }
5484 }
5485
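A few decompositions MultiplyByKnownInt produces, worked by hand: 8 is a power of two, so a single mov dst, src, LSL #3; 10 = 0b1010 has two set bits, giving add dst, src, src, LSL #2 (5x) followed by mov dst, dst, LSL #1 (10x); 7 = 2^3 - 1 matches the '1111' pattern, giving rsb dst, src, src, LSL #3, i.e. 8x - x. Per the thread above, none of these set the condition flags, so any overflow check must happen before the multiply.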
5486
5487 // This function (as opposed to MultiplyByKnownInt) takes the known int in
5488 // a register for the cases where it doesn't know a good trick, and may deliver
5489 // a result that needs shifting.
5490 static void MultiplyByKnownInt2(
5491 MacroAssembler* masm,
5492 Register result,
5493 Register source,
5494 Register known_int_register, // Smi tagged.
5495 int known_int,
5496 int* result_needs_shifting) { // Including Smi tag shift
William Hesse 2009/07/03 11:27:01 I would call this shift_amount or required_shift.
5497 switch (known_int) {
5498 case 3:
5499 __ add(result, source, Operand(source, LSL, 1));
5500 *result_needs_shifting = 1;
5501 break;
5502 case 5:
5503 __ add(result, source, Operand(source, LSL, 2));
5504 *result_needs_shifting = 1;
5505 break;
5506 case 6:
5507 __ add(result, source, Operand(source, LSL, 1));
5508 *result_needs_shifting = 2;
5509 break;
5510 case 7:
5511 __ rsb(result, source, Operand(source, LSL, 3));
5512 *result_needs_shifting = 1;
5513 break;
5514 case 9:
5515 __ add(result, source, Operand(source, LSL, 3));
5516 *result_needs_shifting = 1;
5517 break;
5518 case 10:
5519 __ add(result, source, Operand(source, LSL, 2));
5520 *result_needs_shifting = 2;
5521 break;
5522 default:
5523 ASSERT(!IsPowerOf2(known_int)); // That would be very inefficient.
5524 __ mul(result, source, known_int_register);
5525 *result_needs_shifting = 0;
5526 }
5527 }
5528
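A worked example of the deferred shift, assuming kSmiTagSize == 1: for known_int = 6 this emits add result, source, source, LSL #1 (3x) and reports *result_needs_shifting = 2, since (3x) << 2 = 12x is precisely the Smi-tagged form of 6x. Folding the Smi tag shift into the required shift is what lets 6 and 10 be handled with a single add.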
5529
5323 void GenericBinaryOpStub::Generate(MacroAssembler* masm) { 5530 void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
5324 // r1 : x 5531 // r1 : x
5325 // r0 : y 5532 // r0 : y
5326 // result : r0 5533 // result : r0
5327 5534
5328 // All ops need to know whether we are dealing with two Smis. Set up r2 to 5535 // All ops need to know whether we are dealing with two Smis. Set up r2 to
5329 // tell us that. 5536 // tell us that.
5330 __ orr(r2, r1, Operand(r0)); // r2 = x | y; 5537 __ orr(r2, r1, Operand(r0)); // r2 = x | y;
5331 5538
5332 switch (op_) { 5539 switch (op_) {
(...skipping 58 matching lines...)
5391 __ mov(r0, Operand(Smi::FromInt(0)), LeaveCC, pl); 5598 __ mov(r0, Operand(Smi::FromInt(0)), LeaveCC, pl);
5392 __ Ret(pl); // Return Smi 0 if the non-zero one was positive. 5599 __ Ret(pl); // Return Smi 0 if the non-zero one was positive.
5393 // Slow case. We fall through here if we multiplied a negative number 5600 // Slow case. We fall through here if we multiplied a negative number
5394 // with 0, because that would mean we should produce -0. 5601 // with 0, because that would mean we should produce -0.
5395 __ bind(&slow); 5602 __ bind(&slow);
5396 5603
5397 HandleBinaryOpSlowCases(masm, 5604 HandleBinaryOpSlowCases(masm,
5398 &not_smi, 5605 &not_smi,
5399 Builtins::MUL, 5606 Builtins::MUL,
5400 Token::MUL, 5607 Token::MUL,
5401 mode_); 5608 mode_);
5609 break;
5610 }
5611
5612 case Token::DIV:
5613 case Token::MOD: {
5614 Label not_smi;
5615 if (specialized_on_rhs_) {
5616 Label smi_is_unsuitable;
5617 __ BranchOnNotSmi(r1, &not_smi);
5618 if (IsPowerOf2(known_rhs_)) {
5619 if (op_ == Token::MOD) {
5620 __ and_(r0,
5621 r1,
5622 Operand(0x80000000u | ((known_rhs_ << kSmiTagSize) - 1)),
5623 SetCC);
5624 // We now have the answer, but if the input was negative we also
5625 // have the sign bit. Our work is done if the result is
5626 // positive or zero:
5627 __ Ret(pl);
5628 // A mod of a negative left hand side must return a negative number.
5629 // Unfortunately if the answer is 0 then we must return -0. And we
5630 // already optimistically trashed r0 so we may need to restore it.
5631 __ eor(r0, r0, Operand(0x80000000u), SetCC);
5632 __ mov(r0, Operand(Smi::FromInt(known_rhs_)), LeaveCC, eq); // -0.
5633 __ b(eq, &smi_is_unsuitable); // -0.
5634 __ sub(r0, r0, Operand(Smi::FromInt(known_rhs_))); // -3 % 4 == -3.
5635 } else {
5636 ASSERT(op_ == Token::DIV);
5637 __ tst(r1,
5638 Operand(0x80000000u | ((known_rhs_ << kSmiTagSize) - 1)));
5639 __ b(ne, &smi_is_unsuitable); // Go slow on negative or remainder.
5640 int shift = 0;
5641 int d = known_rhs_;
5642 while ((d & 1) == 0) {
5643 d >>= 1;
5644 shift++;
5645 }
5646 __ mov(r0, Operand(r1, LSR, shift));
5647 __ bic(r0, r0, Operand(kSmiTagMask));
5648 }
5649 } else {
5650 // Not a power of 2.
5651 __ tst(r1, Operand(0x80000000u));
5652 __ b(ne, &smi_is_unsuitable);
5653 // Find a fixed point reciprocal of the divisor so we can divide by
5654 // multiplying.
5655 double divisor = 1.0 / known_rhs_;
5656 int shift = 32;
5657 double scale = 4294967296.0; // 1 << 32.
5658 uint32_t mul;
5659 // Maximise the precision of the fixed point reciprocal.
5660 while (true) {
5661 mul = static_cast<uint32_t>(scale * divisor);
5662 if (mul >= 0x7fffffff) break;
5663 scale *= 2.0;
5664 shift++;
5665 }
5666 mul++;
5667 __ mov(r2, Operand(mul));
5668 __ umull(r3, r2, r2, r1);
5669 __ mov(r2, Operand(r2, LSR, shift - 31));
5670 // r2 is r1 / rhs. r2 is not Smi tagged.
5671 // r0 is still the known rhs. r0 is Smi tagged.
5672 // r1 is still the unknown lhs. r1 is Smi tagged.
5673 int r4_needs_shifting = 0; // Including the Smi tag shift of 1.
5674 // r4 = r2 * r0.
5675 MultiplyByKnownInt2(masm, r4, r2, r0, known_rhs_, &r4_needs_shifting);
5676 // r4 << r4_needs_shifting is now the Smi tagged rhs * (r1 / rhs).
5677 if (op_ == Token::DIV) {
5678 __ sub(r3, r1, Operand(r4, LSL, r4_needs_shifting), SetCC);
5679 __ b(ne, &smi_is_unsuitable); // There was a remainder.
5680 __ mov(r0, Operand(r2, LSL, kSmiTagSize));
5681 } else {
5682 ASSERT(op_ == Token::MOD);
5683 __ sub(r0, r1, Operand(r4, LSL, r4_needs_shifting));
5684 }
5685 }
5686 __ Ret();
5687 __ bind(&smi_is_unsuitable);
5688 } else {
5689 __ jmp(&not_smi);
5690 }
5691 HandleBinaryOpSlowCases(masm,
5692 &not_smi,
5693 op_ == Token::MOD ? Builtins::MOD : Builtins::DIV,
5694 op_,
5695 mode_);
5402 break; 5696 break;
5403 } 5697 }
5404 5698
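Working the reciprocal search through for known_rhs_ = 3: the loop exits with shift = 33 and mul = 2^33 / 3 + 1 = 0xaaaaaaab. umull keeps the high 32 bits of r1 * mul, and the further shift right by shift - 31 = 2 brings the total right shift to 34 = shift + 1; that extra bit strips the Smi tag, so for r1 = Smi(x) = 2x the quotient register holds 2x * (2^33 / 3) / 2^34 = x / 3, untagged, exactly as the in-line comments describe.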
5405 case Token::BIT_OR: 5699 case Token::BIT_OR:
5406 case Token::BIT_AND: 5700 case Token::BIT_AND:
5407 case Token::BIT_XOR: 5701 case Token::BIT_XOR:
5408 case Token::SAR: 5702 case Token::SAR:
5409 case Token::SHR: 5703 case Token::SHR:
5410 case Token::SHL: { 5704 case Token::SHL: {
5411 Label slow; 5705 Label slow;
5412 ASSERT(kSmiTag == 0); // adjust code below 5706 ASSERT(kSmiTag == 0); // adjust code below
5413 __ tst(r2, Operand(kSmiTagMask)); 5707 __ tst(r2, Operand(kSmiTagMask));
5414 __ b(ne, &slow); 5708 __ b(ne, &slow);
5415 switch (op_) { 5709 switch (op_) {
5416 case Token::BIT_OR: __ orr(r0, r0, Operand(r1)); break; 5710 case Token::BIT_OR: __ orr(r0, r0, Operand(r1)); break;
5417 case Token::BIT_AND: __ and_(r0, r0, Operand(r1)); break; 5711 case Token::BIT_AND: __ and_(r0, r0, Operand(r1)); break;
5418 case Token::BIT_XOR: __ eor(r0, r0, Operand(r1)); break; 5712 case Token::BIT_XOR: __ eor(r0, r0, Operand(r1)); break;
5419 case Token::SAR: 5713 case Token::SAR:
5420 // Remove tags from right operand. 5714 // Remove tags from right operand.
5421 __ mov(r2, Operand(r0, ASR, kSmiTagSize)); // y 5715 __ mov(r2, Operand(r0, ASR, kSmiTagSize)); // y
5422 // Use only the 5 least significant bits of the shift count. 5716 // Use only the 5 least significant bits of the shift count.
5423 __ and_(r2, r2, Operand(0x1f)); 5717 __ and_(r2, r2, Operand(0x1f));
5424 __ mov(r0, Operand(r1, ASR, r2)); 5718 __ mov(r0, Operand(r1, ASR, r2));
5425 // Smi tag result. 5719 // Smi tag result.
5426 __ and_(r0, r0, Operand(~kSmiTagMask)); 5720 __ bic(r0, r0, Operand(kSmiTagMask));
5427 break; 5721 break;
5428 case Token::SHR: 5722 case Token::SHR:
5429 // Remove tags from operands. We can't do this on a 31 bit number 5723 // Remove tags from operands. We can't do this on a 31 bit number
5430 // because then the 0s get shifted into bit 30 instead of bit 31. 5724 // because then the 0s get shifted into bit 30 instead of bit 31.
5431 __ mov(r3, Operand(r1, ASR, kSmiTagSize)); // x 5725 __ mov(r3, Operand(r1, ASR, kSmiTagSize)); // x
5432 __ mov(r2, Operand(r0, ASR, kSmiTagSize)); // y 5726 __ mov(r2, Operand(r0, ASR, kSmiTagSize)); // y
5433 // Use only the 5 least significant bits of the shift count. 5727 // Use only the 5 least significant bits of the shift count.
5434 __ and_(r2, r2, Operand(0x1f)); 5728 __ and_(r2, r2, Operand(0x1f));
5435 __ mov(r3, Operand(r3, LSR, r2)); 5729 __ mov(r3, Operand(r3, LSR, r2));
5436 // Unsigned shift is not allowed to produce a negative number, so 5730 // Unsigned shift is not allowed to produce a negative number, so
(...skipping 663 matching lines...)
6100 int CompareStub::MinorKey() { 6394 int CompareStub::MinorKey() {
6101 // Encode the two parameters in a unique 16 bit value. 6395 // Encode the two parameters in a unique 16 bit value.
6102 ASSERT(static_cast<unsigned>(cc_) >> 28 < (1 << 15)); 6396 ASSERT(static_cast<unsigned>(cc_) >> 28 < (1 << 15));
6103 return (static_cast<unsigned>(cc_) >> 27) | (strict_ ? 1 : 0); 6397 return (static_cast<unsigned>(cc_) >> 27) | (strict_ ? 1 : 0);
6104 } 6398 }
6105 6399
6106 6400
6107 #undef __ 6401 #undef __
6108 6402
6109 } } // namespace v8::internal 6403 } } // namespace v8::internal