Chromium Code Reviews

Side by Side Diff: src/arm/codegen-arm.cc

Issue 2452002: ARM: Track Smis on top 4 stack positions and Smi loop variables.... (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: '' Created 10 years, 6 months ago
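Background for reading this diff: on 32-bit ARM, V8 stores small integers ("Smis") as tagged machine words, and this patch teaches the virtual frame to remember which stack slots and loop variables are already known to hold Smis so the generated code can skip the tag check. A minimal sketch of that tagging scheme, assuming the standard 32-bit layout implied by the kSmiTag / kSmiTagSize / kSmiTagMask constants used throughout the diff (the helper functions below are illustrative, not V8 API):

#include <stdint.h>

// A Smi on 32-bit ARM is a 31-bit integer shifted left by one bit;
// the low (tag) bit is 0, so a single bit test identifies a Smi.
const int32_t kSmiTag = 0;                           // tag value for Smis
const int32_t kSmiTagSize = 1;                       // number of tag bits
const int32_t kSmiTagMask = (1 << kSmiTagSize) - 1;  // 0x1

int32_t SmiTag(int32_t value) { return value << kSmiTagSize; }
int32_t SmiUntag(int32_t smi) { return smi >> kSmiTagSize; }
bool IsSmi(int32_t word) { return (word & kSmiTagMask) == kSmiTag; }

When the frame already knows a slot holds a Smi (KnownSmiAt() in the code below), the generator can omit the 'tst reg, #kSmiTagMask' guard and take the inlined integer path unconditionally, which is what the new GENERATE_INLINE_SMI paths do.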
1 // Copyright 2010 the V8 project authors. All rights reserved. 1 // Copyright 2010 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 91 matching lines...)
102 void ICRuntimeCallHelper::AfterCall(MacroAssembler* masm) const { 102 void ICRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
103 masm->LeaveInternalFrame(); 103 masm->LeaveInternalFrame();
104 } 104 }
105 105
106 106
107 // ------------------------------------------------------------------------- 107 // -------------------------------------------------------------------------
108 // CodeGenState implementation. 108 // CodeGenState implementation.
109 109
110 CodeGenState::CodeGenState(CodeGenerator* owner) 110 CodeGenState::CodeGenState(CodeGenerator* owner)
111 : owner_(owner), 111 : owner_(owner),
112 true_target_(NULL), 112 previous_(owner->state()) {
113 false_target_(NULL), 113 owner->set_state(this);
114 previous_(NULL) {
115 owner_->set_state(this);
116 } 114 }
117 115
118 116
119 CodeGenState::CodeGenState(CodeGenerator* owner, 117 ConditionCodeGenState::ConditionCodeGenState(CodeGenerator* owner,
120 JumpTarget* true_target, 118 JumpTarget* true_target,
121 JumpTarget* false_target) 119 JumpTarget* false_target)
122 : owner_(owner), 120 : CodeGenState(owner),
123 true_target_(true_target), 121 true_target_(true_target),
124 false_target_(false_target), 122 false_target_(false_target) {
125 previous_(owner->state()) { 123 owner->set_state(this);
126 owner_->set_state(this); 124 }
125
126
127 TypeInfoCodeGenState::TypeInfoCodeGenState(CodeGenerator* owner,
128 Slot* slot,
129 TypeInfo type_info)
130 : CodeGenState(owner),
131 slot_(slot) {
132 owner->set_state(this);
133 old_type_info_ = owner->set_type_info(slot, type_info);
127 } 134 }
128 135
129 136
130 CodeGenState::~CodeGenState() { 137 CodeGenState::~CodeGenState() {
131 ASSERT(owner_->state() == this); 138 ASSERT(owner_->state() == this);
132 owner_->set_state(previous_); 139 owner_->set_state(previous_);
133 } 140 }
134 141
135 142
143 TypeInfoCodeGenState::~TypeInfoCodeGenState() {
144 owner()->set_type_info(slot_, old_type_info_);
145 }
146
136 // ------------------------------------------------------------------------- 147 // -------------------------------------------------------------------------
137 // CodeGenerator implementation 148 // CodeGenerator implementation
138 149
139 CodeGenerator::CodeGenerator(MacroAssembler* masm) 150 CodeGenerator::CodeGenerator(MacroAssembler* masm)
140 : deferred_(8), 151 : deferred_(8),
141 masm_(masm), 152 masm_(masm),
142 info_(NULL), 153 info_(NULL),
143 frame_(NULL), 154 frame_(NULL),
144 allocator_(NULL), 155 allocator_(NULL),
145 cc_reg_(al), 156 cc_reg_(al),
146 state_(NULL), 157 state_(NULL),
147 loop_nesting_(0), 158 loop_nesting_(0),
159 type_info_(NULL),
148 function_return_is_shadowed_(false) { 160 function_return_is_shadowed_(false) {
149 } 161 }
150 162
151 163
152 // Calling conventions: 164 // Calling conventions:
153 // fp: caller's frame pointer 165 // fp: caller's frame pointer
154 // sp: stack pointer 166 // sp: stack pointer
155 // r1: called JS function 167 // r1: called JS function
156 // cp: callee's context 168 // cp: callee's context
157 169
158 void CodeGenerator::Generate(CompilationInfo* info) { 170 void CodeGenerator::Generate(CompilationInfo* info) {
159 // Record the position for debugging purposes. 171 // Record the position for debugging purposes.
160 CodeForFunctionPosition(info->function()); 172 CodeForFunctionPosition(info->function());
161 Comment cmnt(masm_, "[ function compiled by virtual frame code generator"); 173 Comment cmnt(masm_, "[ function compiled by virtual frame code generator");
162 174
163 // Initialize state. 175 // Initialize state.
164 info_ = info; 176 info_ = info;
177
178 int slots = scope()->num_parameters() + scope()->num_stack_slots();
179 ScopedVector<TypeInfo> type_info_array(slots);
180 type_info_ = &type_info_array;
181
165 ASSERT(allocator_ == NULL); 182 ASSERT(allocator_ == NULL);
166 RegisterAllocator register_allocator(this); 183 RegisterAllocator register_allocator(this);
167 allocator_ = &register_allocator; 184 allocator_ = &register_allocator;
168 ASSERT(frame_ == NULL); 185 ASSERT(frame_ == NULL);
169 frame_ = new VirtualFrame(); 186 frame_ = new VirtualFrame();
170 cc_reg_ = al; 187 cc_reg_ = al;
171 188
172 // Adjust for function-level loop nesting. 189 // Adjust for function-level loop nesting.
173 ASSERT_EQ(0, loop_nesting_); 190 ASSERT_EQ(0, loop_nesting_);
174 loop_nesting_ = info->loop_nesting(); 191 loop_nesting_ = info->loop_nesting();
(...skipping 211 matching lines...)
386 ASSERT(!function_return_is_shadowed_); 403 ASSERT(!function_return_is_shadowed_);
387 function_return_.Unuse(); 404 function_return_.Unuse();
388 DeleteFrame(); 405 DeleteFrame();
389 406
390 // Process any deferred code using the register allocator. 407 // Process any deferred code using the register allocator.
391 if (!HasStackOverflow()) { 408 if (!HasStackOverflow()) {
392 ProcessDeferred(); 409 ProcessDeferred();
393 } 410 }
394 411
395 allocator_ = NULL; 412 allocator_ = NULL;
413 type_info_ = NULL;
414 }
415
416
417 int CodeGenerator::NumberOfSlot(Slot* slot) {
418 if (slot == NULL) return kInvalidSlotNumber;
419 switch (slot->type()) {
420 case Slot::PARAMETER:
421 return slot->index();
422 case Slot::LOCAL:
423 return slot->index() + scope()->num_parameters();
424 default:
425 break;
426 }
427 return kInvalidSlotNumber;
396 } 428 }
397 429
398 430
399 MemOperand CodeGenerator::SlotOperand(Slot* slot, Register tmp) { 431 MemOperand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
400 // Currently, this assertion will fail if we try to assign to 432 // Currently, this assertion will fail if we try to assign to
401 // a constant variable that is constant because it is read-only 433 // a constant variable that is constant because it is read-only
402 // (such as the variable referring to a named function expression). 434 // (such as the variable referring to a named function expression).
403 // We need to implement assignments to read-only variables. 435 // We need to implement assignments to read-only variables.
404 // Ideally, we should do this during AST generation (by converting 436 // Ideally, we should do this during AST generation (by converting
405 // such assignments into expression statements); however, in general 437 // such assignments into expression statements); however, in general
(...skipping 77 matching lines...)
483 // condition code register and no value is pushed. If the condition code 515 // condition code register and no value is pushed. If the condition code
484 // register was set, has_cc() is true and cc_reg_ contains the condition to 516 // register was set, has_cc() is true and cc_reg_ contains the condition to
485 // test for 'true'. 517 // test for 'true'.
486 void CodeGenerator::LoadCondition(Expression* x, 518 void CodeGenerator::LoadCondition(Expression* x,
487 JumpTarget* true_target, 519 JumpTarget* true_target,
488 JumpTarget* false_target, 520 JumpTarget* false_target,
489 bool force_cc) { 521 bool force_cc) {
490 ASSERT(!has_cc()); 522 ASSERT(!has_cc());
491 int original_height = frame_->height(); 523 int original_height = frame_->height();
492 524
493 { CodeGenState new_state(this, true_target, false_target); 525 { ConditionCodeGenState new_state(this, true_target, false_target);
494 Visit(x); 526 Visit(x);
495 527
496 // If we hit a stack overflow, we may not have actually visited 528 // If we hit a stack overflow, we may not have actually visited
497 // the expression. In that case, we ensure that we have a 529 // the expression. In that case, we ensure that we have a
498 // valid-looking frame state because we will continue to generate 530 // valid-looking frame state because we will continue to generate
499 // code as we unwind the C++ stack. 531 // code as we unwind the C++ stack.
500 // 532 //
501 // It's possible to have both a stack overflow and a valid frame 533 // It's possible to have both a stack overflow and a valid frame
502 // state (eg, a subexpression overflowed, visiting it returned 534 // state (eg, a subexpression overflowed, visiting it returned
503 // with a dummied frame state, and visiting this expression 535 // with a dummied frame state, and visiting this expression
(...skipping 278 matching lines...)
782 frame_->EmitPush(r0); 814 frame_->EmitPush(r0);
783 frame_->CallRuntime(Runtime::kToBool, 1); 815 frame_->CallRuntime(Runtime::kToBool, 1);
784 // Convert the result (r0) to a condition code. 816 // Convert the result (r0) to a condition code.
785 __ LoadRoot(ip, Heap::kFalseValueRootIndex); 817 __ LoadRoot(ip, Heap::kFalseValueRootIndex);
786 __ cmp(r0, ip); 818 __ cmp(r0, ip);
787 819
788 cc_reg_ = ne; 820 cc_reg_ = ne;
789 } 821 }
790 822
791 823
792 void CodeGenerator::GenericBinaryOperation(Token::Value op,
793 OverwriteMode overwrite_mode,
794 int constant_rhs) {
795 VirtualFrame::SpilledScope spilled_scope(frame_);
796 // sp[0] : y
797 // sp[1] : x
798 // result : r0
799
800 // Stub is entered with a call: 'return address' is in lr.
801 switch (op) {
802 case Token::ADD:
803 case Token::SUB:
804 case Token::MUL:
805 case Token::DIV:
806 case Token::MOD:
807 case Token::BIT_OR:
808 case Token::BIT_AND:
809 case Token::BIT_XOR:
810 case Token::SHL:
811 case Token::SHR:
812 case Token::SAR: {
813 frame_->EmitPop(r0); // r0 : y
814 frame_->EmitPop(r1); // r1 : x
815 GenericBinaryOpStub stub(op, overwrite_mode, r1, r0, constant_rhs);
816 frame_->CallStub(&stub, 0);
817 break;
818 }
819
820 case Token::COMMA:
821 frame_->EmitPop(r0);
822 // Simply discard left value.
823 frame_->Drop();
824 break;
825
826 default:
827 // Other cases should have been handled before this point.
828 UNREACHABLE();
829 break;
830 }
831 }
832
833
834 void CodeGenerator::VirtualFrameBinaryOperation(Token::Value op, 824 void CodeGenerator::VirtualFrameBinaryOperation(Token::Value op,
Søren Thygesen Gjesse 2010/06/02 09:24:01 VirtualFrameBinaryOperation -> GenericBinaryOperation
835 OverwriteMode overwrite_mode, 825 OverwriteMode overwrite_mode,
826 GenerateInlineSmi inline_smi,
836 int constant_rhs) { 827 int constant_rhs) {
837 // top of virtual frame: y 828 // top of virtual frame: y
838 // 2nd elt. on virtual frame : x 829 // 2nd elt. on virtual frame : x
839 // result : top of virtual frame 830 // result : top of virtual frame
840 831
841 // Stub is entered with a call: 'return address' is in lr. 832 // Stub is entered with a call: 'return address' is in lr.
842 switch (op) { 833 switch (op) {
843 case Token::ADD: // fall through. 834 case Token::ADD:
844 case Token::SUB: // fall through. 835 case Token::SUB:
836 if (inline_smi) {
837 JumpTarget done;
838 Register rhs = frame_->PopToRegister();
839 Register lhs = frame_->PopToRegister(rhs); // Don't pop to rhs register.
840 Register scratch = VirtualFrame::scratch0();
841 __ orr(scratch, rhs, Operand(lhs));
842 // Check they are both small and positive.
843 __ tst(scratch, Operand(kSmiTagMask | 0xc0000000));
844 ASSERT(rhs.is(r0) || lhs.is(r0)); // r0 is free now.
845 ASSERT_EQ(0, kSmiTag);
846 if (op == Token::ADD) {
847 __ add(r0, lhs, Operand(rhs), LeaveCC, eq);
848 } else {
849 __ sub(r0, lhs, Operand(rhs), LeaveCC, eq);
850 }
851 done.Branch(eq);
852 GenericBinaryOpStub stub(op, overwrite_mode, lhs, rhs, constant_rhs);
853 frame_->SpillAll();
854 frame_->CallStub(&stub, 0);
855 done.Bind();
856 frame_->EmitPush(r0);
857 break;
858 } else {
859 // Fall through!
860 }
861 case Token::BIT_OR:
862 case Token::BIT_AND:
863 case Token::BIT_XOR:
864 if (inline_smi) {
865 bool rhs_is_smi = frame_->KnownSmiAt(0);
866 bool lhs_is_smi = frame_->KnownSmiAt(1);
867 Register rhs = frame_->PopToRegister();
868 Register lhs = frame_->PopToRegister(rhs); // Don't pop to rhs register.
Søren Thygesen Gjesse 2010/06/02 09:24:01 Long line.
869 Register smi_test_reg;
870 Condition cond;
871 if (!rhs_is_smi || !lhs_is_smi) {
872 if (!rhs_is_smi) {
873 smi_test_reg = rhs;
874 } else if (!lhs_is_smi) {
875 smi_test_reg = lhs;
876 } else {
877 smi_test_reg = VirtualFrame::scratch0();
878 __ orr(smi_test_reg, rhs, Operand(lhs));
879 }
880 // Check they are both Smis.
881 __ tst(smi_test_reg, Operand(kSmiTagMask));
882 cond = eq;
883 } else {
884 cond = al;
885 }
886 ASSERT(rhs.is(r0) || lhs.is(r0)); // r0 is free now.
887 if (op == Token::BIT_OR) {
888 __ orr(r0, lhs, Operand(rhs), LeaveCC, cond);
889 } else if (op == Token::BIT_AND) {
890 __ and_(r0, lhs, Operand(rhs), LeaveCC, cond);
891 } else {
892 ASSERT(op == Token::BIT_XOR);
893 ASSERT_EQ(0, kSmiTag);
894 __ eor(r0, lhs, Operand(rhs), LeaveCC, cond);
895 }
896 if (cond != al) {
897 JumpTarget done;
898 done.Branch(cond);
899 GenericBinaryOpStub stub(op, overwrite_mode, lhs, rhs, constant_rhs);
900 frame_->SpillAll();
901 frame_->CallStub(&stub, 0);
902 done.Bind();
903 }
904 frame_->EmitPush(r0);
905 break;
906 } else {
907 // Fall through!
908 }
845 case Token::MUL: 909 case Token::MUL:
846 case Token::DIV: 910 case Token::DIV:
847 case Token::MOD: 911 case Token::MOD:
848 case Token::BIT_OR:
849 case Token::BIT_AND:
850 case Token::BIT_XOR:
851 case Token::SHL: 912 case Token::SHL:
852 case Token::SHR: 913 case Token::SHR:
853 case Token::SAR: { 914 case Token::SAR: {
854 Register rhs = frame_->PopToRegister(); 915 Register rhs = frame_->PopToRegister();
855 Register lhs = frame_->PopToRegister(rhs); // Don't pop to rhs register. 916 Register lhs = frame_->PopToRegister(rhs); // Don't pop to rhs register.
856 GenericBinaryOpStub stub(op, overwrite_mode, lhs, rhs, constant_rhs); 917 GenericBinaryOpStub stub(op, overwrite_mode, lhs, rhs, constant_rhs);
857 frame_->SpillAll(); 918 frame_->SpillAll();
858 frame_->CallStub(&stub, 0); 919 frame_->CallStub(&stub, 0);
859 frame_->EmitPush(r0); 920 frame_->EmitPush(r0);
860 break; 921 break;
(...skipping 104 matching lines...)
965 if (!reversed_) { 1026 if (!reversed_) {
966 if (tos_register_.is(r1)) { 1027 if (tos_register_.is(r1)) {
967 __ mov(r0, Operand(Smi::FromInt(value_))); 1028 __ mov(r0, Operand(Smi::FromInt(value_)));
968 } else { 1029 } else {
969 ASSERT(tos_register_.is(r0)); 1030 ASSERT(tos_register_.is(r0));
970 __ mov(r1, Operand(Smi::FromInt(value_))); 1031 __ mov(r1, Operand(Smi::FromInt(value_)));
971 lhs = r0; 1032 lhs = r0;
972 rhs = r1; 1033 rhs = r1;
973 } 1034 }
974 } else { 1035 } else {
975 UNREACHABLE(); // Should have been handled in SmiOperation. 1036 ASSERT(op_ == Token::SHL);
1037 __ mov(r1, Operand(Smi::FromInt(value_)));
976 } 1038 }
977 break; 1039 break;
978 } 1040 }
979 1041
980 default: 1042 default:
981 // Other cases should have been handled before this point. 1043 // Other cases should have been handled before this point.
982 UNREACHABLE(); 1044 UNREACHABLE();
983 break; 1045 break;
984 } 1046 }
985 1047
(...skipping 27 matching lines...)
1013 return bit_posn; 1075 return bit_posn;
1014 } 1076 }
1015 1077
1016 1078
1017 void CodeGenerator::SmiOperation(Token::Value op, 1079 void CodeGenerator::SmiOperation(Token::Value op,
1018 Handle<Object> value, 1080 Handle<Object> value,
1019 bool reversed, 1081 bool reversed,
1020 OverwriteMode mode) { 1082 OverwriteMode mode) {
1021 int int_value = Smi::cast(*value)->value(); 1083 int int_value = Smi::cast(*value)->value();
1022 1084
1085 bool both_sides_are_smi = frame_->KnownSmiAt(0);
1086
1023 bool something_to_inline; 1087 bool something_to_inline;
1024 switch (op) { 1088 switch (op) {
1025 case Token::ADD: 1089 case Token::ADD:
1026 case Token::SUB: 1090 case Token::SUB:
1027 case Token::BIT_AND: 1091 case Token::BIT_AND:
1028 case Token::BIT_OR: 1092 case Token::BIT_OR:
1029 case Token::BIT_XOR: { 1093 case Token::BIT_XOR: {
1030 something_to_inline = true; 1094 something_to_inline = true;
1031 break; 1095 break;
1032 } 1096 }
1033 case Token::SHL: 1097 case Token::SHL: {
1098 something_to_inline = (both_sides_are_smi || !reversed);
1099 break;
1100 }
1034 case Token::SHR: 1101 case Token::SHR:
1035 case Token::SAR: { 1102 case Token::SAR: {
1036 if (reversed) { 1103 if (reversed) {
1037 something_to_inline = false; 1104 something_to_inline = false;
1038 } else { 1105 } else {
1039 something_to_inline = true; 1106 something_to_inline = true;
1040 } 1107 }
1041 break; 1108 break;
1042 } 1109 }
1043 case Token::MOD: { 1110 case Token::MOD: {
(...skipping 16 matching lines...)
1060 something_to_inline = false; 1127 something_to_inline = false;
1061 break; 1128 break;
1062 } 1129 }
1063 } 1130 }
1064 1131
1065 if (!something_to_inline) { 1132 if (!something_to_inline) {
1066 if (!reversed) { 1133 if (!reversed) {
1067 // Push the rhs onto the virtual frame by putting it in a TOS register. 1134 // Push the rhs onto the virtual frame by putting it in a TOS register.
1068 Register rhs = frame_->GetTOSRegister(); 1135 Register rhs = frame_->GetTOSRegister();
1069 __ mov(rhs, Operand(value)); 1136 __ mov(rhs, Operand(value));
1070 frame_->EmitPush(rhs); 1137 frame_->EmitPush(rhs, TypeInfo::Smi());
1071 VirtualFrameBinaryOperation(op, mode, int_value); 1138 VirtualFrameBinaryOperation(op, mode, GENERATE_INLINE_SMI, int_value);
1072 } else { 1139 } else {
1073 // Pop the rhs, then push lhs and rhs in the right order. Only performs 1140 // Pop the rhs, then push lhs and rhs in the right order. Only performs
1074 // at most one pop, the rest takes place in TOS registers. 1141 // at most one pop, the rest takes place in TOS registers.
1075 Register lhs = frame_->GetTOSRegister(); // Get reg for pushing. 1142 Register lhs = frame_->GetTOSRegister(); // Get reg for pushing.
1076 Register rhs = frame_->PopToRegister(lhs); // Don't use lhs for this. 1143 Register rhs = frame_->PopToRegister(lhs); // Don't use lhs for this.
1077 __ mov(lhs, Operand(value)); 1144 __ mov(lhs, Operand(value));
1078 frame_->EmitPush(lhs); 1145 frame_->EmitPush(lhs, TypeInfo::Smi());
1079 frame_->EmitPush(rhs); 1146 TypeInfo t = both_sides_are_smi ? TypeInfo::Smi() : TypeInfo::Unknown();
1080 VirtualFrameBinaryOperation(op, mode, kUnknownIntValue); 1147 frame_->EmitPush(rhs, t);
1148 VirtualFrameBinaryOperation(op, mode, GENERATE_INLINE_SMI, kUnknownIntValue);
Søren Thygesen Gjesse 2010/06/02 09:24:01 Long line.
1081 } 1149 }
1082 return; 1150 return;
1083 } 1151 }
1084 1152
1085 // We move the top of stack to a register (normally no move is involved). 1153 // We move the top of stack to a register (normally no move is involved).
1086 Register tos = frame_->PopToRegister(); 1154 Register tos = frame_->PopToRegister();
1087 // All other registers are spilled. The deferred code expects one argument 1155 // All other registers are spilled. The deferred code expects one argument
1088 // in a register and all other values are flushed to the stack. The 1156 // in a register and all other values are flushed to the stack. The
1089 // answer is returned in the same register that the top of stack argument was 1157 // answer is returned in the same register that the top of stack argument was
1090 // in. 1158 // in.
1091 frame_->SpillAll(); 1159 frame_->SpillAll();
1092 1160
1093 switch (op) { 1161 switch (op) {
1094 case Token::ADD: { 1162 case Token::ADD: {
1095 DeferredCode* deferred = 1163 DeferredCode* deferred =
1096 new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos); 1164 new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
1097 1165
1098 __ add(tos, tos, Operand(value), SetCC); 1166 __ add(tos, tos, Operand(value), SetCC);
1099 deferred->Branch(vs); 1167 deferred->Branch(vs);
1100 __ tst(tos, Operand(kSmiTagMask)); 1168 if (!both_sides_are_smi) {
1101 deferred->Branch(ne); 1169 __ tst(tos, Operand(kSmiTagMask));
1170 deferred->Branch(ne);
1171 }
1102 deferred->BindExit(); 1172 deferred->BindExit();
1103 frame_->EmitPush(tos); 1173 frame_->EmitPush(tos);
1104 break; 1174 break;
1105 } 1175 }
1106 1176
1107 case Token::SUB: { 1177 case Token::SUB: {
1108 DeferredCode* deferred = 1178 DeferredCode* deferred =
1109 new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos); 1179 new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
1110 1180
1111 if (reversed) { 1181 if (reversed) {
1112 __ rsb(tos, tos, Operand(value), SetCC); 1182 __ rsb(tos, tos, Operand(value), SetCC);
1113 } else { 1183 } else {
1114 __ sub(tos, tos, Operand(value), SetCC); 1184 __ sub(tos, tos, Operand(value), SetCC);
1115 } 1185 }
1116 deferred->Branch(vs); 1186 deferred->Branch(vs);
1117 __ tst(tos, Operand(kSmiTagMask)); 1187 if (!both_sides_are_smi) {
1118 deferred->Branch(ne); 1188 __ tst(tos, Operand(kSmiTagMask));
1189 deferred->Branch(ne);
1190 }
1119 deferred->BindExit(); 1191 deferred->BindExit();
1120 frame_->EmitPush(tos); 1192 frame_->EmitPush(tos);
1121 break; 1193 break;
1122 } 1194 }
1123 1195
1124 1196
1125 case Token::BIT_OR: 1197 case Token::BIT_OR:
1126 case Token::BIT_XOR: 1198 case Token::BIT_XOR:
1127 case Token::BIT_AND: { 1199 case Token::BIT_AND: {
1128 DeferredCode* deferred = 1200 if (both_sides_are_smi) {
1129 new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos); 1201 switch (op) {
1130 __ tst(tos, Operand(kSmiTagMask)); 1202 case Token::BIT_OR: __ orr(tos, tos, Operand(value)); break;
1131 deferred->Branch(ne); 1203 case Token::BIT_XOR: __ eor(tos, tos, Operand(value)); break;
1132 switch (op) { 1204 case Token::BIT_AND: __ and_(tos, tos, Operand(value)); break;
1133 case Token::BIT_OR: __ orr(tos, tos, Operand(value)); break; 1205 default: UNREACHABLE();
1134 case Token::BIT_XOR: __ eor(tos, tos, Operand(value)); break; 1206 }
1135 case Token::BIT_AND: __ and_(tos, tos, Operand(value)); break; 1207 frame_->EmitPush(tos, TypeInfo::Smi());
1136 default: UNREACHABLE(); 1208 } else {
1209 DeferredCode* deferred =
1210 new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
1211 __ tst(tos, Operand(kSmiTagMask));
1212 deferred->Branch(ne);
1213 switch (op) {
1214 case Token::BIT_OR: __ orr(tos, tos, Operand(value)); break;
1215 case Token::BIT_XOR: __ eor(tos, tos, Operand(value)); break;
1216 case Token::BIT_AND: __ and_(tos, tos, Operand(value)); break;
1217 default: UNREACHABLE();
1218 }
1219 deferred->BindExit();
1220 TypeInfo result_type =
1221 (op == Token::BIT_AND) ? TypeInfo::Smi() : TypeInfo::Integer32();
1222 frame_->EmitPush(tos, result_type);
1137 } 1223 }
1138 deferred->BindExit();
1139 frame_->EmitPush(tos);
1140 break; 1224 break;
1141 } 1225 }
1142 1226
1143 case Token::SHL: 1227 case Token::SHL:
1228 if (reversed) {
1229 ASSERT(both_sides_are_smi);
1230 int max_shift = 0;
1231 int max_result = int_value == 0 ? 1 : int_value;
1232 while (Smi::IsValid(max_result << 1)) {
1233 max_shift++;
1234 max_result <<= 1;
1235 }
1236 DeferredCode* deferred =
1237 new DeferredInlineSmiOperation(op, int_value, true, mode, tos);
1238 // Mask off the last 5 bits of the shift operand (rhs). This is part
1239 // of the definition of shift in JS and we know we have a Smi so we
1240 // can safely do this. The masked version gets passed to the
1241 // deferred code, but that makes no difference.
1242 __ and_(tos, tos, Operand(Smi::FromInt(0x1f)));
1243 __ cmp(tos, Operand(Smi::FromInt(max_shift)));
1244 deferred->Branch(ge);
1245 Register scratch = VirtualFrame::scratch0();
1246 __ mov(scratch, Operand(tos, ASR, kSmiTagSize)); // Untag.
1247 __ mov(tos, Operand(Smi::FromInt(int_value))); // Load constant.
1248 __ mov(tos, Operand(tos, LSL, scratch)); // Shift constant.
1249 deferred->BindExit();
1250 TypeInfo result = TypeInfo::Integer32();
1251 frame_->EmitPush(tos, result);
1252 break;
1253 }
1254 // Fall through!
1144 case Token::SHR: 1255 case Token::SHR:
1145 case Token::SAR: { 1256 case Token::SAR: {
1146 ASSERT(!reversed); 1257 ASSERT(!reversed);
1258 TypeInfo result = TypeInfo::Integer32();
1147 Register scratch = VirtualFrame::scratch0(); 1259 Register scratch = VirtualFrame::scratch0();
1148 Register scratch2 = VirtualFrame::scratch1(); 1260 Register scratch2 = VirtualFrame::scratch1();
1149 int shift_value = int_value & 0x1f; // least significant 5 bits 1261 int shift_value = int_value & 0x1f; // least significant 5 bits
1150 DeferredCode* deferred = 1262 DeferredCode* deferred =
1151 new DeferredInlineSmiOperation(op, shift_value, false, mode, tos); 1263 new DeferredInlineSmiOperation(op, shift_value, false, mode, tos);
1152 uint32_t problematic_mask = kSmiTagMask; 1264 uint32_t problematic_mask = kSmiTagMask;
1153 // For unsigned shift by zero all negative smis are problematic. 1265 // For unsigned shift by zero all negative smis are problematic.
1154 if (shift_value == 0 && op == Token::SHR) problematic_mask |= 0x80000000; 1266 bool skip_smi_test = both_sides_are_smi;
1155 __ tst(tos, Operand(problematic_mask)); 1267 if (shift_value == 0 && op == Token::SHR) {
1156 deferred->Branch(ne); // Go slow for problematic input. 1268 problematic_mask |= 0x80000000;
1269 skip_smi_test = false;
1270 }
1271 if (!skip_smi_test) {
1272 __ tst(tos, Operand(problematic_mask));
1273 deferred->Branch(ne); // Go slow for problematic input.
1274 }
1157 switch (op) { 1275 switch (op) {
1158 case Token::SHL: { 1276 case Token::SHL: {
1159 if (shift_value != 0) { 1277 if (shift_value != 0) {
1160 int adjusted_shift = shift_value - kSmiTagSize; 1278 int adjusted_shift = shift_value - kSmiTagSize;
1161 ASSERT(adjusted_shift >= 0); 1279 ASSERT(adjusted_shift >= 0);
1162 if (adjusted_shift != 0) { 1280 if (adjusted_shift != 0) {
1163 __ mov(scratch, Operand(tos, LSL, adjusted_shift)); 1281 __ mov(scratch, Operand(tos, LSL, adjusted_shift));
1164 // Check that the *signed* result fits in a smi. 1282 // Check that the *signed* result fits in a smi.
1165 __ add(scratch2, scratch, Operand(0x40000000), SetCC); 1283 __ add(scratch2, scratch, Operand(0x40000000), SetCC);
1166 deferred->Branch(mi); 1284 deferred->Branch(mi);
(...skipping 14 matching lines...) Expand all
1181 __ mov(scratch, Operand(scratch, LSR, shift_value)); 1299 __ mov(scratch, Operand(scratch, LSR, shift_value));
1182 if (shift_value == 1) { 1300 if (shift_value == 1) {
1183 // check that the *unsigned* result fits in a smi 1301 // check that the *unsigned* result fits in a smi
1184 // neither of the two high-order bits can be set: 1302 // neither of the two high-order bits can be set:
1185 // - 0x80000000: high bit would be lost when smi tagging 1303 // - 0x80000000: high bit would be lost when smi tagging
1186 // - 0x40000000: this number would convert to negative when 1304 // - 0x40000000: this number would convert to negative when
1187 // smi tagging these two cases can only happen with shifts 1305 // smi tagging these two cases can only happen with shifts
1188 // by 0 or 1 when handed a valid smi 1306 // by 0 or 1 when handed a valid smi
1189 __ tst(scratch, Operand(0xc0000000)); 1307 __ tst(scratch, Operand(0xc0000000));
1190 deferred->Branch(ne); 1308 deferred->Branch(ne);
1309 } else {
1310 ASSERT(shift_value >= 2);
1311 result = TypeInfo::Smi(); // SHR by at least 2 gives a Smi.
1191 } 1312 }
1192 __ mov(tos, Operand(scratch, LSL, kSmiTagSize)); 1313 __ mov(tos, Operand(scratch, LSL, kSmiTagSize));
1193 } 1314 }
1194 break; 1315 break;
1195 } 1316 }
1196 case Token::SAR: { 1317 case Token::SAR: {
1197 // In the ARM instruction set, ASR by immediate 0 means shifting 32 1318 // In the ARM instruction set, ASR by immediate 0 means shifting 32
1198 // bits. 1319 // bits.
1199 if (shift_value != 0) { 1320 if (shift_value != 0) {
1200 // Do the shift and the tag removal in one operation. If the shift 1321 // Do the shift and the tag removal in one operation. If the shift
1201 // is 31 bits (the highest possible value) then we emit the 1322 // is 31 bits (the highest possible value) then we emit the
1202 // instruction as a shift by 0 which means shift arithmetically by 1323 // instruction as a shift by 0 which means shift arithmetically by
1203 // 32. 1324 // 32.
1204 __ mov(tos, Operand(tos, ASR, (kSmiTagSize + shift_value) & 0x1f)); 1325 __ mov(tos, Operand(tos, ASR, (kSmiTagSize + shift_value) & 0x1f));
1205 // Put tag back. 1326 // Put tag back.
1206 __ mov(tos, Operand(tos, LSL, kSmiTagSize)); 1327 __ mov(tos, Operand(tos, LSL, kSmiTagSize));
1328 // SAR by at least 1 gives a Smi.
1329 result = TypeInfo::Smi();
1207 } 1330 }
1208 break; 1331 break;
1209 } 1332 }
1210 default: UNREACHABLE(); 1333 default: UNREACHABLE();
1211 } 1334 }
1212 deferred->BindExit(); 1335 deferred->BindExit();
1213 frame_->EmitPush(tos); 1336 frame_->EmitPush(tos, result);
1214 break; 1337 break;
1215 } 1338 }
1216 1339
1217 case Token::MOD: { 1340 case Token::MOD: {
1218 ASSERT(!reversed); 1341 ASSERT(!reversed);
1219 ASSERT(int_value >= 2); 1342 ASSERT(int_value >= 2);
1220 ASSERT(IsPowerOf2(int_value)); 1343 ASSERT(IsPowerOf2(int_value));
1221 DeferredCode* deferred = 1344 DeferredCode* deferred =
1222 new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos); 1345 new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
1223 unsigned mask = (0x80000000u | kSmiTagMask); 1346 unsigned mask = (0x80000000u | kSmiTagMask);
1224 __ tst(tos, Operand(mask)); 1347 __ tst(tos, Operand(mask));
1225 deferred->Branch(ne); // Go to deferred code on non-Smis and negative. 1348 deferred->Branch(ne); // Go to deferred code on non-Smis and negative.
1226 mask = (int_value << kSmiTagSize) - 1; 1349 mask = (int_value << kSmiTagSize) - 1;
1227 __ and_(tos, tos, Operand(mask)); 1350 __ and_(tos, tos, Operand(mask));
1228 deferred->BindExit(); 1351 deferred->BindExit();
1229 frame_->EmitPush(tos); 1352 // Mod of positive power of 2 Smi gives a Smi if the lhs is an integer.
1353 frame_->EmitPush(
1354 tos,
1355 both_sides_are_smi ? TypeInfo::Smi() : TypeInfo::Number());
1230 break; 1356 break;
1231 } 1357 }
1232 1358
1233 case Token::MUL: { 1359 case Token::MUL: {
1234 ASSERT(IsEasyToMultiplyBy(int_value)); 1360 ASSERT(IsEasyToMultiplyBy(int_value));
1235 DeferredCode* deferred = 1361 DeferredCode* deferred =
1236 new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos); 1362 new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
1237 unsigned max_smi_that_wont_overflow = Smi::kMaxValue / int_value; 1363 unsigned max_smi_that_wont_overflow = Smi::kMaxValue / int_value;
1238 max_smi_that_wont_overflow <<= kSmiTagSize; 1364 max_smi_that_wont_overflow <<= kSmiTagSize;
1239 unsigned mask = 0x80000000u; 1365 unsigned mask = 0x80000000u;
1240 while ((mask & max_smi_that_wont_overflow) == 0) { 1366 while ((mask & max_smi_that_wont_overflow) == 0) {
1241 mask |= mask >> 1; 1367 mask |= mask >> 1;
1242 } 1368 }
1243 mask |= kSmiTagMask; 1369 mask |= kSmiTagMask;
1244 // This does a single mask that checks for a too high value in a 1370 // This does a single mask that checks for a too high value in a
1245 // conservative way and for a non-Smi. It also filters out negative 1371 // conservative way and for a non-Smi. It also filters out negative
1246 // numbers, unfortunately, but since this code is inline we prefer 1372 // numbers, unfortunately, but since this code is inline we prefer
(...skipping 25 matching lines...)
1272 // sp[0] : y 1398 // sp[0] : y
1273 // sp[1] : x 1399 // sp[1] : x
1274 // result : cc register 1400 // result : cc register
1275 1401
1276 // Strict only makes sense for equality comparisons. 1402 // Strict only makes sense for equality comparisons.
1277 ASSERT(!strict || cc == eq); 1403 ASSERT(!strict || cc == eq);
1278 1404
1279 Register lhs; 1405 Register lhs;
1280 Register rhs; 1406 Register rhs;
1281 1407
1408 bool lhs_is_smi;
1409 bool rhs_is_smi;
1410
1282 // We load the top two stack positions into registers chosen by the virtual 1411 // We load the top two stack positions into registers chosen by the virtual
1283 // frame. This should keep the register shuffling to a minimum. 1412 // frame. This should keep the register shuffling to a minimum.
1284 // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order. 1413 // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
1285 if (cc == gt || cc == le) { 1414 if (cc == gt || cc == le) {
1286 cc = ReverseCondition(cc); 1415 cc = ReverseCondition(cc);
1416 lhs_is_smi = frame_->KnownSmiAt(0);
1417 rhs_is_smi = frame_->KnownSmiAt(1);
1287 lhs = frame_->PopToRegister(); 1418 lhs = frame_->PopToRegister();
1288 rhs = frame_->PopToRegister(lhs); // Don't pop to the same register again! 1419 rhs = frame_->PopToRegister(lhs); // Don't pop to the same register again!
1289 } else { 1420 } else {
1421 rhs_is_smi = frame_->KnownSmiAt(0);
1422 lhs_is_smi = frame_->KnownSmiAt(1);
1290 rhs = frame_->PopToRegister(); 1423 rhs = frame_->PopToRegister();
1291 lhs = frame_->PopToRegister(rhs); // Don't pop to the same register again! 1424 lhs = frame_->PopToRegister(rhs); // Don't pop to the same register again!
1292 } 1425 }
1293 1426
1427 bool both_sides_are_smi = (lhs_is_smi && rhs_is_smi);
1428
1294 ASSERT(rhs.is(r0) || rhs.is(r1)); 1429 ASSERT(rhs.is(r0) || rhs.is(r1));
1295 ASSERT(lhs.is(r0) || lhs.is(r1)); 1430 ASSERT(lhs.is(r0) || lhs.is(r1));
1296 1431
1297 // Now we have the two sides in r0 and r1. We flush any other registers 1432 JumpTarget exit;
1298 // because the stub doesn't know about register allocation.
1299 frame_->SpillAll();
1300 Register scratch = VirtualFrame::scratch0();
1301 __ orr(scratch, lhs, Operand(rhs));
1302 __ tst(scratch, Operand(kSmiTagMask));
1303 JumpTarget smi;
1304 smi.Branch(eq);
1305 1433
1306 // Perform non-smi comparison by stub. 1434 if (!both_sides_are_smi) {
1307 // CompareStub takes arguments in r0 and r1, returns <0, >0 or 0 in r0. 1435 // Now we have the two sides in r0 and r1. We flush any other registers
1308 // We call with 0 args because there are 0 on the stack. 1436 // because the stub doesn't know about register allocation.
1309 if (!rhs.is(r0)) { 1437 frame_->SpillAll();
1310 __ Swap(rhs, lhs, ip); 1438 Register scratch = VirtualFrame::scratch0();
1439 Register smi_test_reg;
1440 if (lhs_is_smi) {
1441 smi_test_reg = rhs;
1442 } else if (rhs_is_smi) {
1443 smi_test_reg = lhs;
1444 } else {
1445 __ orr(scratch, lhs, Operand(rhs));
1446 smi_test_reg = scratch;
1447 }
1448 __ tst(smi_test_reg, Operand(kSmiTagMask));
1449 JumpTarget smi;
1450 smi.Branch(eq);
1451
1452 // Perform non-smi comparison by stub.
1453 // CompareStub takes arguments in r0 and r1, returns <0, >0 or 0 in r0.
1454 // We call with 0 args because there are 0 on the stack.
1455 if (!rhs.is(r0)) {
1456 __ Swap(rhs, lhs, ip);
1457 }
1458
1459 CompareStub stub(cc, strict);
1460 frame_->CallStub(&stub, 0);
1461 __ cmp(r0, Operand(0));
1462 exit.Jump();
1463
1464 smi.Bind();
1311 } 1465 }
1312 1466
1313 CompareStub stub(cc, strict);
1314 frame_->CallStub(&stub, 0);
1315 __ cmp(r0, Operand(0));
1316 JumpTarget exit;
1317 exit.Jump();
1318
1319 // Do smi comparisons by pointer comparison. 1467 // Do smi comparisons by pointer comparison.
1320 smi.Bind();
1321 __ cmp(lhs, Operand(rhs)); 1468 __ cmp(lhs, Operand(rhs));
1322 1469
1323 exit.Bind(); 1470 exit.Bind();
1324 cc_reg_ = cc; 1471 cc_reg_ = cc;
1325 } 1472 }
1326 1473
1327 1474
1328 // Call the function on the stack with the given arguments. 1475 // Call the function on the stack with the given arguments.
1329 void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args, 1476 void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
1330 CallFunctionFlags flags, 1477 CallFunctionFlags flags,
(...skipping 752 matching lines...)
2083 } 2230 }
2084 2231
2085 // If the test is never true there is no need to compile the test or 2232 // If the test is never true there is no need to compile the test or
2086 // body. 2233 // body.
2087 ConditionAnalysis info = AnalyzeCondition(node->cond()); 2234 ConditionAnalysis info = AnalyzeCondition(node->cond());
2088 if (info == ALWAYS_FALSE) return; 2235 if (info == ALWAYS_FALSE) return;
2089 2236
2090 node->break_target()->SetExpectedHeight(); 2237 node->break_target()->SetExpectedHeight();
2091 IncrementLoopNesting(); 2238 IncrementLoopNesting();
2092 2239
2240 // We know that the loop index is a smi if it is not modified in the
2241 // loop body and it is checked against a constant limit in the loop
2242 // condition. In this case, we reset the static type information of the
2243 // loop index to smi before compiling the body, the update expression, and
2244 // the bottom check of the loop condition.
2245 TypeInfoCodeGenState type_info_scope(this,
2246 node->is_fast_smi_loop() ?
2247 node->loop_variable()->slot() :
2248 NULL,
2249 TypeInfo::Smi());
2250
2093 // If there is no update statement, label the top of the loop with the 2251 // If there is no update statement, label the top of the loop with the
2094 // continue target, otherwise with the loop target. 2252 // continue target, otherwise with the loop target.
2095 JumpTarget loop(JumpTarget::BIDIRECTIONAL); 2253 JumpTarget loop(JumpTarget::BIDIRECTIONAL);
2096 if (node->next() == NULL) { 2254 if (node->next() == NULL) {
2097 node->continue_target()->SetExpectedHeight(); 2255 node->continue_target()->SetExpectedHeight();
2098 node->continue_target()->Bind(); 2256 node->continue_target()->Bind();
2099 } else { 2257 } else {
2100 node->continue_target()->SetExpectedHeight(); 2258 node->continue_target()->SetExpectedHeight();
2101 loop.Bind(); 2259 loop.Bind();
2102 } 2260 }
(...skipping 700 matching lines...)
2803 frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2); 2961 frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
2804 } else { 2962 } else {
2805 frame_->CallRuntime(Runtime::kLoadContextSlot, 2); 2963 frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
2806 } 2964 }
2807 2965
2808 done.Bind(); 2966 done.Bind();
2809 frame_->EmitPush(r0); 2967 frame_->EmitPush(r0);
2810 2968
2811 } else { 2969 } else {
2812 Register scratch = VirtualFrame::scratch0(); 2970 Register scratch = VirtualFrame::scratch0();
2813 frame_->EmitPush(SlotOperand(slot, scratch)); 2971 TypeInfo info = type_info(slot);
2972 frame_->EmitPush(SlotOperand(slot, scratch), info);
2814 if (slot->var()->mode() == Variable::CONST) { 2973 if (slot->var()->mode() == Variable::CONST) {
2815 // Const slots may contain 'the hole' value (the constant hasn't been 2974 // Const slots may contain 'the hole' value (the constant hasn't been
2816 // initialized yet) which needs to be converted into the 'undefined' 2975 // initialized yet) which needs to be converted into the 'undefined'
2817 // value. 2976 // value.
2818 Comment cmnt(masm_, "[ Unhole const"); 2977 Comment cmnt(masm_, "[ Unhole const");
2819 frame_->EmitPop(scratch); 2978 frame_->EmitPop(scratch);
2820 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); 2979 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2821 __ cmp(scratch, ip); 2980 __ cmp(scratch, ip);
2822 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex, eq); 2981 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex, eq);
2823 frame_->EmitPush(scratch); 2982 frame_->EmitPush(scratch);
(...skipping 269 matching lines...)
3093 ASSERT_EQ(original_height + 1, frame_->height()); 3252 ASSERT_EQ(original_height + 1, frame_->height());
3094 } 3253 }
3095 3254
3096 3255
3097 void CodeGenerator::VisitLiteral(Literal* node) { 3256 void CodeGenerator::VisitLiteral(Literal* node) {
3098 #ifdef DEBUG 3257 #ifdef DEBUG
3099 int original_height = frame_->height(); 3258 int original_height = frame_->height();
3100 #endif 3259 #endif
3101 Comment cmnt(masm_, "[ Literal"); 3260 Comment cmnt(masm_, "[ Literal");
3102 Register reg = frame_->GetTOSRegister(); 3261 Register reg = frame_->GetTOSRegister();
3262 bool is_smi = node->handle()->IsSmi();
3103 __ mov(reg, Operand(node->handle())); 3263 __ mov(reg, Operand(node->handle()));
3104 frame_->EmitPush(reg); 3264 frame_->EmitPush(reg, is_smi ? TypeInfo::Smi() : TypeInfo::Unknown());
3105 ASSERT_EQ(original_height + 1, frame_->height()); 3265 ASSERT_EQ(original_height + 1, frame_->height());
3106 } 3266 }
3107 3267
3108 3268
3109 void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) { 3269 void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
3110 #ifdef DEBUG 3270 #ifdef DEBUG
3111 int original_height = frame_->height(); 3271 int original_height = frame_->height();
3112 #endif 3272 #endif
3113 VirtualFrame::SpilledScope spilled_scope(frame_); 3273 VirtualFrame::SpilledScope spilled_scope(frame_);
3114 Comment cmnt(masm_, "[ RexExp Literal"); 3274 Comment cmnt(masm_, "[ RexExp Literal");
(...skipping 210 matching lines...)
3325 Literal* literal = node->value()->AsLiteral(); 3485 Literal* literal = node->value()->AsLiteral();
3326 bool overwrite_value = 3486 bool overwrite_value =
3327 (node->value()->AsBinaryOperation() != NULL && 3487 (node->value()->AsBinaryOperation() != NULL &&
3328 node->value()->AsBinaryOperation()->ResultOverwriteAllowed()); 3488 node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
3329 if (literal != NULL && literal->handle()->IsSmi()) { 3489 if (literal != NULL && literal->handle()->IsSmi()) {
3330 SmiOperation(node->binary_op(), 3490 SmiOperation(node->binary_op(),
3331 literal->handle(), 3491 literal->handle(),
3332 false, 3492 false,
3333 overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE); 3493 overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
3334 } else { 3494 } else {
3495 GenerateInlineSmi inline_smi =
3496 loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI;
3497 if (literal != NULL) inline_smi = DONT_GENERATE_INLINE_SMI;
Søren Thygesen Gjesse 2010/06/02 09:24:01 Maybe assert !literal.IsSmi() when it is != NULL (
3335 Load(node->value()); 3498 Load(node->value());
3336 VirtualFrameBinaryOperation( 3499 VirtualFrameBinaryOperation(
3337 node->binary_op(), overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE); 3500 node->binary_op(),
3501 overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE,
3502 inline_smi);
3338 } 3503 }
3339 } else { 3504 } else {
3340 Load(node->value()); 3505 Load(node->value());
3341 } 3506 }
3342 3507
3343 // Perform the assignment. 3508 // Perform the assignment.
3344 if (var->mode() != Variable::CONST || node->op() == Token::INIT_CONST) { 3509 if (var->mode() != Variable::CONST || node->op() == Token::INIT_CONST) {
3345 CodeForSourcePosition(node->position()); 3510 CodeForSourcePosition(node->position());
3346 StoreToSlot(slot, 3511 StoreToSlot(slot,
3347 node->op() == Token::INIT_CONST ? CONST_INIT : NOT_CONST_INIT); 3512 node->op() == Token::INIT_CONST ? CONST_INIT : NOT_CONST_INIT);
(...skipping 70 matching lines...)
3418 Literal* literal = node->value()->AsLiteral(); 3583 Literal* literal = node->value()->AsLiteral();
3419 bool overwrite_value = 3584 bool overwrite_value =
3420 (node->value()->AsBinaryOperation() != NULL && 3585 (node->value()->AsBinaryOperation() != NULL &&
3421 node->value()->AsBinaryOperation()->ResultOverwriteAllowed()); 3586 node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
3422 if (literal != NULL && literal->handle()->IsSmi()) { 3587 if (literal != NULL && literal->handle()->IsSmi()) {
3423 SmiOperation(node->binary_op(), 3588 SmiOperation(node->binary_op(),
3424 literal->handle(), 3589 literal->handle(),
3425 false, 3590 false,
3426 overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE); 3591 overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
3427 } else { 3592 } else {
3593 GenerateInlineSmi inline_smi =
3594 loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI;
3595 if (literal != NULL) inline_smi = DONT_GENERATE_INLINE_SMI;
3428 Load(node->value()); 3596 Load(node->value());
3429 VirtualFrameBinaryOperation( 3597 VirtualFrameBinaryOperation(
3430 node->binary_op(), overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE); 3598 node->binary_op(),
3599 overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE,
3600 inline_smi);
3431 } 3601 }
3432 } else { 3602 } else {
3433 // For non-compound assignment just load the right-hand side. 3603 // For non-compound assignment just load the right-hand side.
3434 Load(node->value()); 3604 Load(node->value());
3435 } 3605 }
3436 3606
3437 // Stack layout: 3607 // Stack layout:
3438 // [tos] : value 3608 // [tos] : value
3439 // [tos+1] : receiver (only materialized if non-trivial) 3609 // [tos+1] : receiver (only materialized if non-trivial)
3440 // [tos+2] : receiver if at the end of an initialization block 3610 // [tos+2] : receiver if at the end of an initialization block
(...skipping 84 matching lines...)
3525 Literal* literal = node->value()->AsLiteral(); 3695 Literal* literal = node->value()->AsLiteral();
3526 bool overwrite_value = 3696 bool overwrite_value =
3527 (node->value()->AsBinaryOperation() != NULL && 3697 (node->value()->AsBinaryOperation() != NULL &&
3528 node->value()->AsBinaryOperation()->ResultOverwriteAllowed()); 3698 node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
3529 if (literal != NULL && literal->handle()->IsSmi()) { 3699 if (literal != NULL && literal->handle()->IsSmi()) {
3530 SmiOperation(node->binary_op(), 3700 SmiOperation(node->binary_op(),
3531 literal->handle(), 3701 literal->handle(),
3532 false, 3702 false,
3533 overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE); 3703 overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
3534 } else { 3704 } else {
3705 GenerateInlineSmi inline_smi =
3706 loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI;
3707 if (literal != NULL) inline_smi = DONT_GENERATE_INLINE_SMI;
3535 Load(node->value()); 3708 Load(node->value());
3536 VirtualFrameBinaryOperation( 3709 VirtualFrameBinaryOperation(
3537 node->binary_op(), overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE); 3710 node->binary_op(),
3711 overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE,
3712 inline_smi);
3538 } 3713 }
3539 } else { 3714 } else {
3540 // For non-compound assignment just load the right-hand side. 3715 // For non-compound assignment just load the right-hand side.
3541 Load(node->value()); 3716 Load(node->value());
3542 } 3717 }
3543 3718
3544 // Stack layout: 3719 // Stack layout:
3545 // [tos] : value 3720 // [tos] : value
3546 // [tos+1] : key 3721 // [tos+1] : key
3547 // [tos+2] : receiver 3722 // [tos+2] : receiver
(...skipping 1531 matching lines...)
5079 #ifdef DEBUG 5254 #ifdef DEBUG
5080 int original_height = frame_->height(); 5255 int original_height = frame_->height();
5081 #endif 5256 #endif
5082 Comment cmnt(masm_, "[ CountOperation"); 5257 Comment cmnt(masm_, "[ CountOperation");
5083 5258
5084 bool is_postfix = node->is_postfix(); 5259 bool is_postfix = node->is_postfix();
5085 bool is_increment = node->op() == Token::INC; 5260 bool is_increment = node->op() == Token::INC;
5086 5261
5087 Variable* var = node->expression()->AsVariableProxy()->AsVariable(); 5262 Variable* var = node->expression()->AsVariableProxy()->AsVariable();
5088 bool is_const = (var != NULL && var->mode() == Variable::CONST); 5263 bool is_const = (var != NULL && var->mode() == Variable::CONST);
5264 bool is_slot = (var != NULL && var->mode() == Variable::VAR);
5089 5265
5090 if (is_postfix) { 5266 if (!is_const && is_slot && type_info(var->slot()).IsSmi()) {
5267 // The type info declares that this variable is always a Smi. That
5268 // means it is a Smi both before and after the increment/decrement.
5269 // Let's make use of that to make a very minimal count.
5270 Reference target(this, node->expression(), !is_const);
5271 ASSERT(!target.is_illegal());
5272 target.GetValue(); // Pushes the value.
5273 Register value = frame_->PopToRegister();
5274 if (is_postfix) frame_->EmitPush(value);
5275 if (is_increment) {
5276 __ add(value, value, Operand(Smi::FromInt(1)));
5277 } else {
5278 __ sub(value, value, Operand(Smi::FromInt(1)));
5279 }
5280 frame_->EmitPush(value);
5281 target.SetValue(NOT_CONST_INIT);
5282 if (is_postfix) frame_->Pop();
5283 ASSERT_EQ(original_height + 1, frame_->height());
5284 return;
5285 }
5286
5287 // If it's a postfix expression and its result is not ignored and the
5288 // reference is non-trivial, then push a placeholder on the stack now
5289 // to hold the result of the expression.
5290 bool placeholder_pushed = false;
5291 if (!is_slot && is_postfix) {
5091 frame_->EmitPush(Operand(Smi::FromInt(0))); 5292 frame_->EmitPush(Operand(Smi::FromInt(0)));
5293 placeholder_pushed = true;
5092 } 5294 }
5093 5295
5094 // A constant reference is not saved to, so a constant reference is not a 5296 // A constant reference is not saved to, so a constant reference is not a
5095 // compound assignment reference. 5297 // compound assignment reference.
5096 { Reference target(this, node->expression(), !is_const); 5298 { Reference target(this, node->expression(), !is_const);
5097 if (target.is_illegal()) { 5299 if (target.is_illegal()) {
5098 // Spoof the virtual frame to have the expected height (one higher 5300 // Spoof the virtual frame to have the expected height (one higher
5099 // than on entry). 5301 // than on entry).
5100 if (!is_postfix) { 5302 if (!placeholder_pushed) frame_->EmitPush(Operand(Smi::FromInt(0)));
5101 frame_->EmitPush(Operand(Smi::FromInt(0)));
5102 }
5103 ASSERT_EQ(original_height + 1, frame_->height()); 5303 ASSERT_EQ(original_height + 1, frame_->height());
5104 return; 5304 return;
5105 } 5305 }
5306
5106 // This pushes 0, 1 or 2 words on the object to be used later when updating 5307 // This pushes 0, 1 or 2 words on the object to be used later when updating
5107 // the target. It also pushes the current value of the target. 5308 // the target. It also pushes the current value of the target.
5108 target.GetValue(); 5309 target.GetValue();
5109 5310
5110 JumpTarget slow; 5311 JumpTarget slow;
5111 JumpTarget exit; 5312 JumpTarget exit;
5112 5313
5314 Register value = frame_->PopToRegister();
5315
5316 // Postfix: Store the old value as the result.
5317 if (placeholder_pushed) {
5318 frame_->SetElementAt(value, target.size());
5319 } else if (is_postfix) {
5320 frame_->EmitPush(value);
5321 __ mov(VirtualFrame::scratch0(), value);
5322 value = VirtualFrame::scratch0();
5323 }
5324
5113 // Check for smi operand. 5325 // Check for smi operand.
5114 Register value = frame_->PopToRegister();
5115 __ tst(value, Operand(kSmiTagMask)); 5326 __ tst(value, Operand(kSmiTagMask));
5116 slow.Branch(ne); 5327 slow.Branch(ne);
5117 5328
5118 // Postfix: Store the old value as the result.
5119 if (is_postfix) {
5120 frame_->SetElementAt(value, target.size());
5121 }
5122
5123 // Perform optimistic increment/decrement. 5329 // Perform optimistic increment/decrement.
5124 if (is_increment) { 5330 if (is_increment) {
5125 __ add(value, value, Operand(Smi::FromInt(1)), SetCC); 5331 __ add(value, value, Operand(Smi::FromInt(1)), SetCC);
5126 } else { 5332 } else {
5127 __ sub(value, value, Operand(Smi::FromInt(1)), SetCC); 5333 __ sub(value, value, Operand(Smi::FromInt(1)), SetCC);
5128 } 5334 }
5129 5335
5130 // If the increment/decrement didn't overflow, we're done. 5336 // If the increment/decrement didn't overflow, we're done.
5131 exit.Branch(vc); 5337 exit.Branch(vc);
5132 5338
(...skipping 160 matching lines...) Expand 10 before | Expand all | Expand 10 after
5293 bool overwrite_left = 5499 bool overwrite_left =
5294 (node->left()->AsBinaryOperation() != NULL && 5500 (node->left()->AsBinaryOperation() != NULL &&
5295 node->left()->AsBinaryOperation()->ResultOverwriteAllowed()); 5501 node->left()->AsBinaryOperation()->ResultOverwriteAllowed());
5296 bool overwrite_right = 5502 bool overwrite_right =
5297 (node->right()->AsBinaryOperation() != NULL && 5503 (node->right()->AsBinaryOperation() != NULL &&
5298 node->right()->AsBinaryOperation()->ResultOverwriteAllowed()); 5504 node->right()->AsBinaryOperation()->ResultOverwriteAllowed());
5299 5505
5300 if (rliteral != NULL && rliteral->handle()->IsSmi()) { 5506 if (rliteral != NULL && rliteral->handle()->IsSmi()) {
5301 VirtualFrame::RegisterAllocationScope scope(this); 5507 VirtualFrame::RegisterAllocationScope scope(this);
5302 Load(node->left()); 5508 Load(node->left());
5509 if (frame_->KnownSmiAt(0)) overwrite_left = false;
5303 SmiOperation(node->op(), 5510 SmiOperation(node->op(),
5304 rliteral->handle(), 5511 rliteral->handle(),
5305 false, 5512 false,
5306 overwrite_right ? OVERWRITE_RIGHT : NO_OVERWRITE); 5513 overwrite_left ? OVERWRITE_LEFT : NO_OVERWRITE);
5307 } else if (lliteral != NULL && lliteral->handle()->IsSmi()) { 5514 } else if (lliteral != NULL && lliteral->handle()->IsSmi()) {
5308 VirtualFrame::RegisterAllocationScope scope(this); 5515 VirtualFrame::RegisterAllocationScope scope(this);
5309 Load(node->right()); 5516 Load(node->right());
5517 if (frame_->KnownSmiAt(0)) overwrite_right = false;
5310 SmiOperation(node->op(), 5518 SmiOperation(node->op(),
5311 lliteral->handle(), 5519 lliteral->handle(),
5312 true, 5520 true,
5313 overwrite_left ? OVERWRITE_LEFT : NO_OVERWRITE); 5521 overwrite_right ? OVERWRITE_RIGHT : NO_OVERWRITE);
5314 } else { 5522 } else {
5523 GenerateInlineSmi inline_smi =
5524 loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI;
5525 if (lliteral != NULL) inline_smi = DONT_GENERATE_INLINE_SMI;
5526 if (rliteral != NULL) inline_smi = DONT_GENERATE_INLINE_SMI;
5315 VirtualFrame::RegisterAllocationScope scope(this); 5527 VirtualFrame::RegisterAllocationScope scope(this);
5316 OverwriteMode overwrite_mode = NO_OVERWRITE; 5528 OverwriteMode overwrite_mode = NO_OVERWRITE;
5317 if (overwrite_left) { 5529 if (overwrite_left) {
5318 overwrite_mode = OVERWRITE_LEFT; 5530 overwrite_mode = OVERWRITE_LEFT;
5319 } else if (overwrite_right) { 5531 } else if (overwrite_right) {
5320 overwrite_mode = OVERWRITE_RIGHT; 5532 overwrite_mode = OVERWRITE_RIGHT;
5321 } 5533 }
5322 Load(node->left()); 5534 Load(node->left());
5323 Load(node->right()); 5535 Load(node->right());
5324 VirtualFrameBinaryOperation(node->op(), overwrite_mode); 5536 VirtualFrameBinaryOperation(node->op(), overwrite_mode, inline_smi);
5325 } 5537 }
5326 } 5538 }
5327 ASSERT(!has_valid_frame() || 5539 ASSERT(!has_valid_frame() ||
5328 (has_cc() && frame_->height() == original_height) || 5540 (has_cc() && frame_->height() == original_height) ||
5329 (!has_cc() && frame_->height() == original_height + 1)); 5541 (!has_cc() && frame_->height() == original_height + 1));
5330 } 5542 }
5331 5543
5332 5544
5333 void CodeGenerator::VisitThisFunction(ThisFunction* node) { 5545 void CodeGenerator::VisitThisFunction(ThisFunction* node) {
5334 #ifdef DEBUG 5546 #ifdef DEBUG
(...skipping 471 matching lines...) Expand 10 before | Expand all | Expand 10 after
5806 } else { 6018 } else {
5807 // Inline the keyed load. 6019 // Inline the keyed load.
5808 Comment cmnt(masm_, "[ Inlined load from keyed property"); 6020 Comment cmnt(masm_, "[ Inlined load from keyed property");
5809 6021
5810 // Counter will be decremented in the deferred code. Placed here to avoid 6022 // Counter will be decremented in the deferred code. Placed here to avoid
5811 // having it in the instruction stream below where patching will occur. 6023 // having it in the instruction stream below where patching will occur.
5812 __ IncrementCounter(&Counters::keyed_load_inline, 1, 6024 __ IncrementCounter(&Counters::keyed_load_inline, 1,
5813 frame_->scratch0(), frame_->scratch1()); 6025 frame_->scratch0(), frame_->scratch1());
5814 6026
5815 // Load the key and receiver from the stack. 6027 // Load the key and receiver from the stack.
6028 bool key_is_known_smi = frame_->KnownSmiAt(0);
5816 Register key = frame_->PopToRegister(); 6029 Register key = frame_->PopToRegister();
5817 Register receiver = frame_->PopToRegister(key); 6030 Register receiver = frame_->PopToRegister(key);
5818 VirtualFrame::SpilledScope spilled(frame_); 6031 VirtualFrame::SpilledScope spilled(frame_);
5819 6032
5820 // The deferred code expects key and receiver in registers. 6033 // The deferred code expects key and receiver in registers.
5821 DeferredReferenceGetKeyedValue* deferred = 6034 DeferredReferenceGetKeyedValue* deferred =
5822 new DeferredReferenceGetKeyedValue(key, receiver); 6035 new DeferredReferenceGetKeyedValue(key, receiver);
5823 6036
5824 // Check that the receiver is a heap object. 6037 // Check that the receiver is a heap object.
5825 __ tst(receiver, Operand(kSmiTagMask)); 6038 __ tst(receiver, Operand(kSmiTagMask));
5826 deferred->Branch(eq); 6039 deferred->Branch(eq);
5827 6040
5828 // The following instructions are part of the inlined load keyed 6041 // The following instructions are part of the inlined load keyed
5829 // property code which can be patched. Therefore the exact number of 6042 // property code which can be patched. Therefore the exact number of
5830 // instructions generated needs to be fixed, so the constant pool is blocked 6043 // instructions generated needs to be fixed, so the constant pool is blocked
5831 // while generating this code. 6044 // while generating this code.
5832 { Assembler::BlockConstPoolScope block_const_pool(masm_); 6045 { Assembler::BlockConstPoolScope block_const_pool(masm_);
5833 Register scratch1 = VirtualFrame::scratch0(); 6046 Register scratch1 = VirtualFrame::scratch0();
5834 Register scratch2 = VirtualFrame::scratch1(); 6047 Register scratch2 = VirtualFrame::scratch1();
5835 // Check the map. The null map used below is patched by the inline cache 6048 // Check the map. The null map used below is patched by the inline cache
5836 // code. 6049 // code.
5837 __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset)); 6050 __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
6051
6052 // Check that the key is a smi.
6053 if (!key_is_known_smi) {
6054 __ tst(key, Operand(kSmiTagMask));
6055 deferred->Branch(ne);
6056 }
6057
5838 #ifdef DEBUG 6058 #ifdef DEBUG
5839 Label check_inlined_codesize; 6059 Label check_inlined_codesize;
5840 masm_->bind(&check_inlined_codesize); 6060 masm_->bind(&check_inlined_codesize);
5841 #endif 6061 #endif
5842 __ mov(scratch2, Operand(Factory::null_value())); 6062 __ mov(scratch2, Operand(Factory::null_value()));
5843 __ cmp(scratch1, scratch2); 6063 __ cmp(scratch1, scratch2);
5844 deferred->Branch(ne); 6064 deferred->Branch(ne);
5845 6065
5846 // Check that the key is a smi.
5847 __ tst(key, Operand(kSmiTagMask));
5848 deferred->Branch(ne);
5849
5850 // Get the elements array from the receiver and check that it 6066 // Get the elements array from the receiver and check that it
5851 // is not a dictionary. 6067 // is not a dictionary.
5852 __ ldr(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset)); 6068 __ ldr(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
5853 __ ldr(scratch2, FieldMemOperand(scratch1, JSObject::kMapOffset)); 6069 __ ldr(scratch2, FieldMemOperand(scratch1, JSObject::kMapOffset));
5854 __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex); 6070 __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
5855 __ cmp(scratch2, ip); 6071 __ cmp(scratch2, ip);
5856 deferred->Branch(ne); 6072 deferred->Branch(ne);
5857 6073
5858 // Check that key is within bounds. Use unsigned comparison to handle 6074 // Check that key is within bounds. Use unsigned comparison to handle
5859 // negative keys. 6075 // negative keys.
(...skipping 4408 matching lines...)
10268 __ bind(&string_add_runtime); 10484 __ bind(&string_add_runtime);
10269 __ TailCallRuntime(Runtime::kStringAdd, 2, 1); 10485 __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
10270 } 10486 }
10271 10487
10272 10488
10273 #undef __ 10489 #undef __
10274 10490
10275 } } // namespace v8::internal 10491 } } // namespace v8::internal
10276 10492
10277 #endif // V8_TARGET_ARCH_ARM 10493 #endif // V8_TARGET_ARCH_ARM
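For readers following the new type-info plumbing end to end (a hedged reading of this patch, not authoritative documentation): Generate() allocates one TypeInfo entry per parameter and stack local, NumberOfSlot() maps a Slot to an index into that array, and the ForStatement visitor wraps the loop body in a TypeInfoCodeGenState that pins the loop variable to TypeInfo::Smi() when node->is_fast_smi_loop() holds — e.g. for a JavaScript loop like 'for (var i = 0; i < 100; i++) { ... }' whose index only ever sees Smi values. The save/restore shape of that scope object, reduced to a self-contained sketch (TypeInfoScope and the slot-keyed map are stand-ins invented here; the real class threads through CodeGenState and also accepts a NULL slot for loops that don't qualify):

#include <cstdio>
#include <map>

enum TypeInfo { kUnknown = 0, kSmi = 1 };  // stand-in for v8::internal::TypeInfo

// RAII scope: annotate a slot's type on entry, restore the old value on exit,
// mirroring TypeInfoCodeGenState's constructor/destructor pair in the diff.
class TypeInfoScope {
 public:
  TypeInfoScope(std::map<int, TypeInfo>* table, int slot, TypeInfo info)
      : table_(table), slot_(slot), old_((*table)[slot]) {
    (*table_)[slot_] = info;  // the loop body compiles under this annotation
  }
  ~TypeInfoScope() { (*table_)[slot_] = old_; }  // restore the previous state
 private:
  std::map<int, TypeInfo>* table_;
  int slot_;
  TypeInfo old_;
};

int main() {
  std::map<int, TypeInfo> type_info;
  {
    TypeInfoScope scope(&type_info, 0, kSmi);         // slot 0: known Smi here
    std::printf("in loop body: %d\n", type_info[0]);  // prints 1 (kSmi)
  }
  std::printf("after loop: %d\n", type_info[0]);      // prints 0 (kUnknown)
  return 0;
}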