Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(32)

Side by Side Diff: src/codegen-ia32.cc

Issue 13339: Experimental: thread the virtual frame through the deferred code... (Closed) Base URL: http://v8.googlecode.com/svn/branches/experimental/toiger/
Patch Set: Created 12 years ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « src/codegen.cc ('k') | src/macro-assembler-ia32.h » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2006-2008 the V8 project authors. All rights reserved. 1 // Copyright 2006-2008 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 288 matching lines...) Expand 10 before | Expand all | Expand 10 after
299 299
300 // Adjust for function-level loop nesting. 300 // Adjust for function-level loop nesting.
301 loop_nesting_ -= fun->loop_nesting(); 301 loop_nesting_ -= fun->loop_nesting();
302 302
303 // Code generation state must be reset. 303 // Code generation state must be reset.
304 ASSERT(state_ == NULL); 304 ASSERT(state_ == NULL);
305 ASSERT(loop_nesting() == 0); 305 ASSERT(loop_nesting() == 0);
306 ASSERT(!function_return_is_shadowed_); 306 ASSERT(!function_return_is_shadowed_);
307 function_return_.Unuse(); 307 function_return_.Unuse();
308 ASSERT(!has_cc()); 308 ASSERT(!has_cc());
309 DeleteFrame();
310
311 // Process any deferred code using the register allocator.
312 ProcessDeferred();
313
309 // There is no need to delete the register allocator, it is a 314 // There is no need to delete the register allocator, it is a
310 // stack-allocated local. 315 // stack-allocated local.
311 DeleteFrame();
312 allocator_ = NULL; 316 allocator_ = NULL;
313 scope_ = NULL; 317 scope_ = NULL;
314 } 318 }
315 319
316 320
317 Operand CodeGenerator::SlotOperand(Slot* slot, Register tmp) { 321 Operand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
318 // Currently, this assertion will fail if we try to assign to 322 // Currently, this assertion will fail if we try to assign to
319 // a constant variable that is constant because it is read-only 323 // a constant variable that is constant because it is read-only
320 // (such as the variable referring to a named function expression). 324 // (such as the variable referring to a named function expression).
321 // We need to implement assignments to read-only variables. 325 // We need to implement assignments to read-only variables.
(...skipping 367 matching lines...) Expand 10 before | Expand all | Expand 10 after
689 } 693 }
690 } 694 }
691 695
692 696
693 class DeferredInlineBinaryOperation: public DeferredCode { 697 class DeferredInlineBinaryOperation: public DeferredCode {
694 public: 698 public:
695 DeferredInlineBinaryOperation(CodeGenerator* generator, 699 DeferredInlineBinaryOperation(CodeGenerator* generator,
696 Token::Value op, 700 Token::Value op,
697 OverwriteMode mode, 701 OverwriteMode mode,
698 GenericBinaryFlags flags) 702 GenericBinaryFlags flags)
699 : DeferredCode(generator), stub_(op, mode, flags) { } 703 : DeferredCode(generator),
704 stub_(op, mode, flags),
705 op_(op) {
706 }
700 707
701 void GenerateInlineCode() { 708 void GenerateInlineCode();
William Hesse 2008/12/10 12:02:51 Where is this function now defined? Or is it dead code?
Kevin Millikin (Chromium) 2008/12/10 12:09:50 Not dead, we call it in the generic binary operation, and it...
702 stub_.GenerateSmiCode(masm(), enter());
703 }
704 709
705 virtual void Generate() { 710 virtual void Generate() {
706 __ push(ebx); 711 __ push(ebx);
707 __ CallStub(&stub_); 712 __ CallStub(&stub_);
708 // We must preserve the eax value here, because it will be written 713 // We must preserve the eax value here, because it will be written
709 // to the top-of-stack element when getting back to the fast case 714 // to the top-of-stack element when getting back to the fast case
710 // code. See comment in GenericBinaryOperation where 715 // code. See comment in GenericBinaryOperation where
711 // deferred->exit() is bound. 716 // deferred->exit() is bound.
712 __ push(eax); 717 __ push(eax);
713 } 718 }
714 719
715 private: 720 private:
716 GenericBinaryOpStub stub_; 721 GenericBinaryOpStub stub_;
722 Token::Value op_;
717 }; 723 };
718 724
719 725
720 void CodeGenerator::GenericBinaryOperation(Token::Value op, 726 void CodeGenerator::GenericBinaryOperation(Token::Value op,
721 StaticType* type, 727 StaticType* type,
722 OverwriteMode overwrite_mode) { 728 OverwriteMode overwrite_mode) {
723 Comment cmnt(masm_, "[ BinaryOperation"); 729 Comment cmnt(masm_, "[ BinaryOperation");
724 Comment cmnt_token(masm_, Token::String(op)); 730 Comment cmnt_token(masm_, Token::String(op));
725 731
726 if (op == Token::COMMA) { 732 if (op == Token::COMMA) {
(...skipping 35 matching lines...) Expand 10 before | Expand all | Expand 10 after
762 new DeferredInlineBinaryOperation(this, op, overwrite_mode, flags); 768 new DeferredInlineBinaryOperation(this, op, overwrite_mode, flags);
763 // Fetch the operands from the stack. 769 // Fetch the operands from the stack.
764 frame_->EmitPop(ebx); // get y 770 frame_->EmitPop(ebx); // get y
765 __ mov(eax, frame_->Top()); // get x 771 __ mov(eax, frame_->Top()); // get x
766 // Generate the inline part of the code. 772 // Generate the inline part of the code.
767 deferred->GenerateInlineCode(); 773 deferred->GenerateInlineCode();
768 // Put result back on the stack. It seems somewhat weird to let 774 // Put result back on the stack. It seems somewhat weird to let
769 // the deferred code jump back before the assignment to the frame 775 // the deferred code jump back before the assignment to the frame
770 // top, but this is just to let the peephole optimizer get rid of 776 // top, but this is just to let the peephole optimizer get rid of
771 // more code. 777 // more code.
772 __ bind(deferred->exit()); 778 deferred->exit()->Bind();
773 __ mov(frame_->Top(), eax); 779 __ mov(frame_->Top(), eax);
774 } else { 780 } else {
775 // Call the stub and push the result to the stack. 781 // Call the stub and push the result to the stack.
776 GenericBinaryOpStub stub(op, overwrite_mode, flags); 782 GenericBinaryOpStub stub(op, overwrite_mode, flags);
777 frame_->CallStub(&stub, 2); 783 frame_->CallStub(&stub, 2);
778 frame_->EmitPush(eax); 784 frame_->EmitPush(eax);
779 } 785 }
780 } 786 }
781 787
782 788
(...skipping 164 matching lines...) Expand 10 before | Expand all | Expand 10 after
947 case Token::ADD: { 953 case Token::ADD: {
948 DeferredCode* deferred = NULL; 954 DeferredCode* deferred = NULL;
949 if (!reversed) { 955 if (!reversed) {
950 deferred = new DeferredInlinedSmiAdd(this, int_value, overwrite_mode); 956 deferred = new DeferredInlinedSmiAdd(this, int_value, overwrite_mode);
951 } else { 957 } else {
952 deferred = new DeferredInlinedSmiAddReversed(this, int_value, 958 deferred = new DeferredInlinedSmiAddReversed(this, int_value,
953 overwrite_mode); 959 overwrite_mode);
954 } 960 }
955 frame_->EmitPop(eax); 961 frame_->EmitPop(eax);
956 __ add(Operand(eax), Immediate(value)); 962 __ add(Operand(eax), Immediate(value));
957 __ j(overflow, deferred->enter(), not_taken); 963 deferred->enter()->Branch(overflow, not_taken);
958 __ test(eax, Immediate(kSmiTagMask)); 964 __ test(eax, Immediate(kSmiTagMask));
959 __ j(not_zero, deferred->enter(), not_taken); 965 deferred->enter()->Branch(not_zero, not_taken);
960 __ bind(deferred->exit()); 966 deferred->exit()->Bind();
961 frame_->EmitPush(eax); 967 frame_->EmitPush(eax);
962 break; 968 break;
963 } 969 }
964 970
965 case Token::SUB: { 971 case Token::SUB: {
966 DeferredCode* deferred = NULL; 972 DeferredCode* deferred = NULL;
967 frame_->EmitPop(eax); 973 frame_->EmitPop(eax);
968 if (!reversed) { 974 if (!reversed) {
969 deferred = new DeferredInlinedSmiSub(this, int_value, overwrite_mode); 975 deferred = new DeferredInlinedSmiSub(this, int_value, overwrite_mode);
970 __ sub(Operand(eax), Immediate(value)); 976 __ sub(Operand(eax), Immediate(value));
971 } else { 977 } else {
972 deferred = new DeferredInlinedSmiSubReversed(this, edx, overwrite_mode); 978 deferred = new DeferredInlinedSmiSubReversed(this, edx, overwrite_mode);
973 __ mov(edx, Operand(eax)); 979 __ mov(edx, Operand(eax));
974 __ mov(eax, Immediate(value)); 980 __ mov(eax, Immediate(value));
975 __ sub(eax, Operand(edx)); 981 __ sub(eax, Operand(edx));
976 } 982 }
977 __ j(overflow, deferred->enter(), not_taken); 983 deferred->enter()->Branch(overflow, not_taken);
978 __ test(eax, Immediate(kSmiTagMask)); 984 __ test(eax, Immediate(kSmiTagMask));
979 __ j(not_zero, deferred->enter(), not_taken); 985 deferred->enter()->Branch(not_zero, not_taken);
980 __ bind(deferred->exit()); 986 deferred->exit()->Bind();
981 frame_->EmitPush(eax); 987 frame_->EmitPush(eax);
982 break; 988 break;
983 } 989 }
984 990
985 case Token::SAR: { 991 case Token::SAR: {
986 if (reversed) { 992 if (reversed) {
987 frame_->EmitPop(eax); 993 frame_->EmitPop(eax);
988 frame_->EmitPush(Immediate(value)); 994 frame_->EmitPush(Immediate(value));
989 frame_->EmitPush(eax); 995 frame_->EmitPush(eax);
990 GenericBinaryOperation(op, type, overwrite_mode); 996 GenericBinaryOperation(op, type, overwrite_mode);
991 } else { 997 } else {
992 int shift_value = int_value & 0x1f; // only least significant 5 bits 998 int shift_value = int_value & 0x1f; // only least significant 5 bits
993 DeferredCode* deferred = 999 DeferredCode* deferred =
994 new DeferredInlinedSmiOperation(this, Token::SAR, shift_value, 1000 new DeferredInlinedSmiOperation(this, Token::SAR, shift_value,
995 overwrite_mode); 1001 overwrite_mode);
996 frame_->EmitPop(eax); 1002 frame_->EmitPop(eax);
997 __ test(eax, Immediate(kSmiTagMask)); 1003 __ test(eax, Immediate(kSmiTagMask));
998 __ j(not_zero, deferred->enter(), not_taken); 1004 deferred->enter()->Branch(not_zero, not_taken);
999 __ sar(eax, shift_value); 1005 __ sar(eax, shift_value);
1000 __ and_(eax, ~kSmiTagMask); 1006 __ and_(eax, ~kSmiTagMask);
1001 __ bind(deferred->exit()); 1007 deferred->exit()->Bind();
1002 frame_->EmitPush(eax); 1008 frame_->EmitPush(eax);
1003 } 1009 }
1004 break; 1010 break;
1005 } 1011 }
1006 1012
1007 case Token::SHR: { 1013 case Token::SHR: {
1008 if (reversed) { 1014 if (reversed) {
1009 frame_->EmitPop(eax); 1015 frame_->EmitPop(eax);
1010 frame_->EmitPush(Immediate(value)); 1016 frame_->EmitPush(Immediate(value));
1011 frame_->EmitPush(eax); 1017 frame_->EmitPush(eax);
1012 GenericBinaryOperation(op, type, overwrite_mode); 1018 GenericBinaryOperation(op, type, overwrite_mode);
1013 } else { 1019 } else {
1014 int shift_value = int_value & 0x1f; // only least significant 5 bits 1020 int shift_value = int_value & 0x1f; // only least significant 5 bits
1015 DeferredCode* deferred = 1021 DeferredCode* deferred =
1016 new DeferredInlinedSmiOperation(this, Token::SHR, shift_value, 1022 new DeferredInlinedSmiOperation(this, Token::SHR, shift_value,
1017 overwrite_mode); 1023 overwrite_mode);
1018 frame_->EmitPop(eax); 1024 frame_->EmitPop(eax);
1019 __ test(eax, Immediate(kSmiTagMask)); 1025 __ test(eax, Immediate(kSmiTagMask));
1020 __ mov(ebx, Operand(eax)); 1026 __ mov(ebx, Operand(eax));
1021 __ j(not_zero, deferred->enter(), not_taken); 1027 deferred->enter()->Branch(not_zero, not_taken);
1022 __ sar(ebx, kSmiTagSize); 1028 __ sar(ebx, kSmiTagSize);
1023 __ shr(ebx, shift_value); 1029 __ shr(ebx, shift_value);
1024 __ test(ebx, Immediate(0xc0000000)); 1030 __ test(ebx, Immediate(0xc0000000));
1025 __ j(not_zero, deferred->enter(), not_taken); 1031 deferred->enter()->Branch(not_zero, not_taken);
1026 // tag result and store it in TOS (eax) 1032 // tag result and store it in TOS (eax)
1027 ASSERT(kSmiTagSize == times_2); // adjust code if not the case 1033 ASSERT(kSmiTagSize == times_2); // adjust code if not the case
1028 __ lea(eax, Operand(ebx, ebx, times_1, kSmiTag)); 1034 __ lea(eax, Operand(ebx, ebx, times_1, kSmiTag));
1029 __ bind(deferred->exit()); 1035 deferred->exit()->Bind();
1030 frame_->EmitPush(eax); 1036 frame_->EmitPush(eax);
1031 } 1037 }
1032 break; 1038 break;
1033 } 1039 }
1034 1040
1035 case Token::SHL: { 1041 case Token::SHL: {
1036 if (reversed) { 1042 if (reversed) {
1037 frame_->EmitPop(eax); 1043 frame_->EmitPop(eax);
1038 frame_->EmitPush(Immediate(value)); 1044 frame_->EmitPush(Immediate(value));
1039 frame_->EmitPush(eax); 1045 frame_->EmitPush(eax);
1040 GenericBinaryOperation(op, type, overwrite_mode); 1046 GenericBinaryOperation(op, type, overwrite_mode);
1041 } else { 1047 } else {
1042 int shift_value = int_value & 0x1f; // only least significant 5 bits 1048 int shift_value = int_value & 0x1f; // only least significant 5 bits
1043 DeferredCode* deferred = 1049 DeferredCode* deferred =
1044 new DeferredInlinedSmiOperation(this, Token::SHL, shift_value, 1050 new DeferredInlinedSmiOperation(this, Token::SHL, shift_value,
1045 overwrite_mode); 1051 overwrite_mode);
1046 frame_->EmitPop(eax); 1052 frame_->EmitPop(eax);
1047 __ test(eax, Immediate(kSmiTagMask)); 1053 __ test(eax, Immediate(kSmiTagMask));
1048 __ mov(ebx, Operand(eax)); 1054 __ mov(ebx, Operand(eax));
1049 __ j(not_zero, deferred->enter(), not_taken); 1055 deferred->enter()->Branch(not_zero, not_taken);
1050 __ sar(ebx, kSmiTagSize); 1056 __ sar(ebx, kSmiTagSize);
1051 __ shl(ebx, shift_value); 1057 __ shl(ebx, shift_value);
1052 __ lea(ecx, Operand(ebx, 0x40000000)); 1058 __ lea(ecx, Operand(ebx, 0x40000000));
1053 __ test(ecx, Immediate(0x80000000)); 1059 __ test(ecx, Immediate(0x80000000));
1054 __ j(not_zero, deferred->enter(), not_taken); 1060 deferred->enter()->Branch(not_zero, not_taken);
1055 // tag result and store it in TOS (eax) 1061 // tag result and store it in TOS (eax)
1056 ASSERT(kSmiTagSize == times_2); // adjust code if not the case 1062 ASSERT(kSmiTagSize == times_2); // adjust code if not the case
1057 __ lea(eax, Operand(ebx, ebx, times_1, kSmiTag)); 1063 __ lea(eax, Operand(ebx, ebx, times_1, kSmiTag));
1058 __ bind(deferred->exit()); 1064 deferred->exit()->Bind();
1059 frame_->EmitPush(eax); 1065 frame_->EmitPush(eax);
1060 } 1066 }
1061 break; 1067 break;
1062 } 1068 }
1063 1069
1064 case Token::BIT_OR: 1070 case Token::BIT_OR:
1065 case Token::BIT_XOR: 1071 case Token::BIT_XOR:
1066 case Token::BIT_AND: { 1072 case Token::BIT_AND: {
1067 DeferredCode* deferred = NULL; 1073 DeferredCode* deferred = NULL;
1068 if (!reversed) { 1074 if (!reversed) {
1069 deferred = new DeferredInlinedSmiOperation(this, op, int_value, 1075 deferred = new DeferredInlinedSmiOperation(this, op, int_value,
1070 overwrite_mode); 1076 overwrite_mode);
1071 } else { 1077 } else {
1072 deferred = new DeferredInlinedSmiOperationReversed(this, op, int_value, 1078 deferred = new DeferredInlinedSmiOperationReversed(this, op, int_value,
1073 overwrite_mode); 1079 overwrite_mode);
1074 } 1080 }
1075 frame_->EmitPop(eax); 1081 frame_->EmitPop(eax);
1076 __ test(eax, Immediate(kSmiTagMask)); 1082 __ test(eax, Immediate(kSmiTagMask));
1077 __ j(not_zero, deferred->enter(), not_taken); 1083 deferred->enter()->Branch(not_zero, not_taken);
1078 if (op == Token::BIT_AND) { 1084 if (op == Token::BIT_AND) {
1079 __ and_(Operand(eax), Immediate(value)); 1085 __ and_(Operand(eax), Immediate(value));
1080 } else if (op == Token::BIT_XOR) { 1086 } else if (op == Token::BIT_XOR) {
1081 __ xor_(Operand(eax), Immediate(value)); 1087 __ xor_(Operand(eax), Immediate(value));
1082 } else { 1088 } else {
1083 ASSERT(op == Token::BIT_OR); 1089 ASSERT(op == Token::BIT_OR);
1084 __ or_(Operand(eax), Immediate(value)); 1090 __ or_(Operand(eax), Immediate(value));
1085 } 1091 }
1086 __ bind(deferred->exit()); 1092 deferred->exit()->Bind();
1087 frame_->EmitPush(eax); 1093 frame_->EmitPush(eax);
1088 break; 1094 break;
1089 } 1095 }
1090 1096
1091 default: { 1097 default: {
1092 if (!reversed) { 1098 if (!reversed) {
1093 frame_->EmitPush(Immediate(value)); 1099 frame_->EmitPush(Immediate(value));
1094 } else { 1100 } else {
1095 frame_->EmitPop(eax); 1101 frame_->EmitPop(eax);
1096 frame_->EmitPush(Immediate(value)); 1102 frame_->EmitPush(Immediate(value));
(...skipping 112 matching lines...) Expand 10 before | Expand all | Expand 10 after
1209 // Strict only makes sense for equality comparisons. 1215 // Strict only makes sense for equality comparisons.
1210 ASSERT(!strict || cc == equal); 1216 ASSERT(!strict || cc == equal);
1211 1217
1212 int int_value = Smi::cast(*value)->value(); 1218 int int_value = Smi::cast(*value)->value();
1213 ASSERT(is_intn(int_value, kMaxSmiInlinedBits)); 1219 ASSERT(is_intn(int_value, kMaxSmiInlinedBits));
1214 1220
1215 SmiComparisonDeferred* deferred = 1221 SmiComparisonDeferred* deferred =
1216 new SmiComparisonDeferred(this, cc, strict, int_value); 1222 new SmiComparisonDeferred(this, cc, strict, int_value);
1217 frame_->EmitPop(eax); 1223 frame_->EmitPop(eax);
1218 __ test(eax, Immediate(kSmiTagMask)); 1224 __ test(eax, Immediate(kSmiTagMask));
1219 __ j(not_zero, deferred->enter(), not_taken); 1225 deferred->enter()->Branch(not_zero, not_taken);
1220 // Test smi equality by pointer comparison. 1226 // Test smi equality by pointer comparison.
1221 __ cmp(Operand(eax), Immediate(value)); 1227 __ cmp(Operand(eax), Immediate(value));
1222 __ bind(deferred->exit()); 1228 deferred->exit()->Bind();
1223 cc_reg_ = cc; 1229 cc_reg_ = cc;
1224 } 1230 }
1225 1231
1226 1232
1227 class CallFunctionStub: public CodeStub { 1233 class CallFunctionStub: public CodeStub {
1228 public: 1234 public:
1229 explicit CallFunctionStub(int argc) : argc_(argc) { } 1235 explicit CallFunctionStub(int argc) : argc_(argc) { }
1230 1236
1231 void Generate(MacroAssembler* masm); 1237 void Generate(MacroAssembler* masm);
1232 1238
(...skipping 1463 matching lines...) Expand 10 before | Expand all | Expand 10 after
2696 __ mov(ecx, FieldOperand(ecx, JSFunction::kLiteralsOffset)); 2702 __ mov(ecx, FieldOperand(ecx, JSFunction::kLiteralsOffset));
2697 2703
2698 // Load the literal at the ast saved index. 2704 // Load the literal at the ast saved index.
2699 int literal_offset = 2705 int literal_offset =
2700 FixedArray::kHeaderSize + node->literal_index() * kPointerSize; 2706 FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
2701 __ mov(ebx, FieldOperand(ecx, literal_offset)); 2707 __ mov(ebx, FieldOperand(ecx, literal_offset));
2702 2708
2703 // Check whether we need to materialize the RegExp object. 2709 // Check whether we need to materialize the RegExp object.
2704 // If so, jump to the deferred code. 2710 // If so, jump to the deferred code.
2705 __ cmp(ebx, Factory::undefined_value()); 2711 __ cmp(ebx, Factory::undefined_value());
2706 __ j(equal, deferred->enter(), not_taken); 2712 deferred->enter()->Branch(equal, not_taken);
2707 __ bind(deferred->exit()); 2713 deferred->exit()->Bind();
2708 2714
2709 // Push the literal. 2715 // Push the literal.
2710 frame_->EmitPush(ebx); 2716 frame_->EmitPush(ebx);
2711 } 2717 }
2712 2718
2713 2719
2714 // This deferred code stub will be used for creating the boilerplate 2720 // This deferred code stub will be used for creating the boilerplate
2715 // by calling Runtime_CreateObjectLiteral. 2721 // by calling Runtime_CreateObjectLiteral.
2716 // Each created boilerplate is stored in the JSFunction and they are 2722 // Each created boilerplate is stored in the JSFunction and they are
2717 // therefore context dependent. 2723 // therefore context dependent.
(...skipping 39 matching lines...) Expand 10 before | Expand all | Expand 10 after
2757 __ mov(ecx, FieldOperand(ecx, JSFunction::kLiteralsOffset)); 2763 __ mov(ecx, FieldOperand(ecx, JSFunction::kLiteralsOffset));
2758 2764
2759 // Load the literal at the ast saved index. 2765 // Load the literal at the ast saved index.
2760 int literal_offset = 2766 int literal_offset =
2761 FixedArray::kHeaderSize + node->literal_index() * kPointerSize; 2767 FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
2762 __ mov(ebx, FieldOperand(ecx, literal_offset)); 2768 __ mov(ebx, FieldOperand(ecx, literal_offset));
2763 2769
2764 // Check whether we need to materialize the object literal boilerplate. 2770 // Check whether we need to materialize the object literal boilerplate.
2765 // If so, jump to the deferred code. 2771 // If so, jump to the deferred code.
2766 __ cmp(ebx, Factory::undefined_value()); 2772 __ cmp(ebx, Factory::undefined_value());
2767 __ j(equal, deferred->enter(), not_taken); 2773 deferred->enter()->Branch(equal, not_taken);
2768 __ bind(deferred->exit()); 2774 deferred->exit()->Bind();
2769 2775
2770 // Push the literal. 2776 // Push the literal.
2771 frame_->EmitPush(ebx); 2777 frame_->EmitPush(ebx);
2772 // Clone the boilerplate object. 2778 // Clone the boilerplate object.
2773 frame_->CallRuntime(Runtime::kCloneObjectLiteralBoilerplate, 1); 2779 frame_->CallRuntime(Runtime::kCloneObjectLiteralBoilerplate, 1);
2774 // Push the new cloned literal object as the result. 2780 // Push the new cloned literal object as the result.
2775 frame_->EmitPush(eax); 2781 frame_->EmitPush(eax);
2776 2782
2777 2783
2778 for (int i = 0; i < node->properties()->length(); i++) { 2784 for (int i = 0; i < node->properties()->length(); i++) {
(...skipping 981 matching lines...) Expand 10 before | Expand all | Expand 10 after
3760 // Perform optimistic increment/decrement. 3766 // Perform optimistic increment/decrement.
3761 if (is_increment) { 3767 if (is_increment) {
3762 __ add(Operand(eax), Immediate(Smi::FromInt(1))); 3768 __ add(Operand(eax), Immediate(Smi::FromInt(1)));
3763 } else { 3769 } else {
3764 __ sub(Operand(eax), Immediate(Smi::FromInt(1))); 3770 __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
3765 } 3771 }
3766 3772
3767 // If the count operation didn't overflow and the result is a 3773 // If the count operation didn't overflow and the result is a
3768 // valid smi, we're done. Otherwise, we jump to the deferred 3774 // valid smi, we're done. Otherwise, we jump to the deferred
3769 // slow-case code. 3775 // slow-case code.
3770 __ j(overflow, deferred->enter(), not_taken); 3776 deferred->enter()->Branch(overflow, not_taken);
3771 __ test(eax, Immediate(kSmiTagMask)); 3777 __ test(eax, Immediate(kSmiTagMask));
3772 __ j(not_zero, deferred->enter(), not_taken); 3778 deferred->enter()->Branch(not_zero, not_taken);
3773 3779
3774 // Store the new value in the target if not const. 3780 // Store the new value in the target if not const.
3775 __ bind(deferred->exit()); 3781 deferred->exit()->Bind();
3776 frame_->EmitPush(eax); // Push the new value to TOS 3782 frame_->EmitPush(eax); // Push the new value to TOS
3777 if (!is_const) target.SetValue(NOT_CONST_INIT); 3783 if (!is_const) target.SetValue(NOT_CONST_INIT);
3778 } 3784 }
3779 3785
3780 // Postfix: Discard the new value and use the old. 3786 // Postfix: Discard the new value and use the old.
3781 if (is_postfix) { 3787 if (is_postfix) {
3782 frame_->Drop(); 3788 frame_->Drop();
3783 } 3789 }
3784 } 3790 }
3785 3791
(...skipping 576 matching lines...) Expand 10 before | Expand all | Expand 10 after
4362 // Return 1/0 for true/false in eax. 4368 // Return 1/0 for true/false in eax.
4363 __ bind(&true_result); 4369 __ bind(&true_result);
4364 __ mov(eax, 1); 4370 __ mov(eax, 1);
4365 __ ret(1 * kPointerSize); 4371 __ ret(1 * kPointerSize);
4366 __ bind(&false_result); 4372 __ bind(&false_result);
4367 __ mov(eax, 0); 4373 __ mov(eax, 0);
4368 __ ret(1 * kPointerSize); 4374 __ ret(1 * kPointerSize);
4369 } 4375 }
4370 4376
4371 4377
4378 #undef __
4379 #define __ masm_->
4380
4381 // This function's implementation is a copy of
4382 // GenericBinaryOpStub::GenerateSmiCode, with the slow-case label replaced
4383 // with the deferred code's entry target. The duplicated code is a
4384 // temporary intermediate stage on the way to using the virtual frame in
4385 // more places.
4386 void DeferredInlineBinaryOperation::GenerateInlineCode() {
Kevin Millikin (Chromium) 2008/12/10 12:09:50 ...is defined right here.
4387 // Perform fast-case smi code for the operation (eax <op> ebx) and
4388 // leave result in register eax.
4389
4390 // Prepare the smi check of both operands by or'ing them together
4391 // before checking against the smi mask.
4392 __ mov(ecx, Operand(ebx));
4393 __ or_(ecx, Operand(eax));
4394
4395 switch (op_) {
4396 case Token::ADD:
4397 __ add(eax, Operand(ebx)); // add optimistically
4398 enter()->Branch(overflow, not_taken);
4399 break;
4400
4401 case Token::SUB:
4402 __ sub(eax, Operand(ebx)); // subtract optimistically
4403 enter()->Branch(overflow, not_taken);
4404 break;
4405
4406 case Token::DIV:
4407 case Token::MOD:
4408 // Sign extend eax into edx:eax.
4409 __ cdq();
4410 // Check for 0 divisor.
4411 __ test(ebx, Operand(ebx));
4412 enter()->Branch(zero, not_taken);
4413 break;
4414
4415 default:
4416 // Fall-through to smi check.
4417 break;
4418 }
4419
4420 // Perform the actual smi check.
4421 ASSERT(kSmiTag == 0); // adjust zero check if not the case
4422 __ test(ecx, Immediate(kSmiTagMask));
4423 enter()->Branch(not_zero, not_taken);
4424
4425 switch (op_) {
4426 case Token::ADD:
4427 case Token::SUB:
4428 // Do nothing here.
4429 break;
4430
4431 case Token::MUL:
4432 // If the smi tag is 0 we can just leave the tag on one operand.
4433 ASSERT(kSmiTag == 0); // adjust code below if not the case
4434 // Remove tag from one of the operands (but keep sign).
4435 __ sar(eax, kSmiTagSize);
4436 // Do multiplication.
4437 __ imul(eax, Operand(ebx)); // multiplication of smis; result in eax
4438 // Go slow on overflows.
4439 enter()->Branch(overflow, not_taken);
4440 // Check for negative zero result. Use ecx = x | y.
4441 __ NegativeZeroTest(generator(), eax, ecx, enter());
4442 break;
4443
4444 case Token::DIV:
4445 // Divide edx:eax by ebx.
4446 __ idiv(ebx);
4447 // Check for the corner case of dividing the most negative smi
4448 // by -1. We cannot use the overflow flag, since it is not set
4449 // by idiv instruction.
4450 ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
4451 __ cmp(eax, 0x40000000);
4452 enter()->Branch(equal);
4453 // Check for negative zero result. Use ecx = x | y.
4454 __ NegativeZeroTest(generator(), eax, ecx, enter());
4455 // Check that the remainder is zero.
4456 __ test(edx, Operand(edx));
4457 enter()->Branch(not_zero);
4458 // Tag the result and store it in register eax.
4459 ASSERT(kSmiTagSize == times_2); // adjust code if not the case
4460 __ lea(eax, Operand(eax, eax, times_1, kSmiTag));
4461 break;
4462
4463 case Token::MOD:
4464 // Divide edx:eax by ebx.
4465 __ idiv(ebx);
4466 // Check for negative zero result. Use ecx = x | y.
4467 __ NegativeZeroTest(generator(), edx, ecx, enter());
4468 // Move remainder to register eax.
4469 __ mov(eax, Operand(edx));
4470 break;
4471
4472 case Token::BIT_OR:
4473 __ or_(eax, Operand(ebx));
4474 break;
4475
4476 case Token::BIT_AND:
4477 __ and_(eax, Operand(ebx));
4478 break;
4479
4480 case Token::BIT_XOR:
4481 __ xor_(eax, Operand(ebx));
4482 break;
4483
4484 case Token::SHL:
4485 case Token::SHR:
4486 case Token::SAR:
4487 // Move the second operand into register ecx.
4488 __ mov(ecx, Operand(ebx));
4489 // Remove tags from operands (but keep sign).
4490 __ sar(eax, kSmiTagSize);
4491 __ sar(ecx, kSmiTagSize);
4492 // Perform the operation.
4493 switch (op_) {
4494 case Token::SAR:
4495 __ sar(eax);
4496 // No checks of result necessary
4497 break;
4498 case Token::SHR:
4499 __ shr(eax);
4500 // Check that the *unsigned* result fits in a smi.
4501 // Neither of the two high-order bits can be set:
4502 // - 0x80000000: high bit would be lost when smi tagging.
4503 // - 0x40000000: this number would convert to negative when
4504 // Smi tagging these two cases can only happen with shifts
4505 // by 0 or 1 when handed a valid smi.
4506 __ test(eax, Immediate(0xc0000000));
4507 enter()->Branch(not_zero, not_taken);
4508 break;
4509 case Token::SHL:
4510 __ shl(eax);
4511 // Check that the *signed* result fits in a smi.
4512 __ lea(ecx, Operand(eax, 0x40000000));
4513 __ test(ecx, Immediate(0x80000000));
4514 enter()->Branch(not_zero, not_taken);
4515 break;
4516 default:
4517 UNREACHABLE();
4518 }
4519 // Tag the result and store it in register eax.
4520 ASSERT(kSmiTagSize == times_2); // adjust code if not the case
4521 __ lea(eax, Operand(eax, eax, times_1, kSmiTag));
4522 break;
4523
4524 default:
4525 UNREACHABLE();
4526 break;
4527 }
4528 }
4529
4530
4531 #undef __
4532 #define __ masm->
4533
4372 void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) { 4534 void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
4373 // Perform fast-case smi code for the operation (eax <op> ebx) and 4535 // Perform fast-case smi code for the operation (eax <op> ebx) and
4374 // leave result in register eax. 4536 // leave result in register eax.
4375 4537
4376 // Prepare the smi check of both operands by or'ing them together 4538 // Prepare the smi check of both operands by or'ing them together
4377 // before checking against the smi mask. 4539 // before checking against the smi mask.
4378 __ mov(ecx, Operand(ebx)); 4540 __ mov(ecx, Operand(ebx));
4379 __ or_(ecx, Operand(eax)); 4541 __ or_(ecx, Operand(eax));
4380 4542
4381 switch (op_) { 4543 switch (op_) {
(...skipping 1148 matching lines...) Expand 10 before | Expand all | Expand 10 after
5530 5692
5531 // Slow-case: Go through the JavaScript implementation. 5693 // Slow-case: Go through the JavaScript implementation.
5532 __ bind(&slow); 5694 __ bind(&slow);
5533 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION); 5695 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
5534 } 5696 }
5535 5697
5536 5698
5537 #undef __ 5699 #undef __
5538 5700
5539 } } // namespace v8::internal 5701 } } // namespace v8::internal
OLDNEW
« no previous file with comments | « src/codegen.cc ('k') | src/macro-assembler-ia32.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698