| OLD | NEW |
| 1 // Copyright 2006-2009 the V8 project authors. All rights reserved. | 1 // Copyright 2006-2009 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 681 matching lines...) |
| 692 // Allocate a heap number in new space with undefined value. | 692 // Allocate a heap number in new space with undefined value. |
| 693 // Returns tagged pointer in eax, or jumps to need_gc if new space is full. | 693 // Returns tagged pointer in eax, or jumps to need_gc if new space is full. |
| 694 static void AllocateHeapNumber(MacroAssembler* masm, | 694 static void AllocateHeapNumber(MacroAssembler* masm, |
| 695 Label* need_gc, | 695 Label* need_gc, |
| 696 Register scratch1, | 696 Register scratch1, |
| 697 Register scratch2); | 697 Register scratch2); |
| 698 }; | 698 }; |
| 699 | 699 |
| 700 | 700 |
| 701 // Flag that indicates whether or not the code that handles smi arguments | 701 // Flag that indicates whether or not the code that handles smi arguments |
| 702 // should be inlined, placed in the stub, or omitted entirely. | 702 // should be placed in the stub, inlined, or omitted entirely. |
| 703 enum GenericBinaryFlags { | 703 enum GenericBinaryFlags { |
| 704 SMI_CODE_IN_STUB, | 704 SMI_CODE_IN_STUB, |
| 705 SMI_CODE_INLINED, | 705 SMI_CODE_INLINED |
| 706 // It is known at compile time that at least one argument is not a smi. | |
| 707 NO_SMI_CODE | |
| 708 }; | 706 }; |
| 709 | 707 |
| 710 | 708 |
| 711 class GenericBinaryOpStub: public CodeStub { | 709 class GenericBinaryOpStub: public CodeStub { |
| 712 public: | 710 public: |
| 713 GenericBinaryOpStub(Token::Value op, | 711 GenericBinaryOpStub(Token::Value op, |
| 714 OverwriteMode mode, | 712 OverwriteMode mode, |
| 715 GenericBinaryFlags flags) | 713 GenericBinaryFlags flags) |
| 716 : op_(op), mode_(mode), flags_(flags) { } | 714 : op_(op), mode_(mode), flags_(flags) { |
| 715 ASSERT(OpBits::is_valid(Token::NUM_TOKENS)); |
| 716 } |
| 717 | 717 |
| 718 void GenerateSmiCode(MacroAssembler* masm, Label* slow); | 718 void GenerateSmiCode(MacroAssembler* masm, Label* slow); |
| 719 | 719 |
| 720 private: | 720 private: |
| 721 Token::Value op_; | 721 Token::Value op_; |
| 722 OverwriteMode mode_; | 722 OverwriteMode mode_; |
| 723 GenericBinaryFlags flags_; | 723 GenericBinaryFlags flags_; |
| 724 | 724 |
| 725 const char* GetName(); | 725 const char* GetName(); |
| 726 | 726 |
| 727 #ifdef DEBUG | 727 #ifdef DEBUG |
| 728 void Print() { | 728 void Print() { |
| 729 PrintF("GenericBinaryOpStub (op %s), (mode %d, flags %d)\n", | 729 PrintF("GenericBinaryOpStub (op %s), (mode %d, flags %d)\n", |
| 730 Token::String(op_), | 730 Token::String(op_), |
| 731 static_cast<int>(mode_), | 731 static_cast<int>(mode_), |
| 732 static_cast<int>(flags_)); | 732 static_cast<int>(flags_)); |
| 733 } | 733 } |
| 734 #endif | 734 #endif |
| 735 | 735 |
| 736 // Minor key encoding in 16 bits FOOOOOOOOOOOOOMM. | 736 // Minor key encoding in 16 bits FOOOOOOOOOOOOOMM. |
| 737 class ModeBits: public BitField<OverwriteMode, 0, 2> {}; | 737 class ModeBits: public BitField<OverwriteMode, 0, 2> {}; |
| 738 class OpBits: public BitField<Token::Value, 2, 12> {}; | 738 class OpBits: public BitField<Token::Value, 2, 13> {}; |
| 739 class FlagBits: public BitField<GenericBinaryFlags, 14, 2> {}; | 739 class FlagBits: public BitField<GenericBinaryFlags, 15, 1> {}; |
| 740 | 740 |
| 741 Major MajorKey() { return GenericBinaryOp; } | 741 Major MajorKey() { return GenericBinaryOp; } |
| 742 int MinorKey() { | 742 int MinorKey() { |
| 743 // Encode the parameters in a unique 16 bit value. | 743 // Encode the parameters in a unique 16 bit value. |
| 744 return OpBits::encode(op_) | 744 return OpBits::encode(op_) |
| 745 | ModeBits::encode(mode_) | 745 | ModeBits::encode(mode_) |
| 746 | FlagBits::encode(flags_); | 746 | FlagBits::encode(flags_); |
| 747 } | 747 } |
| 748 void Generate(MacroAssembler* masm); | 748 void Generate(MacroAssembler* masm); |
| 749 }; | 749 }; |
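Note on the new minor-key layout (a reviewer sketch, not part of the patch): the NEW side widens OpBits to 13 bits and shrinks FlagBits to a single bit at position 15, so the 16-bit key reads flag(1) | op(13) | mode(2), matching the FOOOOOOOOOOOOOMM comment, and the added ASSERT checks that every Token::Value fits in the 13 op bits. A self-contained illustration of this kind of bit-field packing, using a simplified stand-in for V8's BitField template and made-up enum values:

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // Simplified stand-in for V8's BitField<Type, shift, size> helper.
    template <class T, int shift, int size>
    struct BitField {
      static bool is_valid(T value) {
        return (static_cast<uint32_t>(value) & ~((1u << size) - 1)) == 0;
      }
      static uint32_t encode(T value) {
        assert(is_valid(value));
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t field) {
        return static_cast<T>((field >> shift) & ((1u << size) - 1));
      }
    };

    // Made-up values standing in for OverwriteMode, Token::Value and
    // GenericBinaryFlags.
    enum Mode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };
    enum Op { ADD = 5, SUB = 6 };
    enum Flags { IN_STUB = 0, INLINED = 1 };

    typedef BitField<Mode, 0, 2>   ModeBits;  // bits 0-1:  MM
    typedef BitField<Op, 2, 13>    OpBits;    // bits 2-14: OOOOOOOOOOOOO
    typedef BitField<Flags, 15, 1> FlagBits;  // bit 15:    F

    int main() {
      uint32_t key = OpBits::encode(SUB) |
                     ModeBits::encode(OVERWRITE_LEFT) |
                     FlagBits::encode(INLINED);
      std::printf("key=0x%04x op=%d mode=%d flag=%d\n", key,
                  static_cast<int>(OpBits::decode(key)),
                  static_cast<int>(ModeBits::decode(key)),
                  static_cast<int>(FlagBits::decode(key)));
      return 0;
    }

With only two GenericBinaryFlags values left in the enum, one flag bit is enough, which is what frees the extra bit for OpBits.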
| (...skipping 88 matching lines...) |
| 838 : SMI_CODE_IN_STUB; | 838 : SMI_CODE_IN_STUB; |
| 839 break; | 839 break; |
| 840 } | 840 } |
| 841 | 841 |
| 842 Result right = frame_->Pop(); | 842 Result right = frame_->Pop(); |
| 843 Result left = frame_->Pop(); | 843 Result left = frame_->Pop(); |
| 844 bool left_is_smi = left.is_constant() && left.handle()->IsSmi(); | 844 bool left_is_smi = left.is_constant() && left.handle()->IsSmi(); |
| 845 bool left_is_non_smi = left.is_constant() && !left.handle()->IsSmi(); | 845 bool left_is_non_smi = left.is_constant() && !left.handle()->IsSmi(); |
| 846 bool right_is_smi = right.is_constant() && right.handle()->IsSmi(); | 846 bool right_is_smi = right.is_constant() && right.handle()->IsSmi(); |
| 847 bool right_is_non_smi = right.is_constant() && !right.handle()->IsSmi(); | 847 bool right_is_non_smi = right.is_constant() && !right.handle()->IsSmi(); |
| 848 bool generate_no_smi_code = false; // No smi code at all, inline or in stub. |
| 848 | 849 |
| 849 if (left_is_smi && right_is_smi) { | 850 if (left_is_smi && right_is_smi) { |
| 850 // Compute the result, and return that as a constant on the frame. | 851 // Compute the constant result at compile time, and leave it on the frame. |
| 851 int left_int = Smi::cast(*left.handle())->value(); | 852 int left_int = Smi::cast(*left.handle())->value(); |
| 852 int right_int = Smi::cast(*right.handle())->value(); | 853 int right_int = Smi::cast(*right.handle())->value(); |
| 853 if (FoldConstantSmis(op, left_int, right_int)) return; | 854 if (FoldConstantSmis(op, left_int, right_int)) return; |
| 854 } | 855 } |
| 855 | 856 |
| 856 if (left_is_non_smi || right_is_non_smi) { | 857 if (left_is_non_smi || right_is_non_smi) { |
| 857 // Set flag so that we go straight to the slow case, with no smi code. | 858 // Set flag so that we go straight to the slow case, with no smi code. |
| 858 flags = NO_SMI_CODE; | 859 generate_no_smi_code = true; |
| 859 } else if (right_is_smi) { | 860 } else if (right_is_smi) { |
| 860 ConstantSmiBinaryOperation(op, &left, right.handle(), type, | 861 ConstantSmiBinaryOperation(op, &left, right.handle(), |
| 861 false, overwrite_mode); | 862 type, false, overwrite_mode); |
| 862 return; | 863 return; |
| 863 } else if (left_is_smi) { | 864 } else if (left_is_smi) { |
| 864 ConstantSmiBinaryOperation(op, &right, left.handle(), type, | 865 ConstantSmiBinaryOperation(op, &right, left.handle(), |
| 865 true, overwrite_mode); | 866 type, true, overwrite_mode); |
| 866 return; | 867 return; |
| 867 } | 868 } |
| 868 | 869 |
| 869 if (flags == SMI_CODE_INLINED) { | 870 if (flags == SMI_CODE_INLINED && !generate_no_smi_code) { |
| 870 LikelySmiBinaryOperation(op, &left, &right, overwrite_mode); | 871 LikelySmiBinaryOperation(op, &left, &right, overwrite_mode); |
| 871 } else { | 872 } else { |
| 872 frame_->Push(&left); | 873 frame_->Push(&left); |
| 873 frame_->Push(&right); | 874 frame_->Push(&right); |
| 874 // If we know the arguments aren't smis, use the binary operation stub | 875 // If we know the arguments aren't smis, use the binary operation stub |
| 875 // that does not check for the fast smi case. | 876 // that does not check for the fast smi case. |
| 876 // The same stub is used for NO_SMI_CODE and SMI_CODE_INLINED. | 877 // The same stub is used for NO_SMI_CODE and SMI_CODE_INLINED. |
| 877 if (flags == NO_SMI_CODE) { | 878 if (generate_no_smi_code) { |
| 878 flags = SMI_CODE_INLINED; | 879 flags = SMI_CODE_INLINED; |
| 879 } | 880 } |
| 880 GenericBinaryOpStub stub(op, overwrite_mode, flags); | 881 GenericBinaryOpStub stub(op, overwrite_mode, flags); |
| 881 Result answer = frame_->CallStub(&stub, 2); | 882 Result answer = frame_->CallStub(&stub, 2); |
| 882 frame_->Push(&answer); | 883 frame_->Push(&answer); |
| 883 } | 884 } |
| 884 } | 885 } |
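For reference, an illustrative sketch of what the constant-folding branch amounts to (not V8's actual FoldConstantSmis): when both operands are compile-time smi constants, the code generator can emit no arithmetic at all and simply push the folded result, provided it still fits the 31-bit smi payload used on ia32.

    #include <cstdio>

    // Illustrative only: fold "left + right" at code-generation time when the
    // result still fits the assumed 31-bit signed smi range.
    static const int kSmiMinValue = -(1 << 30);
    static const int kSmiMaxValue = (1 << 30) - 1;

    static bool FoldSmiAdd(int left, int right, int* result) {
      long long folded = static_cast<long long>(left) + right;
      if (folded < kSmiMinValue || folded > kSmiMaxValue) return false;
      *result = static_cast<int>(folded);
      return true;  // caller pushes the constant and emits no code
    }

    int main() {
      int r;
      if (FoldSmiAdd(3, 4, &r)) std::printf("folded to %d\n", r);  // folded to 7
      if (!FoldSmiAdd(kSmiMaxValue, 1, &r)) std::printf("out of smi range\n");
      return 0;
    }

A fold that leaves the smi range falls through to the normal path, which is why FoldConstantSmis returns a bool and the caller only returns early on success.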
| 885 | 886 |
| 886 | 887 |
| 887 bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) { | 888 bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) { |
| (...skipping 74 matching lines...) |
| 962 } | 963 } |
| 963 frame_->Push(Handle<Object>(answer_object)); | 964 frame_->Push(Handle<Object>(answer_object)); |
| 964 return true; | 965 return true; |
| 965 } | 966 } |
| 966 | 967 |
| 967 | 968 |
| 968 void CodeGenerator::LikelySmiBinaryOperation(Token::Value op, | 969 void CodeGenerator::LikelySmiBinaryOperation(Token::Value op, |
| 969 Result* left, | 970 Result* left, |
| 970 Result* right, | 971 Result* right, |
| 971 OverwriteMode overwrite_mode) { | 972 OverwriteMode overwrite_mode) { |
| 972 // Create a new deferred code object that calls GenericBinaryOpStub | 973 // Implements a binary operation using a deferred code object |
| 973 // in the slow case. | 974 // and some inline code to operate on smis quickly. |
| 974 DeferredInlineBinaryOperation* deferred = | 975 DeferredInlineBinaryOperation* deferred = |
| 975 new DeferredInlineBinaryOperation(this, op, overwrite_mode, | 976 new DeferredInlineBinaryOperation(this, op, overwrite_mode, |
| 976 SMI_CODE_INLINED); | 977 SMI_CODE_INLINED); |
| 977 // Generate the inline code that handles some smi operations, | 978 // Generate the inline code that handles some smi operations, |
| 978 // and jumps to the deferred code for everything else. | 979 // and jumps to the deferred code for everything else. |
| 979 Result answer = deferred->GenerateInlineCode(left, right); | 980 Result answer = deferred->GenerateInlineCode(left, right); |
| 980 deferred->BindExit(&answer); | 981 deferred->BindExit(&answer); |
| 981 frame_->Push(&answer); | 982 frame_->Push(&answer); |
| 982 } | 983 } |
| 983 | 984 |
| (...skipping 242 matching lines...) |
| 1226 deferred->enter()->Branch(overflow, operand, not_taken); | 1227 deferred->enter()->Branch(overflow, operand, not_taken); |
| 1227 __ test(operand->reg(), Immediate(kSmiTagMask)); | 1228 __ test(operand->reg(), Immediate(kSmiTagMask)); |
| 1228 deferred->enter()->Branch(not_zero, operand, not_taken); | 1229 deferred->enter()->Branch(not_zero, operand, not_taken); |
| 1229 deferred->BindExit(operand); | 1230 deferred->BindExit(operand); |
| 1230 frame_->Push(operand); | 1231 frame_->Push(operand); |
| 1231 break; | 1232 break; |
| 1232 } | 1233 } |
| 1233 | 1234 |
| 1234 case Token::SUB: { | 1235 case Token::SUB: { |
| 1235 DeferredCode* deferred = NULL; | 1236 DeferredCode* deferred = NULL; |
| 1236 Result answer(this); // Only allocated a new register if reversed. | 1237 Result answer(this); // Only allocate a new register if reversed. |
| 1237 if (reversed) { | 1238 if (reversed) { |
| 1238 answer = allocator()->Allocate(); | 1239 answer = allocator()->Allocate(); |
| 1239 ASSERT(answer.is_valid()); | 1240 ASSERT(answer.is_valid()); |
| 1240 deferred = new DeferredInlineSmiSubReversed(this, | 1241 deferred = new DeferredInlineSmiSubReversed(this, |
| 1241 smi_value, | 1242 smi_value, |
| 1242 overwrite_mode); | 1243 overwrite_mode); |
| 1243 __ Set(answer.reg(), Immediate(value)); | 1244 __ Set(answer.reg(), Immediate(value)); |
| 1244 if (operand->is_register()) { | 1245 if (operand->is_register()) { |
| 1245 __ sub(answer.reg(), Operand(operand->reg())); | 1246 __ sub(answer.reg(), Operand(operand->reg())); |
| 1246 } else { | 1247 } else { |
| (...skipping 4316 matching lines...) |
| 5563 | 5564 |
| 5564 left->ToRegister(); | 5565 left->ToRegister(); |
| 5565 right->ToRegister(); | 5566 right->ToRegister(); |
| 5566 // A newly allocated register answer is used to hold the answer. | 5567 // A newly allocated register answer is used to hold the answer. |
| 5567 // The registers containing left and right are not modified in | 5568 // The registers containing left and right are not modified in |
| 5568 // most cases, so they usually don't need to be spilled in the fast case. | 5569 // most cases, so they usually don't need to be spilled in the fast case. |
| 5569 Result answer = generator()->allocator()->Allocate(); | 5570 Result answer = generator()->allocator()->Allocate(); |
| 5570 | 5571 |
| 5571 ASSERT(answer.is_valid()); | 5572 ASSERT(answer.is_valid()); |
| 5572 // Perform the smi check. | 5573 // Perform the smi check. |
| 5573 __ mov(answer.reg(), left->reg()); | 5574 if (left->reg().is(right->reg())) { |
| 5574 __ or_(answer.reg(), Operand(right->reg())); | 5575 __ test(left->reg(), Immediate(kSmiTagMask)); |
| 5575 ASSERT(kSmiTag == 0); // adjust zero check if not the case | 5576 } else { |
| 5576 __ test(answer.reg(), Immediate(kSmiTagMask)); | 5577 __ mov(answer.reg(), left->reg()); |
| 5578 __ or_(answer.reg(), Operand(right->reg())); |
| 5579 ASSERT(kSmiTag == 0); // adjust zero check if not the case |
| 5580 __ test(answer.reg(), Immediate(kSmiTagMask)); |
| 5581 } |
| 5577 enter()->Branch(not_zero, left, right, not_taken); | 5582 enter()->Branch(not_zero, left, right, not_taken); |
| 5578 | 5583 |
| 5579 // All operations start by copying the left argument into answer. | 5584 // All operations start by copying the left argument into answer. |
| 5580 __ mov(answer.reg(), left->reg()); | 5585 __ mov(answer.reg(), left->reg()); |
| 5581 switch (op_) { | 5586 switch (op_) { |
| 5582 case Token::ADD: | 5587 case Token::ADD: |
| 5583 __ add(answer.reg(), Operand(right->reg())); // add optimistically | 5588 __ add(answer.reg(), Operand(right->reg())); // add optimistically |
| 5584 enter()->Branch(overflow, left, right, not_taken); | 5589 enter()->Branch(overflow, left, right, not_taken); |
| 5585 break; | 5590 break; |
| 5586 | 5591 |
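The smi check above relies on the ia32 smi representation: the 31-bit payload is shifted left by one, so a smi's low tag bit is zero, and OR-ing the two operands lets a single test against kSmiTagMask reject the case where either one is a heap object; the NEW side skips the OR when left and right are the same register, since a single test suffices there. A minimal stand-alone sketch of the idea (illustrative, not V8 code):

    #include <cstdint>
    #include <cstdio>

    // Assumed ia32-style smi tagging: payload shifted left by one, tag bit 0.
    static const intptr_t kSmiTagMaskSketch = 1;

    static intptr_t TagSmi(int value) { return static_cast<intptr_t>(value) << 1; }
    static int UntagSmi(intptr_t tagged) { return static_cast<int>(tagged >> 1); }

    // Equivalent of: mov answer, left; or answer, right; test answer, kSmiTagMask.
    static bool BothSmis(intptr_t a, intptr_t b) {
      return ((a | b) & kSmiTagMaskSketch) == 0;
    }

    int main() {
      intptr_t left = TagSmi(20);
      intptr_t right = TagSmi(22);
      if (BothSmis(left, right)) {
        // The tag bit is 0, so tagged addition works directly on tagged values;
        // the real stub branches to the deferred slow case on signed overflow.
        intptr_t sum = left + right;
        std::printf("%d\n", UntagSmi(sum));  // prints 42
      }
      return 0;
    }

Because the tag bit is zero, tagged addition and subtraction operate directly on the tagged values, which is why the ADD case adds optimistically and only bails out to the deferred code on overflow.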
| (...skipping 1481 matching lines...) |
| 7068 | 7073 |
| 7069 // Slow-case: Go through the JavaScript implementation. | 7074 // Slow-case: Go through the JavaScript implementation. |
| 7070 __ bind(&slow); | 7075 __ bind(&slow); |
| 7071 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION); | 7076 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION); |
| 7072 } | 7077 } |
| 7073 | 7078 |
| 7074 | 7079 |
| 7075 #undef __ | 7080 #undef __ |
| 7076 | 7081 |
| 7077 } } // namespace v8::internal | 7082 } } // namespace v8::internal |