Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2006-2008 the V8 project authors. All rights reserved. | 1 // Copyright 2006-2008 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 163 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 174 // Allocate the arguments object and copy the parameters into it. | 174 // Allocate the arguments object and copy the parameters into it. |
| 175 if (scope_->arguments() != NULL) { | 175 if (scope_->arguments() != NULL) { |
| 176 ASSERT(scope_->arguments_shadow() != NULL); | 176 ASSERT(scope_->arguments_shadow() != NULL); |
| 177 Comment cmnt(masm_, "[ Allocate arguments object"); | 177 Comment cmnt(masm_, "[ Allocate arguments object"); |
| 178 ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT); | 178 ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT); |
| 179 VirtualFrame::SpilledScope spilled_scope(this); | 179 VirtualFrame::SpilledScope spilled_scope(this); |
| 180 __ lea(eax, frame_->Receiver()); | 180 __ lea(eax, frame_->Receiver()); |
| 181 frame_->EmitPush(frame_->Function()); | 181 frame_->EmitPush(frame_->Function()); |
| 182 frame_->EmitPush(eax); | 182 frame_->EmitPush(eax); |
| 183 frame_->EmitPush(Immediate(Smi::FromInt(scope_->num_parameters()))); | 183 frame_->EmitPush(Immediate(Smi::FromInt(scope_->num_parameters()))); |
| 184 frame_->CallStub(&stub, 3); | 184 Result answer = frame_->CallStub(&stub, 3); |
| 185 frame_->Push(eax); | 185 frame_->Push(&answer); |
| 186 } | 186 } |
| 187 | 187 |
| 188 if (scope_->num_heap_slots() > 0) { | 188 if (scope_->num_heap_slots() > 0) { |
| 189 Comment cmnt(masm_, "[ allocate local context"); | 189 Comment cmnt(masm_, "[ allocate local context"); |
| 190 // Allocate local context. | 190 // Allocate local context. |
| 191 // Get outer context and create a new context based on it. | 191 // Get outer context and create a new context based on it. |
| 192 VirtualFrame::SpilledScope spilled_scope(this); | 192 VirtualFrame::SpilledScope spilled_scope(this); |
| 193 frame_->EmitPush(frame_->Function()); | 193 frame_->EmitPush(frame_->Function()); |
| 194 frame_->CallRuntime(Runtime::kNewContext, 1); // eax holds the result | 194 frame_->CallRuntime(Runtime::kNewContext, 1); // eax holds the result |
| 195 | 195 |
| (...skipping 438 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 634 // Smi => false iff zero. | 634 // Smi => false iff zero. |
| 635 ASSERT(kSmiTag == 0); | 635 ASSERT(kSmiTag == 0); |
| 636 __ test(value.reg(), Operand(value.reg())); | 636 __ test(value.reg(), Operand(value.reg())); |
| 637 false_target->Branch(zero); | 637 false_target->Branch(zero); |
| 638 __ test(value.reg(), Immediate(kSmiTagMask)); | 638 __ test(value.reg(), Immediate(kSmiTagMask)); |
| 639 true_target->Branch(zero); | 639 true_target->Branch(zero); |
| 640 | 640 |
| 641 // Call the stub for all other cases. | 641 // Call the stub for all other cases. |
| 642 frame_->Push(&value); // Undo the Pop() from above. | 642 frame_->Push(&value); // Undo the Pop() from above. |
| 643 ToBooleanStub stub; | 643 ToBooleanStub stub; |
| 644 frame_->CallStub(&stub, 1); | 644 Result temp = frame_->CallStub(&stub, 1); |
| 645 // Convert the result (eax) to condition code. | 645 // Convert the result to a condition code. |
| 646 Result temp = allocator_->Allocate(eax); | 646 __ test(temp.reg(), Operand(temp.reg())); |
| 647 ASSERT(temp.is_valid()); | |
| 648 __ test(eax, Operand(eax)); | |
| 649 | 647 |
| 650 ASSERT(not_equal == not_zero); | 648 ASSERT(not_equal == not_zero); |
| 651 cc_reg_ = not_equal; | 649 cc_reg_ = not_equal; |
| 652 } | 650 } |
| 653 | 651 |
| 654 | 652 |
| 655 class FloatingPointHelper : public AllStatic { | 653 class FloatingPointHelper : public AllStatic { |
| 656 public: | 654 public: |
| 657 // Code pattern for loading floating point values. Input values must | 655 // Code pattern for loading floating point values. Input values must |
| 658 // be either smi or heap number objects (fp values). Requirements: | 656 // be either smi or heap number objects (fp values). Requirements: |
| (...skipping 85 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 744 public: | 742 public: |
| 745 DeferredInlineBinaryOperation(CodeGenerator* generator, | 743 DeferredInlineBinaryOperation(CodeGenerator* generator, |
| 746 Token::Value op, | 744 Token::Value op, |
| 747 OverwriteMode mode, | 745 OverwriteMode mode, |
| 748 GenericBinaryFlags flags) | 746 GenericBinaryFlags flags) |
| 749 : DeferredCode(generator), | 747 : DeferredCode(generator), |
| 750 stub_(op, mode, flags), | 748 stub_(op, mode, flags), |
| 751 op_(op) { | 749 op_(op) { |
| 752 } | 750 } |
| 753 | 751 |
| 754 void GenerateInlineCode(); | 752 // The binary operation's arguments are on top of the code generator's frame. |
| 753 Result GenerateInlineCode(); | |
| 755 | 754 |
| 756 virtual void Generate() { | 755 virtual void Generate() { |
| 757 // The arguments are is actually passed in ebx and the top of the stack. | 756 Result left(generator()); |
| 758 enter()->Bind(); | 757 Result right(generator()); |
| 759 VirtualFrame::SpilledScope spilled_scope(generator()); | 758 enter()->Bind(&left, &right); |
| 760 generator()->frame()->EmitPush(ebx); | 759 generator()->frame()->Push(&left); |
| 761 generator()->frame()->CallStub(&stub_, 2); | 760 generator()->frame()->Push(&right); |
| 762 // We must preserve the eax value here, because it will be written | 761 Result answer = generator()->frame()->CallStub(&stub_, 2); |
| 763 // to the top-of-stack element when getting back to the fast case | 762 exit()->Jump(&answer); |
| 764 // code. See comment in GenericBinaryOperation where | |
| 765 // deferred->exit() is bound. | |
| 766 generator()->frame()->EmitPush(eax); | |
| 767 // The result is actually returned on the top of the stack. | |
| 768 exit()->Jump(); | |
| 769 } | 763 } |
| 770 | 764 |
| 771 private: | 765 private: |
| 772 GenericBinaryOpStub stub_; | 766 GenericBinaryOpStub stub_; |
| 773 Token::Value op_; | 767 Token::Value op_; |
| 774 }; | 768 }; |
| 775 | 769 |
| 776 | 770 |
| 777 void CodeGenerator::GenericBinaryOperation(Token::Value op, | 771 void CodeGenerator::GenericBinaryOperation(Token::Value op, |
| 778 StaticType* type, | 772 StaticType* type, |
| 779 OverwriteMode overwrite_mode) { | 773 OverwriteMode overwrite_mode) { |
| 780 Comment cmnt(masm_, "[ BinaryOperation"); | 774 Comment cmnt(masm_, "[ BinaryOperation"); |
| 781 Comment cmnt_token(masm_, Token::String(op)); | 775 Comment cmnt_token(masm_, Token::String(op)); |
| 782 | 776 |
| 783 if (op == Token::COMMA) { | 777 if (op == Token::COMMA) { |
| 784 // Simply discard left value. | 778 // Simply discard left value. |
| 785 frame_->Nip(1); | 779 frame_->Nip(1); |
| 786 return; | 780 return; |
| 787 } | 781 } |
| 788 | 782 |
| 789 VirtualFrame::SpilledScope spilled_scope(this); | |
| 790 // Set the flags based on the operation, type and loop nesting level. | 783 // Set the flags based on the operation, type and loop nesting level. |
| 791 GenericBinaryFlags flags; | 784 GenericBinaryFlags flags; |
| 792 switch (op) { | 785 switch (op) { |
| 793 case Token::BIT_OR: | 786 case Token::BIT_OR: |
| 794 case Token::BIT_AND: | 787 case Token::BIT_AND: |
| 795 case Token::BIT_XOR: | 788 case Token::BIT_XOR: |
| 796 case Token::SHL: | 789 case Token::SHL: |
| 797 case Token::SHR: | 790 case Token::SHR: |
| 798 case Token::SAR: | 791 case Token::SAR: |
| 799 // Bit operations always assume they likely operate on Smis. Still only | 792 // Bit operations always assume they likely operate on Smis. Still only |
| 800 // generate the inline Smi check code if this operation is part of a loop. | 793 // generate the inline Smi check code if this operation is part of a loop. |
| 801 flags = (loop_nesting() > 0) | 794 flags = (loop_nesting() > 0) |
| 802 ? SMI_CODE_INLINED | 795 ? SMI_CODE_INLINED |
| 803 : SMI_CODE_IN_STUB; | 796 : SMI_CODE_IN_STUB; |
| 804 break; | 797 break; |
| 805 | 798 |
| 806 default: | 799 default: |
| 807 // By default only inline the Smi check code for likely smis if this | 800 // By default only inline the Smi check code for likely smis if this |
| 808 // operation is part of a loop. | 801 // operation is part of a loop. |
| 809 flags = ((loop_nesting() > 0) && type->IsLikelySmi()) | 802 flags = ((loop_nesting() > 0) && type->IsLikelySmi()) |
| 810 ? SMI_CODE_INLINED | 803 ? SMI_CODE_INLINED |
| 811 : SMI_CODE_IN_STUB; | 804 : SMI_CODE_IN_STUB; |
| 812 break; | 805 break; |
| 813 } | 806 } |
| 814 | 807 |
| 815 if (flags == SMI_CODE_INLINED) { | 808 if (flags == SMI_CODE_INLINED) { |
| 816 // Create a new deferred code for the slow-case part. | 809 // Create a new deferred code for the slow-case part. |
| 817 // | |
| 818 // TODO(): When this code is updated to use the virtual frame, it | |
| 819 // has to properly flow to the inline code from this deferred code | |
| 820 // stub. | |
| 821 DeferredInlineBinaryOperation* deferred = | 810 DeferredInlineBinaryOperation* deferred = |
| 822 new DeferredInlineBinaryOperation(this, op, overwrite_mode, flags); | 811 new DeferredInlineBinaryOperation(this, op, overwrite_mode, flags); |
| 823 // Fetch the operands from the stack. | |
| 824 frame_->EmitPop(ebx); // get y | |
| 825 __ mov(eax, frame_->Top()); // get x | |
| 826 // Generate the inline part of the code. | 812 // Generate the inline part of the code. |
| 827 deferred->GenerateInlineCode(); | 813 // The operands are on the frame. |
| 828 // Put result back on the stack. It seems somewhat weird to let | 814 Result answer = deferred->GenerateInlineCode(); |
| 829 // the deferred code jump back before the assignment to the frame | 815 deferred->exit()->Bind(&answer); |
| 830 // top, but this is just to let the peephole optimizer get rid of | 816 frame_->Push(&answer); |
| 831 // more code. | |
| 832 deferred->exit()->Bind(); | |
| 833 __ mov(frame_->Top(), eax); | |
| 834 } else { | 817 } else { |
| 835 // Call the stub and push the result to the stack. | 818 // Call the stub and push the result to the stack. |
| 836 GenericBinaryOpStub stub(op, overwrite_mode, flags); | 819 GenericBinaryOpStub stub(op, overwrite_mode, flags); |
| 837 frame_->CallStub(&stub, 2); | 820 Result answer = frame_->CallStub(&stub, 2); |
| 838 frame_->EmitPush(eax); | 821 frame_->Push(&answer); |
| 839 } | 822 } |
| 840 } | 823 } |
| 841 | 824 |
| 842 | 825 |
| 843 class DeferredInlinedSmiOperation: public DeferredCode { | 826 class DeferredInlinedSmiOperation: public DeferredCode { |
| 844 public: | 827 public: |
| 845 DeferredInlinedSmiOperation(CodeGenerator* generator, | 828 DeferredInlinedSmiOperation(CodeGenerator* generator, |
| 846 Token::Value op, int value, | 829 Token::Value op, int value, |
| 847 OverwriteMode overwrite_mode) : | 830 OverwriteMode overwrite_mode) : |
| 848 DeferredCode(generator), op_(op), value_(value), | 831 DeferredCode(generator), op_(op), value_(value), |
| (...skipping 572 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1421 int arg_count = args->length(); | 1404 int arg_count = args->length(); |
| 1422 for (int i = 0; i < arg_count; i++) { | 1405 for (int i = 0; i < arg_count; i++) { |
| 1423 Load(args->at(i)); | 1406 Load(args->at(i)); |
| 1424 } | 1407 } |
| 1425 | 1408 |
| 1426 // Record the position for debugging purposes. | 1409 // Record the position for debugging purposes. |
| 1427 CodeForSourcePosition(position); | 1410 CodeForSourcePosition(position); |
| 1428 | 1411 |
| 1429 // Use the shared code stub to call the function. | 1412 // Use the shared code stub to call the function. |
| 1430 CallFunctionStub call_function(arg_count); | 1413 CallFunctionStub call_function(arg_count); |
| 1431 frame_->CallStub(&call_function, arg_count + 1); | 1414 Result answer = frame_->CallStub(&call_function, arg_count + 1); |
| 1432 Result result = allocator_->Allocate(eax); | |
| 1433 | |
| 1434 // Restore context and replace function on the stack with the | 1415 // Restore context and replace function on the stack with the |
| 1435 // result of the stub invocation. | 1416 // result of the stub invocation. |
| 1436 frame_->RestoreContextRegister(); | 1417 frame_->RestoreContextRegister(); |
| 1437 frame_->SetElementAt(0, &result); | 1418 frame_->SetElementAt(0, &answer); |
| 1438 } | 1419 } |
| 1439 | 1420 |
| 1440 | 1421 |
| 1441 void CodeGenerator::Branch(bool if_true, JumpTarget* target) { | 1422 void CodeGenerator::Branch(bool if_true, JumpTarget* target) { |
| 1442 ASSERT(has_cc()); | 1423 ASSERT(has_cc()); |
| 1443 Condition cc = if_true ? cc_reg_ : NegateCondition(cc_reg_); | 1424 Condition cc = if_true ? cc_reg_ : NegateCondition(cc_reg_); |
| 1444 cc_reg_ = no_condition; | 1425 cc_reg_ = no_condition; |
| 1445 target->Branch(cc); | 1426 target->Branch(cc); |
| 1446 } | 1427 } |
| 1447 | 1428 |
| (...skipping 3375 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 4823 __ ret(1 * kPointerSize); | 4804 __ ret(1 * kPointerSize); |
| 4824 __ bind(&false_result); | 4805 __ bind(&false_result); |
| 4825 __ mov(eax, 0); | 4806 __ mov(eax, 0); |
| 4826 __ ret(1 * kPointerSize); | 4807 __ ret(1 * kPointerSize); |
| 4827 } | 4808 } |
| 4828 | 4809 |
| 4829 | 4810 |
| 4830 #undef __ | 4811 #undef __ |
| 4831 #define __ masm_-> | 4812 #define __ masm_-> |
| 4832 | 4813 |
| 4833 // This function's implementation is a copy of | 4814 Result DeferredInlineBinaryOperation::GenerateInlineCode() { |
| 4834 // GenericBinaryOpStub::GenerateSmiCode, with the slow-case label replaced | 4815 // Perform fast-case smi code for the operation (left <op> right) and |
| 4835 // with the deferred code's entry target. The duplicated code is a | 4816 // returns the result in a Result. |
| 4836 // temporary intermediate stage on the way to using the virtual frame in | 4817 // If any fast-case tests fail, it jumps to the slow-case deferred code, |
| 4837 // more places. | 4818 // which calls the binary operation stub, with the arguments (in registers) |
| 4838 void DeferredInlineBinaryOperation::GenerateInlineCode() { | 4819 // on top of the frame. |
| 4839 // Perform fast-case smi code for the operation (eax <op> ebx) and | 4820 |
| 4840 // leave result in register eax. | 4821 VirtualFrame* frame = generator()->frame(); |
| 4841 | 4822 // If operation is division or modulus, ensure |
| 4842 // Prepare the smi check of both operands by or'ing them together | 4823 // that the special registers needed are free. |
| 4843 // before checking against the smi mask. | 4824 Result reg_eax(generator()); // Valid only if op is DIV or MOD. |
| 4844 __ mov(ecx, Operand(ebx)); | 4825 Result reg_edx(generator()); // Valid only if op is DIV or MOD. |
| 4845 __ or_(ecx, Operand(eax)); | 4826 if (op_ == Token::DIV || op_ == Token::MOD) { |
| 4846 | 4827 reg_eax = generator()->allocator()->Allocate(eax); |
| 4828 ASSERT(reg_eax.is_valid()); | |
| 4829 reg_edx = generator()->allocator()->Allocate(edx); | |
| 4830 ASSERT(reg_edx.is_valid()); | |
| 4831 } | |
| 4832 | |
| 4833 Result right = frame->Pop(); | |
| 4834 Result left = frame->Pop(); | |
| 4835 left.ToRegister(); | |
| 4836 right.ToRegister(); | |
| 4837 // Answer is used to compute the answer, leaving left and right unchanged. | |
| 4838 // It is also returned from this function. | |
| 4839 // It is used as a temporary register in a few places, as well. | |
| 4840 Result answer(generator()); | |
| 4841 if (reg_eax.is_valid()) { | |
| 4842 answer = reg_eax; | |
| 4843 } else { | |
| 4844 answer = generator()->allocator()->Allocate(); | |
| 4845 } | |
| 4846 ASSERT(answer.is_valid()); | |
| 4847 // Perform the smi check. | |
| 4848 __ mov(answer.reg(), Operand(left.reg())); | |
| 4849 __ or_(answer.reg(), Operand(right.reg())); | |
| 4850 ASSERT(kSmiTag == 0); // adjust zero check if not the case | |
| 4851 __ test(answer.reg(), Immediate(kSmiTagMask)); | |
| 4852 enter()->Branch(not_zero, &left, &right, not_taken); | |
| 4853 | |
| 4854 // All operations start by copying the left argument into answer. | |
| 4855 __ mov(answer.reg(), Operand(left.reg())); | |
| 4847 switch (op_) { | 4856 switch (op_) { |
| 4848 case Token::ADD: | 4857 case Token::ADD: |
| 4849 __ add(eax, Operand(ebx)); // add optimistically | 4858 __ add(answer.reg(), Operand(right.reg())); // add optimistically |
| 4850 enter()->Branch(overflow, not_taken); | 4859 enter()->Branch(overflow, &left, &right, not_taken); |
| 4851 break; | 4860 break; |
| 4852 | 4861 |
| 4853 case Token::SUB: | 4862 case Token::SUB: |
| 4854 __ sub(eax, Operand(ebx)); // subtract optimistically | 4863 __ sub(answer.reg(), Operand(right.reg())); // subtract optimistically |
| 4855 enter()->Branch(overflow, not_taken); | 4864 enter()->Branch(overflow, &left, &right, not_taken); |
| 4856 break; | 4865 break; |
| 4857 | 4866 |
| 4858 case Token::DIV: | 4867 |
| 4859 case Token::MOD: | 4868 case Token::MUL: { |
| 4869 // If the smi tag is 0 we can just leave the tag on one operand. | |
| 4870 ASSERT(kSmiTag == 0); // adjust code below if not the case | |
| 4871 // Remove tag from the left operand (but keep sign). | |
| 4872 // Left hand operand has been copied into answer. | |
| 4873 __ sar(answer.reg(), kSmiTagSize); | |
| 4874 // Do multiplication of smis, leaving result in answer. | |
| 4875 __ imul(answer.reg(), Operand(right.reg())); | |
| 4876 // Go slow on overflows. | |
| 4877 enter()->Branch(overflow, &left, &right, not_taken); | |
| 4878 // Check for negative zero result. If product is zero, | |
| 4879 // and one argument is negative, go to slow case. | |
| 4880 // The frame is unchanged in this block, so local control flow can | |
| 4881 // use a Label rather than a JumpTarget. | |
| 4882 Label non_zero_result; | |
| 4883 __ test(answer.reg(), Operand(answer.reg())); | |
| 4884 __ j(not_zero, &non_zero_result, taken); | |
| 4885 __ mov(answer.reg(), Operand(left.reg())); | |
| 4886 __ or_(answer.reg(), Operand(right.reg())); | |
| 4887 enter()->Branch(negative, &left, &right, not_taken); | |
| 4888 __ xor_(answer.reg(), Operand(answer.reg())); // Positive 0 is correct. | |
| 4889 __ bind(&non_zero_result); | |
| 4890 break; | |
| 4891 } | |
| 4892 | |
| 4893 case Token::DIV: { | |
| 4894 // Left hand argument has been copied into answer, which is eax. | |
| 4860 // Sign extend eax into edx:eax. | 4895 // Sign extend eax into edx:eax. |
| 4861 __ cdq(); | 4896 __ cdq(); |
| 4862 // Check for 0 divisor. | 4897 // Check for 0 divisor. |
| 4863 __ test(ebx, Operand(ebx)); | 4898 __ test(right.reg(), Operand(right.reg())); |
| 4864 enter()->Branch(zero, not_taken); | 4899 enter()->Branch(zero, &left, &right, not_taken); |
| 4865 break; | |
| 4866 | |
| 4867 default: | |
| 4868 // Fall-through to smi check. | |
| 4869 break; | |
| 4870 } | |
| 4871 | |
| 4872 // Perform the actual smi check. | |
| 4873 ASSERT(kSmiTag == 0); // adjust zero check if not the case | |
| 4874 __ test(ecx, Immediate(kSmiTagMask)); | |
| 4875 enter()->Branch(not_zero, not_taken); | |
| 4876 | |
| 4877 switch (op_) { | |
| 4878 case Token::ADD: | |
| 4879 case Token::SUB: | |
| 4880 // Do nothing here. | |
| 4881 break; | |
| 4882 | |
| 4883 case Token::MUL: | |
| 4884 // If the smi tag is 0 we can just leave the tag on one operand. | |
| 4885 ASSERT(kSmiTag == 0); // adjust code below if not the case | |
| 4886 // Remove tag from one of the operands (but keep sign). | |
| 4887 __ sar(eax, kSmiTagSize); | |
| 4888 // Do multiplication. | |
| 4889 __ imul(eax, Operand(ebx)); // multiplication of smis; result in eax | |
| 4890 // Go slow on overflows. | |
| 4891 enter()->Branch(overflow, not_taken); | |
| 4892 // Check for negative zero result. Use ecx = x | y. | |
| 4893 __ NegativeZeroTest(generator(), eax, ecx, enter()); | |
| 4894 break; | |
| 4895 | |
| 4896 case Token::DIV: | |
| 4897 // Divide edx:eax by ebx. | 4900 // Divide edx:eax by ebx. |
| 4898 __ idiv(ebx); | 4901 __ idiv(right.reg()); |
| 4902 // Check for negative zero result. If result is zero, and divisor | |
| 4903 // is negative, return a floating point negative zero. | |
| 4904 // The frame is unchanged in this block, so local control flow can | |
| 4905 // use a Label rather than a JumpTarget. | |
| 4906 Label non_zero_result; | |
| 4907 __ test(left.reg(), Operand(left.reg())); | |
| 4908 __ j(not_zero, &non_zero_result, taken); | |
| 4909 __ test(right.reg(), Operand(right.reg())); | |
| 4910 enter()->Branch(negative, &left, &right, not_taken); | |
| 4911 __ bind(&non_zero_result); | |
| 4899 // Check for the corner case of dividing the most negative smi | 4912 // Check for the corner case of dividing the most negative smi |
| 4900 // by -1. We cannot use the overflow flag, since it is not set | 4913 // by -1. We cannot use the overflow flag, since it is not set |
| 4901 // by idiv instruction. | 4914 // by idiv instruction. |
| 4902 ASSERT(kSmiTag == 0 && kSmiTagSize == 1); | 4915 ASSERT(kSmiTag == 0 && kSmiTagSize == 1); |
| 4903 __ cmp(eax, 0x40000000); | 4916 __ cmp(reg_eax.reg(), 0x40000000); |
| 4904 enter()->Branch(equal); | 4917 enter()->Branch(equal, &left, &right, not_taken); |
| 4905 // Check for negative zero result. Use ecx = x | y. | |
| 4906 __ NegativeZeroTest(generator(), eax, ecx, enter()); | |
| 4907 // Check that the remainder is zero. | 4918 // Check that the remainder is zero. |
| 4908 __ test(edx, Operand(edx)); | 4919 __ test(reg_edx.reg(), Operand(reg_edx.reg())); |
| 4909 enter()->Branch(not_zero); | 4920 enter()->Branch(not_zero, &left, &right, not_taken); |
| 4910 // Tag the result and store it in register eax. | 4921 // Tag the result and store it in register temp. |
|
> Review comment — Kevin Millikin (Chromium), 2009/01/15 15:47:03: "the answer register." (i.e., the NEW-side comment on line 4921 should read "store it in the answer register" rather than "register temp", matching the code below which stores the tagged result via `answer.reg()`.)
| |
| 4911 ASSERT(kSmiTagSize == times_2); // adjust code if not the case | 4922 ASSERT(kSmiTagSize == times_2); // adjust code if not the case |
| 4912 __ lea(eax, Operand(eax, eax, times_1, kSmiTag)); | 4923 __ lea(answer.reg(), Operand(eax, eax, times_1, kSmiTag)); |
| 4913 break; | 4924 break; |
| 4914 | 4925 } |
| 4915 case Token::MOD: | 4926 |
| 4927 case Token::MOD: { | |
| 4928 // Left hand argument has been copied into answer, which is eax. | |
| 4929 // Sign extend eax into edx:eax. | |
| 4930 __ cdq(); | |
| 4931 // Check for 0 divisor. | |
| 4932 __ test(right.reg(), Operand(right.reg())); | |
| 4933 enter()->Branch(zero, &left, &right, not_taken); | |
| 4934 | |
| 4916 // Divide edx:eax by ebx. | 4935 // Divide edx:eax by ebx. |
| 4917 __ idiv(ebx); | 4936 __ idiv(right.reg()); |
| 4918 // Check for negative zero result. Use ecx = x | y. | 4937 // Check for negative zero result. If result is zero, and divisor |
| 4919 __ NegativeZeroTest(generator(), edx, ecx, enter()); | 4938 // is negative, return a floating point negative zero. |
| 4920 // Move remainder to register eax. | 4939 // The frame is unchanged in this block, so local control flow can |
| 4921 __ mov(eax, Operand(edx)); | 4940 // use a Label rather than a JumpTarget. |
| 4922 break; | 4941 Label non_zero_result; |
| 4942 __ test(reg_edx.reg(), Operand(reg_edx.reg())); | |
| 4943 __ j(not_zero, &non_zero_result, taken); | |
| 4944 __ test(left.reg(), Operand(left.reg())); | |
| 4945 enter()->Branch(negative, &left, &right, not_taken); | |
| 4946 __ bind(&non_zero_result); | |
| 4947 // The answer is in edx. | |
| 4948 answer = reg_edx; | |
| 4949 break; | |
| 4950 } | |
| 4923 | 4951 |
| 4924 case Token::BIT_OR: | 4952 case Token::BIT_OR: |
| 4925 __ or_(eax, Operand(ebx)); | 4953 __ or_(answer.reg(), Operand(right.reg())); |
| 4926 break; | 4954 break; |
| 4927 | 4955 |
| 4928 case Token::BIT_AND: | 4956 case Token::BIT_AND: |
| 4929 __ and_(eax, Operand(ebx)); | 4957 __ and_(answer.reg(), Operand(right.reg())); |
| 4930 break; | 4958 break; |
| 4931 | 4959 |
| 4932 case Token::BIT_XOR: | 4960 case Token::BIT_XOR: |
| 4933 __ xor_(eax, Operand(ebx)); | 4961 __ xor_(answer.reg(), Operand(right.reg())); |
| 4934 break; | 4962 break; |
| 4935 | 4963 |
| 4936 case Token::SHL: | 4964 case Token::SHL: |
| 4937 case Token::SHR: | 4965 case Token::SHR: |
| 4938 case Token::SAR: | 4966 case Token::SAR: |
| 4939 // Move the second operand into register ecx. | 4967 // Move right into ecx. |
| 4940 __ mov(ecx, Operand(ebx)); | 4968 // Left is in two registers already, so even if left or answer is ecx, |
| 4969 // we can move right to it, and use the other one. | |
| 4970 // Right operand must be in register cl because x86 likes it that way. | |
| 4971 if (right.reg().is(ecx)) { | |
| 4972 // Right is already in the right place. Left may be in the | |
| 4973 // same register, which causes problems. Use answer instead. | |
| 4974 if (left.reg().is(ecx)) { | |
| 4975 left = answer; | |
| 4976 } | |
| 4977 } else if (left.reg().is(ecx)) { | |
| 4978 __ mov(left.reg(), Operand(right.reg())); | |
| 4979 right = left; | |
| 4980 left = answer; // Use copy of left in answer as left. | |
| 4981 } else if (answer.reg().is(ecx)) { | |
| 4982 __ mov(answer.reg(), Operand(right.reg())); | |
| 4983 right = answer; | |
| 4984 } else { | |
| 4985 Result reg_ecx = generator()->allocator()->Allocate(ecx); | |
| 4986 ASSERT(reg_ecx.is_valid()); | |
| 4987 __ mov(reg_ecx.reg(), Operand(right.reg())); | |
| 4988 right = reg_ecx; | |
| 4989 } | |
| 4990 ASSERT(left.reg().is_valid()); | |
| 4991 ASSERT(!left.reg().is(ecx)); | |
| 4992 ASSERT(right.reg().is(ecx)); | |
| 4993 answer.Unuse(); // Answer may now be being used for left or right. | |
| 4994 // We will modify left and right, which we do not do in any other | |
| 4995 // binary operation. The exits to slow code need to restore the | |
| 4996 // original values of left and right, or at least values that give | |
| 4997 // the same answer. | |
| 4998 | |
| 4999 // We are modifying left and right. They must be spilled! | |
| 5000 generator()->frame()->Spill(left.reg()); | |
| 5001 generator()->frame()->Spill(right.reg()); | |
| 5002 | |
| 4941 // Remove tags from operands (but keep sign). | 5003 // Remove tags from operands (but keep sign). |
| 4942 __ sar(eax, kSmiTagSize); | 5004 __ sar(left.reg(), kSmiTagSize); |
| 4943 __ sar(ecx, kSmiTagSize); | 5005 __ sar(ecx, kSmiTagSize); |
| 4944 // Perform the operation. | 5006 // Perform the operation. |
| 4945 switch (op_) { | 5007 switch (op_) { |
| 4946 case Token::SAR: | 5008 case Token::SAR: |
| 4947 __ sar(eax); | 5009 __ sar(left.reg()); |
| 4948 // No checks of result necessary | 5010 // No checks of result necessary |
| 4949 break; | 5011 break; |
| 4950 case Token::SHR: | 5012 case Token::SHR: { |
| 4951 __ shr(eax); | 5013 __ shr(left.reg()); |
| 4952 // Check that the *unsigned* result fits in a smi. | 5014 // Check that the *unsigned* result fits in a smi. |
| 4953 // Neither of the two high-order bits can be set: | 5015 // Neither of the two high-order bits can be set: |
| 4954 // - 0x80000000: high bit would be lost when smi tagging. | 5016 // - 0x80000000: high bit would be lost when smi tagging. |
| 4955 // - 0x40000000: this number would convert to negative when | 5017 // - 0x40000000: this number would convert to negative when |
| 4956 // Smi tagging these two cases can only happen with shifts | 5018 // Smi tagging these two cases can only happen with shifts |
| 4957 // by 0 or 1 when handed a valid smi. | 5019 // by 0 or 1 when handed a valid smi. |
| 4958 __ test(eax, Immediate(0xc0000000)); | 5020 // If the answer cannot be represented by a SMI, restore |
| 4959 enter()->Branch(not_zero, not_taken); | 5021 // the left and right arguments, and jump to slow case. |
| 5022 // The low bit of the left argument may be lost, but only | |
| 5023 // in a case where it is dropped anyway. | |
| 5024 JumpTarget result_ok(generator()); | |
| 5025 __ test(left.reg(), Immediate(0xc0000000)); | |
| 5026 result_ok.Branch(zero, &left, &right, taken); | |
| 5027 __ shl(left.reg()); | |
| 5028 ASSERT(kSmiTag == 0); | |
| 5029 __ shl(left.reg(), kSmiTagSize); | |
| 5030 __ shl(right.reg(), kSmiTagSize); | |
| 5031 enter()->Jump(&left, &right); | |
| 5032 result_ok.Bind(&left, &right); | |
| 4960 break; | 5033 break; |
| 4961 case Token::SHL: | 5034 } |
| 4962 __ shl(eax); | 5035 case Token::SHL: { |
| 5036 __ shl(left.reg()); | |
| 4963 // Check that the *signed* result fits in a smi. | 5037 // Check that the *signed* result fits in a smi. |
| 4964 __ lea(ecx, Operand(eax, 0x40000000)); | 5038 // TODO(): Can reduce registers from 4 to 3 by preallocating ecx. |
| 4965 __ test(ecx, Immediate(0x80000000)); | 5039 JumpTarget result_ok(generator()); |
| 4966 enter()->Branch(not_zero, not_taken); | 5040 Result smi_test_reg = generator()->allocator()->Allocate(); |
| 5041 ASSERT(smi_test_reg.is_valid()); | |
| 5042 __ lea(smi_test_reg.reg(), Operand(left.reg(), 0x40000000)); | |
| 5043 __ test(smi_test_reg.reg(), Immediate(0x80000000)); | |
| 5044 smi_test_reg.Unuse(); | |
| 5045 result_ok.Branch(zero, &left, &right, taken); | |
| 5046 __ shr(left.reg()); | |
| 5047 ASSERT(kSmiTag == 0); | |
| 5048 __ shl(left.reg(), kSmiTagSize); | |
| 5049 __ shl(right.reg(), kSmiTagSize); | |
| 5050 enter()->Jump(&left, &right); | |
| 5051 result_ok.Bind(&left, &right); | |
| 4967 break; | 5052 break; |
| 5053 } | |
| 4968 default: | 5054 default: |
| 4969 UNREACHABLE(); | 5055 UNREACHABLE(); |
| 4970 } | 5056 } |
| 4971 // Tag the result and store it in register eax. | 5057 // Smi-tag the result, in left, and make answer an alias for left. |
| 5058 answer = left; | |
| 5059 answer.ToRegister(); | |
| 4972 ASSERT(kSmiTagSize == times_2); // adjust code if not the case | 5060 ASSERT(kSmiTagSize == times_2); // adjust code if not the case |
| 4973 __ lea(eax, Operand(eax, eax, times_1, kSmiTag)); | 5061 __ lea(answer.reg(), |
| 5062 Operand(answer.reg(), answer.reg(), times_1, kSmiTag)); | |
| 4974 break; | 5063 break; |
| 4975 | 5064 |
| 4976 default: | 5065 default: |
| 4977 UNREACHABLE(); | 5066 UNREACHABLE(); |
| 4978 break; | 5067 break; |
| 4979 } | 5068 } |
| 5069 return answer; | |
| 4980 } | 5070 } |
| 4981 | 5071 |
| 4982 | 5072 |
| 4983 #undef __ | 5073 #undef __ |
| 4984 #define __ masm-> | 5074 #define __ masm-> |
| 4985 | 5075 |
| 4986 void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) { | 5076 void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) { |
| 4987 // Perform fast-case smi code for the operation (eax <op> ebx) and | 5077 // Perform fast-case smi code for the operation (eax <op> ebx) and |
| 4988 // leave result in register eax. | 5078 // leave result in register eax. |
| 4989 | 5079 |
| (...skipping 1154 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 6144 | 6234 |
| 6145 // Slow-case: Go through the JavaScript implementation. | 6235 // Slow-case: Go through the JavaScript implementation. |
| 6146 __ bind(&slow); | 6236 __ bind(&slow); |
| 6147 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION); | 6237 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION); |
| 6148 } | 6238 } |
| 6149 | 6239 |
| 6150 | 6240 |
| 6151 #undef __ | 6241 #undef __ |
| 6152 | 6242 |
| 6153 } } // namespace v8::internal | 6243 } } // namespace v8::internal |
| OLD | NEW |