Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2006-2009 the V8 project authors. All rights reserved. | 1 // Copyright 2006-2009 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 723 matching lines...) | |
| 734 frame_->Push(&value); // Undo the Pop() from above. | 734 frame_->Push(&value); // Undo the Pop() from above. |
| 735 ToBooleanStub stub; | 735 ToBooleanStub stub; |
| 736 Result temp = frame_->CallStub(&stub, 1); | 736 Result temp = frame_->CallStub(&stub, 1); |
| 737 // Convert the result to a condition code. | 737 // Convert the result to a condition code. |
| 738 __ test(temp.reg(), Operand(temp.reg())); | 738 __ test(temp.reg(), Operand(temp.reg())); |
| 739 temp.Unuse(); | 739 temp.Unuse(); |
| 740 dest->Split(not_equal); | 740 dest->Split(not_equal); |
| 741 } | 741 } |
| 742 | 742 |
| 743 | 743 |
| 744 enum ArgLocation { | |
Kevin Millikin (Chromium), 2010/01/25 16:27:44:
It seems like this should be a member of FloatingPointHelper.

Vladislav Kaznacheev, 2010/01/25 17:44:53:
Done.

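The suggestion above was taken ("Done"), so the enum presumably ends up scoped inside the helper class. A minimal sketch of that shape, reusing only declarations already visible in this file (not the exact committed code):

```cpp
// Sketch only: ArgLocation nested in FloatingPointHelper, as the reviewer
// suggests. AllStatic, MacroAssembler and Register are the V8 declarations
// already used throughout this file; the other members are unchanged.
class FloatingPointHelper : public AllStatic {
 public:
  enum ArgLocation {
    ARGS_ON_STACK,
    ARGS_IN_REGISTERS
  };

  static void LoadFloatOperands(MacroAssembler* masm,
                                Register scratch,
                                ArgLocation arg_location = ARGS_ON_STACK);
  // ... remaining helpers as declared below ...
};
```
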
| 745 ARGS_ON_STACK, | |
| 746 ARGS_IN_REGISTERS | |
| 747 }; | |
| 748 | |
| 749 | |
| 744 class FloatingPointHelper : public AllStatic { | 750 class FloatingPointHelper : public AllStatic { |
| 745 public: | 751 public: |
| 746 // Code pattern for loading a floating point value. Input value must | 752 // Code pattern for loading a floating point value. Input value must |
| 747 // be either a smi or a heap number object (fp value). Requirements: | 753 // be either a smi or a heap number object (fp value). Requirements: |
| 748 // operand in register number. Returns operand as floating point number | 754 // operand in register number. Returns operand as floating point number |
| 749 // on FPU stack. | 755 // on FPU stack. |
| 750 static void LoadFloatOperand(MacroAssembler* masm, Register number); | 756 static void LoadFloatOperand(MacroAssembler* masm, Register number); |
| 751 // Code pattern for loading floating point values. Input values must | 757 // Code pattern for loading floating point values. Input values must |
| 752 // be either smi or heap number objects (fp values). Requirements: | 758 // be either smi or heap number objects (fp values). Requirements: |
| 753 // operand_1 on TOS+1 , operand_2 on TOS+2; Returns operands as | 759 // operand_1 on TOS+1 or in edx, operand_2 on TOS+2 or in eax. |
| 754 // floating point numbers on FPU stack. | 760 // Returns operands as floating point numbers on FPU stack. |
| 755 static void LoadFloatOperands(MacroAssembler* masm, Register scratch); | 761 static void LoadFloatOperands(MacroAssembler* masm, |
| 762 Register scratch, | |
| 763 ArgLocation arg_location = ARGS_ON_STACK); | |
| 764 | |
| 765 // Similar to LoadFloatOperand but assumes that both operands are smis. | |
| 766 // Accepts operands on stack or in eax, ebx. | |
| 767 static void LoadFloatSmis(MacroAssembler* masm, | |
| 768 Register scratch, | |
| 769 ArgLocation arg_location); | |
| 770 | |
| 756 // Test if operands are smi or number objects (fp). Requirements: | 771 // Test if operands are smi or number objects (fp). Requirements: |
| 757 // operand_1 in eax, operand_2 in edx; falls through on float | 772 // operand_1 in eax, operand_2 in edx; falls through on float |
| 758 // operands, jumps to the non_float label otherwise. | 773 // operands, jumps to the non_float label otherwise. |
| 759 static void CheckFloatOperands(MacroAssembler* masm, | 774 static void CheckFloatOperands(MacroAssembler* masm, |
| 760 Label* non_float, | 775 Label* non_float, |
| 761 Register scratch); | 776 Register scratch); |
| 762 // Takes the operands in edx and eax and loads them as integers in eax | 777 // Takes the operands in edx and eax and loads them as integers in eax |
| 763 // and ecx. | 778 // and ecx. |
| 764 static void LoadAsIntegers(MacroAssembler* masm, | 779 static void LoadAsIntegers(MacroAssembler* masm, |
| 765 bool use_sse3, | 780 bool use_sse3, |
| 766 Label* operand_conversion_failure); | 781 Label* operand_conversion_failure); |
| 767 // Test if operands are numbers (smi or HeapNumber objects), and load | 782 // Test if operands are numbers (smi or HeapNumber objects), and load |
| 768 // them into xmm0 and xmm1 if they are. Jump to label not_numbers if | 783 // them into xmm0 and xmm1 if they are. Jump to label not_numbers if |
| 769 // either operand is not a number. Operands are in edx and eax. | 784 // either operand is not a number. Operands are in edx and eax. |
| 770 // Leaves operands unchanged. | 785 // Leaves operands unchanged. |
| 771 static void LoadSse2Operands(MacroAssembler* masm, Label* not_numbers); | 786 static void LoadSse2Operands(MacroAssembler* masm, Label* not_numbers); |
| 787 | |
| 788 // Similar to LoadSse2Operands but assumes that both operands are smis. | |
| 789 // Accepts operands on stack or in eax, ebx. | |
| 790 static void LoadSse2Smis(MacroAssembler* masm, | |
| 791 Register scratch, | |
| 792 ArgLocation arg_location); | |
| 772 }; | 793 }; |
| 773 | 794 |
| 774 | 795 |
| 775 const char* GenericBinaryOpStub::GetName() { | 796 const char* GenericBinaryOpStub::GetName() { |
| 776 if (name_ != NULL) return name_; | 797 if (name_ != NULL) return name_; |
| 777 const int kMaxNameLength = 100; | 798 const int kMaxNameLength = 100; |
| 778 name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength); | 799 name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength); |
| 779 if (name_ == NULL) return "OOM"; | 800 if (name_ == NULL) return "OOM"; |
| 780 const char* op_name = Token::Name(op_); | 801 const char* op_name = Token::Name(op_); |
| 781 const char* overwrite_name; | 802 const char* overwrite_name; |
| (...skipping 542 matching lines...) | |
| 1324 } else { | 1345 } else { |
| 1325 __ mov(answer.reg(), left->reg()); | 1346 __ mov(answer.reg(), left->reg()); |
| 1326 __ or_(answer.reg(), Operand(right->reg())); | 1347 __ or_(answer.reg(), Operand(right->reg())); |
| 1327 ASSERT(kSmiTag == 0); // Adjust test if not the case. | 1348 ASSERT(kSmiTag == 0); // Adjust test if not the case. |
| 1328 __ test(answer.reg(), Immediate(kSmiTagMask)); | 1349 __ test(answer.reg(), Immediate(kSmiTagMask)); |
| 1329 } | 1350 } |
| 1330 deferred->Branch(not_zero); | 1351 deferred->Branch(not_zero); |
| 1331 __ mov(answer.reg(), left->reg()); | 1352 __ mov(answer.reg(), left->reg()); |
| 1332 switch (op) { | 1353 switch (op) { |
| 1333 case Token::ADD: | 1354 case Token::ADD: |
| 1334 __ add(answer.reg(), Operand(right->reg())); // Add optimistically. | 1355 __ add(answer.reg(), Operand(right->reg())); |
| 1335 deferred->Branch(overflow); | 1356 deferred->Branch(overflow); |
| 1336 break; | 1357 break; |
| 1337 | 1358 |
| 1338 case Token::SUB: | 1359 case Token::SUB: |
| 1339 __ sub(answer.reg(), Operand(right->reg())); // Subtract optimistically. | 1360 __ sub(answer.reg(), Operand(right->reg())); |
| 1340 deferred->Branch(overflow); | 1361 deferred->Branch(overflow); |
| 1341 break; | 1362 break; |
| 1342 | 1363 |
| 1343 case Token::MUL: { | 1364 case Token::MUL: { |
| 1344 // If the smi tag is 0 we can just leave the tag on one operand. | 1365 // If the smi tag is 0 we can just leave the tag on one operand. |
| 1345 ASSERT(kSmiTag == 0); // Adjust code below if not the case. | 1366 ASSERT(kSmiTag == 0); // Adjust code below if not the case. |
| 1346 // Remove smi tag from the left operand (but keep sign). | 1367 // Remove smi tag from the left operand (but keep sign). |
| 1347 // Left-hand operand has been copied into answer. | 1368 // Left-hand operand has been copied into answer. |
| 1348 __ SmiUntag(answer.reg()); | 1369 __ SmiUntag(answer.reg()); |
| 1349 // Do multiplication of smis, leaving result in answer. | 1370 // Do multiplication of smis, leaving result in answer. |
| (...skipping 5699 matching lines...) | |
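On the MUL comment just above the elided region ("If the smi tag is 0 we can just leave the tag on one operand"): with a zero tag a smi is simply the integer doubled, so untagging only one factor keeps the product correctly tagged. A standalone sketch of that arithmetic in plain C++ (helper names are illustrative, not V8's):

```cpp
#include <cassert>
#include <cstdint>

// ia32 smi encoding assumed here: 31-bit payload, tag bit 0, tag size 1,
// i.e. a smi is the integer value times two. (The real code uses shifts.)
static int32_t SmiTag(int32_t value) { return value * 2; }
static int32_t SmiUntag(int32_t smi) { return smi / 2; }

int main() {
  int32_t a = 6, b = -7;
  // Untag only one operand, then multiply the other while still tagged:
  // a * (2b) == 2(ab), so the result is already a tagged smi.
  int32_t product = SmiUntag(SmiTag(a)) * SmiTag(b);
  assert(product == SmiTag(a * b));
  return 0;
}
```
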
| 7049 SetArgsInRegisters(); | 7070 SetArgsInRegisters(); |
| 7050 __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1); | 7071 __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1); |
| 7051 } | 7072 } |
| 7052 | 7073 |
| 7053 // Call the stub. | 7074 // Call the stub. |
| 7054 __ CallStub(this); | 7075 __ CallStub(this); |
| 7055 } | 7076 } |
| 7056 | 7077 |
| 7057 | 7078 |
| 7058 void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) { | 7079 void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) { |
| 7080 if (HasArgumentsInRegisters()) { | |
| 7081 __ mov(ebx, eax); | |
| 7082 __ mov(eax, edx); | |
| 7083 } else { | |
| 7084 __ mov(ebx, Operand(esp, 1 * kPointerSize)); | |
| 7085 __ mov(eax, Operand(esp, 2 * kPointerSize)); | |
| 7086 } | |
| 7087 | |
| 7088 Label not_smis, not_smis_or_overflow, not_smis_undo_optimistic; | |
| 7089 Label use_fp_on_smis, done; | |
| 7090 | |
| 7059 // Perform fast-case smi code for the operation (eax <op> ebx) and | 7091 // Perform fast-case smi code for the operation (eax <op> ebx) and |
| 7060 // leave result in register eax. | 7092 // leave result in register eax. |
| 7061 | 7093 |
| 7062 // Prepare the smi check of both operands by or'ing them together | 7094 // Prepare the smi check of both operands by or'ing them together |
| 7063 // before checking against the smi mask. | 7095 // before checking against the smi mask. |
| 7064 __ mov(ecx, Operand(ebx)); | 7096 __ mov(ecx, Operand(ebx)); |
| 7065 __ or_(ecx, Operand(eax)); | 7097 __ or_(ecx, Operand(eax)); |
| 7066 | 7098 |
| 7067 switch (op_) { | 7099 switch (op_) { |
| 7068 case Token::ADD: | 7100 case Token::ADD: |
| 7069 __ add(eax, Operand(ebx)); // add optimistically | 7101 __ add(eax, Operand(ebx)); // add optimistically |
| 7070 __ j(overflow, slow, not_taken); | 7102 __ j(overflow, &not_smis_or_overflow, not_taken); |
| 7071 break; | 7103 break; |
| 7072 | 7104 |
| 7073 case Token::SUB: | 7105 case Token::SUB: |
| 7074 __ sub(eax, Operand(ebx)); // subtract optimistically | 7106 __ sub(eax, Operand(ebx)); // subtract optimistically |
| 7075 __ j(overflow, slow, not_taken); | 7107 __ j(overflow, &not_smis_or_overflow, not_taken); |
| 7076 break; | 7108 break; |
| 7077 | 7109 |
| 7078 case Token::DIV: | 7110 case Token::DIV: |
| 7079 case Token::MOD: | 7111 case Token::MOD: |
| 7080 // Sign extend eax into edx:eax. | 7112 // Sign extend eax into edx:eax. |
| 7081 __ cdq(); | 7113 __ cdq(); |
| 7082 // Check for 0 divisor. | 7114 // Check for 0 divisor. |
| 7083 __ test(ebx, Operand(ebx)); | 7115 __ test(ebx, Operand(ebx)); |
| 7084 __ j(zero, slow, not_taken); | 7116 __ j(zero, &not_smis_or_overflow, not_taken); |
| 7085 break; | 7117 break; |
| 7086 | 7118 |
| 7087 default: | 7119 default: |
| 7088 // Fall-through to smi check. | 7120 // Fall-through to smi check. |
| 7089 break; | 7121 break; |
| 7090 } | 7122 } |
| 7091 | 7123 |
| 7092 // Perform the actual smi check. | 7124 // Perform the actual smi check. |
| 7093 ASSERT(kSmiTag == 0); // adjust zero check if not the case | 7125 ASSERT(kSmiTag == 0); // adjust zero check if not the case |
| 7094 __ test(ecx, Immediate(kSmiTagMask)); | 7126 __ test(ecx, Immediate(kSmiTagMask)); |
| 7095 __ j(not_zero, slow, not_taken); | 7127 __ j(not_zero, &not_smis_undo_optimistic, not_taken); |
| 7096 | 7128 |
| 7097 switch (op_) { | 7129 switch (op_) { |
| 7098 case Token::ADD: | 7130 case Token::ADD: |
| 7099 case Token::SUB: | 7131 case Token::SUB: |
| 7100 // Do nothing here. | 7132 // Do nothing here. |
| 7101 break; | 7133 break; |
| 7102 | 7134 |
| 7103 case Token::MUL: | 7135 case Token::MUL: |
| 7104 // If the smi tag is 0 we can just leave the tag on one operand. | 7136 // If the smi tag is 0 we can just leave the tag on one operand. |
| 7105 ASSERT(kSmiTag == 0); // adjust code below if not the case | 7137 ASSERT(kSmiTag == 0); // adjust code below if not the case |
| 7106 // Remove tag from one of the operands (but keep sign). | 7138 // Remove tag from one of the operands (but keep sign). |
| 7107 __ SmiUntag(eax); | 7139 __ SmiUntag(eax); |
| 7108 // Do multiplication. | 7140 // Do multiplication. |
| 7109 __ imul(eax, Operand(ebx)); // multiplication of smis; result in eax | 7141 __ imul(eax, Operand(ebx)); // multiplication of smis; result in eax |
| 7110 // Go slow on overflows. | 7142 // Go slow on overflows. |
| 7111 __ j(overflow, slow, not_taken); | 7143 __ j(overflow, &use_fp_on_smis, not_taken); |
| 7112 // Check for negative zero result. | 7144 // Check for negative zero result. |
| 7113 __ NegativeZeroTest(eax, ecx, slow); // use ecx = x | y | 7145 __ NegativeZeroTest(eax, ecx, &use_fp_on_smis); // use ecx = x | y |
| 7114 break; | 7146 break; |
| 7115 | 7147 |
| 7116 case Token::DIV: | 7148 case Token::DIV: |
| 7117 // Divide edx:eax by ebx. | 7149 // Divide edx:eax by ebx. |
| 7118 __ idiv(ebx); | 7150 __ idiv(ebx); |
| 7119 // Check for the corner case of dividing the most negative smi | 7151 // Check for the corner case of dividing the most negative smi |
| 7120 // by -1. We cannot use the overflow flag, since it is not set | 7152 // by -1. We cannot use the overflow flag, since it is not set |
| 7121 // by idiv instruction. | 7153 // by idiv instruction. |
| 7122 ASSERT(kSmiTag == 0 && kSmiTagSize == 1); | 7154 ASSERT(kSmiTag == 0 && kSmiTagSize == 1); |
| 7123 __ cmp(eax, 0x40000000); | 7155 __ cmp(eax, 0x40000000); |
| 7124 __ j(equal, slow); | 7156 __ j(equal, &use_fp_on_smis); |
| 7125 // Check for negative zero result. | 7157 // Check for negative zero result. |
| 7126 __ NegativeZeroTest(eax, ecx, slow); // use ecx = x | y | 7158 __ NegativeZeroTest(eax, ecx, &use_fp_on_smis); // use ecx = x | y |
| 7127 // Check that the remainder is zero. | 7159 // Check that the remainder is zero. |
| 7128 __ test(edx, Operand(edx)); | 7160 __ test(edx, Operand(edx)); |
| 7129 __ j(not_zero, slow); | 7161 __ j(not_zero, &use_fp_on_smis); |
| 7130 // Tag the result and store it in register eax. | 7162 // Tag the result and store it in register eax. |
| 7131 __ SmiTag(eax); | 7163 __ SmiTag(eax); |
| 7132 break; | 7164 break; |
| 7133 | 7165 |
| 7134 case Token::MOD: | 7166 case Token::MOD: |
| 7135 // Divide edx:eax by ebx. | 7167 // Divide edx:eax by ebx. |
| 7136 __ idiv(ebx); | 7168 __ idiv(ebx); |
| 7137 // Check for negative zero result. | 7169 // Check for negative zero result. |
| 7138 __ NegativeZeroTest(edx, ecx, slow); // use ecx = x | y | 7170 __ NegativeZeroTest(edx, ecx, slow); // use ecx = x | y |
| 7139 // Move remainder to register eax. | 7171 // Move remainder to register eax. |
| (...skipping 34 matching lines...) | |
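On the NegativeZeroTest calls in the MOD path just above: a smi can only encode a signless integer zero, while the JavaScript remainder of a negative dividend can be -0 (for example -5 % 5), so such results have to leave the smi fast case. A small standalone illustration in plain C++ of why an integer cannot carry that sign:

```cpp
#include <cassert>
#include <cmath>

int main() {
  // JavaScript evaluates -5 % 5 to -0, which doubles keep distinct from +0.
  double neg_zero = -0.0;
  double pos_zero = 0.0;
  assert(std::signbit(neg_zero));
  assert(!std::signbit(pos_zero));

  // Converting to an integer (and hence to a smi) discards the sign, which is
  // why the stub bails out whenever the result would be a negative zero.
  int as_int = static_cast<int>(neg_zero);
  assert(as_int == 0);
  assert(!std::signbit(static_cast<double>(as_int)));
  return 0;
}
```
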
| 7174 // - 0x40000000: this number would convert to negative when | 7206 // - 0x40000000: this number would convert to negative when |
| 7175 // Smi tagging these two cases can only happen with shifts | 7207 // Smi tagging these two cases can only happen with shifts |
| 7176 // by 0 or 1 when handed a valid smi. | 7208 // by 0 or 1 when handed a valid smi. |
| 7177 __ test(eax, Immediate(0xc0000000)); | 7209 __ test(eax, Immediate(0xc0000000)); |
| 7178 __ j(not_zero, slow, not_taken); | 7210 __ j(not_zero, slow, not_taken); |
| 7179 break; | 7211 break; |
| 7180 case Token::SHL: | 7212 case Token::SHL: |
| 7181 __ shl_cl(eax); | 7213 __ shl_cl(eax); |
| 7182 // Check that the *signed* result fits in a smi. | 7214 // Check that the *signed* result fits in a smi. |
| 7183 __ cmp(eax, 0xc0000000); | 7215 __ cmp(eax, 0xc0000000); |
| 7184 __ j(sign, slow, not_taken); | 7216 __ j(sign, &use_fp_on_smis, not_taken); |
| 7185 break; | 7217 break; |
| 7186 default: | 7218 default: |
| 7187 UNREACHABLE(); | 7219 UNREACHABLE(); |
| 7188 } | 7220 } |
| 7189 // Tag the result and store it in register eax. | 7221 // Tag the result and store it in register eax. |
| 7190 __ SmiTag(eax); | 7222 __ SmiTag(eax); |
| 7191 break; | 7223 break; |
| 7192 | 7224 |
| 7193 default: | 7225 default: |
| 7194 UNREACHABLE(); | 7226 UNREACHABLE(); |
| 7195 break; | 7227 break; |
| 7196 } | 7228 } |
| 7229 GenerateReturn(masm); | |
| 7230 | |
| 7231 __ bind(&not_smis_or_overflow); | |
| 7232 // Revert optimistic operation. | |
| 7233 switch (op_) { | |
| 7234 case Token::ADD: __ sub(eax, Operand(ebx)); break; | |
| 7235 case Token::SUB: __ add(eax, Operand(ebx)); break; | |
| 7236 default: break; | |
| 7237 } | |
| 7238 ASSERT(kSmiTag == 0); // Adjust zero check if not the case. | |
| 7239 __ test(ecx, Immediate(kSmiTagMask)); | |
| 7240 __ j(not_zero, &not_smis, not_taken); | |
| 7241 // Correct operand values are in eax, ebx at this point. | |
| 7242 | |
| 7243 __ bind(&use_fp_on_smis); | |
| 7244 // Both operands are known to be SMIs but the result does not fit into a SMI. | |
| 7245 switch (op_) { | |
| 7246 case Token::ADD: | |
| 7247 case Token::SUB: | |
| 7248 case Token::MUL: | |
| 7249 case Token::DIV: { | |
| 7250 Label after_alloc_failure; | |
| 7251 | |
| 7252 ArgLocation arg_location = | |
| 7253 (op_ == Token::ADD || op_ == Token::SUB) ? | |
| 7254 ARGS_IN_REGISTERS : | |
| 7255 ARGS_ON_STACK; | |
| 7256 | |
| 7257 __ AllocateHeapNumber( | |
| 7258 edx, | |
| 7259 ecx, | |
| 7260 no_reg, | |
| 7261 arg_location == ARGS_IN_REGISTERS ? &after_alloc_failure : slow); | |
| 7262 | |
| 7263 if (CpuFeatures::IsSupported(SSE2)) { | |
| 7264 CpuFeatures::Scope use_sse2(SSE2); | |
| 7265 FloatingPointHelper::LoadSse2Smis(masm, ecx, arg_location); | |
| 7266 switch (op_) { | |
| 7267 case Token::ADD: __ addsd(xmm0, xmm1); break; | |
| 7268 case Token::SUB: __ subsd(xmm0, xmm1); break; | |
| 7269 case Token::MUL: __ mulsd(xmm0, xmm1); break; | |
| 7270 case Token::DIV: __ divsd(xmm0, xmm1); break; | |
| 7271 default: UNREACHABLE(); | |
| 7272 } | |
| 7273 __ movdbl(FieldOperand(edx, HeapNumber::kValueOffset), xmm0); | |
| 7274 } else { // SSE2 not available, use FPU. | |
| 7275 FloatingPointHelper::LoadFloatSmis(masm, ecx, arg_location); | |
| 7276 switch (op_) { | |
| 7277 case Token::ADD: __ faddp(1); break; | |
| 7278 case Token::SUB: __ fsubp(1); break; | |
| 7279 case Token::MUL: __ fmulp(1); break; | |
| 7280 case Token::DIV: __ fdivp(1); break; | |
| 7281 default: UNREACHABLE(); | |
| 7282 } | |
| 7283 __ fstp_d(FieldOperand(edx, HeapNumber::kValueOffset)); | |
| 7284 } | |
| 7285 __ mov(eax, edx); | |
| 7286 GenerateReturn(masm); | |
| 7287 | |
| 7288 if (HasArgumentsInRegisters()) { | |
Kevin Millikin (Chromium), 2010/01/25 16:27:44:
Is this right? What if op_ == Token::ADD or Token::SUB but HasArgumentsInRegisters() is false?

Vladislav Kaznacheev, 2010/01/25 17:44:53:
This was a bug. Fixed.

| 7289 __ bind(&after_alloc_failure); | |
| 7290 __ mov(edx, eax); | |
| 7291 __ mov(eax, ebx); | |
| 7292 __ jmp(slow); | |
| 7293 } | |
| 7294 } | |
Kevin Millikin (Chromium), 2010/01/25 16:27:44:
Missing break would be a problem (at least dead code).

Vladislav Kaznacheev, 2010/01/25 17:44:53:
Done.

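Stepping back to the use_fp_on_smis path that both comments above concern: ia32 smis hold 31-bit signed integers, so any result outside [-2^30, 2^30 - 1] has to be boxed as a double in a HeapNumber. A standalone sketch of that range arithmetic in plain C++ (constants assumed from kSmiTag == 0 and kSmiTagSize == 1; they mirror the 0x40000000 checks in this hunk):

```cpp
#include <cassert>
#include <cstdint>

// 31-bit smi payload assumed: tag bit 0, tag size 1.
const int32_t kSmiMaxValue = 0x3fffffff;         //  2^30 - 1
const int32_t kSmiMinValue = -kSmiMaxValue - 1;  // -2^30

static bool FitsInSmi(int64_t value) {
  return value >= kSmiMinValue && value <= kSmiMaxValue;
}

int main() {
  // Adding or multiplying two valid smis can leave the smi range; the stub
  // then allocates a HeapNumber and redoes the operation in double precision.
  assert(!FitsInSmi(static_cast<int64_t>(kSmiMaxValue) + 1));

  // The DIV corner case compared against 0x40000000 above: the most negative
  // smi divided by -1 is +2^30, one past kSmiMaxValue, and idiv does not set
  // the overflow flag for it.
  assert(static_cast<int64_t>(kSmiMinValue) / -1 == 0x40000000);
  assert(!FitsInSmi(static_cast<int64_t>(kSmiMinValue) / -1));
  return 0;
}
```
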
| 7295 case Token::BIT_OR: | |
| 7296 case Token::BIT_AND: | |
| 7297 case Token::BIT_XOR: | |
| 7298 case Token::SAR: | |
| 7299 // Do nothing here as these operations always succeed on a pair of smis. | |
| 7300 break; | |
| 7301 | |
| 7302 case Token::MOD: | |
| 7303 case Token::SHR: | |
| 7304 // Do nothing here as these go directly to runtime. | |
| 7305 break; | |
| 7306 | |
| 7307 case Token::SHL: { | |
| 7308 __ AllocateHeapNumber(ebx, ecx, edx, slow); | |
| 7309 // Store the result in the HeapNumber and return. | |
| 7310 if (CpuFeatures::IsSupported(SSE2)) { | |
| 7311 CpuFeatures::Scope use_sse2(SSE2); | |
| 7312 __ cvtsi2sd(xmm0, Operand(eax)); | |
| 7313 __ movdbl(FieldOperand(ebx, HeapNumber::kValueOffset), xmm0); | |
| 7314 } else { | |
| 7315 __ mov(Operand(esp, 1 * kPointerSize), eax); | |
| 7316 __ fild_s(Operand(esp, 1 * kPointerSize)); | |
| 7317 __ fstp_d(FieldOperand(ebx, HeapNumber::kValueOffset)); | |
| 7318 } | |
| 7319 __ mov(eax, ebx); | |
| 7320 GenerateReturn(masm); | |
| 7321 break; | |
| 7322 } | |
| 7323 | |
| 7324 default: UNREACHABLE(); break; | |
| 7325 } | |
| 7326 | |
| 7327 __ bind(&not_smis_undo_optimistic); | |
| 7328 switch (op_) { | |
| 7329 case Token::ADD: __ sub(eax, Operand(ebx)); break; | |
| 7330 case Token::SUB: __ add(eax, Operand(ebx)); break; | |
| 7331 default: break; | |
| 7332 } | |
| 7333 | |
| 7334 __ bind(&not_smis); | |
| 7335 __ mov(edx, eax); | |
| 7336 __ mov(eax, ebx); | |
| 7337 | |
| 7338 __ bind(&done); | |
| 7197 } | 7339 } |
| 7198 | 7340 |
| 7199 | 7341 |
| 7200 void GenericBinaryOpStub::Generate(MacroAssembler* masm) { | 7342 void GenericBinaryOpStub::Generate(MacroAssembler* masm) { |
| 7201 Label call_runtime; | 7343 Label call_runtime; |
| 7202 | 7344 |
| 7203 __ IncrementCounter(&Counters::generic_binary_stub_calls, 1); | 7345 __ IncrementCounter(&Counters::generic_binary_stub_calls, 1); |
| 7204 | 7346 |
| 7205 // Generate fast case smi code if requested. This flag is set when the fast | 7347 // Generate fast case smi code if requested. This flag is set when the fast |
| 7206 // case smi code is not generated by the caller. Generating it here will speed | 7348 // case smi code is not generated by the caller. Generating it here will speed |
| 7207 // up common operations. | 7349 // up common operations. |
| 7208 if (HasSmiCodeInStub()) { | 7350 if (HasSmiCodeInStub()) { |
| 7209 Label slow; | 7351 GenerateSmiCode(masm, &call_runtime); |
| 7210 __ mov(ebx, Operand(esp, 1 * kPointerSize)); | 7352 } else if (op_ != Token::MOD) { // MOD goes straight to runtime. |
| 7211 __ mov(eax, Operand(esp, 2 * kPointerSize)); | 7353 GenerateLoadArguments(masm); |
| 7212 GenerateSmiCode(masm, &slow); | |
| 7213 GenerateReturn(masm); | |
| 7214 // Too bad. The fast case smi code didn't succeed. | |
| 7215 __ bind(&slow); | |
| 7216 } | 7354 } |
| 7217 | 7355 |
| 7218 // Make sure the arguments are in edx and eax. | |
| 7219 GenerateLoadArguments(masm); | |
| 7220 | |
| 7221 // Floating point case. | 7356 // Floating point case. |
| 7222 switch (op_) { | 7357 switch (op_) { |
| 7223 case Token::ADD: | 7358 case Token::ADD: |
| 7224 case Token::SUB: | 7359 case Token::SUB: |
| 7225 case Token::MUL: | 7360 case Token::MUL: |
| 7226 case Token::DIV: { | 7361 case Token::DIV: { |
| 7227 // eax: y | |
| 7228 // edx: x | |
| 7229 | |
| 7230 if (CpuFeatures::IsSupported(SSE2)) { | 7362 if (CpuFeatures::IsSupported(SSE2)) { |
| 7231 CpuFeatures::Scope use_sse2(SSE2); | 7363 CpuFeatures::Scope use_sse2(SSE2); |
| 7232 FloatingPointHelper::LoadSse2Operands(masm, &call_runtime); | 7364 FloatingPointHelper::LoadSse2Operands(masm, &call_runtime); |
| 7233 | 7365 |
| 7234 switch (op_) { | 7366 switch (op_) { |
| 7235 case Token::ADD: __ addsd(xmm0, xmm1); break; | 7367 case Token::ADD: __ addsd(xmm0, xmm1); break; |
| 7236 case Token::SUB: __ subsd(xmm0, xmm1); break; | 7368 case Token::SUB: __ subsd(xmm0, xmm1); break; |
| 7237 case Token::MUL: __ mulsd(xmm0, xmm1); break; | 7369 case Token::MUL: __ mulsd(xmm0, xmm1); break; |
| 7238 case Token::DIV: __ divsd(xmm0, xmm1); break; | 7370 case Token::DIV: __ divsd(xmm0, xmm1); break; |
| 7239 default: UNREACHABLE(); | 7371 default: UNREACHABLE(); |
| 7240 } | 7372 } |
| 7241 // Allocate a heap number, if needed. | 7373 GenerateHeapResultAllocation(masm, &call_runtime); |
| 7242 Label skip_allocation; | |
| 7243 switch (mode_) { | |
| 7244 case OVERWRITE_LEFT: | |
| 7245 __ mov(eax, Operand(edx)); | |
| 7246 // Fall through! | |
| 7247 case OVERWRITE_RIGHT: | |
| 7248 // If the argument in eax is already an object, we skip the | |
| 7249 // allocation of a heap number. | |
| 7250 __ test(eax, Immediate(kSmiTagMask)); | |
| 7251 __ j(not_zero, &skip_allocation, not_taken); | |
| 7252 // Fall through! | |
| 7253 case NO_OVERWRITE: { | |
| 7254 // Allocate a heap number for the result. Keep eax and edx intact | |
| 7255 // for the possible runtime call. | |
| 7256 __ AllocateHeapNumber(ebx, ecx, no_reg, &call_runtime); | |
| 7257 // Now eax can be overwritten losing one of the arguments as we are | |
| 7258 // now done and will not need it any more. | |
| 7259 __ mov(eax, ebx); | |
| 7260 __ bind(&skip_allocation); | |
| 7261 break; | |
| 7262 } | |
| 7263 default: UNREACHABLE(); | |
| 7264 } | |
| 7265 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); | 7374 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); |
| 7266 GenerateReturn(masm); | 7375 GenerateReturn(masm); |
| 7267 } else { // SSE2 not available, use FPU. | 7376 } else { // SSE2 not available, use FPU. |
| 7268 FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx); | 7377 FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx); |
| 7269 // Allocate a heap number, if needed. | 7378 FloatingPointHelper::LoadFloatOperands(masm, ecx, ARGS_IN_REGISTERS); |
| 7270 Label skip_allocation; | |
| 7271 switch (mode_) { | |
| 7272 case OVERWRITE_LEFT: | |
| 7273 __ mov(eax, Operand(edx)); | |
| 7274 // Fall through! | |
| 7275 case OVERWRITE_RIGHT: | |
| 7276 // If the argument in eax is already an object, we skip the | |
| 7277 // allocation of a heap number. | |
| 7278 __ test(eax, Immediate(kSmiTagMask)); | |
| 7279 __ j(not_zero, &skip_allocation, not_taken); | |
| 7280 // Fall through! | |
| 7281 case NO_OVERWRITE: | |
| 7282 // Allocate a heap number for the result. Keep eax and edx intact | |
| 7283 // for the possible runtime call. | |
| 7284 __ AllocateHeapNumber(ebx, ecx, no_reg, &call_runtime); | |
| 7285 // Now eax can be overwritten losing one of the arguments as we are | |
| 7286 // now done and will not need it any more. | |
| 7287 __ mov(eax, ebx); | |
| 7288 __ bind(&skip_allocation); | |
| 7289 break; | |
| 7290 default: UNREACHABLE(); | |
| 7291 } | |
| 7292 FloatingPointHelper::LoadFloatOperands(masm, ecx); | |
| 7293 | |
| 7294 switch (op_) { | 7379 switch (op_) { |
| 7295 case Token::ADD: __ faddp(1); break; | 7380 case Token::ADD: __ faddp(1); break; |
| 7296 case Token::SUB: __ fsubp(1); break; | 7381 case Token::SUB: __ fsubp(1); break; |
| 7297 case Token::MUL: __ fmulp(1); break; | 7382 case Token::MUL: __ fmulp(1); break; |
| 7298 case Token::DIV: __ fdivp(1); break; | 7383 case Token::DIV: __ fdivp(1); break; |
| 7299 default: UNREACHABLE(); | 7384 default: UNREACHABLE(); |
| 7300 } | 7385 } |
| 7386 Label after_alloc_failure; | |
| 7387 GenerateHeapResultAllocation(masm, &after_alloc_failure); | |
| 7301 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); | 7388 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); |
| 7302 GenerateReturn(masm); | 7389 GenerateReturn(masm); |
| 7390 __ bind(&after_alloc_failure); | |
| 7391 __ ffree(); | |
| 7392 __ jmp(&call_runtime); | |
| 7303 } | 7393 } |
| 7304 } | 7394 } |
| 7305 case Token::MOD: { | 7395 case Token::MOD: { |
| 7306 // For MOD we go directly to runtime in the non-smi case. | 7396 // For MOD we go directly to runtime in the non-smi case. |
| 7307 break; | 7397 break; |
| 7308 } | 7398 } |
| 7309 case Token::BIT_OR: | 7399 case Token::BIT_OR: |
| 7310 case Token::BIT_AND: | 7400 case Token::BIT_AND: |
| 7311 case Token::BIT_XOR: | 7401 case Token::BIT_XOR: |
| 7312 case Token::SAR: | 7402 case Token::SAR: |
| 7313 case Token::SHL: | 7403 case Token::SHL: |
| 7314 case Token::SHR: { | 7404 case Token::SHR: { |
| 7315 Label non_smi_result, skip_allocation; | 7405 Label non_smi_result; |
| 7316 Label operand_conversion_failure; | 7406 FloatingPointHelper::LoadAsIntegers(masm, use_sse3_, &call_runtime); |
| 7317 FloatingPointHelper::LoadAsIntegers( | |
| 7318 masm, | |
| 7319 use_sse3_, | |
| 7320 &operand_conversion_failure); | |
| 7321 switch (op_) { | 7407 switch (op_) { |
| 7322 case Token::BIT_OR: __ or_(eax, Operand(ecx)); break; | 7408 case Token::BIT_OR: __ or_(eax, Operand(ecx)); break; |
| 7323 case Token::BIT_AND: __ and_(eax, Operand(ecx)); break; | 7409 case Token::BIT_AND: __ and_(eax, Operand(ecx)); break; |
| 7324 case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break; | 7410 case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break; |
| 7325 case Token::SAR: __ sar_cl(eax); break; | 7411 case Token::SAR: __ sar_cl(eax); break; |
| 7326 case Token::SHL: __ shl_cl(eax); break; | 7412 case Token::SHL: __ shl_cl(eax); break; |
| 7327 case Token::SHR: __ shr_cl(eax); break; | 7413 case Token::SHR: __ shr_cl(eax); break; |
| 7328 default: UNREACHABLE(); | 7414 default: UNREACHABLE(); |
| 7329 } | 7415 } |
| 7330 if (op_ == Token::SHR) { | 7416 if (op_ == Token::SHR) { |
| 7331 // Check if result is non-negative and fits in a smi. | 7417 // Check if result is non-negative and fits in a smi. |
| 7332 __ test(eax, Immediate(0xc0000000)); | 7418 __ test(eax, Immediate(0xc0000000)); |
| 7333 __ j(not_zero, &non_smi_result); | 7419 __ j(not_zero, &call_runtime); |
| 7334 } else { | 7420 } else { |
| 7335 // Check if result fits in a smi. | 7421 // Check if result fits in a smi. |
| 7336 __ cmp(eax, 0xc0000000); | 7422 __ cmp(eax, 0xc0000000); |
| 7337 __ j(negative, &non_smi_result); | 7423 __ j(negative, &non_smi_result); |
| 7338 } | 7424 } |
| 7339 // Tag smi result and return. | 7425 // Tag smi result and return. |
| 7340 __ SmiTag(eax); | 7426 __ SmiTag(eax); |
| 7341 GenerateReturn(masm); | 7427 GenerateReturn(masm); |
| 7342 | 7428 |
| 7343 // All ops except SHR return a signed int32 that we load in a HeapNumber. | 7429 // All ops except SHR return a signed int32 that we load in a HeapNumber. |
| 7344 if (op_ != Token::SHR) { | 7430 if (op_ != Token::SHR) { |
| 7345 __ bind(&non_smi_result); | 7431 __ bind(&non_smi_result); |
| 7346 // Allocate a heap number if needed. | 7432 // Allocate a heap number if needed. |
| 7347 __ mov(ebx, Operand(eax)); // ebx: result | 7433 __ mov(ebx, Operand(eax)); // ebx: result |
| 7434 Label skip_allocation; | |
| 7348 switch (mode_) { | 7435 switch (mode_) { |
| 7349 case OVERWRITE_LEFT: | 7436 case OVERWRITE_LEFT: |
| 7350 case OVERWRITE_RIGHT: | 7437 case OVERWRITE_RIGHT: |
| 7351 // If the operand was an object, we skip the | 7438 // If the operand was an object, we skip the |
| 7352 // allocation of a heap number. | 7439 // allocation of a heap number. |
| 7353 __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ? | 7440 __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ? |
| 7354 1 * kPointerSize : 2 * kPointerSize)); | 7441 1 * kPointerSize : 2 * kPointerSize)); |
| 7355 __ test(eax, Immediate(kSmiTagMask)); | 7442 __ test(eax, Immediate(kSmiTagMask)); |
| 7356 __ j(not_zero, &skip_allocation, not_taken); | 7443 __ j(not_zero, &skip_allocation, not_taken); |
| 7357 // Fall through! | 7444 // Fall through! |
| 7358 case NO_OVERWRITE: | 7445 case NO_OVERWRITE: |
| 7359 __ AllocateHeapNumber(eax, ecx, edx, &call_runtime); | 7446 __ AllocateHeapNumber(eax, ecx, edx, &call_runtime); |
| 7360 __ bind(&skip_allocation); | 7447 __ bind(&skip_allocation); |
| 7361 break; | 7448 break; |
| 7362 default: UNREACHABLE(); | 7449 default: UNREACHABLE(); |
| 7363 } | 7450 } |
| 7364 // Store the result in the HeapNumber and return. | 7451 // Store the result in the HeapNumber and return. |
| 7365 if (CpuFeatures::IsSupported(SSE2)) { | 7452 if (CpuFeatures::IsSupported(SSE2)) { |
| 7366 CpuFeatures::Scope use_sse2(SSE2); | 7453 CpuFeatures::Scope use_sse2(SSE2); |
| 7367 __ cvtsi2sd(xmm0, Operand(ebx)); | 7454 __ cvtsi2sd(xmm0, Operand(ebx)); |
| 7368 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); | 7455 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); |
| 7369 } else { | 7456 } else { |
| 7370 __ mov(Operand(esp, 1 * kPointerSize), ebx); | 7457 __ mov(Operand(esp, 1 * kPointerSize), ebx); |
| 7371 __ fild_s(Operand(esp, 1 * kPointerSize)); | 7458 __ fild_s(Operand(esp, 1 * kPointerSize)); |
| 7372 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); | 7459 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); |
| 7373 } | 7460 } |
| 7374 GenerateReturn(masm); | 7461 GenerateReturn(masm); |
| 7375 } | 7462 } |
| 7376 | |
| 7377 // Go to runtime for non-number inputs. | |
| 7378 __ bind(&operand_conversion_failure); | |
| 7379 // SHR should return uint32 - go to runtime for non-smi/negative result. | |
| 7380 if (op_ == Token::SHR) { | |
| 7381 __ bind(&non_smi_result); | |
| 7382 } | |
| 7383 __ mov(eax, Operand(esp, 1 * kPointerSize)); | |
| 7384 __ mov(edx, Operand(esp, 2 * kPointerSize)); | |
| 7385 break; | 7463 break; |
| 7386 } | 7464 } |
| 7387 default: UNREACHABLE(); break; | 7465 default: UNREACHABLE(); break; |
| 7388 } | 7466 } |
| 7389 | 7467 |
| 7390 // If all else fails, use the runtime system to get the correct | 7468 // If all else fails, use the runtime system to get the correct |
| 7391 // result. If arguments was passed in registers now place them on the | 7469 // result. If arguments was passed in registers now place them on the |
| 7392 // stack in the correct order below the return address. | 7470 // stack in the correct order below the return address. |
| 7393 __ bind(&call_runtime); | 7471 __ bind(&call_runtime); |
| 7394 if (HasArgumentsInRegisters()) { | 7472 if (HasArgumentsInRegisters()) { |
| 7395 __ pop(ecx); | 7473 __ pop(ecx); |
| 7396 if (HasArgumentsReversed()) { | 7474 if (HasArgumentsReversed()) { |
| 7397 __ push(eax); | 7475 __ push(eax); |
| 7398 __ push(edx); | 7476 __ push(edx); |
| 7399 } else { | 7477 } else { |
| 7400 __ push(edx); | 7478 __ push(edx); |
| 7401 __ push(eax); | 7479 __ push(eax); |
| 7402 } | 7480 } |
| 7403 __ push(ecx); | 7481 __ push(ecx); |
| 7404 } | 7482 } |
| 7405 switch (op_) { | 7483 switch (op_) { |
| 7406 case Token::ADD: { | 7484 case Token::ADD: { |
| 7407 // Test for string arguments before calling runtime. | 7485 // Test for string arguments before calling runtime. |
| 7408 Label not_strings, not_string1, string1; | 7486 Label not_strings, not_string1, string1; |
| 7409 Result answer; | 7487 Result answer; |
| 7410 __ mov(eax, Operand(esp, 2 * kPointerSize)); // First argument. | 7488 __ test(edx, Immediate(kSmiTagMask)); |
| 7411 __ mov(edx, Operand(esp, 1 * kPointerSize)); // Second argument. | |
| 7412 __ test(eax, Immediate(kSmiTagMask)); | |
| 7413 __ j(zero, &not_string1); | 7489 __ j(zero, &not_string1); |
| 7414 __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, eax); | 7490 __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, ecx); |
| 7415 __ j(above_equal, ¬_string1); | 7491 __ j(above_equal, ¬_string1); |
| 7416 | 7492 |
| 7417 // First argument is a a string, test second. | 7493 // First argument is a string, test second. |
| 7418 __ test(edx, Immediate(kSmiTagMask)); | 7494 __ test(eax, Immediate(kSmiTagMask)); |
| 7419 __ j(zero, &string1); | 7495 __ j(zero, &string1); |
| 7420 __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, edx); | 7496 __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ecx); |
| 7421 __ j(above_equal, &string1); | 7497 __ j(above_equal, &string1); |
| 7422 | 7498 |
| 7423 // First and second argument are strings. Jump to the string add stub. | 7499 // First and second argument are strings. Jump to the string add stub. |
| 7424 StringAddStub stub(NO_STRING_CHECK_IN_STUB); | 7500 StringAddStub stub(NO_STRING_CHECK_IN_STUB); |
| 7425 __ TailCallStub(&stub); | 7501 __ TailCallStub(&stub); |
| 7426 | 7502 |
| 7427 // Only first argument is a string. | 7503 // Only first argument is a string. |
| 7428 __ bind(&string1); | 7504 __ bind(&string1); |
| 7429 __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION); | 7505 __ InvokeBuiltin( |
| 7506 HasArgumentsReversed() ? | |
Mads Ager (chromium), 2010/01/25 15:44:03:
Indent by one more space. Looks like 3-space indentation.

Vladislav Kaznacheev, 2010/01/25 16:24:47:
Done.

| 7507 Builtins::STRING_ADD_RIGHT : | |
| 7508 Builtins::STRING_ADD_LEFT, | |
| 7509 JUMP_FUNCTION); | |
| 7430 | 7510 |
| 7431 // First argument was not a string, test second. | 7511 // First argument was not a string, test second. |
| 7432 __ bind(&not_string1); | 7512 __ bind(&not_string1); |
| 7433 __ test(edx, Immediate(kSmiTagMask)); | 7513 __ test(eax, Immediate(kSmiTagMask)); |
| 7434 __ j(zero, &not_strings); | 7514 __ j(zero, &not_strings); |
| 7435 __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, edx); | 7515 __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ecx); |
| 7436 __ j(above_equal, &not_strings); | 7516 __ j(above_equal, &not_strings); |
| 7437 | 7517 |
| 7438 // Only second argument is a string. | 7518 // Only second argument is a string. |
| 7439 __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION); | 7519 __ InvokeBuiltin( |
| 7520 HasArgumentsReversed() ? | |
Mads Ager (chromium), 2010/01/25 15:44:03:
Indent by one more space.

Vladislav Kaznacheev, 2010/01/25 16:24:47:
Done.

| 7521 Builtins::STRING_ADD_LEFT : | |
| 7522 Builtins::STRING_ADD_RIGHT, | |
| 7523 JUMP_FUNCTION); | |
| 7440 | 7524 |
| 7441 __ bind(&not_strings); | 7525 __ bind(&not_strings); |
| 7442 // Neither argument is a string. | 7526 // Neither argument is a string. |
| 7443 __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION); | 7527 __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION); |
| 7444 break; | 7528 break; |
| 7445 } | 7529 } |
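On the HasArgumentsReversed() selection between STRING_ADD_LEFT and STRING_ADD_RIGHT in the case above: string concatenation is order-sensitive, so a stub holding its operands in swapped registers must invoke the mirrored builtin to preserve the observable result. A trivial standalone reminder in plain C++:

```cpp
#include <cassert>
#include <string>

int main() {
  std::string left = "foo", right = "bar";
  // Concatenation is not commutative, which is why the reversed stub picks
  // the opposite STRING_ADD builtin rather than calling the same one.
  assert(left + right == "foobar");
  assert(right + left == "barfoo");
  assert(left + right != right + left);
  return 0;
}
```
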
| 7446 case Token::SUB: | 7530 case Token::SUB: |
| 7447 __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION); | 7531 __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION); |
| 7448 break; | 7532 break; |
| 7449 case Token::MUL: | 7533 case Token::MUL: |
| 7450 __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION); | 7534 __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION); |
| 7451 break; | 7535 break; |
| 7452 case Token::DIV: | 7536 case Token::DIV: |
| 7453 __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION); | 7537 __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION); |
| 7454 break; | 7538 break; |
| 7455 case Token::MOD: | 7539 case Token::MOD: |
| 7456 __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION); | 7540 __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION); |
| 7457 break; | 7541 break; |
| 7458 case Token::BIT_OR: | 7542 case Token::BIT_OR: |
| 7459 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION); | 7543 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION); |
| 7460 break; | 7544 break; |
| 7461 case Token::BIT_AND: | 7545 case Token::BIT_AND: |
| (...skipping 10 matching lines...) | |
| 7472 break; | 7556 break; |
| 7473 case Token::SHR: | 7557 case Token::SHR: |
| 7474 __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION); | 7558 __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION); |
| 7475 break; | 7559 break; |
| 7476 default: | 7560 default: |
| 7477 UNREACHABLE(); | 7561 UNREACHABLE(); |
| 7478 } | 7562 } |
| 7479 } | 7563 } |
| 7480 | 7564 |
| 7481 | 7565 |
| 7566 void GenericBinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm, | |
| 7567 Label* alloc_failure) { | |
| 7568 Label skip_allocation; | |
| 7569 OverwriteMode mode = mode_; | |
| 7570 if (HasArgumentsReversed()) { | |
| 7571 if (mode == OVERWRITE_RIGHT) | |
Mads Ager (chromium), 2010/01/25 15:44:03:
Please use '{' and '}' when having multi-line if statements.

Vladislav Kaznacheev, 2010/01/25 16:24:47:
Done.

| 7572 mode = OVERWRITE_LEFT; | |
| 7573 else if (mode == OVERWRITE_LEFT) | |
| 7574 mode = OVERWRITE_RIGHT; | |
| 7575 } | |
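The brace comment above refers to the OverwriteMode swap at the start of GenerateHeapResultAllocation; a braced form of that if/else, as requested (a sketch of the obvious rewrite, not necessarily the committed text):

```cpp
// Same swap as above, with braces per the review comment. OverwriteMode and
// HasArgumentsReversed() are the declarations already used in this stub.
if (HasArgumentsReversed()) {
  if (mode == OVERWRITE_RIGHT) {
    mode = OVERWRITE_LEFT;
  } else if (mode == OVERWRITE_LEFT) {
    mode = OVERWRITE_RIGHT;
  }
}
```
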
| 7576 switch (mode) { | |
| 7577 case OVERWRITE_LEFT: { | |
| 7578 // If the argument in edx is already an object, we skip the | |
| 7579 // allocation of a heap number. | |
| 7580 __ test(edx, Immediate(kSmiTagMask)); | |
| 7581 __ j(not_zero, &skip_allocation, not_taken); | |
| 7582 // Allocate a heap number for the result. Keep eax and edx intact | |
| 7583 // for the possible runtime call. | |
| 7584 __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure); | |
| 7585 // Now edx can be overwritten losing one of the arguments as we are | |
| 7586 // now done and will not need it any more. | |
| 7587 __ mov(edx, Operand(ebx)); | |
| 7588 __ bind(&skip_allocation); | |
| 7589 // Use object in edx as a result holder | |
| 7590 __ mov(eax, Operand(edx)); | |
| 7591 break; | |
| 7592 } | |
| 7593 case OVERWRITE_RIGHT: | |
| 7594 // If the argument in eax is already an object, we skip the | |
| 7595 // allocation of a heap number. | |
| 7596 __ test(eax, Immediate(kSmiTagMask)); | |
| 7597 __ j(not_zero, &skip_allocation, not_taken); | |
| 7598 // Fall through! | |
| 7599 case NO_OVERWRITE: | |
| 7600 // Allocate a heap number for the result. Keep eax and edx intact | |
| 7601 // for the possible runtime call. | |
| 7602 __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure); | |
| 7603 // Now eax can be overwritten losing one of the arguments as we are | |
| 7604 // now done and will not need it any more. | |
| 7605 __ mov(eax, ebx); | |
| 7606 __ bind(&skip_allocation); | |
| 7607 break; | |
| 7608 default: UNREACHABLE(); | |
| 7609 } | |
| 7610 } | |
| 7611 | |
| 7612 | |
| 7482 void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) { | 7613 void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) { |
| 7483 // If arguments are not passed in registers read them from the stack. | 7614 // If arguments are not passed in registers read them from the stack. |
| 7484 if (!HasArgumentsInRegisters()) { | 7615 if (!HasArgumentsInRegisters()) { |
| 7485 __ mov(eax, Operand(esp, 1 * kPointerSize)); | 7616 __ mov(eax, Operand(esp, 1 * kPointerSize)); |
| 7486 __ mov(edx, Operand(esp, 2 * kPointerSize)); | 7617 __ mov(edx, Operand(esp, 2 * kPointerSize)); |
| 7487 } | 7618 } |
| 7488 } | 7619 } |
| 7489 | 7620 |
| 7490 | 7621 |
| 7491 void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) { | 7622 void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) { |
| (...skipping 242 matching lines...) | |
| 7734 __ SmiUntag(eax); // Untag smi before converting to float. | 7865 __ SmiUntag(eax); // Untag smi before converting to float. |
| 7735 __ cvtsi2sd(xmm1, Operand(eax)); | 7866 __ cvtsi2sd(xmm1, Operand(eax)); |
| 7736 __ SmiTag(eax); // Retag smi for heap number overwriting test. | 7867 __ SmiTag(eax); // Retag smi for heap number overwriting test. |
| 7737 __ jmp(&done); | 7868 __ jmp(&done); |
| 7738 __ bind(&load_float_eax); | 7869 __ bind(&load_float_eax); |
| 7739 __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset)); | 7870 __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset)); |
| 7740 __ bind(&done); | 7871 __ bind(&done); |
| 7741 } | 7872 } |
| 7742 | 7873 |
| 7743 | 7874 |
| 7875 void FloatingPointHelper::LoadSse2Smis(MacroAssembler* masm, | |
| 7876 Register scratch, | |
| 7877 ArgLocation arg_location) { | |
| 7878 if (arg_location == ARGS_IN_REGISTERS) { | |
| 7879 __ mov(scratch, eax); | |
| 7880 } else { | |
| 7881 __ mov(scratch, Operand(esp, 2 * kPointerSize)); | |
| 7882 } | |
| 7883 __ SmiUntag(scratch); // Untag smi before converting to float. | |
| 7884 __ cvtsi2sd(xmm0, Operand(scratch)); | |
| 7885 | |
| 7886 | |
| 7887 if (arg_location == ARGS_IN_REGISTERS) { | |
| 7888 __ mov(scratch, ebx); | |
| 7889 } else { | |
| 7890 __ mov(scratch, Operand(esp, 1 * kPointerSize)); | |
| 7891 } | |
| 7892 __ SmiUntag(scratch); // Untag smi before converting to float. | |
| 7893 __ cvtsi2sd(xmm1, Operand(scratch)); | |
| 7894 } | |
| 7895 | |
| 7896 | |
| 7744 void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm, | 7897 void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm, |
| 7745 Register scratch) { | 7898 Register scratch, |
| 7899 ArgLocation arg_location) { | |
| 7746 Label load_smi_1, load_smi_2, done_load_1, done; | 7900 Label load_smi_1, load_smi_2, done_load_1, done; |
| 7747 __ mov(scratch, Operand(esp, 2 * kPointerSize)); | 7901 if (arg_location == ARGS_IN_REGISTERS) { |
| 7902 __ mov(scratch, edx); | |
| 7903 } else { | |
| 7904 __ mov(scratch, Operand(esp, 2 * kPointerSize)); | |
| 7905 } | |
| 7748 __ test(scratch, Immediate(kSmiTagMask)); | 7906 __ test(scratch, Immediate(kSmiTagMask)); |
| 7749 __ j(zero, &load_smi_1, not_taken); | 7907 __ j(zero, &load_smi_1, not_taken); |
| 7750 __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset)); | 7908 __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset)); |
| 7751 __ bind(&done_load_1); | 7909 __ bind(&done_load_1); |
| 7752 | 7910 |
| 7753 __ mov(scratch, Operand(esp, 1 * kPointerSize)); | 7911 if (arg_location == ARGS_IN_REGISTERS) { |
| 7912 __ mov(scratch, eax); | |
| 7913 } else { | |
| 7914 __ mov(scratch, Operand(esp, 1 * kPointerSize)); | |
| 7915 } | |
| 7754 __ test(scratch, Immediate(kSmiTagMask)); | 7916 __ test(scratch, Immediate(kSmiTagMask)); |
| 7755 __ j(zero, &load_smi_2, not_taken); | 7917 __ j(zero, &load_smi_2, not_taken); |
| 7756 __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset)); | 7918 __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset)); |
| 7757 __ jmp(&done); | 7919 __ jmp(&done); |
| 7758 | 7920 |
| 7759 __ bind(&load_smi_1); | 7921 __ bind(&load_smi_1); |
| 7760 __ SmiUntag(scratch); | 7922 __ SmiUntag(scratch); |
| 7761 __ push(scratch); | 7923 __ push(scratch); |
| 7762 __ fild_s(Operand(esp, 0)); | 7924 __ fild_s(Operand(esp, 0)); |
| 7763 __ pop(scratch); | 7925 __ pop(scratch); |
| 7764 __ jmp(&done_load_1); | 7926 __ jmp(&done_load_1); |
| 7765 | 7927 |
| 7766 __ bind(&load_smi_2); | 7928 __ bind(&load_smi_2); |
| 7767 __ SmiUntag(scratch); | 7929 __ SmiUntag(scratch); |
| 7768 __ push(scratch); | 7930 __ push(scratch); |
| 7769 __ fild_s(Operand(esp, 0)); | 7931 __ fild_s(Operand(esp, 0)); |
| 7770 __ pop(scratch); | 7932 __ pop(scratch); |
| 7771 | 7933 |
| 7772 __ bind(&done); | 7934 __ bind(&done); |
| 7773 } | 7935 } |
| 7774 | 7936 |
| 7775 | 7937 |
| 7938 void FloatingPointHelper::LoadFloatSmis(MacroAssembler* masm, | |
| 7939 Register scratch, | |
| 7940 ArgLocation arg_location) { | |
| 7941 if (arg_location == ARGS_IN_REGISTERS) { | |
| 7942 __ mov(scratch, eax); | |
| 7943 } else { | |
| 7944 __ mov(scratch, Operand(esp, 2 * kPointerSize)); | |
| 7945 } | |
| 7946 __ SmiUntag(scratch); | |
| 7947 __ push(scratch); | |
| 7948 __ fild_s(Operand(esp, 0)); | |
| 7949 __ pop(scratch); | |
| 7950 | |
| 7951 if (arg_location == ARGS_IN_REGISTERS) { | |
| 7952 __ mov(scratch, ebx); | |
| 7953 } else { | |
| 7954 __ mov(scratch, Operand(esp, 1 * kPointerSize)); | |
| 7955 } | |
| 7956 __ SmiUntag(scratch); | |
| 7957 __ push(scratch); | |
| 7958 __ fild_s(Operand(esp, 0)); | |
| 7959 __ pop(scratch); | |
| 7960 } | |
| 7961 | |
| 7962 | |
| 7776 void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm, | 7963 void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm, |
| 7777 Label* non_float, | 7964 Label* non_float, |
| 7778 Register scratch) { | 7965 Register scratch) { |
| 7779 Label test_other, done; | 7966 Label test_other, done; |
| 7780 // Test if both operands are floats or smi -> scratch=k_is_float; | 7967 // Test if both operands are floats or smi -> scratch=k_is_float; |
| 7781 // Otherwise scratch = k_not_float. | 7968 // Otherwise scratch = k_not_float. |
| 7782 __ test(edx, Immediate(kSmiTagMask)); | 7969 __ test(edx, Immediate(kSmiTagMask)); |
| 7783 __ j(zero, &test_other, not_taken); // argument in edx is OK | 7970 __ j(zero, &test_other, not_taken); // argument in edx is OK |
| 7784 __ mov(scratch, FieldOperand(edx, HeapObject::kMapOffset)); | 7971 __ mov(scratch, FieldOperand(edx, HeapObject::kMapOffset)); |
| 7785 __ cmp(scratch, Factory::heap_number_map()); | 7972 __ cmp(scratch, Factory::heap_number_map()); |
| (...skipping 2007 matching lines...) | |
| 9793 | 9980 |
| 9794 // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater) | 9981 // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater) |
| 9795 // tagged as a small integer. | 9982 // tagged as a small integer. |
| 9796 __ bind(&runtime); | 9983 __ bind(&runtime); |
| 9797 __ TailCallRuntime(ExternalReference(Runtime::kStringCompare), 2, 1); | 9984 __ TailCallRuntime(ExternalReference(Runtime::kStringCompare), 2, 1); |
| 9798 } | 9985 } |
| 9799 | 9986 |
| 9800 #undef __ | 9987 #undef __ |
| 9801 | 9988 |
| 9802 } } // namespace v8::internal | 9989 } } // namespace v8::internal |