Chromium Code Reviews

Side by Side Diff: src/ia32/codegen-ia32.cc

Issue 556019: Refactoring and small optimization of the smi code for binary op stubs... (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: Created 10 years, 11 months ago
OLD | NEW
1 // Copyright 2006-2009 the V8 project authors. All rights reserved. 1 // Copyright 2006-2009 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 745 matching lines...)
756 static void LoadFloatOperand(MacroAssembler* masm, Register number); 756 static void LoadFloatOperand(MacroAssembler* masm, Register number);
757 // Code pattern for loading floating point values. Input values must 757 // Code pattern for loading floating point values. Input values must
758 // be either smi or heap number objects (fp values). Requirements: 758 // be either smi or heap number objects (fp values). Requirements:
759 // operand_1 on TOS+1 or in edx, operand_2 on TOS+2 or in eax. 759 // operand_1 on TOS+1 or in edx, operand_2 on TOS+2 or in eax.
760 // Returns operands as floating point numbers on FPU stack. 760 // Returns operands as floating point numbers on FPU stack.
761 static void LoadFloatOperands(MacroAssembler* masm, 761 static void LoadFloatOperands(MacroAssembler* masm,
762 Register scratch, 762 Register scratch,
763 ArgLocation arg_location = ARGS_ON_STACK); 763 ArgLocation arg_location = ARGS_ON_STACK);
764 764
765 // Similar to LoadFloatOperand but assumes that both operands are smis. 765 // Similar to LoadFloatOperand but assumes that both operands are smis.
766 // Accepts operands in eax, ebx. 766 // Expects operands in edx, eax.
767 static void LoadFloatSmis(MacroAssembler* masm, Register scratch); 767 static void LoadFloatSmis(MacroAssembler* masm, Register scratch);
768 768
769 // Test if operands are smi or number objects (fp). Requirements: 769 // Test if operands are smi or number objects (fp). Requirements:
770 // operand_1 in eax, operand_2 in edx; falls through on float 770 // operand_1 in eax, operand_2 in edx; falls through on float
771 // operands, jumps to the non_float label otherwise. 771 // operands, jumps to the non_float label otherwise.
772 static void CheckFloatOperands(MacroAssembler* masm, 772 static void CheckFloatOperands(MacroAssembler* masm,
773 Label* non_float, 773 Label* non_float,
774 Register scratch); 774 Register scratch);
775 // Takes the operands in edx and eax and loads them as integers in eax 775 // Takes the operands in edx and eax and loads them as integers in eax
776 // and ecx. 776 // and ecx.
777 static void LoadAsIntegers(MacroAssembler* masm, 777 static void LoadAsIntegers(MacroAssembler* masm,
778 bool use_sse3, 778 bool use_sse3,
779 Label* operand_conversion_failure); 779 Label* operand_conversion_failure);
780 // Test if operands are numbers (smi or HeapNumber objects), and load 780 // Test if operands are numbers (smi or HeapNumber objects), and load
781 // them into xmm0 and xmm1 if they are. Jump to label not_numbers if 781 // them into xmm0 and xmm1 if they are. Jump to label not_numbers if
782 // either operand is not a number. Operands are in edx and eax. 782 // either operand is not a number. Operands are in edx and eax.
783 // Leaves operands unchanged. 783 // Leaves operands unchanged.
784 static void LoadSse2Operands(MacroAssembler* masm, Label* not_numbers); 784 static void LoadSSE2Operands(MacroAssembler* masm, Label* not_numbers);
785 785
786 // Similar to LoadSse2Operands but assumes that both operands are smis. 786 // Similar to LoadSSE2Operands but assumes that both operands are smis.
787 // Accepts operands in eax, ebx. 787 // Expects operands in edx, eax.
788 static void LoadSse2Smis(MacroAssembler* masm, Register scratch); 788 static void LoadSSE2Smis(MacroAssembler* masm, Register scratch);
789 }; 789 };
790 790
791 791
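For reference, a minimal C++ sketch of the smi representation these helpers rely on, assuming the invariants the stub asserts below (kSmiTag == 0, kSmiTagSize == 1, so kSmiTagMask == 1); the helper names are illustrative, not V8 API:

// Illustrative only: on ia32 a smi keeps a 31-bit signed payload shifted
// left by one, so the low bit doubles as the tag (0 == smi).
static inline int32_t SmiTagValue(int32_t value) { return value << 1; }
static inline int32_t SmiUntagValue(int32_t smi) { return smi >> 1; }  // arithmetic shift keeps the sign
static inline bool IsSmiWord(int32_t word) { return (word & 1) == 0; }
// Or-ing two words leaves the low bit clear only if it is clear in both,
// which is why a single test on (left | right) smi-checks both operands.
static inline bool BothSmiWords(int32_t a, int32_t b) { return IsSmiWord(a | b); }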
792 const char* GenericBinaryOpStub::GetName() { 792 const char* GenericBinaryOpStub::GetName() {
793 if (name_ != NULL) return name_; 793 if (name_ != NULL) return name_;
794 const int kMaxNameLength = 100; 794 const int kMaxNameLength = 100;
795 name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength); 795 name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
796 if (name_ == NULL) return "OOM"; 796 if (name_ == NULL) return "OOM";
797 const char* op_name = Token::Name(op_); 797 const char* op_name = Token::Name(op_);
798 const char* overwrite_name; 798 const char* overwrite_name;
(...skipping 6278 matching lines...)
7077 return frame->CallStub(this, left, right); 7077 return frame->CallStub(this, left, right);
7078 } else { 7078 } else {
7079 frame->Push(left); 7079 frame->Push(left);
7080 frame->Push(right); 7080 frame->Push(right);
7081 return frame->CallStub(this, 2); 7081 return frame->CallStub(this, 2);
7082 } 7082 }
7083 } 7083 }
7084 7084
7085 7085
7086 void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) { 7086 void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
7087 if (HasArgsInRegisters()) { 7087 // 1. Move arguments into edx, eax except for DIV and MOD, which need the
7088 __ mov(ebx, eax); 7088 // dividend in eax and edx free for the division. Use eax, ebx for those.
7089 __ mov(eax, edx); 7089 Comment load_comment(masm, "-- Load arguments");
7090 } else { 7090 Register left = edx;
7091 __ mov(ebx, Operand(esp, 1 * kPointerSize)); 7091 Register right = eax;
7092 __ mov(eax, Operand(esp, 2 * kPointerSize)); 7092 if (op_ == Token::DIV || op_ == Token::MOD) {
7093 left = eax;
7094 right = ebx;
7095 if (HasArgsInRegisters()) {
7096 __ mov(ebx, eax);
7097 __ mov(eax, edx);
7098 }
7099 }
7100 if (!HasArgsInRegisters()) {
7101 __ mov(right, Operand(esp, 1 * kPointerSize));
7102 __ mov(left, Operand(esp, 2 * kPointerSize));
7093 } 7103 }
7094 7104
7095 Label not_smis, not_smis_or_overflow, not_smis_undo_optimistic; 7105 // 2. Prepare the smi check of both operands by oring them together.
7096 Label use_fp_on_smis, done; 7106 Comment smi_check_comment(masm, "-- Smi check arguments");
7107 Label not_smis;
7108 Register combined = ecx;
7109 ASSERT(!left.is(combined) && !right.is(combined));
7110 switch (op_) {
7111 case Token::BIT_OR:
7112 // Perform the operation into eax and smi check the result. Preserve
7113 // eax in case the result is not a smi.
7114 ASSERT(!left.is(ecx) && !right.is(ecx));
7115 __ mov(ecx, right);
7116 __ or_(right, Operand(left)); // Bitwise or is commutative.
7117 combined = right;
7118 break;
7097 7119
7098 // Perform fast-case smi code for the operation (eax <op> ebx) and 7120 case Token::BIT_XOR:
7099 // leave result in register eax. 7121 case Token::BIT_AND:
7122 case Token::ADD:
7123 case Token::SUB:
7124 case Token::MUL:
7125 case Token::DIV:
7126 case Token::MOD:
7127 __ mov(combined, right);
7128 __ or_(combined, Operand(left));
7129 break;
7100 7130
7101 // Prepare the smi check of both operands by or'ing them together 7131 case Token::SHL:
7102 // before checking against the smi mask. 7132 case Token::SAR:
7103 __ mov(ecx, Operand(ebx)); 7133 case Token::SHR:
7104 __ or_(ecx, Operand(eax)); 7134 // Move the right operand into ecx for the shift operation, and use
7135 // eax as the smi check register.
7136 ASSERT(!left.is(ecx) && !right.is(ecx));
7137 __ mov(ecx, right);
7138 __ or_(right, Operand(left));
7139 combined = right;
7140 break;
7105 7141
7142 default:
7143 break;
7144 }
7145
7146 // 3. Perform the smi check of the operands.
7147 ASSERT(kSmiTag == 0); // Adjust zero check if not the case.
7148 __ test(combined, Immediate(kSmiTagMask));
7149 __ j(not_zero, &not_smis, not_taken);
7150
7151 // 4. Operands are both smis, perform the operation leaving the result in
7152 // eax and check the result if necessary.
7153 Comment perform_smi(masm, "-- Perform smi operation");
7154 Label use_fp_on_smis;
7106 switch (op_) { 7155 switch (op_) {
7156 case Token::BIT_OR:
7157 // Nothing to do.
7158 break;
7159
7160 case Token::BIT_XOR:
7161 ASSERT(right.is(eax));
7162 __ xor_(right, Operand(left)); // Bitwise xor is commutative.
7163 break;
7164
7165 case Token::BIT_AND:
7166 ASSERT(right.is(eax));
7167 __ and_(right, Operand(left)); // Bitwise and is commutative.
7168 break;
7169
7170 case Token::SHL:
7171 // Remove tags from operands (but keep sign).
7172 __ SmiUntag(left);
7173 __ SmiUntag(ecx);
7174 // Perform the operation.
7175 __ shl_cl(left);
7176 // Check that the *signed* result fits in a smi.
7177 __ cmp(left, 0xc0000000);
7178 __ j(sign, &use_fp_on_smis, not_taken);
7179 // Tag the result and store it in register eax.
7180 __ SmiTag(left);
7181 __ mov(eax, left);
7182 break;
7183
7184 case Token::SAR:
7185 // Remove tags from operands (but keep sign).
7186 __ SmiUntag(left);
7187 __ SmiUntag(ecx);
7188 // Perform the operation.
7189 __ sar_cl(left);
7190 // Tag the result and store it in register eax.
7191 __ SmiTag(left);
7192 __ mov(eax, left);
7193 break;
7194
7195 case Token::SHR:
7196 // Remove tags from operands (but keep sign).
7197 __ SmiUntag(left);
7198 __ SmiUntag(ecx);
7199 // Perform the operation.
7200 __ shr_cl(left);
7201 // Check that the *unsigned* result fits in a smi.
7202 // Neither of the two high-order bits can be set:
7203 // - 0x80000000: high bit would be lost when smi tagging.
7204 // - 0x40000000: this number would convert to negative when
7205 // smi tagging. These two cases can only happen with shifts
7206 // by 0 or 1 when handed a valid smi.
7207 __ test(left, Immediate(0xc0000000));
7208 __ j(not_zero, slow, not_taken);
7209 // Tag the result and store it in register eax.
7210 __ SmiTag(left);
7211 __ mov(eax, left);
7212 break;
7213
7107 case Token::ADD: 7214 case Token::ADD:
7108 __ add(eax, Operand(ebx)); // add optimistically 7215 ASSERT(right.is(eax));
7109 __ j(overflow, &not_smis_or_overflow, not_taken); 7216 __ add(right, Operand(left)); // Addition is commutative.
7217 __ j(overflow, &use_fp_on_smis, not_taken);
7110 break; 7218 break;
7111 7219
7112 case Token::SUB: 7220 case Token::SUB:
7113 __ sub(eax, Operand(ebx)); // subtract optimistically 7221 __ sub(left, Operand(right));
7114 __ j(overflow, &not_smis_or_overflow, not_taken); 7222 __ j(overflow, &use_fp_on_smis, not_taken);
7115 break; 7223 __ mov(eax, left);
7116
7117 case Token::MUL:
7118 __ mov(edi, eax); // Backup the left operand.
7119 break;
7120
7121 case Token::DIV:
7122 __ mov(edi, eax); // Backup the left operand.
7123 // Fall through.
7124 case Token::MOD:
7125 // Sign extend eax into edx:eax.
7126 __ cdq();
7127 // Check for 0 divisor.
7128 __ test(ebx, Operand(ebx));
7129 __ j(zero, &not_smis_or_overflow, not_taken);
7130 break;
7131
7132 default:
7133 // Fall-through to smi check.
7134 break;
7135 }
7136
7137 // Perform the actual smi check.
7138 ASSERT(kSmiTag == 0); // adjust zero check if not the case
7139 __ test(ecx, Immediate(kSmiTagMask));
7140 __ j(not_zero, &not_smis_undo_optimistic, not_taken);
7141
7142 switch (op_) {
7143 case Token::ADD:
7144 case Token::SUB:
7145 // Do nothing here.
7146 break; 7224 break;
7147 7225
7148 case Token::MUL: 7226 case Token::MUL:
7149 // If the smi tag is 0 we can just leave the tag on one operand. 7227 // If the smi tag is 0 we can just leave the tag on one operand.
7150 ASSERT(kSmiTag == 0); // adjust code below if not the case 7228 ASSERT(kSmiTag == 0); // Adjust code below if not the case.
7229 // We can't revert the multiplication if the result is not a smi,
7230 // so save the right operand.
7231 __ mov(ebx, right);
7151 // Remove tag from one of the operands (but keep sign). 7232 // Remove tag from one of the operands (but keep sign).
7152 __ SmiUntag(eax); 7233 __ SmiUntag(right);
7153 // Do multiplication. 7234 // Do multiplication.
7154 __ imul(eax, Operand(ebx)); // multiplication of smis; result in eax 7235 __ imul(right, Operand(left)); // Multiplication is commutative.
7155 // Go slow on overflows.
7156 __ j(overflow, &use_fp_on_smis, not_taken); 7236 __ j(overflow, &use_fp_on_smis, not_taken);
7157 // Check for negative zero result. 7237 // Check for negative zero result. Use combined = left | right.
7158 __ NegativeZeroTest(eax, ecx, &use_fp_on_smis); // use ecx = x | y 7238 __ NegativeZeroTest(right, combined, &use_fp_on_smis);
7159 break; 7239 break;
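// (Illustrative aside, assuming the x2 smi encoding sketched earlier:
// with a zero tag, untagging only one factor keeps the product tagged,
// since (2 * a) * b == 2 * (a * b). The NegativeZeroTest then catches a
// zero product where one operand was negative, i.e. a mathematical -0.0,
// which a smi cannot represent.)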
7160 7240
7161 case Token::DIV: 7241 case Token::DIV:
7162 // Divide edx:eax by ebx. 7242 // We can't revert the division if the result is not a smi, so
7163 __ idiv(ebx); 7243 // save the left operand.
7164 // Check for the corner case of dividing the most negative smi 7244 __ mov(edi, left);
7165 // by -1. We cannot use the overflow flag, since it is not set 7245 // Check for 0 divisor.
7166 // by idiv instruction. 7246 __ test(right, Operand(right));
7247 __ j(zero, &use_fp_on_smis, not_taken);
7248 // Sign extend left into edx:eax.
7249 ASSERT(left.is(eax));
7250 __ cdq();
7251 // Divide edx:eax by right.
7252 __ idiv(right);
7253 // Check for the corner case of dividing the most negative smi by
7254 // -1. We cannot use the overflow flag, since it is not set by idiv
7255 // instruction.
7167 ASSERT(kSmiTag == 0 && kSmiTagSize == 1); 7256 ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
7168 __ cmp(eax, 0x40000000); 7257 __ cmp(eax, 0x40000000);
7169 __ j(equal, &use_fp_on_smis); 7258 __ j(equal, &use_fp_on_smis);
7170 // Check for negative zero result. 7259 // Check for negative zero result. Use combined = left | right.
7171 __ NegativeZeroTest(eax, ecx, &use_fp_on_smis); // use ecx = x | y 7260 __ NegativeZeroTest(eax, combined, &use_fp_on_smis);
7172 // Check that the remainder is zero. 7261 // Check that the remainder is zero.
7173 __ test(edx, Operand(edx)); 7262 __ test(edx, Operand(edx));
7174 __ j(not_zero, &use_fp_on_smis); 7263 __ j(not_zero, &use_fp_on_smis);
7175 // Tag the result and store it in register eax. 7264 // Tag the result and store it in register eax.
7176 __ SmiTag(eax); 7265 __ SmiTag(eax);
7177 break; 7266 break;
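// (Illustrative aside: both inputs carry the x2 tag, so the quotient
// (2 * a) / (2 * b) == a / b comes out already untagged; 0x40000000 ==
// 2^30 is the one in-range quotient whose smi tag would overflow 32
// bits, hence the explicit compare before tagging.)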
7178 7267
7179 case Token::MOD: 7268 case Token::MOD:
7180 // Divide edx:eax by ebx. 7269 // Check for 0 divisor.
7181 __ idiv(ebx); 7270 __ test(right, Operand(right));
7182 // Check for negative zero result. 7271 __ j(zero, &not_smis, not_taken);
7183 __ NegativeZeroTest(edx, ecx, slow); // use ecx = x | y 7272
7273 // Sign extend left into edx:eax.
7274 ASSERT(left.is(eax));
7275 __ cdq();
7276 // Divide edx:eax by right.
7277 __ idiv(right);
7278 // Check for negative zero result. Use combined = left | right.
7279 __ NegativeZeroTest(edx, combined, slow);
7184 // Move remainder to register eax. 7280 // Move remainder to register eax.
7185 __ mov(eax, Operand(edx)); 7281 __ mov(eax, edx);
7186 break;
7187
7188 case Token::BIT_OR:
7189 __ or_(eax, Operand(ebx));
7190 break;
7191
7192 case Token::BIT_AND:
7193 __ and_(eax, Operand(ebx));
7194 break;
7195
7196 case Token::BIT_XOR:
7197 __ xor_(eax, Operand(ebx));
7198 break;
7199
7200 case Token::SHL:
7201 case Token::SHR:
7202 case Token::SAR:
7203 // Move the second operand into register ecx.
7204 __ mov(ecx, Operand(ebx));
7205 // Remove tags from operands (but keep sign).
7206 __ SmiUntag(eax);
7207 __ SmiUntag(ecx);
7208 // Perform the operation.
7209 switch (op_) {
7210 case Token::SAR:
7211 __ sar_cl(eax);
7212 // No checks of result necessary
7213 break;
7214 case Token::SHR:
7215 __ shr_cl(eax);
7216 // Check that the *unsigned* result fits in a smi.
7217 // Neither of the two high-order bits can be set:
7218 // - 0x80000000: high bit would be lost when smi tagging.
7219 // - 0x40000000: this number would convert to negative when
7220 // smi tagging. These two cases can only happen with shifts
7221 // by 0 or 1 when handed a valid smi.
7222 __ test(eax, Immediate(0xc0000000));
7223 __ j(not_zero, slow, not_taken);
7224 break;
7225 case Token::SHL:
7226 __ shl_cl(eax);
7227 // Check that the *signed* result fits in a smi.
7228 __ cmp(eax, 0xc0000000);
7229 __ j(sign, &use_fp_on_smis, not_taken);
7230 break;
7231 default:
7232 UNREACHABLE();
7233 }
7234 // Tag the result and store it in register eax.
7235 __ SmiTag(eax);
7236 break; 7282 break;
7237 7283
7238 default: 7284 default:
7239 UNREACHABLE(); 7285 UNREACHABLE();
7240 break;
7241 } 7286 }
7287
7288 // 5. Emit return of result in eax.
7242 GenerateReturn(masm); 7289 GenerateReturn(masm);
7243 7290
7244 __ bind(&not_smis_or_overflow); 7291 // 6. For some operations emit inline code to perform floating point
7245 // Revert optimistic operation. 7292 // operations on known smis (e.g., if the result of the operation
7293 // overflowed the smi range).
7246 switch (op_) { 7294 switch (op_) {
7247 case Token::ADD: __ sub(eax, Operand(ebx)); break; 7295 case Token::SHL: {
7248 case Token::SUB: __ add(eax, Operand(ebx)); break; 7296 Comment perform_float(masm, "-- Perform float operation on smis");
7249 default: break; 7297 __ bind(&use_fp_on_smis);
7250 } 7298 // The result we want is in left == edx, so we can put the allocated
7251 ASSERT(kSmiTag == 0); // Adjust zero check if not the case. 7299 // heap number in eax.
7252 __ test(ecx, Immediate(kSmiTagMask)); 7300 __ AllocateHeapNumber(eax, ecx, ebx, slow);
7253 __ j(not_zero, &not_smis, not_taken); 7301 // Store the result in the HeapNumber and return.
7254 // Correct operand values are in eax, ebx at this point.
7255
7256 __ bind(&use_fp_on_smis);
7257 // Both operands are known to be SMIs but the result does not fit into a SMI.
7258 switch (op_) {
7259 case Token::MUL:
7260 case Token::DIV:
7261 __ mov(eax, edi); // Restore the left operand.
7262 // Fall through.
7263 case Token::ADD:
7264 case Token::SUB: {
7265 Label after_alloc_failure;
7266 __ AllocateHeapNumber(edx, ecx, no_reg, &after_alloc_failure);
7267
7268 if (CpuFeatures::IsSupported(SSE2)) { 7302 if (CpuFeatures::IsSupported(SSE2)) {
7269 CpuFeatures::Scope use_sse2(SSE2); 7303 CpuFeatures::Scope use_sse2(SSE2);
7270 FloatingPointHelper::LoadSse2Smis(masm, ecx); 7304 __ cvtsi2sd(xmm0, Operand(left));
7305 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
7306 } else {
7307 // It's OK to overwrite the right argument on the stack because we
7308 // are about to return.
7309 __ mov(Operand(esp, 1 * kPointerSize), left);
7310 __ fild_s(Operand(esp, 1 * kPointerSize));
7311 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
7312 }
7313 GenerateReturn(masm);
7314 break;
7315 }
7316
7317 case Token::ADD:
7318 case Token::SUB:
7319 case Token::MUL:
7320 case Token::DIV: {
7321 Comment perform_float(masm, "-- Perform float operation on smis");
7322 __ bind(&use_fp_on_smis);
7323 // Restore arguments to edx, eax.
7324 switch (op_) {
7325 case Token::ADD:
7326 // Revert right = right + left.
7327 __ sub(right, Operand(left));
7328 break;
7329 case Token::SUB:
7330 // Revert left = left - right.
7331 __ add(left, Operand(right));
7332 break;
7333 case Token::MUL:
7334 // Right was clobbered but a copy is in ebx.
7335 __ mov(right, ebx);
7336 break;
7337 case Token::DIV:
7338 // Left was clobbered but a copy is in edi. Right is in ebx for
7339 // division.
7340 __ mov(edx, edi);
7341 __ mov(eax, right);
7342 break;
7343 default: UNREACHABLE();
7344 break;
7345 }
7346 __ AllocateHeapNumber(ecx, ebx, no_reg, slow);
7347 if (CpuFeatures::IsSupported(SSE2)) {
7348 CpuFeatures::Scope use_sse2(SSE2);
7349 FloatingPointHelper::LoadSSE2Smis(masm, ebx);
7271 switch (op_) { 7350 switch (op_) {
7272 case Token::ADD: __ addsd(xmm0, xmm1); break; 7351 case Token::ADD: __ addsd(xmm0, xmm1); break;
7273 case Token::SUB: __ subsd(xmm0, xmm1); break; 7352 case Token::SUB: __ subsd(xmm0, xmm1); break;
7274 case Token::MUL: __ mulsd(xmm0, xmm1); break; 7353 case Token::MUL: __ mulsd(xmm0, xmm1); break;
7275 case Token::DIV: __ divsd(xmm0, xmm1); break; 7354 case Token::DIV: __ divsd(xmm0, xmm1); break;
7276 default: UNREACHABLE(); 7355 default: UNREACHABLE();
7277 } 7356 }
7278 __ movdbl(FieldOperand(edx, HeapNumber::kValueOffset), xmm0); 7357 __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0);
7279 } else { // SSE2 not available, use FPU. 7358 } else { // SSE2 not available, use FPU.
7280 FloatingPointHelper::LoadFloatSmis(masm, ecx); 7359 FloatingPointHelper::LoadFloatSmis(masm, ebx);
7281 switch (op_) { 7360 switch (op_) {
7282 case Token::ADD: __ faddp(1); break; 7361 case Token::ADD: __ faddp(1); break;
7283 case Token::SUB: __ fsubp(1); break; 7362 case Token::SUB: __ fsubp(1); break;
7284 case Token::MUL: __ fmulp(1); break; 7363 case Token::MUL: __ fmulp(1); break;
7285 case Token::DIV: __ fdivp(1); break; 7364 case Token::DIV: __ fdivp(1); break;
7286 default: UNREACHABLE(); 7365 default: UNREACHABLE();
7287 } 7366 }
7288 __ fstp_d(FieldOperand(edx, HeapNumber::kValueOffset)); 7367 __ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset));
7289 } 7368 }
7290 __ mov(eax, edx); 7369 __ mov(eax, ecx);
7291 GenerateReturn(masm);
7292
7293 __ bind(&after_alloc_failure);
7294 __ mov(edx, eax);
7295 __ mov(eax, ebx);
7296 __ jmp(slow);
7297 break;
7298 }
7299
7300 case Token::BIT_OR:
7301 case Token::BIT_AND:
7302 case Token::BIT_XOR:
7303 case Token::SAR:
7304 // Do nothing here as these operations always succeed on a pair of smis.
7305 break;
7306
7307 case Token::MOD:
7308 case Token::SHR:
7309 // Do nothing here as these go directly to runtime.
7310 break;
7311
7312 case Token::SHL: {
7313 __ AllocateHeapNumber(ebx, ecx, edx, slow);
7314 // Store the result in the HeapNumber and return.
7315 if (CpuFeatures::IsSupported(SSE2)) {
7316 CpuFeatures::Scope use_sse2(SSE2);
7317 __ cvtsi2sd(xmm0, Operand(eax));
7318 __ movdbl(FieldOperand(ebx, HeapNumber::kValueOffset), xmm0);
7319 } else {
7320 __ mov(Operand(esp, 1 * kPointerSize), eax);
7321 __ fild_s(Operand(esp, 1 * kPointerSize));
7322 __ fstp_d(FieldOperand(ebx, HeapNumber::kValueOffset));
7323 }
7324 __ mov(eax, ebx);
7325 GenerateReturn(masm); 7370 GenerateReturn(masm);
7326 break; 7371 break;
7327 } 7372 }
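// (Illustrative aside: the reverts above rely on ia32 add/sub wrapping
// modulo 2^32, so after an overflowing "add right, left" the matching
// "sub right, left" restores the original right operand bit-for-bit and
// the slow path sees the untouched smi inputs.)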
7328 7373
7329 default: UNREACHABLE(); break; 7374 default:
7375 break;
7330 } 7376 }
7331 7377
7332 __ bind(&not_smis_undo_optimistic); 7378 // 7. Non-smi operands: fall out to the non-smi code with the operands in
7379 // edx and eax.
7380 Comment done_comment(masm, "-- Enter non-smi code");
7381 __ bind(&not_smis);
7333 switch (op_) { 7382 switch (op_) {
7334 case Token::ADD: __ sub(eax, Operand(ebx)); break; 7383 case Token::BIT_OR:
7335 case Token::SUB: __ add(eax, Operand(ebx)); break; 7384 case Token::SHL:
7336 default: break; 7385 case Token::SAR:
7386 case Token::SHR:
7387 // Right operand is saved in ecx and eax was destroyed by the smi
7388 // check.
7389 __ mov(eax, ecx);
7390 break;
7391
7392 case Token::DIV:
7393 case Token::MOD:
7394 // Operands are in eax, ebx at this point.
7395 __ mov(edx, eax);
7396 __ mov(eax, ebx);
7397 break;
7398
7399 default:
7400 break;
7337 } 7401 }
7338
7339 __ bind(&not_smis);
7340 __ mov(edx, eax);
7341 __ mov(eax, ebx);
7342
7343 __ bind(&done);
7344 } 7402 }
7345 7403
7346 7404
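A short sketch of the two smi range checks used by the shift cases above (illustrative helpers, not V8 API). A 31-bit smi payload covers [-2^30, 2^30), and the signed (SHL) and unsigned (SHR) results are bounds-checked differently:

#include <cstdint>

// SHL path: "cmp left, 0xc0000000; j(sign, ...)" subtracts 0xc0000000,
// i.e. adds 2^30 modulo 2^32, leaving the sign bit set exactly when the
// value lies outside [-2^30, 2^30). Spelled out directly:
static inline bool SignedFitsInSmi(int32_t v) {
  return v >= -0x40000000 && v < 0x40000000;
}

// SHR path: "test left, Immediate(0xc0000000)" rejects bit 31 (it would
// be lost when tagging) and bit 30 (the tagged value would come out
// negative):
static inline bool UnsignedFitsInSmi(uint32_t v) {
  return (v & 0xc0000000u) == 0;
}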
7347 void GenericBinaryOpStub::Generate(MacroAssembler* masm) { 7405 void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
7348 Label call_runtime; 7406 Label call_runtime;
7349 7407
7350 __ IncrementCounter(&Counters::generic_binary_stub_calls, 1); 7408 __ IncrementCounter(&Counters::generic_binary_stub_calls, 1);
7351 7409
7352 // Generate fast case smi code if requested. This flag is set when the fast 7410 // Generate fast case smi code if requested. This flag is set when the fast
7353 // case smi code is not generated by the caller. Generating it here will speed 7411 // case smi code is not generated by the caller. Generating it here will speed
7354 // up common operations. 7412 // up common operations.
7355 if (HasSmiCodeInStub()) { 7413 if (HasSmiCodeInStub()) {
7356 GenerateSmiCode(masm, &call_runtime); 7414 GenerateSmiCode(masm, &call_runtime);
7357 } else if (op_ != Token::MOD) { // MOD goes straight to runtime. 7415 } else if (op_ != Token::MOD) { // MOD goes straight to runtime.
7358 GenerateLoadArguments(masm); 7416 GenerateLoadArguments(masm);
7359 } 7417 }
7360 7418
7361 // Floating point case. 7419 // Floating point case.
7362 switch (op_) { 7420 switch (op_) {
7363 case Token::ADD: 7421 case Token::ADD:
7364 case Token::SUB: 7422 case Token::SUB:
7365 case Token::MUL: 7423 case Token::MUL:
7366 case Token::DIV: { 7424 case Token::DIV: {
7367 if (CpuFeatures::IsSupported(SSE2)) { 7425 if (CpuFeatures::IsSupported(SSE2)) {
7368 CpuFeatures::Scope use_sse2(SSE2); 7426 CpuFeatures::Scope use_sse2(SSE2);
7369 FloatingPointHelper::LoadSse2Operands(masm, &call_runtime); 7427 FloatingPointHelper::LoadSSE2Operands(masm, &call_runtime);
7370 7428
7371 switch (op_) { 7429 switch (op_) {
7372 case Token::ADD: __ addsd(xmm0, xmm1); break; 7430 case Token::ADD: __ addsd(xmm0, xmm1); break;
7373 case Token::SUB: __ subsd(xmm0, xmm1); break; 7431 case Token::SUB: __ subsd(xmm0, xmm1); break;
7374 case Token::MUL: __ mulsd(xmm0, xmm1); break; 7432 case Token::MUL: __ mulsd(xmm0, xmm1); break;
7375 case Token::DIV: __ divsd(xmm0, xmm1); break; 7433 case Token::DIV: __ divsd(xmm0, xmm1); break;
7376 default: UNREACHABLE(); 7434 default: UNREACHABLE();
7377 } 7435 }
7378 GenerateHeapResultAllocation(masm, &call_runtime); 7436 GenerateHeapResultAllocation(masm, &call_runtime);
7379 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); 7437 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
(...skipping 462 matching lines...)
7842 __ bind(&load_smi); 7900 __ bind(&load_smi);
7843 __ SmiUntag(number); 7901 __ SmiUntag(number);
7844 __ push(number); 7902 __ push(number);
7845 __ fild_s(Operand(esp, 0)); 7903 __ fild_s(Operand(esp, 0));
7846 __ pop(number); 7904 __ pop(number);
7847 7905
7848 __ bind(&done); 7906 __ bind(&done);
7849 } 7907 }
7850 7908
7851 7909
7852 void FloatingPointHelper::LoadSse2Operands(MacroAssembler* masm, 7910 void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm,
7853 Label* not_numbers) { 7911 Label* not_numbers) {
7854 Label load_smi_edx, load_eax, load_smi_eax, load_float_eax, done; 7912 Label load_smi_edx, load_eax, load_smi_eax, load_float_eax, done;
7855 // Load operand in edx into xmm0, or branch to not_numbers. 7913 // Load operand in edx into xmm0, or branch to not_numbers.
7856 __ test(edx, Immediate(kSmiTagMask)); 7914 __ test(edx, Immediate(kSmiTagMask));
7857 __ j(zero, &load_smi_edx, not_taken); // Argument in edx is a smi. 7915 __ j(zero, &load_smi_edx, not_taken); // Argument in edx is a smi.
7858 __ cmp(FieldOperand(edx, HeapObject::kMapOffset), Factory::heap_number_map()); 7916 __ cmp(FieldOperand(edx, HeapObject::kMapOffset), Factory::heap_number_map());
7859 __ j(not_equal, not_numbers); // Argument in edx is not a number. 7917 __ j(not_equal, not_numbers); // Argument in edx is not a number.
7860 __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset)); 7918 __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
7861 __ bind(&load_eax); 7919 __ bind(&load_eax);
7862 // Load operand in eax into xmm1, or branch to not_numbers. 7920 // Load operand in eax into xmm1, or branch to not_numbers.
(...skipping 11 matching lines...)
7874 __ SmiUntag(eax); // Untag smi before converting to float. 7932 __ SmiUntag(eax); // Untag smi before converting to float.
7875 __ cvtsi2sd(xmm1, Operand(eax)); 7933 __ cvtsi2sd(xmm1, Operand(eax));
7876 __ SmiTag(eax); // Retag smi for heap number overwriting test. 7934 __ SmiTag(eax); // Retag smi for heap number overwriting test.
7877 __ jmp(&done); 7935 __ jmp(&done);
7878 __ bind(&load_float_eax); 7936 __ bind(&load_float_eax);
7879 __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset)); 7937 __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
7880 __ bind(&done); 7938 __ bind(&done);
7881 } 7939 }
7882 7940
7883 7941
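The per-operand pattern in LoadSSE2Operands, summarized as pseudocode (this mirrors the emitted checks; names are illustrative):

// For each of edx -> xmm0 and eax -> xmm1:
//   if ((reg & kSmiTagMask) == 0)        // smi: untag and convert
//     xmm = cvtsi2sd(reg >> 1);
//   else if (map(reg) != heap_number_map)
//     goto not_numbers;                  // neither smi nor HeapNumber
//   else
//     xmm = load_double(reg->value);     // movdbl from the HeapNumber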
7884 void FloatingPointHelper::LoadSse2Smis(MacroAssembler* masm, 7942 void FloatingPointHelper::LoadSSE2Smis(MacroAssembler* masm,
7885 Register scratch) { 7943 Register scratch) {
7886 __ mov(scratch, eax); 7944 const Register left = edx;
7887 __ SmiUntag(scratch); // Untag smi before converting to float. 7945 const Register right = eax;
7946 __ mov(scratch, left);
7947 ASSERT(!scratch.is(right)); // We're about to clobber scratch.
7948 __ SmiUntag(scratch);
7888 __ cvtsi2sd(xmm0, Operand(scratch)); 7949 __ cvtsi2sd(xmm0, Operand(scratch));
7889 7950
7890 __ mov(scratch, ebx); 7951 __ mov(scratch, right);
7891 __ SmiUntag(scratch); // Untag smi before converting to float. 7952 __ SmiUntag(scratch);
7892 __ cvtsi2sd(xmm1, Operand(scratch)); 7953 __ cvtsi2sd(xmm1, Operand(scratch));
7893 } 7954 }
7894 7955
7895 7956
7896 void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm, 7957 void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
7897 Register scratch, 7958 Register scratch,
7898 ArgLocation arg_location) { 7959 ArgLocation arg_location) {
7899 Label load_smi_1, load_smi_2, done_load_1, done; 7960 Label load_smi_1, load_smi_2, done_load_1, done;
7900 if (arg_location == ARGS_IN_REGISTERS) { 7961 if (arg_location == ARGS_IN_REGISTERS) {
7901 __ mov(scratch, edx); 7962 __ mov(scratch, edx);
(...skipping 27 matching lines...)
7929 __ push(scratch); 7990 __ push(scratch);
7930 __ fild_s(Operand(esp, 0)); 7991 __ fild_s(Operand(esp, 0));
7931 __ pop(scratch); 7992 __ pop(scratch);
7932 7993
7933 __ bind(&done); 7994 __ bind(&done);
7934 } 7995 }
7935 7996
7936 7997
7937 void FloatingPointHelper::LoadFloatSmis(MacroAssembler* masm, 7998 void FloatingPointHelper::LoadFloatSmis(MacroAssembler* masm,
7938 Register scratch) { 7999 Register scratch) {
7939 __ mov(scratch, eax); 8000 const Register left = edx;
8001 const Register right = eax;
8002 __ mov(scratch, left);
8003 ASSERT(!scratch.is(right)); // We're about to clobber scratch.
7940 __ SmiUntag(scratch); 8004 __ SmiUntag(scratch);
7941 __ push(scratch); 8005 __ push(scratch);
7942 __ fild_s(Operand(esp, 0)); 8006 __ fild_s(Operand(esp, 0));
7943 __ pop(scratch);
7944 8007
7945 __ mov(scratch, ebx); 8008 __ mov(scratch, right);
7946 __ SmiUntag(scratch); 8009 __ SmiUntag(scratch);
7947 __ push(scratch); 8010 __ mov(Operand(esp, 0), scratch);
7948 __ fild_s(Operand(esp, 0)); 8011 __ fild_s(Operand(esp, 0));
7949 __ pop(scratch); 8012 __ pop(scratch);
7950 } 8013 }
7951 8014
7952 8015
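A note on the LoadFloatSmis rewrite above (an editorial reading of the diff): fild_s only reads its memory operand, so once the first untagged operand has been pushed and loaded onto the FPU stack, the same stack slot can be overwritten in place with the second one. The single "mov(Operand(esp, 0), scratch)" therefore replaces the old pop/push pair, and the final pop still restores esp.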
7953 void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm, 8016 void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
7954 Label* non_float, 8017 Label* non_float,
7955 Register scratch) { 8018 Register scratch) {
7956 Label test_other, done; 8019 Label test_other, done;
7957 // Test if both operands are floats or smi -> scratch=k_is_float; 8020 // Test if both operands are floats or smi -> scratch=k_is_float;
(...skipping 778 matching lines...)
8736 __ push(ecx); 8799 __ push(ecx);
8737 8800
8738 // Inlined floating point compare. 8801 // Inlined floating point compare.
8739 // Call builtin if operands are not floating point or smi. 8802 // Call builtin if operands are not floating point or smi.
8740 Label check_for_symbols; 8803 Label check_for_symbols;
8741 Label unordered; 8804 Label unordered;
8742 if (CpuFeatures::IsSupported(SSE2)) { 8805 if (CpuFeatures::IsSupported(SSE2)) {
8743 CpuFeatures::Scope use_sse2(SSE2); 8806 CpuFeatures::Scope use_sse2(SSE2);
8744 CpuFeatures::Scope use_cmov(CMOV); 8807 CpuFeatures::Scope use_cmov(CMOV);
8745 8808
8746 FloatingPointHelper::LoadSse2Operands(masm, &check_for_symbols); 8809 FloatingPointHelper::LoadSSE2Operands(masm, &check_for_symbols);
8747 __ comisd(xmm0, xmm1); 8810 __ comisd(xmm0, xmm1);
8748 8811
8749 // Jump to builtin for NaN. 8812 // Jump to builtin for NaN.
8750 __ j(parity_even, &unordered, not_taken); 8813 __ j(parity_even, &unordered, not_taken);
8751 __ mov(eax, 0); // equal 8814 __ mov(eax, 0); // equal
8752 __ mov(ecx, Immediate(Smi::FromInt(1))); 8815 __ mov(ecx, Immediate(Smi::FromInt(1)));
8753 __ cmov(above, eax, Operand(ecx)); 8816 __ cmov(above, eax, Operand(ecx));
8754 __ mov(ecx, Immediate(Smi::FromInt(-1))); 8817 __ mov(ecx, Immediate(Smi::FromInt(-1)));
8755 __ cmov(below, eax, Operand(ecx)); 8818 __ cmov(below, eax, Operand(ecx));
8756 __ ret(2 * kPointerSize); 8819 __ ret(2 * kPointerSize);
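// (Illustrative aside: this is a branchless three-way compare. comisd
// sets the parity flag only for an unordered (NaN) result, which is
// routed to the builtin; otherwise the two cmov instructions materialize
// eax as 0, Smi::FromInt(1) or Smi::FromInt(-1) for equal, above and
// below respectively.)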
(...skipping 1207 matching lines...)
9964 10027
9965 // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater) 10028 // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
9966 // tagged as a small integer. 10029 // tagged as a small integer.
9967 __ bind(&runtime); 10030 __ bind(&runtime);
9968 __ TailCallRuntime(ExternalReference(Runtime::kStringCompare), 2, 1); 10031 __ TailCallRuntime(ExternalReference(Runtime::kStringCompare), 2, 1);
9969 } 10032 }
9970 10033
9971 #undef __ 10034 #undef __
9972 10035
9973 } } // namespace v8::internal 10036 } } // namespace v8::internal