Chromium Code Reviews

Unified diff: src/ia32/codegen-ia32.cc

Issue 101016: Improve register allocation of left shift operation. Add tests... (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: '' Created 11 years, 7 months ago
 // Copyright 2006-2009 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 5884 matching lines...)
 
     case Token::SHL:
     case Token::SHR:
     case Token::SAR:
       // Move right into ecx.
       // Left is in two registers already, so even if left or answer is ecx,
       // we can move right to it, and use the other one.
       // Right operand must be in register cl because x86 likes it that way.
       if (right->reg().is(ecx)) {
         // Right is already in the right place.  Left may be in the
-        // same register, which causes problems.  Use answer instead.
-        if (left->reg().is(ecx)) {
-          *left = answer;
-        }
+        // same register, which causes problems.  Always use answer
+        // instead of left, even if left is not ecx, since this avoids
+        // spilling left.
+        *left = answer;
       } else if (left->reg().is(ecx)) {
         generator()->frame()->Spill(left->reg());
         __ mov(left->reg(), right->reg());
         *right = *left;
         *left = answer;  // Use copy of left in answer as left.
       } else if (answer.reg().is(ecx)) {
         __ mov(answer.reg(), right->reg());
         *right = answer;
       } else {
         Result reg_ecx = generator()->allocator()->Allocate(ecx);
         ASSERT(reg_ecx.is_valid());
         __ mov(ecx, right->reg());
         *right = reg_ecx;
+        // Answer and left both contain the left operand.  Use answer, so
+        // left is not spilled.
+        *left = answer;
       }
       ASSERT(left->reg().is_valid());
       ASSERT(!left->reg().is(ecx));
       ASSERT(right->reg().is(ecx));
       answer.Unuse();  // Answer may now be being used for left or right.
       // We will modify left and right, which we do not do in any other
       // binary operation.  The exits to slow code need to restore the
       // original values of left and right, or at least values that give
       // the same answer.
 
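The shuffling in this hunk is forced by the instruction set: ia32 encodes a variable shift count only through cl (shl/shr/sar r/m32, cl), so the right operand has to end up in ecx while the left operand must sit in any other register, and reusing the already-allocated answer register for left is what lets the patch avoid spilling left. As a rough sketch of what the fast path computes on untagged 32-bit values (function names below are illustrative, not from the patch; the real code also handles smi tagging and bails out to the stub on overflow):

    #include <cstdint>

    // Token::SAR -- arithmetic right shift; both JavaScript and the x86 cl
    // encoding mask the count to 5 bits for 32-bit operands.  Relies on the
    // compiler implementing >> of signed values as an arithmetic shift.
    int32_t sar(int32_t left, uint32_t count) { return left >> (count & 31); }

    // Token::SHR -- logical right shift on the unsigned view of the value.
    uint32_t shr(uint32_t left, uint32_t count) { return left >> (count & 31); }

    // Token::SHL -- left shift, done on the unsigned view to match the hardware.
    int32_t shl(int32_t left, uint32_t count) {
      return static_cast<int32_t>(static_cast<uint32_t>(left) << (count & 31));
    }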
(...skipping 29 matching lines...)
           ASSERT(kSmiTag == 0);
           __ shl(left->reg(), kSmiTagSize);
           __ shl(right->reg(), kSmiTagSize);
           enter()->Jump(left, right);
           result_ok.Bind(left);
           break;
         }
         case Token::SHL: {
           __ shl(left->reg());
           // Check that the *signed* result fits in a smi.
-          //
-          // TODO(207): Can reduce registers from 4 to 3 by
-          // preallocating ecx.
           JumpTarget result_ok(generator());
-          Result smi_test_reg = generator()->allocator()->Allocate();
-          ASSERT(smi_test_reg.is_valid());
-          __ lea(smi_test_reg.reg(), Operand(left->reg(), 0x40000000));
-          __ test(smi_test_reg.reg(), Immediate(0x80000000));
-          smi_test_reg.Unuse();
-          result_ok.Branch(zero, left, taken);
+          __ cmp(left->reg(), 0xc0000000);
+          result_ok.Branch(positive, left, taken);
+
           __ shr(left->reg());
           ASSERT(kSmiTag == 0);
           __ shl(left->reg(), kSmiTagSize);
           __ shl(right->reg(), kSmiTagSize);
           enter()->Jump(left, right);
           result_ok.Bind(left);
           break;
         }
         default:
           UNREACHABLE();
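The replaced check and its replacement test the same property: a shifted result can be smi-tagged only if it lies in [-2^30, 2^30 - 1], i.e. only if bits 31 and 30 agree. The old sequence materialized left + 0x40000000 in a freshly allocated register via lea and tested bit 31 of that sum; cmp left, 0xc0000000 computes the same sum in the flags alone (subtracting 0xc0000000 equals adding 0x40000000 modulo 2^32), so branching on positive (sign flag clear) gives the same decision without tying up a fourth register. A minimal sketch of the equivalence (function names are illustrative):

    #include <cstdint>

    // Old check: lea scratch, [left + 0x40000000] ; test scratch, 0x80000000 ; jz ok
    bool fits_in_smi_old(uint32_t left) {
      uint32_t scratch = left + 0x40000000u;      // needs a scratch register
      return (scratch & 0x80000000u) == 0;
    }

    // New check: cmp left, 0xc0000000 ; branch on "positive" (sign flag clear)
    bool fits_in_smi_new(uint32_t left) {
      uint32_t flags_input = left - 0xc0000000u;  // same value mod 2^32 as above
      return (flags_input & 0x80000000u) == 0;    // the sign flag the branch reads
    }

    // Both return true exactly for signed values in [-2^30, 2^30 - 1], the range
    // that survives the one-bit smi tag shift.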
(...skipping 279 matching lines...)
   __ pop(eax);
   switch (op_) {
     case Token::BIT_OR:  __ or_(eax, Operand(ecx)); break;
     case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
     case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
     case Token::SAR: __ sar(eax); break;
     case Token::SHL: __ shl(eax); break;
     case Token::SHR: __ shr(eax); break;
     default: UNREACHABLE();
   }
-
+  if (op_ == Token::SHR) {
     // Check if result is non-negative and fits in a smi.
     __ test(eax, Immediate(0xc0000000));
     __ j(not_zero, &non_smi_result);
-
+  } else {
+    // Check if result fits in a smi.
+    __ cmp(eax, 0xc0000000);
+    __ j(negative, &non_smi_result);
+  }
   // Tag smi result and return.
   ASSERT(kSmiTagSize == times_2);  // adjust code if not the case
   __ lea(eax, Operand(eax, eax, times_1, kSmiTag));
   __ ret(2 * kPointerSize);
 
   // All ops except SHR return a signed int32 that we load in a HeapNumber.
   if (op_ != Token::SHR) {
     __ bind(&non_smi_result);
     // Allocate a heap number if needed.
     __ mov(ebx, Operand(eax));  // ebx: result
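Splitting the check lets the non-SHR operations keep negative results on the fast smi path: SHR yields a logically unsigned value, so it can be tagged only when its top two bits are clear, while SAR, SHL and the bitwise ops yield a signed int32 that fits whenever it is in the signed smi range; the lea that follows then doubles the value to apply the one-bit tag (kSmiTag is 0). A sketch of the two predicates (function names are illustrative):

    #include <cstdint>

    // SHR path: test eax, 0xc0000000 ; jnz non_smi_result
    // The unsigned result is taggable only if it is below 2^30.
    bool shr_result_is_smi(uint32_t result) {
      return (result & 0xc0000000u) == 0;
    }

    // Other ops: cmp eax, 0xc0000000 ; j negative non_smi_result
    // Any signed value in [-2^30, 2^30 - 1] is taggable, negatives included.
    bool signed_result_is_smi(int32_t result) {
      return result >= -0x40000000 && result <= 0x3fffffff;
    }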
(...skipping 922 matching lines...)
 
   // Slow-case: Go through the JavaScript implementation.
   __ bind(&slow);
   __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
 }
 
 
 #undef __
 
 } }  // namespace v8::internal