Chromium Code Reviews

Unified Diff: src/ia32/codegen-ia32.cc

Issue 555098: Support register arguments in more cases.... (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: Created 10 years, 11 months ago
 // Copyright 2006-2009 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 960 matching lines...)
   if (left_is_smi && right_is_smi) {
     // Compute the constant result at compile time, and leave it on the frame.
     int left_int = Smi::cast(*left.handle())->value();
     int right_int = Smi::cast(*right.handle())->value();
     if (FoldConstantSmis(op, left_int, right_int)) return;
   }

   Result answer;
   if (left_is_non_smi || right_is_non_smi) {
     // Go straight to the slow case, with no smi code.
-    frame_->Push(&left);
-    frame_->Push(&right);
     GenericBinaryOpStub stub(op, overwrite_mode, NO_SMI_CODE_IN_STUB);
-    answer = frame_->CallStub(&stub, 2);
+    answer = stub.GenerateCall(masm_, frame_, &left, &right);
   } else if (right_is_smi) {
     answer = ConstantSmiBinaryOperation(op, &left, right.handle(),
                                         type, false, overwrite_mode);
   } else if (left_is_smi) {
     answer = ConstantSmiBinaryOperation(op, &right, left.handle(),
                                         type, true, overwrite_mode);
   } else {
     // Set the flags based on the operation, type and loop nesting level.
     // Bit operations always assume they likely operate on Smis. Still only
     // generate the inline Smi check code if this operation is part of a loop.
     // For all other operations only inline the Smi check code for likely smis
     // if the operation is part of a loop.
     if (loop_nesting() > 0 && (Token::IsBitOp(op) || type->IsLikelySmi())) {
       answer = LikelySmiBinaryOperation(op, &left, &right, overwrite_mode);
     } else {
-      frame_->Push(&left);
-      frame_->Push(&right);
       GenericBinaryOpStub stub(op, overwrite_mode, NO_GENERIC_BINARY_FLAGS);
-      answer = frame_->CallStub(&stub, 2);
+      answer = stub.GenerateCall(masm_, frame_, &left, &right);
     }
   }
   frame_->Push(&answer);
 }
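Condensed, the dispatch above now reads as follows after the compile-time constant fold attempt at the top. This is a paraphrase of the hunk for orientation only (it assumes the surrounding CodeGenerator members and elides the ConstantSmiBinaryOperation argument lists), not a compilable excerpt:

    Result answer;
    if (left_is_non_smi || right_is_non_smi) {
      // A known non-smi operand: skip the inline smi code entirely.
      GenericBinaryOpStub stub(op, overwrite_mode, NO_SMI_CODE_IN_STUB);
      answer = stub.GenerateCall(masm_, frame_, &left, &right);  // registers if supported
    } else if (right_is_smi || left_is_smi) {
      // Exactly one constant smi operand: specialized constant-operand path.
      answer = ConstantSmiBinaryOperation(op, /* ... */ overwrite_mode);
    } else if (loop_nesting() > 0 && (Token::IsBitOp(op) || type->IsLikelySmi())) {
      // Likely smis inside a loop: inline the smi fast path.
      answer = LikelySmiBinaryOperation(op, &left, &right, overwrite_mode);
    } else {
      GenericBinaryOpStub stub(op, overwrite_mode, NO_GENERIC_BINARY_FLAGS);
      answer = stub.GenerateCall(masm_, frame_, &left, &right);
    }
    frame_->Push(&answer);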


 bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
   Object* answer_object = Heap::undefined_value();
   switch (op) {
     case Token::ADD:
(...skipping 6055 matching lines...)
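The body of FoldConstantSmis is cut off by the fold above. As a rough illustration of the idea only (a hypothetical, self-contained simplification, not the V8 implementation, which pushes the folded value onto the virtual frame), a fold succeeds when the operation on the two constants still yields a value representable as an ia32 smi (31-bit signed):

    #include <cstdint>

    // Hypothetical names; V8 uses Smi::kMinValue / Smi::kMaxValue.
    const int64_t kSmiMin = -(int64_t{1} << 30);
    const int64_t kSmiMax = (int64_t{1} << 30) - 1;

    enum class Op { kAdd, kSub, kBitOr };

    bool FoldsToSmi(Op op, int left, int right, int* result) {
      int64_t value;
      switch (op) {
        case Op::kAdd:   value = int64_t{left} + right; break;
        case Op::kSub:   value = int64_t{left} - right; break;
        case Op::kBitOr: value = left | right;          break;
        default:         return false;  // other operators not folded here
      }
      if (value < kSmiMin || value > kSmiMax) return false;  // would not fit in a smi
      *result = static_cast<int>(value);
      return true;
    }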
     // Update flags to indicate that arguments are in registers.
     SetArgsInRegisters();
     __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
   }

   // Call the stub.
   __ CallStub(this);
 }


+Result GenericBinaryOpStub::GenerateCall(MacroAssembler* masm,
+                                         VirtualFrame* frame,
+                                         Result* left,
+                                         Result* right) {
+  if (ArgsInRegistersSupported()) {
+    SetArgsInRegisters();
+    return frame->CallStub(this, left, right);
+  } else {
+    frame->Push(left);
+    frame->Push(right);
+    return frame->CallStub(this, 2);
+  }
+}
+
+
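The earlier hunks in this file show how the new helper is used at the call sites; instead of pushing both operands and calling the stub with two stack arguments, the code generator now lets the stub decide:

    // Before this patch:
    //   frame_->Push(&left);
    //   frame_->Push(&right);
    //   answer = frame_->CallStub(&stub, 2);
    //
    // After this patch: GenerateCall either flags the stub with SetArgsInRegisters()
    // and calls frame->CallStub(this, left, right), or falls back to the
    // push-and-call sequence above when register passing is not supported.
    GenericBinaryOpStub stub(op, overwrite_mode, NO_GENERIC_BINARY_FLAGS);
    answer = stub.GenerateCall(masm_, frame_, &left, &right);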
 void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
   if (HasArgsInRegisters()) {
     __ mov(ebx, eax);
     __ mov(eax, edx);
   } else {
     __ mov(ebx, Operand(esp, 1 * kPointerSize));
     __ mov(eax, Operand(esp, 2 * kPointerSize));
   }

   Label not_smis, not_smis_or_overflow, not_smis_undo_optimistic;
(...skipping 11 matching lines...)
     case Token::ADD:
       __ add(eax, Operand(ebx));  // add optimistically
       __ j(overflow, &not_smis_or_overflow, not_taken);
       break;

     case Token::SUB:
       __ sub(eax, Operand(ebx));  // subtract optimistically
       __ j(overflow, &not_smis_or_overflow, not_taken);
       break;

+    case Token::MUL:
+      __ mov(edi, Operand(eax));  // backup the 1st operand
Kevin Millikin (Chromium), 2010/01/26 09:07:22: No need for Operand: __ mov(edi, eax). Comment sh
Vladislav Kaznacheev, 2010/01/26 10:21:18: Done.
+      break;
+
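Per the review thread above, a register-to-register move does not need the Operand wrapper, so the adopted form of the new MUL case is presumably:

    case Token::MUL:
      __ mov(edi, eax);  // Back up the 1st operand; it is restored on the
                         // heap-number fall-back path (use_fp_on_smis) below.
      break;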
     case Token::DIV:
+      __ mov(edi, Operand(eax));  // backup the 1st operand
+      // Fall through.
     case Token::MOD:
       // Sign extend eax into edx:eax.
       __ cdq();
       // Check for 0 divisor.
       __ test(ebx, Operand(ebx));
       __ j(zero, &not_smis_or_overflow, not_taken);
       break;

     default:
       // Fall-through to smi check.
(...skipping 115 matching lines...)
     default: break;
   }
   ASSERT(kSmiTag == 0);  // Adjust zero check if not the case.
   __ test(ecx, Immediate(kSmiTagMask));
   __ j(not_zero, &not_smis, not_taken);
   // Correct operand values are in eax, ebx at this point.

   __ bind(&use_fp_on_smis);
   // Both operands are known to be SMIs but the result does not fit into a SMI.
   switch (op_) {
+    case Token::MUL:
+    case Token::DIV:
+      __ mov(eax, edi);  // Restore the 1st operand.
+      // Fall through.
     case Token::ADD:
-    case Token::SUB:
-    case Token::MUL:
-    case Token::DIV: {
+    case Token::SUB: {
       Label after_alloc_failure;
-
-      FloatingPointHelper::ArgLocation arg_location =
-          (op_ == Token::ADD || op_ == Token::SUB) ?
-          FloatingPointHelper::ARGS_IN_REGISTERS :
-          FloatingPointHelper::ARGS_ON_STACK;
-
-      __ AllocateHeapNumber(
-          edx,
-          ecx,
-          no_reg,
-          arg_location == FloatingPointHelper::ARGS_IN_REGISTERS ?
-              &after_alloc_failure :
-              slow);
+      __ AllocateHeapNumber(edx, ecx, no_reg, &after_alloc_failure);

       if (CpuFeatures::IsSupported(SSE2)) {
         CpuFeatures::Scope use_sse2(SSE2);
-        FloatingPointHelper::LoadSse2Smis(masm, ecx, arg_location);
+        FloatingPointHelper::LoadSse2Smis(
+            masm,
+            ecx,
+            FloatingPointHelper::ARGS_IN_REGISTERS);
Kevin Millikin (Chromium), 2010/01/26 09:07:22: Does LoadSse2Smis need the ArgLocation argument an
Vladislav Kaznacheev, 2010/01/26 10:21:18: Removed (also for LoadFloatSmis). On 2010/01/26 09
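Given the reply above (the ArgLocation parameter was dropped from both helpers), the post-review calls presumably reduce to a two-argument form along these lines; this is an expectation based on the thread, not code taken from a later patch set:

    FloatingPointHelper::LoadSse2Smis(masm, ecx);   // SSE2 path; operands already in registers
    // ...
    FloatingPointHelper::LoadFloatSmis(masm, ecx);  // FPU path, same assumption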
         switch (op_) {
           case Token::ADD: __ addsd(xmm0, xmm1); break;
           case Token::SUB: __ subsd(xmm0, xmm1); break;
           case Token::MUL: __ mulsd(xmm0, xmm1); break;
           case Token::DIV: __ divsd(xmm0, xmm1); break;
           default: UNREACHABLE();
         }
         __ movdbl(FieldOperand(edx, HeapNumber::kValueOffset), xmm0);
       } else {  // SSE2 not available, use FPU.
-        FloatingPointHelper::LoadFloatSmis(masm, ecx, arg_location);
+        FloatingPointHelper::LoadFloatSmis(
+            masm,
+            ecx,
+            FloatingPointHelper::ARGS_IN_REGISTERS);
         switch (op_) {
           case Token::ADD: __ faddp(1); break;
           case Token::SUB: __ fsubp(1); break;
           case Token::MUL: __ fmulp(1); break;
           case Token::DIV: __ fdivp(1); break;
           default: UNREACHABLE();
         }
         __ fstp_d(FieldOperand(edx, HeapNumber::kValueOffset));
       }
       __ mov(eax, edx);
       GenerateReturn(masm);

-      if (arg_location == FloatingPointHelper::ARGS_IN_REGISTERS) {
-        __ bind(&after_alloc_failure);
-        __ mov(edx, eax);
-        __ mov(eax, ebx);
-        __ jmp(slow);
-      }
+      __ bind(&after_alloc_failure);
+      __ mov(edx, eax);
+      __ mov(eax, ebx);
+      __ jmp(slow);
       break;
     }
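Taken together, the MUL/DIV changes in this hunk pair a backup of the first operand (saved in edi before the optimistic smi arithmetic) with a restore on the heap-number fall-back path, which is why the four arithmetic cases can now share one AllocateHeapNumber call that always bails out to after_alloc_failure with the operands in registers. Condensed from the hunks above, with the surrounding cases elided:

    // In the optimistic smi arithmetic:
    case Token::MUL:
      __ mov(edi, Operand(eax));  // backup the 1st operand
      break;
    case Token::DIV:
      __ mov(edi, Operand(eax));  // backup the 1st operand
      // Fall through to the MOD divisor check.

    // In the use_fp_on_smis fall-back (result does not fit into a smi):
    case Token::MUL:
    case Token::DIV:
      __ mov(eax, edi);           // Restore the 1st operand.
      // Fall through.
    case Token::ADD:
    case Token::SUB: {
      Label after_alloc_failure;
      __ AllocateHeapNumber(edx, ecx, no_reg, &after_alloc_failure);
      // ...load the operands as doubles (SSE2 or FPU), store into edx, return.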

     case Token::BIT_OR:
     case Token::BIT_AND:
     case Token::BIT_XOR:
     case Token::SAR:
       // Do nothing here as these operations always succeed on a pair of smis.
       break;

(...skipping 125 matching lines...)
   }
   // Tag smi result and return.
   __ SmiTag(eax);
   GenerateReturn(masm);

   // All ops except SHR return a signed int32 that we load in a HeapNumber.
   if (op_ != Token::SHR) {
     __ bind(&non_smi_result);
     // Allocate a heap number if needed.
     __ mov(ebx, Operand(eax));  // ebx: result
     Label skip_allocation;
     switch (mode_) {
       case OVERWRITE_LEFT:
       case OVERWRITE_RIGHT:
         // If the operand was an object, we skip the
         // allocation of a heap number.
         __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
                             1 * kPointerSize : 2 * kPointerSize));
         __ test(eax, Immediate(kSmiTagMask));
         __ j(not_zero, &skip_allocation, not_taken);
         // Fall through!
(...skipping 2536 matching lines...)

   // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
   // tagged as a small integer.
   __ bind(&runtime);
   __ TailCallRuntime(ExternalReference(Runtime::kStringCompare), 2, 1);
 }

 #undef __

 } }  // namespace v8::internal