Chromium Code Reviews

Diff: src/x64/codegen-x64.cc

Issue 335005: Port optimization of calls to GenericBinaryStub to x64 (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: '' Created 11 years, 1 month ago
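The substance of the change: instead of pushing both operands and calling the stub, the deferred code now calls GenericBinaryOpStub::GenerateCall, which, when the stub supports register arguments, places the left operand in rdx and the right operand in rax (moving, exchanging, or marking the arguments as reversed for commutative operations) and otherwise falls back to pushing them on the stack. The standalone sketch below models only that operand-shuffling decision, for orientation before reading the diff; it is illustrative C++ written for this review, not V8 code, and the Shuffle helper and the "mark args reversed" string are invented stand-ins for the real MacroAssembler moves and SetArgsReversed().

// Illustrative, self-contained model (not V8 code) of the operand shuffling
// performed by GenericBinaryOpStub::GenerateCall(masm, Register, Register):
// left must end up in rdx and right in rax.
#include <cstdio>
#include <string>
#include <vector>

using Reg = std::string;

// Returns the pseudo-instructions that would be emitted for the given operand
// registers; `commutative` stands in for IsOperationCommutative().
std::vector<std::string> Shuffle(const Reg& left, const Reg& right,
                                 bool commutative) {
  const Reg left_arg = "rdx", right_arg = "rax";
  std::vector<std::string> out;
  if (left == left_arg && right == right_arg) return out;  // already in place
  if (left == right_arg && right == left_arg) {
    // Operands are exactly swapped.
    if (commutative) {
      out.push_back("mark args reversed");  // models SetArgsReversed()
    } else {
      out.push_back("xchg " + left + ", " + right);
    }
  } else if (left == left_arg) {
    out.push_back("movq rax, " + right);
  } else if (left == right_arg) {
    if (commutative) {
      out.push_back("movq rdx, " + right);
      out.push_back("mark args reversed");
    } else {
      // Move left out of rax first so it is not clobbered.
      out.push_back("movq rdx, " + left);
      out.push_back("movq rax, " + right);
    }
  } else if (right == left_arg) {
    if (commutative) {
      out.push_back("movq rax, " + left);
      out.push_back("mark args reversed");
    } else {
      // Move right out of rdx first so it is not clobbered.
      out.push_back("movq rax, " + right);
      out.push_back("movq rdx, " + left);
    }
  } else if (right == right_arg) {
    out.push_back("movq rdx, " + left);
  } else {
    // Neither operand occupies an argument register; order does not matter.
    out.push_back("movq rdx, " + left);
    out.push_back("movq rax, " + right);
  }
  return out;
}

int main() {
  // Example: left in rax, right in rbx, non-commutative op (e.g. SUB).
  for (const auto& s : Shuffle("rax", "rbx", false)) std::puts(s.c_str());
}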
// Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
(...skipping 5039 matching lines...)
 private:
  Token::Value op_;
  Register dst_;
  Register left_;
  Register right_;
  OverwriteMode mode_;
};


void DeferredInlineBinaryOperation::Generate() {
-  __ push(left_);
-  __ push(right_);
-  GenericBinaryOpStub stub(op_, mode_, SMI_CODE_INLINED);
-  __ CallStub(&stub);
+  GenericBinaryOpStub stub(op_, mode_, NO_SMI_CODE_IN_STUB);
+  stub.GenerateCall(masm_, left_, right_);
  if (!dst_.is(rax)) __ movq(dst_, rax);
}


void CodeGenerator::GenericBinaryOperation(Token::Value op,
                                           SmiAnalysis* type,
                                           OverwriteMode overwrite_mode) {
  Comment cmnt(masm_, "[ BinaryOperation");
  Comment cmnt_token(masm_, Token::String(op));

  if (op == Token::COMMA) {
    // Simply discard left value.
    frame_->Nip(1);
    return;
  }

  // Set the flags based on the operation, type and loop nesting level.
  GenericBinaryFlags flags;
  switch (op) {
    case Token::BIT_OR:
    case Token::BIT_AND:
    case Token::BIT_XOR:
    case Token::SHL:
    case Token::SHR:
    case Token::SAR:
      // Bit operations always assume they likely operate on Smis. Still only
      // generate the inline Smi check code if this operation is part of a loop.
      flags = (loop_nesting() > 0)
-          ? SMI_CODE_INLINED
-          : SMI_CODE_IN_STUB;
+          ? NO_SMI_CODE_IN_STUB
+          : NO_GENERIC_BINARY_FLAGS;
      break;

    default:
      // By default only inline the Smi check code for likely smis if this
      // operation is part of a loop.
      flags = ((loop_nesting() > 0) && type->IsLikelySmi())
-          ? SMI_CODE_INLINED
-          : SMI_CODE_IN_STUB;
+          ? NO_SMI_CODE_IN_STUB
+          : NO_GENERIC_BINARY_FLAGS;
      break;
  }

  Result right = frame_->Pop();
  Result left = frame_->Pop();

  if (op == Token::ADD) {
    bool left_is_string = left.is_constant() && left.handle()->IsString();
    bool right_is_string = right.is_constant() && right.handle()->IsString();
    if (left_is_string || right_is_string) {
(...skipping 38 matching lines...)
  } else if (right_is_smi) {
    ConstantSmiBinaryOperation(op, &left, right.handle(),
                               type, false, overwrite_mode);
    return;
  } else if (left_is_smi) {
    ConstantSmiBinaryOperation(op, &right, left.handle(),
                               type, true, overwrite_mode);
    return;
  }

-  if (flags == SMI_CODE_INLINED && !generate_no_smi_code) {
+  if ((flags & NO_SMI_CODE_IN_STUB) != 0 && !generate_no_smi_code) {
    LikelySmiBinaryOperation(op, &left, &right, overwrite_mode);
  } else {
    frame_->Push(&left);
    frame_->Push(&right);
    // If we know the arguments aren't smis, use the binary operation stub
    // that does not check for the fast smi case.
    // The same stub is used for NO_SMI_CODE and SMI_CODE_INLINED.
    if (generate_no_smi_code) {
-      flags = SMI_CODE_INLINED;
+      flags = NO_SMI_CODE_IN_STUB;
    }
    GenericBinaryOpStub stub(op, overwrite_mode, flags);
    Result answer = frame_->CallStub(&stub, 2);
    frame_->Push(&answer);
  }
}


// Emit a LoadIC call to get the value from receiver and leave it in
// dst. The receiver register is restored after the call.
(...skipping 34 matching lines...)
  // instruction that gets patched and coverage code gets in the way.
  masm_->testl(rax, Immediate(-delta_to_patch_site));
  __ IncrementCounter(&Counters::named_load_inline_miss, 1);

  if (!dst_.is(rax)) __ movq(dst_, rax);
  __ pop(receiver_);
}


void DeferredInlineSmiAdd::Generate() {
-  __ push(dst_);
-  __ Push(value_);
-  GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
-  __ CallStub(&igostub);
+  GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB);
+  igostub.GenerateCall(masm_, dst_, value_);
  if (!dst_.is(rax)) __ movq(dst_, rax);
}


void DeferredInlineSmiAddReversed::Generate() {
-  __ Push(value_);
-  __ push(dst_);
-  GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
-  __ CallStub(&igostub);
+  GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB);
+  igostub.GenerateCall(masm_, value_, dst_);
  if (!dst_.is(rax)) __ movq(dst_, rax);
}


void DeferredInlineSmiSub::Generate() {
-  __ push(dst_);
-  __ Push(value_);
-  GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED);
-  __ CallStub(&igostub);
-  if (!dst_.is(rax)) __ movq(dst_, rax);
+  GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, NO_SMI_CODE_IN_STUB);
+  igostub.GenerateCall(masm_, dst_, value_);
}


void DeferredInlineSmiOperation::Generate() {
-  __ push(src_);
-  __ Push(value_);
  // For mod we don't generate all the Smi code inline.
  GenericBinaryOpStub stub(
      op_,
      overwrite_mode_,
-      (op_ == Token::MOD) ? SMI_CODE_IN_STUB : SMI_CODE_INLINED);
-  __ CallStub(&stub);
+      (op_ == Token::MOD) ? NO_GENERIC_BINARY_FLAGS : NO_SMI_CODE_IN_STUB);
+  stub.GenerateCall(masm_, src_, value_);
  if (!dst_.is(rax)) __ movq(dst_, rax);
}


void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
                                               Result* operand,
                                               Handle<Object> value,
                                               SmiAnalysis* type,
                                               bool reversed,
                                               OverwriteMode overwrite_mode) {
(...skipping 2064 matching lines...)
    case Token::BIT_AND: return "GenericBinaryOpStub_BIT_AND";
    case Token::BIT_XOR: return "GenericBinaryOpStub_BIT_XOR";
    case Token::SAR: return "GenericBinaryOpStub_SAR";
    case Token::SHL: return "GenericBinaryOpStub_SHL";
    case Token::SHR: return "GenericBinaryOpStub_SHR";
    default: return "GenericBinaryOpStub";
  }
}


+void GenericBinaryOpStub::GenerateCall(
+    MacroAssembler* masm,
+    Register left,
+    Register right) {
+  if (!ArgsInRegistersSupported()) {
+    // Pass arguments on the stack.
+    __ push(left);
+    __ push(right);
+  } else {
+    // The calling convention with registers is left in rdx and right in rax.
+    Register left_arg = rdx;
+    Register right_arg = rax;
+    if (!(left.is(left_arg) && right.is(right_arg))) {
+      if (left.is(right_arg) && right.is(left_arg)) {
+        if (IsOperationCommutative()) {
+          SetArgsReversed();
+        } else {
+          __ xchg(left, right);
+        }
+      } else if (left.is(left_arg)) {
+        __ movq(right_arg, right);
+      } else if (left.is(right_arg)) {
+        if (IsOperationCommutative()) {
+          __ movq(left_arg, right);
+          SetArgsReversed();
+        } else {
+          // Order of moves important to avoid destroying left argument.
+          __ movq(left_arg, left);
+          __ movq(right_arg, right);
+        }
+      } else if (right.is(left_arg)) {
+        if (IsOperationCommutative()) {
+          __ movq(right_arg, left);
+          SetArgsReversed();
+        } else {
+          // Order of moves important to avoid destroying right argument.
+          __ movq(right_arg, right);
+          __ movq(left_arg, left);
+        }
+      } else if (right.is(right_arg)) {
+        __ movq(left_arg, left);
+      } else {
+        // Order of moves is not important.
+        __ movq(left_arg, left);
+        __ movq(right_arg, right);
+      }
+    }
+
+    // Update flags to indicate that arguments are in registers.
+    SetArgsInRegisters();
+    __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
+  }
+
+  // Call the stub.
+  __ CallStub(this);
+}
+
+
+void GenericBinaryOpStub::GenerateCall(
+    MacroAssembler* masm,
+    Register left,
+    Smi* right) {
+  if (!ArgsInRegistersSupported()) {
+    // Pass arguments on the stack.
+    __ push(left);
+    __ Push(right);
+  } else {
+    // The calling convention with registers is left in rdx and right in rax.
+    Register left_arg = rdx;
+    Register right_arg = rax;
+    if (left.is(left_arg)) {
+      __ Move(right_arg, right);
+    } else if (left.is(right_arg) && IsOperationCommutative()) {
+      __ Move(left_arg, right);
+      SetArgsReversed();
+    } else {
+      __ movq(left_arg, left);
+      __ Move(right_arg, right);
+    }
+
+    // Update flags to indicate that arguments are in registers.
+    SetArgsInRegisters();
+    __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
+  }
+
+  // Call the stub.
+  __ CallStub(this);
+}
+
+
+void GenericBinaryOpStub::GenerateCall(
+    MacroAssembler* masm,
+    Smi* left,
+    Register right) {
+  if (!ArgsInRegistersSupported()) {
+    // Pass arguments on the stack.
+    __ Push(left);
+    __ push(right);
+  } else {
+    // The calling convention with registers is left in rdx and right in rax.
+    Register left_arg = rdx;
+    Register right_arg = rax;
+    if (right.is(right_arg)) {
+      __ Move(left_arg, left);
+    } else if (right.is(left_arg) && IsOperationCommutative()) {
+      __ Move(right_arg, left);
+      SetArgsReversed();
+    } else {
+      __ Move(left_arg, left);
+      __ movq(right_arg, right);
+    }
+    // Update flags to indicate that arguments are in registers.
+    SetArgsInRegisters();
+    __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
+  }
+
+  // Call the stub.
+  __ CallStub(this);
+}
+
+
void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
  // Perform fast-case smi code for the operation (rax <op> rbx) and
  // leave result in register rax.

  // Smi check both operands.
  __ JumpIfNotBothSmi(rax, rbx, slow);

  switch (op_) {
    case Token::ADD: {
      __ SmiAdd(rax, rax, rbx, slow);
(...skipping 52 matching lines...)
    default:
      UNREACHABLE();
      break;
  }
}


void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
  Label call_runtime;
-  if (flags_ == SMI_CODE_IN_STUB) {
+  if (HasSmiCodeInStub()) {
    // The fast case smi code wasn't inlined in the stub caller
    // code. Generate it here to speed up common operations.
    Label slow;
    __ movq(rbx, Operand(rsp, 1 * kPointerSize));  // get y
    __ movq(rax, Operand(rsp, 2 * kPointerSize));  // get x
    GenerateSmiCode(masm, &slow);
-    __ ret(2 * kPointerSize);  // remove both operands
+    GenerateReturn(masm);

    // Too bad. The fast case smi code didn't succeed.
    __ bind(&slow);
  }

-  // Setup registers.
-  __ movq(rax, Operand(rsp, 1 * kPointerSize));  // get y
-  __ movq(rdx, Operand(rsp, 2 * kPointerSize));  // get x
+  // Make sure the arguments are in rdx and rax.
+  GenerateLoadArguments(masm);

  // Floating point case.
  switch (op_) {
    case Token::ADD:
    case Token::SUB:
    case Token::MUL:
    case Token::DIV: {
      // rax: y
      // rdx: x
      FloatingPointHelper::CheckNumberOperands(masm, &call_runtime);
      // Fast-case: Both operands are numbers.
      // Allocate a heap number, if needed.
      Label skip_allocation;
      switch (mode_) {
        case OVERWRITE_LEFT:
          __ movq(rax, rdx);
          // Fall through!
        case OVERWRITE_RIGHT:
          // If the argument in rax is already an object, we skip the
          // allocation of a heap number.
          __ JumpIfNotSmi(rax, &skip_allocation);
          // Fall through!
        case NO_OVERWRITE:
-          __ AllocateHeapNumber(rax, rcx, &call_runtime);
+          // Allocate a heap number for the result. Keep rax and rdx intact
+          // for the possible runtime call.
+          __ AllocateHeapNumber(rbx, rcx, &call_runtime);
+          __ movq(rax, rbx);
          __ bind(&skip_allocation);
          break;
        default: UNREACHABLE();
      }
      // xmm4 and xmm5 are volatile XMM registers.
      FloatingPointHelper::LoadFloatOperands(masm, xmm4, xmm5);

      switch (op_) {
        case Token::ADD: __ addsd(xmm4, xmm5); break;
        case Token::SUB: __ subsd(xmm4, xmm5); break;
        case Token::MUL: __ mulsd(xmm4, xmm5); break;
        case Token::DIV: __ divsd(xmm4, xmm5); break;
        default: UNREACHABLE();
      }
      __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm4);
-      __ ret(2 * kPointerSize);
+      GenerateReturn(masm);
    }
    case Token::MOD: {
      // For MOD we go directly to runtime in the non-smi case.
      break;
    }
    case Token::BIT_OR:
    case Token::BIT_AND:
    case Token::BIT_XOR:
    case Token::SAR:
    case Token::SHL:
(...skipping 47 matching lines...)
      }
      if (op_ == Token::SHR) {
        // Check if result is non-negative. This can only happen for a shift
        // by zero, which also doesn't update the sign flag.
        __ testl(rax, rax);
        __ j(negative, &non_smi_result);
      }
      __ JumpIfNotValidSmiValue(rax, &non_smi_result);
      // Tag smi result, if possible, and return.
      __ Integer32ToSmi(rax, rax);
-      __ ret(2 * kPointerSize);
+      GenerateReturn(masm);

      // All ops except SHR return a signed int32 that we load in a HeapNumber.
      if (op_ != Token::SHR && non_smi_result.is_linked()) {
        __ bind(&non_smi_result);
        // Allocate a heap number if needed.
        __ movsxlq(rbx, rax);  // rbx: sign extended 32-bit result
        switch (mode_) {
          case OVERWRITE_LEFT:
          case OVERWRITE_RIGHT:
            // If the operand was an object, we skip the
            // allocation of a heap number.
            __ movq(rax, Operand(rsp, mode_ == OVERWRITE_RIGHT ?
                                 1 * kPointerSize : 2 * kPointerSize));
            __ JumpIfNotSmi(rax, &skip_allocation);
            // Fall through!
          case NO_OVERWRITE:
            __ AllocateHeapNumber(rax, rcx, &call_runtime);
            __ bind(&skip_allocation);
            break;
          default: UNREACHABLE();
        }
        // Store the result in the HeapNumber and return.
        __ movq(Operand(rsp, 1 * kPointerSize), rbx);
        __ fild_s(Operand(rsp, 1 * kPointerSize));
        __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
-        __ ret(2 * kPointerSize);
+        GenerateReturn(masm);
      }

      // Clear the FPU exception flag and reset the stack before calling
      // the runtime system.
      __ bind(&operand_conversion_failure);
      __ addq(rsp, Immediate(2 * kPointerSize));
      if (use_sse3_) {
        // If we've used the SSE3 instructions for truncating the
        // floating point values to integers and it failed, we have a
        // pending #IA exception. Clear it.
(...skipping 10 matching lines...)
        __ bind(&non_smi_result);
      }
      __ movq(rax, Operand(rsp, 1 * kPointerSize));
      __ movq(rdx, Operand(rsp, 2 * kPointerSize));
      break;
    }
    default: UNREACHABLE(); break;
  }

  // If all else fails, use the runtime system to get the correct
-  // result.
+  // result. If arguments was passed in registers now place them on the
+  // stack in the correct order below the return address.
  __ bind(&call_runtime);
+  if (HasArgumentsInRegisters()) {
+    __ pop(rcx);
+    if (HasArgumentsReversed()) {
+      __ push(rax);
+      __ push(rdx);
+    } else {
+      __ push(rdx);
+      __ push(rax);
+    }
+    __ push(rcx);
+  }
  switch (op_) {
    case Token::ADD:
      __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
      break;
    case Token::SUB:
      __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
      break;
    case Token::MUL:
      __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
      break;
(...skipping 20 matching lines...)
      break;
    case Token::SHR:
      __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
      break;
    default:
      UNREACHABLE();
  }
}

+void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
+  // If arguments are not passed in registers read them from the stack.
+  if (!HasArgumentsInRegisters()) {
+    __ movq(rax, Operand(rsp, 1 * kPointerSize));
+    __ movq(rdx, Operand(rsp, 2 * kPointerSize));
+  }
+}
+
+
+void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
+  // If arguments are not passed in registers remove them from the stack before
+  // returning.
+  if (!HasArgumentsInRegisters()) {
+    __ ret(2 * kPointerSize);  // Remove both operands
+  } else {
+    __ ret(0);
+  }
+}
+
+
int CompareStub::MinorKey() {
  // Encode the two parameters in a unique 16 bit value.
  ASSERT(static_cast<unsigned>(cc_) < (1 << 15));
  return (static_cast<unsigned>(cc_) << 1) | (strict_ ? 1 : 0);
}

#undef __

#define __ masm.

(...skipping 82 matching lines...)
  masm.GetCode(&desc);
  // Call the function from C++.
  return FUNCTION_CAST<ModuloFunction>(buffer);
}

#endif

#undef __

} } // namespace v8::internal