Chromium Code Reviews

Unified Diff: src/x64/codegen-x64.cc

Issue 155073: X64: Fix LoadFloatOperands. (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: Created 11 years, 5 months ago
// Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
(...skipping 4368 matching lines...)
  } else {
    // Here we split control flow to the stub call and inlined cases
    // before finally splitting it to the control destination. We use
    // a jump target and branching to duplicate the virtual frame at
    // the first split. We manually handle the off-frame references
    // by reconstituting them on the non-fall-through path.
    JumpTarget is_smi;
    Register left_reg = left_side.reg();
    Register right_reg = right_side.reg();

-   __ movq(kScratchRegister, left_side.reg());
-   __ or_(kScratchRegister, right_side.reg());
+   __ movq(kScratchRegister, left_reg);
+   __ or_(kScratchRegister, right_reg);
    __ testl(kScratchRegister, Immediate(kSmiTagMask));
    is_smi.Branch(zero, taken);
    // When non-smi, call out to the compare stub.
    CompareStub stub(cc, strict);
    Result answer = frame_->CallStub(&stub, &left_side, &right_side);
    if (cc == equal) {
      __ testq(answer.reg(), answer.reg());
    } else {
      __ cmpq(answer.reg(), Immediate(0));
    }
(...skipping 2085 matching lines...)
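The movq/or_/testl sequence in the hunk above is the usual combined smi check: with kSmiTag == 0 and a one-bit tag, OR-ing the two operands and testing the low bit branches to is_smi only when both values are smis. A minimal C++ sketch of the predicate the emitted code computes (the BothSmi helper is hypothetical; the tag constants follow the ASSERTs later in this file):

    #include <cstdint>

    const std::intptr_t kSmiTagMask = 1;  // low-order tag bit; kSmiTag == 0

    // Hypothetical helper mirroring the movq/or_/testl sequence: both
    // operands are smis exactly when neither has its tag bit set, which
    // is what the zero flag reports after the testl.
    bool BothSmi(std::intptr_t left, std::intptr_t right) {
      return ((left | right) & kSmiTagMask) == 0;
    }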
  __ bind(&done_load_lhs);

  __ testl(rhs, Immediate(kSmiTagMask));
  __ j(zero, &load_smi_rhs);
  __ fld_d(FieldOperand(rhs, HeapNumber::kValueOffset));
  __ jmp(&done);

  __ bind(&load_smi_lhs);
  ASSERT(kSmiTagSize == 1);
  ASSERT(kSmiTag == 0);
- __ lea(kScratchRegister, Operand(lhs, lhs, times_1, 0));
+ __ movsxlq(kScratchRegister, lhs);
+ __ sar(kScratchRegister, Immediate(kSmiTagSize));
  __ push(kScratchRegister);
- __ fild_s(Operand(rsp, 0));
+ __ fild_d(Operand(rsp, 0));
  __ pop(kScratchRegister);
  __ jmp(&done_load_lhs);

  __ bind(&load_smi_rhs);
- __ movq(kScratchRegister, rhs);
+ __ movsxlq(kScratchRegister, rhs);
  __ sar(kScratchRegister, Immediate(kSmiTagSize));
  __ push(kScratchRegister);
- __ fild_s(Operand(rsp, 0));
+ __ fild_d(Operand(rsp, 0));
  __ pop(kScratchRegister);

  __ bind(&done);
}
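This hunk is the actual LoadFloatOperands fix named in the issue title. The old lhs path computed lhs + lhs with lea, which doubles the already-tagged value instead of untagging it, and the following fild_s read only 32 bits of the 64-bit word pushed on the stack. The new code sign-extends the tagged value, shifts the tag bit out, and loads a full 64-bit integer with fild_d. A sketch of the tag arithmetic, assuming only what the ASSERTs state (kSmiTagSize == 1, kSmiTag == 0):

    #include <cstdint>

    const int kSmiTagSize = 1;  // per the ASSERTs above; kSmiTag == 0

    // Tagging stores value << 1, leaving the low bit clear.
    std::int32_t SmiTag(std::int32_t value) { return value << kSmiTagSize; }

    // Untagging as the fixed code does it: sign-extend to 64 bits
    // (movsxlq), then shift the tag out arithmetically (sar).
    std::int64_t SmiUntag(std::int32_t tagged) {
      return static_cast<std::int64_t>(tagged) >> kSmiTagSize;
    }

    // The old lea computed tagged + tagged, i.e. value << 2: it applied
    // the tag a second time instead of removing it.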

void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
                                             Label* non_float) {
  Label test_other, done;
- // Test if both operands are floats or smi -> scratch=k_is_float;
- // Otherwise scratch = k_not_float.
+ // Test if both operands are numbers (heap_numbers or smis).
+ // If not, jump to label non_float.
  __ testl(rdx, Immediate(kSmiTagMask));
  __ j(zero, &test_other);  // argument in rdx is OK
- __ movq(kScratchRegister,
-         Factory::heap_number_map(),
-         RelocInfo::EMBEDDED_OBJECT);
- __ cmpq(kScratchRegister, FieldOperand(rdx, HeapObject::kMapOffset));
- __ j(not_equal, non_float);  // argument in rdx is not a number -> NaN
+ __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset), Factory::heap_number_map());
+ __ j(not_equal, non_float);  // The argument in rdx is not a number.

  __ bind(&test_other);
  __ testl(rax, Immediate(kSmiTagMask));
  __ j(zero, &done);  // argument in rax is OK
- __ movq(kScratchRegister,
-         Factory::heap_number_map(),
-         RelocInfo::EMBEDDED_OBJECT);
- __ cmpq(kScratchRegister, FieldOperand(rax, HeapObject::kMapOffset));
- __ j(not_equal, non_float);  // argument in rax is not a number -> NaN
+ __ Cmp(FieldOperand(rax, HeapObject::kMapOffset), Factory::heap_number_map());
+ __ j(not_equal, non_float);  // The argument in rax is not a number.

  // Fall-through: Both operands are numbers.
  __ bind(&done);
}
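The rewrite folds the old three-instruction movq/cmpq pattern into the Cmp macro-assembler helper, comparing the operand's map word directly against the heap-number map. Logically, each register passes the check when it holds a smi or a heap number. A self-contained sketch of that predicate (the object layout and helpers here are hypothetical stand-ins for V8's tagged pointers; the mask mirrors FieldOperand's heap-object-tag adjustment):

    #include <cstdint>

    struct Map {};
    const Map kHeapNumberMapObject{};                    // hypothetical stand-in
    const Map* heap_number_map = &kHeapNumberMapObject;  // for Factory::heap_number_map()

    struct HeapObject { const Map* map; };  // map word, cf. HeapObject::kMapOffset

    bool IsSmi(std::intptr_t value) { return (value & 1) == 0; }  // kSmiTag == 0

    // CheckFloatOperands falls through to done exactly when this holds for
    // both rdx and rax; otherwise it jumps to non_float.
    bool IsNumber(std::intptr_t value) {
      if (IsSmi(value)) return true;
      // Heap pointers carry a one-bit tag; masking it off reaches the object.
      const HeapObject* object =
          reinterpret_cast<const HeapObject*>(value & ~std::intptr_t(1));
      return object->map == heap_number_map;
    }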


const char* GenericBinaryOpStub::GetName() {
  switch (op_) {
    case Token::ADD: return "GenericBinaryOpStub_ADD";
    case Token::SUB: return "GenericBinaryOpStub_SUB";
(...skipping 242 matching lines...)
  if (use_sse3) {
    // Truncate the operands to 32-bit integers and check for
    // exceptions in doing so.
    CpuFeatures::Scope scope(CpuFeatures::SSE3);
    __ fisttp_s(Operand(rsp, 0 * kPointerSize));
    __ fisttp_s(Operand(rsp, 1 * kPointerSize));
    __ fnstsw_ax();
    __ testl(rax, Immediate(1));
    __ j(not_zero, &operand_conversion_failure);
  } else {
+   // TODO(X64): Verify that SSE3 is always supported, drop this code.
    // Check if right operand is int32.
    __ fist_s(Operand(rsp, 0 * kPointerSize));
    __ fild_s(Operand(rsp, 0 * kPointerSize));
    __ fucompp();
    __ fnstsw_ax();
    __ sahf();  // TODO(X64): Not available.
    __ j(not_zero, &operand_conversion_failure);
    __ j(parity_even, &operand_conversion_failure);

    // Check if left operand is int32.
(...skipping 134 matching lines...)
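In the non-SSE3 path above, the stub uses the classic x87 round-trip test: fist_s stores the value as an int32, fild_s reloads it, and fucompp compares it with the original; any mismatch, or an unordered result from a NaN (caught via the parity flag), aborts the conversion. A C++ sketch of the same idea, under the simplifying assumption that C++ truncation stands in for the x87 rounding mode:

    #include <cmath>
    #include <cstdint>

    // A double passes the operand-conversion check when converting it to
    // int32 and back reproduces it exactly (cf. fist_s / fild_s / fucompp).
    bool IsInt32Double(double value) {
      if (std::isnan(value)) return false;  // the parity_even (unordered) branch
      if (value < -2147483648.0 || value > 2147483647.0) return false;
      std::int32_t converted = static_cast<std::int32_t>(value);
      return static_cast<double>(converted) == value;
    }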
int CompareStub::MinorKey() {
  // Encode the two parameters in a unique 16 bit value.
  ASSERT(static_cast<unsigned>(cc_) < (1 << 15));
  return (static_cast<unsigned>(cc_) << 1) | (strict_ ? 1 : 0);
}
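MinorKey packs the stub's two parameters into a single small integer, the condition code in the upper bits and the strict flag in bit 0, so each (cc_, strict_) combination maps to a distinct stub key. A short sketch of the packing and its inverse (the decode helpers are hypothetical additions for illustration):

    #include <cassert>

    // Mirrors CompareStub::MinorKey: condition code in bits 15..1,
    // strictness flag in bit 0.
    int MinorKey(unsigned cc, bool strict) {
      assert(cc < (1u << 15));
      return static_cast<int>((cc << 1) | (strict ? 1u : 0u));
    }

    // Hypothetical decode helpers, inverting the packing.
    unsigned ConditionOf(int key) { return static_cast<unsigned>(key) >> 1; }
    bool IsStrict(int key) { return (key & 1) != 0; }

    // Example: MinorKey(4, true) == 9, ConditionOf(9) == 4, IsStrict(9) == true.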


#undef __

} }  // namespace v8::internal
