Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(488)

Side by Side Diff: src/x64/codegen-x64.cc

Issue 1687014: Port improved ia32 CompareStub to x64. Add framework for inlined floating po... (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: Created 10 years, 8 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « src/x64/codegen-x64.h ('k') | no next file » | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2010 the V8 project authors. All rights reserved. 1 // Copyright 2010 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 184 matching lines...) Expand 10 before | Expand all | Expand 10 after
195 public: 195 public:
196 // Code pattern for loading a floating point value. Input value must 196 // Code pattern for loading a floating point value. Input value must
197 // be either a smi or a heap number object (fp value). Requirements: 197 // be either a smi or a heap number object (fp value). Requirements:
198 // operand on TOS+1. Returns operand as floating point number on FPU 198 // operand on TOS+1. Returns operand as floating point number on FPU
199 // stack. 199 // stack.
200 static void LoadFloatOperand(MacroAssembler* masm, Register scratch); 200 static void LoadFloatOperand(MacroAssembler* masm, Register scratch);
201 201
202 // Code pattern for loading a floating point value. Input value must 202 // Code pattern for loading a floating point value. Input value must
203 // be either a smi or a heap number object (fp value). Requirements: 203 // be either a smi or a heap number object (fp value). Requirements:
204 // operand in src register. Returns operand as floating point number 204 // operand in src register. Returns operand as floating point number
205 // in XMM register 205 // in XMM register. May destroy src register.
206 static void LoadFloatOperand(MacroAssembler* masm, 206 static void LoadFloatOperand(MacroAssembler* masm,
207 Register src, 207 Register src,
208 XMMRegister dst); 208 XMMRegister dst);
209 209
 210 // Code pattern for loading a possible number into an XMM register.
 211 // If the value in src is not a number, control branches to
 212 // the Label not_number. If the value in src is a smi or a heap number
 213 // object (fp value), it is loaded into the XMM register as a double.
 214 // The register src is not changed, and src may not be kScratchRegister.
215 static void LoadFloatOperand(MacroAssembler* masm,
216 Register src,
217 XMMRegister dst,
218 Label *not_number);
219
210 // Code pattern for loading floating point values. Input values must 220 // Code pattern for loading floating point values. Input values must
211 // be either smi or heap number objects (fp values). Requirements: 221 // be either smi or heap number objects (fp values). Requirements:
212 // operand_1 in rdx, operand_2 in rax; Returns operands as 222 // operand_1 in rdx, operand_2 in rax; Returns operands as
213 // floating point numbers in XMM registers. 223 // floating point numbers in XMM registers.
214 static void LoadFloatOperands(MacroAssembler* masm, 224 static void LoadFloatOperands(MacroAssembler* masm,
215 XMMRegister dst1, 225 XMMRegister dst1,
216 XMMRegister dst2); 226 XMMRegister dst2);
217 227
218 // Similar to LoadFloatOperands, assumes that the operands are smis. 228 // Similar to LoadFloatOperands, assumes that the operands are smis.
219 static void LoadFloatOperandsFromSmis(MacroAssembler* masm, 229 static void LoadFloatOperandsFromSmis(MacroAssembler* masm,
(...skipping 5093 matching lines...) Expand 10 before | Expand all | Expand 10 after
5313 5323
5314 static bool CouldBeNaN(const Result& result) { 5324 static bool CouldBeNaN(const Result& result) {
5315 if (result.type_info().IsSmi()) return false; 5325 if (result.type_info().IsSmi()) return false;
5316 if (result.type_info().IsInteger32()) return false; 5326 if (result.type_info().IsInteger32()) return false;
5317 if (!result.is_constant()) return true; 5327 if (!result.is_constant()) return true;
5318 if (!result.handle()->IsHeapNumber()) return false; 5328 if (!result.handle()->IsHeapNumber()) return false;
5319 return isnan(HeapNumber::cast(*result.handle())->value()); 5329 return isnan(HeapNumber::cast(*result.handle())->value());
5320 } 5330 }
5321 5331
5322 5332
5333 // Convert from signed to unsigned comparison to match the way EFLAGS are set
5334 // by FPU and XMM compare instructions.
5335 static Condition DoubleCondition(Condition cc) {
5336 switch (cc) {
5337 case less: return below;
5338 case equal: return equal;
5339 case less_equal: return below_equal;
5340 case greater: return above;
5341 case greater_equal: return above_equal;
5342 default: UNREACHABLE();
5343 }
5344 UNREACHABLE();
5345 return equal;
5346 }
5347
5348
5323 void CodeGenerator::Comparison(AstNode* node, 5349 void CodeGenerator::Comparison(AstNode* node,
5324 Condition cc, 5350 Condition cc,
5325 bool strict, 5351 bool strict,
5326 ControlDestination* dest) { 5352 ControlDestination* dest) {
5327 // Strict only makes sense for equality comparisons. 5353 // Strict only makes sense for equality comparisons.
5328 ASSERT(!strict || cc == equal); 5354 ASSERT(!strict || cc == equal);
5329 5355
5330 Result left_side; 5356 Result left_side;
5331 Result right_side; 5357 Result right_side;
5332 // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order. 5358 // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
(...skipping 51 matching lines...) Expand 10 before | Expand all | Expand 10 after
5384 } 5410 }
5385 } else { 5411 } else {
5386 // Only one side is a constant Smi. 5412 // Only one side is a constant Smi.
5387 // If left side is a constant Smi, reverse the operands. 5413 // If left side is a constant Smi, reverse the operands.
5388 // Since one side is a constant Smi, conversion order does not matter. 5414 // Since one side is a constant Smi, conversion order does not matter.
5389 if (left_side_constant_smi) { 5415 if (left_side_constant_smi) {
5390 Result temp = left_side; 5416 Result temp = left_side;
5391 left_side = right_side; 5417 left_side = right_side;
5392 right_side = temp; 5418 right_side = temp;
5393 cc = ReverseCondition(cc); 5419 cc = ReverseCondition(cc);
5394 // This may reintroduce greater or less_equal as the value of cc. 5420 // This may re-introduce greater or less_equal as the value of cc.
5395 // CompareStub and the inline code both support all values of cc. 5421 // CompareStub and the inline code both support all values of cc.
5396 } 5422 }
5397 // Implement comparison against a constant Smi, inlining the case 5423 // Implement comparison against a constant Smi, inlining the case
5398 // where both sides are Smis. 5424 // where both sides are Smis.
5399 left_side.ToRegister(); 5425 left_side.ToRegister();
5400 Register left_reg = left_side.reg(); 5426 Register left_reg = left_side.reg();
5401 Handle<Object> right_val = right_side.handle(); 5427 Handle<Object> right_val = right_side.handle();
5402 5428
5403 // Here we split control flow to the stub call and inlined cases 5429 // Here we split control flow to the stub call and inlined cases
5404 // before finally splitting it to the control destination. We use 5430 // before finally splitting it to the control destination. We use
(...skipping 22 matching lines...) Expand all
5427 } else { 5453 } else {
5428 Result temp = allocator()->Allocate(); 5454 Result temp = allocator()->Allocate();
5429 __ movl(temp.reg(), Immediate(value)); 5455 __ movl(temp.reg(), Immediate(value));
5430 __ cvtlsi2sd(xmm0, temp.reg()); 5456 __ cvtlsi2sd(xmm0, temp.reg());
5431 temp.Unuse(); 5457 temp.Unuse();
5432 } 5458 }
5433 __ ucomisd(xmm1, xmm0); 5459 __ ucomisd(xmm1, xmm0);
5434 // Jump to builtin for NaN. 5460 // Jump to builtin for NaN.
5435 not_number.Branch(parity_even, &left_side); 5461 not_number.Branch(parity_even, &left_side);
5436 left_side.Unuse(); 5462 left_side.Unuse();
5437 Condition double_cc = cc; 5463 dest->true_target()->Branch(DoubleCondition(cc));
5438 switch (cc) {
5439 case less: double_cc = below; break;
5440 case equal: double_cc = equal; break;
5441 case less_equal: double_cc = below_equal; break;
5442 case greater: double_cc = above; break;
5443 case greater_equal: double_cc = above_equal; break;
5444 default: UNREACHABLE();
5445 }
5446 dest->true_target()->Branch(double_cc);
5447 dest->false_target()->Jump(); 5464 dest->false_target()->Jump();
5448 not_number.Bind(&left_side); 5465 not_number.Bind(&left_side);
5449 } 5466 }
5450 5467
5451 // Setup and call the compare stub. 5468 // Setup and call the compare stub.
5452 CompareStub stub(cc, strict); 5469 CompareStub stub(cc, strict, kCantBothBeNaN);
5453 Result result = frame_->CallStub(&stub, &left_side, &right_side); 5470 Result result = frame_->CallStub(&stub, &left_side, &right_side);
5454 result.ToRegister(); 5471 result.ToRegister();
5455 __ testq(result.reg(), result.reg()); 5472 __ testq(result.reg(), result.reg());
5456 result.Unuse(); 5473 result.Unuse();
5457 dest->true_target()->Branch(cc); 5474 dest->true_target()->Branch(cc);
5458 dest->false_target()->Jump(); 5475 dest->false_target()->Jump();
5459 5476
5460 is_smi.Bind(); 5477 is_smi.Bind();
5461 left_side = Result(left_reg); 5478 left_side = Result(left_reg);
5462 right_side = Result(right_val); 5479 right_side = Result(right_val);
(...skipping 172 matching lines...) Expand 10 before | Expand all | Expand 10 after
5635 temp2.Unuse(); 5652 temp2.Unuse();
5636 left_side.Unuse(); 5653 left_side.Unuse();
5637 right_side.Unuse(); 5654 right_side.Unuse();
5638 dest->Split(cc); 5655 dest->Split(cc);
5639 } 5656 }
5640 } else { 5657 } else {
5641 // Neither side is a constant Smi, constant 1-char string, or constant null. 5658 // Neither side is a constant Smi, constant 1-char string, or constant null.
5642 // If either side is a non-smi constant, skip the smi check. 5659 // If either side is a non-smi constant, skip the smi check.
5643 bool known_non_smi = 5660 bool known_non_smi =
5644 (left_side.is_constant() && !left_side.handle()->IsSmi()) || 5661 (left_side.is_constant() && !left_side.handle()->IsSmi()) ||
5645 (right_side.is_constant() && !right_side.handle()->IsSmi()); 5662 (right_side.is_constant() && !right_side.handle()->IsSmi()) ||
5663 left_side.type_info().IsDouble() ||
5664 right_side.type_info().IsDouble();
5646 5665
5647 NaNInformation nan_info = 5666 NaNInformation nan_info =
5648 (CouldBeNaN(left_side) && CouldBeNaN(right_side)) ? 5667 (CouldBeNaN(left_side) && CouldBeNaN(right_side)) ?
5649 kBothCouldBeNaN : 5668 kBothCouldBeNaN :
5650 kCantBothBeNaN; 5669 kCantBothBeNaN;
5651 5670
 5671 // Inline the number comparison, handling any combination of smis and
 5672 // heap numbers, only if all of the following hold:
 5673 // - the code is in a loop,
 5674 // - the compare operation is different from equal, and
 5675 // - the compare is not a for-loop condition.
 5676 // The reason for excluding equal is that it will most likely be done
 5677 // with smis (not heap numbers), and the code for comparing smis is inlined
 5678 // separately. The same reason applies to the for-loop condition, which will
 5679 // also most likely be a smi comparison.
5680 bool is_loop_condition = (node->AsExpression() != NULL)
5681 && node->AsExpression()->is_loop_condition();
5682 bool inline_number_compare =
5683 loop_nesting() > 0 && cc != equal && !is_loop_condition;
5684
5652 left_side.ToRegister(); 5685 left_side.ToRegister();
5653 right_side.ToRegister(); 5686 right_side.ToRegister();
5654 5687
5655 if (known_non_smi) { 5688 if (known_non_smi) {
5689 // Inlined equality check:
5656 // If at least one of the objects is not NaN, then if the objects 5690 // If at least one of the objects is not NaN, then if the objects
5657 // are identical, they are equal. 5691 // are identical, they are equal.
5658 if (nan_info == kCantBothBeNaN && cc == equal) { 5692 if (nan_info == kCantBothBeNaN && cc == equal) {
5659 __ cmpq(left_side.reg(), right_side.reg()); 5693 __ cmpq(left_side.reg(), right_side.reg());
5660 dest->true_target()->Branch(equal); 5694 dest->true_target()->Branch(equal);
5661 } 5695 }
5662 5696
5663 // When non-smi, call out to the compare stub. 5697 // Inlined number comparison:
5664 CompareStub stub(cc, strict); 5698 if (inline_number_compare) {
5699 GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
5700 }
5701
5702 // Call the compare stub.
5703 // TODO(whesse@chromium.org): Enable the inlining flag once
5704 // GenerateInlineNumberComparison is implemented.
5705 CompareStub stub(cc, strict, nan_info, true || !inline_number_compare);
5665 Result answer = frame_->CallStub(&stub, &left_side, &right_side); 5706 Result answer = frame_->CallStub(&stub, &left_side, &right_side);
5666 // The result is a Smi, which is negative, zero, or positive. 5707 // The result is a Smi, which is negative, zero, or positive.
5667 __ SmiTest(answer.reg()); // Sets both zero and sign flag. 5708 __ SmiTest(answer.reg()); // Sets both zero and sign flag.
5668 answer.Unuse(); 5709 answer.Unuse();
5669 dest->Split(cc); 5710 dest->Split(cc);
5670 } else { 5711 } else {
5671 // Here we split control flow to the stub call and inlined cases 5712 // Here we split control flow to the stub call and inlined cases
5672 // before finally splitting it to the control destination. We use 5713 // before finally splitting it to the control destination. We use
5673 // a jump target and branching to duplicate the virtual frame at 5714 // a jump target and branching to duplicate the virtual frame at
5674 // the first split. We manually handle the off-frame references 5715 // the first split. We manually handle the off-frame references
5675 // by reconstituting them on the non-fall-through path. 5716 // by reconstituting them on the non-fall-through path.
5676 JumpTarget is_smi; 5717 JumpTarget is_smi;
5677 Register left_reg = left_side.reg(); 5718 Register left_reg = left_side.reg();
5678 Register right_reg = right_side.reg(); 5719 Register right_reg = right_side.reg();
5679 5720
5680 Condition both_smi = masm_->CheckBothSmi(left_reg, right_reg); 5721 Condition both_smi = masm_->CheckBothSmi(left_reg, right_reg);
5681 is_smi.Branch(both_smi); 5722 is_smi.Branch(both_smi);
5682 // When non-smi, call out to the compare stub, after inlined checks. 5723
5683 // If at least one of the objects is not NaN, then if the objects 5724 // Inline the equality check if both operands can't be a NaN. If both
5684 // are identical, they are equal. 5725 // objects are the same they are equal.
5685 if (nan_info == kCantBothBeNaN && cc == equal) { 5726 if (nan_info == kCantBothBeNaN && cc == equal) {
5686 __ cmpq(left_side.reg(), right_side.reg()); 5727 __ cmpq(left_side.reg(), right_side.reg());
5687 dest->true_target()->Branch(equal); 5728 dest->true_target()->Branch(equal);
5688 } 5729 }
5689 5730
5690 CompareStub stub(cc, strict); 5731 // Inlined number comparison:
5732 if (inline_number_compare) {
5733 GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
5734 }
5735
5736 // Call the compare stub.
5737 // TODO(whesse@chromium.org): Enable the inlining flag once
5738 // GenerateInlineNumberComparison is implemented.
5739 CompareStub stub(cc, strict, nan_info, true || !inline_number_compare);
5691 Result answer = frame_->CallStub(&stub, &left_side, &right_side); 5740 Result answer = frame_->CallStub(&stub, &left_side, &right_side);
5692 __ SmiTest(answer.reg()); // Sets both zero and sign flags. 5741 __ SmiTest(answer.reg()); // Sets both zero and sign flags.
5693 answer.Unuse(); 5742 answer.Unuse();
5694 dest->true_target()->Branch(cc); 5743 dest->true_target()->Branch(cc);
5695 dest->false_target()->Jump(); 5744 dest->false_target()->Jump();
5696 5745
5697 is_smi.Bind(); 5746 is_smi.Bind();
5698 left_side = Result(left_reg); 5747 left_side = Result(left_reg);
5699 right_side = Result(right_reg); 5748 right_side = Result(right_reg);
5700 __ SmiCompare(left_side.reg(), right_side.reg()); 5749 __ SmiCompare(left_side.reg(), right_side.reg());
5701 right_side.Unuse(); 5750 right_side.Unuse();
5702 left_side.Unuse(); 5751 left_side.Unuse();
5703 dest->Split(cc); 5752 dest->Split(cc);
5704 } 5753 }
5705 } 5754 }
5706 } 5755 }
5707 5756
5708 5757
5758 void CodeGenerator::GenerateInlineNumberComparison(Result* left_side,
5759 Result* right_side,
5760 Condition cc,
5761 ControlDestination* dest) {
5762 ASSERT(left_side->is_register());
5763 ASSERT(right_side->is_register());
5764 // TODO(whesse@chromium.org): Implement this function, and enable the
5765 // corresponding flags in the CompareStub.
5766 }
5767
5768
5709 class DeferredInlineBinaryOperation: public DeferredCode { 5769 class DeferredInlineBinaryOperation: public DeferredCode {
5710 public: 5770 public:
5711 DeferredInlineBinaryOperation(Token::Value op, 5771 DeferredInlineBinaryOperation(Token::Value op,
5712 Register dst, 5772 Register dst,
5713 Register left, 5773 Register left,
5714 Register right, 5774 Register right,
5715 OverwriteMode mode) 5775 OverwriteMode mode)
5716 : op_(op), dst_(dst), left_(left), right_(right), mode_(mode) { 5776 : op_(op), dst_(dst), left_(left), right_(right), mode_(mode) {
5717 set_comment("[ DeferredInlineBinaryOperation"); 5777 set_comment("[ DeferredInlineBinaryOperation");
5718 } 5778 }
(...skipping 2055 matching lines...) Expand 10 before | Expand all | Expand 10 after
7774 // Generate code to lookup number in the number string cache. 7834 // Generate code to lookup number in the number string cache.
7775 GenerateLookupNumberStringCache(masm, rbx, rax, r8, r9, false, &runtime); 7835 GenerateLookupNumberStringCache(masm, rbx, rax, r8, r9, false, &runtime);
7776 __ ret(1 * kPointerSize); 7836 __ ret(1 * kPointerSize);
7777 7837
7778 __ bind(&runtime); 7838 __ bind(&runtime);
7779 // Handle number to string in the runtime system if not found in the cache. 7839 // Handle number to string in the runtime system if not found in the cache.
7780 __ TailCallRuntime(Runtime::kNumberToString, 1, 1); 7840 __ TailCallRuntime(Runtime::kNumberToString, 1, 1);
7781 } 7841 }
7782 7842
7783 7843
7844 static int NegativeComparisonResult(Condition cc) {
7845 ASSERT(cc != equal);
7846 ASSERT((cc == less) || (cc == less_equal)
7847 || (cc == greater) || (cc == greater_equal));
7848 return (cc == greater || cc == greater_equal) ? LESS : GREATER;
7849 }
7850
7784 void CompareStub::Generate(MacroAssembler* masm) { 7851 void CompareStub::Generate(MacroAssembler* masm) {
7785 Label call_builtin, done; 7852 Label call_builtin, done;
7786 7853
7787 // NOTICE! This code is only reached after a smi-fast-case check, so 7854 // NOTICE! This code is only reached after a smi-fast-case check, so
7788 // it is certain that at least one operand isn't a smi. 7855 // it is certain that at least one operand isn't a smi.
7789 7856
7857 // Identical objects can be compared fast, but there are some tricky cases
7858 // for NaN and undefined.
7859 {
7860 Label not_identical;
7861 __ cmpq(rax, rdx);
7862 __ j(not_equal, &not_identical);
7863
7864 if (cc_ != equal) {
7865 // Check for undefined. undefined OP undefined is false even though
7866 // undefined == undefined.
7867 Label check_for_nan;
7868 __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
7869 __ j(not_equal, &check_for_nan);
7870 __ Set(rax, NegativeComparisonResult(cc_));
7871 __ ret(0);
7872 __ bind(&check_for_nan);
7873 }
7874
 7875 // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
 7876 // so we do the second-best thing - test it ourselves.
 7877 // Note: if cc_ != equal, never_nan_nan_ is not used.
7878 if (never_nan_nan_ && (cc_ == equal)) {
7879 __ Set(rax, EQUAL);
7880 __ ret(0);
7881 } else {
7882 Label return_equal;
7883 Label heap_number;
7884 // If it's not a heap number, then return equal.
7885 __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
7886 Factory::heap_number_map());
7887 __ j(equal, &heap_number);
7888 __ bind(&return_equal);
7889 __ Set(rax, EQUAL);
7890 __ ret(0);
7891
7892 __ bind(&heap_number);
7893 // It is a heap number, so return non-equal if it's NaN and equal if
7894 // it's not NaN.
7895 // The representation of NaN values has all exponent bits (52..62) set,
7896 // and not all mantissa bits (0..51) clear.
7897 // We only allow QNaNs, which have bit 51 set (which also rules out
7898 // the value being Infinity).
7899
7900 // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
7901 // all bits in the mask are set. We only need to check the word
7902 // that contains the exponent and high bit of the mantissa.
7903 ASSERT_NE(0, (kQuietNaNHighBitsMask << 1) & 0x80000000u);
7904 __ movl(rdx, FieldOperand(rdx, HeapNumber::kExponentOffset));
7905 __ xorl(rax, rax);
7906 __ addl(rdx, rdx); // Shift value and mask so mask applies to top bits.
7907 __ cmpl(rdx, Immediate(kQuietNaNHighBitsMask << 1));
7908 if (cc_ == equal) {
7909 __ setcc(above_equal, rax);
7910 __ ret(0);
7911 } else {
7912 Label nan;
7913 __ j(above_equal, &nan);
7914 __ Set(rax, EQUAL);
7915 __ ret(0);
7916 __ bind(&nan);
7917 __ Set(rax, NegativeComparisonResult(cc_));
7918 __ ret(0);
7919 }
7920 }
7921
7922 __ bind(&not_identical);
7923 }
7924
7790 if (cc_ == equal) { // Both strict and non-strict. 7925 if (cc_ == equal) { // Both strict and non-strict.
7791 Label slow; // Fallthrough label. 7926 Label slow; // Fallthrough label.
7792 // Equality is almost reflexive (everything but NaN), so start by testing
7793 // for "identity and not NaN".
7794 {
7795 Label not_identical;
7796 __ cmpq(rax, rdx);
7797 __ j(not_equal, &not_identical);
7798 // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
7799 // so we do the second best thing - test it ourselves.
7800
7801 if (never_nan_nan_) {
7802 __ xor_(rax, rax);
7803 __ ret(0);
7804 } else {
7805 Label return_equal;
7806 Label heap_number;
7807 // If it's not a heap number, then return equal.
7808 __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
7809 Factory::heap_number_map());
7810 __ j(equal, &heap_number);
7811 __ bind(&return_equal);
7812 __ xor_(rax, rax);
7813 __ ret(0);
7814
7815 __ bind(&heap_number);
7816 // It is a heap number, so return non-equal if it's NaN and equal if
7817 // it's not NaN.
7818 // The representation of NaN values has all exponent bits (52..62) set,
7819 // and not all mantissa bits (0..51) clear.
7820 // We only allow QNaNs, which have bit 51 set (which also rules out
7821 // the value being Infinity).
7822
7823 // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
7824 // all bits in the mask are set. We only need to check the word
7825 // that contains the exponent and high bit of the mantissa.
7826 ASSERT_NE(0, (kQuietNaNHighBitsMask << 1) & 0x80000000u);
7827 __ movl(rdx, FieldOperand(rdx, HeapNumber::kExponentOffset));
7828 __ xorl(rax, rax);
7829 __ addl(rdx, rdx); // Shift value and mask so mask applies to top bits.
7830 __ cmpl(rdx, Immediate(kQuietNaNHighBitsMask << 1));
7831 __ setcc(above_equal, rax);
7832 __ ret(0);
7833 }
7834
7835 __ bind(&not_identical);
7836 }
7837 7927
7838 // If we're doing a strict equality comparison, we don't have to do 7928 // If we're doing a strict equality comparison, we don't have to do
7839 // type conversion, so we generate code to do fast comparison for objects 7929 // type conversion, so we generate code to do fast comparison for objects
7840 // and oddballs. Non-smi numbers and strings still go through the usual 7930 // and oddballs. Non-smi numbers and strings still go through the usual
7841 // slow-case code. 7931 // slow-case code.
7842 if (strict_) { 7932 if (strict_) {
7843 // If either is a Smi (we know that not both are), then they can only 7933 // If either is a Smi (we know that not both are), then they can only
7844 // be equal if the other is a HeapNumber. If so, use the slow case. 7934 // be equal if the other is a HeapNumber. If so, use the slow case.
7845 { 7935 {
7846 Label not_smis; 7936 Label not_smis;
(...skipping 42 matching lines...) Expand 10 before | Expand all | Expand 10 after
7889 } 7979 }
7890 __ bind(&slow); 7980 __ bind(&slow);
7891 } 7981 }
7892 7982
7893 // Push arguments below the return address to prepare jump to builtin. 7983 // Push arguments below the return address to prepare jump to builtin.
7894 __ pop(rcx); 7984 __ pop(rcx);
7895 __ push(rax); 7985 __ push(rax);
7896 __ push(rdx); 7986 __ push(rdx);
7897 __ push(rcx); 7987 __ push(rcx);
7898 7988
7899 // Inlined floating point compare. 7989 // Generate the number comparison code.
7900 // Call builtin if operands are not floating point or smi. 7990 if (include_number_compare_) {
7901 Label check_for_symbols; 7991 Label non_number_comparison;
7902 // Push arguments on stack, for helper functions. 7992 Label unordered;
7903 FloatingPointHelper::CheckNumberOperands(masm, &check_for_symbols); 7993 FloatingPointHelper::LoadFloatOperand(masm, rdx, xmm0,
7904 FloatingPointHelper::LoadFloatOperands(masm, rax, rdx); 7994 &non_number_comparison);
7905 __ FCmp(); 7995 FloatingPointHelper::LoadFloatOperand(masm, rax, xmm1,
7996 &non_number_comparison);
7906 7997
7907 // Jump to builtin for NaN. 7998 __ comisd(xmm0, xmm1);
7908 __ j(parity_even, &call_builtin);
7909 7999
7910 // TODO(1243847): Use cmov below once CpuFeatures are properly hooked up. 8000 // Don't base result on EFLAGS when a NaN is involved.
7911 Label below_lbl, above_lbl; 8001 __ j(parity_even, &unordered);
7912 // use rdx, rax to convert unsigned to signed comparison 8002 // Return a result of -1, 0, or 1, based on EFLAGS.
7913 __ j(below, &below_lbl); 8003 __ movq(rax, Immediate(0)); // equal
7914 __ j(above, &above_lbl); 8004 __ movq(rcx, Immediate(1));
8005 __ cmovq(above, rax, rcx);
8006 __ movq(rcx, Immediate(-1));
8007 __ cmovq(below, rax, rcx);
8008 __ ret(2 * kPointerSize); // rax, rdx were pushed
7915 8009
7916 __ xor_(rax, rax); // equal 8010 // If one of the numbers was NaN, then the result is always false.
7917 __ ret(2 * kPointerSize); 8011 // The cc is never not-equal.
8012 __ bind(&unordered);
8013 ASSERT(cc_ != not_equal);
8014 if (cc_ == less || cc_ == less_equal) {
8015 __ Set(rax, 1);
8016 } else {
8017 __ Set(rax, -1);
8018 }
8019 __ ret(2 * kPointerSize); // rax, rdx were pushed
7918 8020
7919 __ bind(&below_lbl); 8021 // The number comparison code did not provide a valid result.
7920 __ movq(rax, Immediate(-1)); 8022 __ bind(&non_number_comparison);
7921 __ ret(2 * kPointerSize); 8023 }
7922
7923 __ bind(&above_lbl);
7924 __ movq(rax, Immediate(1));
7925 __ ret(2 * kPointerSize); // rax, rdx were pushed
7926 8024
7927 // Fast negative check for symbol-to-symbol equality. 8025 // Fast negative check for symbol-to-symbol equality.
7928 __ bind(&check_for_symbols);
7929 Label check_for_strings; 8026 Label check_for_strings;
7930 if (cc_ == equal) { 8027 if (cc_ == equal) {
7931 BranchIfNonSymbol(masm, &check_for_strings, rax, kScratchRegister); 8028 BranchIfNonSymbol(masm, &check_for_strings, rax, kScratchRegister);
7932 BranchIfNonSymbol(masm, &check_for_strings, rdx, kScratchRegister); 8029 BranchIfNonSymbol(masm, &check_for_strings, rdx, kScratchRegister);
7933 8030
7934 // We've already checked for object identity, so if both operands 8031 // We've already checked for object identity, so if both operands
7935 // are symbols they aren't equal. Register eax (not rax) already holds a 8032 // are symbols they aren't equal. Register eax (not rax) already holds a
7936 // non-zero value, which indicates not equal, so just return. 8033 // non-zero value, which indicates not equal, so just return.
7937 __ ret(2 * kPointerSize); 8034 __ ret(2 * kPointerSize);
7938 } 8035 }
(...skipping 22 matching lines...) Expand all
7961 __ pop(rax); 8058 __ pop(rax);
7962 __ push(rdx); 8059 __ push(rdx);
7963 __ push(rax); 8060 __ push(rax);
7964 8061
7965 // Figure out which native to call and setup the arguments. 8062 // Figure out which native to call and setup the arguments.
7966 Builtins::JavaScript builtin; 8063 Builtins::JavaScript builtin;
7967 if (cc_ == equal) { 8064 if (cc_ == equal) {
7968 builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS; 8065 builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
7969 } else { 8066 } else {
7970 builtin = Builtins::COMPARE; 8067 builtin = Builtins::COMPARE;
7971 int ncr; // NaN compare result 8068 __ push(Immediate(NegativeComparisonResult(cc_)));
7972 if (cc_ == less || cc_ == less_equal) {
7973 ncr = GREATER;
7974 } else {
7975 ASSERT(cc_ == greater || cc_ == greater_equal); // remaining cases
7976 ncr = LESS;
7977 }
7978 __ Push(Smi::FromInt(ncr));
7979 } 8069 }
7980 8070
7981 // Restore return address on the stack. 8071 // Restore return address on the stack.
7982 __ push(rcx); 8072 __ push(rcx);
7983 8073
7984 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater) 8074 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
7985 // tagged as a small integer. 8075 // tagged as a small integer.
7986 __ InvokeBuiltin(builtin, JUMP_FUNCTION); 8076 __ InvokeBuiltin(builtin, JUMP_FUNCTION);
7987 } 8077 }
7988 8078
(...skipping 768 matching lines...) Expand 10 before | Expand all | Expand 10 after
8757 __ jmp(&done); 8847 __ jmp(&done);
8758 8848
8759 __ bind(&load_smi); 8849 __ bind(&load_smi);
8760 __ SmiToInteger32(src, src); 8850 __ SmiToInteger32(src, src);
8761 __ cvtlsi2sd(dst, src); 8851 __ cvtlsi2sd(dst, src);
8762 8852
8763 __ bind(&done); 8853 __ bind(&done);
8764 } 8854 }
8765 8855
8766 8856
8857 void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
8858 Register src,
8859 XMMRegister dst,
8860 Label* not_number) {
8861 Label load_smi, done;
8862 ASSERT(!src.is(kScratchRegister));
8863 __ JumpIfSmi(src, &load_smi);
8864 __ LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
8865 __ cmpq(FieldOperand(src, HeapObject::kMapOffset), kScratchRegister);
8866 __ j(not_equal, not_number);
8867 __ movsd(dst, FieldOperand(src, HeapNumber::kValueOffset));
8868 __ jmp(&done);
8869
8870 __ bind(&load_smi);
8871 __ SmiToInteger32(kScratchRegister, src);
8872 __ cvtlsi2sd(dst, kScratchRegister);
8873
8874 __ bind(&done);
8875 }
8876
8877
8767 void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm, 8878 void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
8768 XMMRegister dst1, 8879 XMMRegister dst1,
8769 XMMRegister dst2) { 8880 XMMRegister dst2) {
8770 __ movq(kScratchRegister, rdx); 8881 __ movq(kScratchRegister, rdx);
8771 LoadFloatOperand(masm, kScratchRegister, dst1); 8882 LoadFloatOperand(masm, kScratchRegister, dst1);
8772 __ movq(kScratchRegister, rax); 8883 __ movq(kScratchRegister, rax);
8773 LoadFloatOperand(masm, kScratchRegister, dst2); 8884 LoadFloatOperand(masm, kScratchRegister, dst2);
8774 } 8885 }
8775 8886
8776 8887
(...skipping 1708 matching lines...) Expand 10 before | Expand all | Expand 10 after
10485 // Call the function from C++. 10596 // Call the function from C++.
10486 return FUNCTION_CAST<ModuloFunction>(buffer); 10597 return FUNCTION_CAST<ModuloFunction>(buffer);
10487 } 10598 }
10488 10599
10489 #endif 10600 #endif
10490 10601
10491 10602
10492 #undef __ 10603 #undef __
10493 10604
10494 } } // namespace v8::internal 10605 } } // namespace v8::internal
OLDNEW
« no previous file with comments | « src/x64/codegen-x64.h ('k') | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698