OLD | NEW |
1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 589 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
600 Register second, | 600 Register second, |
601 Register scratch1, | 601 Register scratch1, |
602 Register scratch2, | 602 Register scratch2, |
603 Register scratch3, | 603 Register scratch3, |
604 Label* on_success, | 604 Label* on_success, |
605 Label* on_not_smis, | 605 Label* on_not_smis, |
606 ConvertUndefined convert_undefined); | 606 ConvertUndefined convert_undefined); |
607 }; | 607 }; |
608 | 608 |
609 | 609 |
610 // Get the integer part of a heap number. | 610 void DoubleToIStub::Generate(MacroAssembler* masm) { |
611 // Overwrites the contents of rdi, rbx and rcx. Result cannot be rdi or rbx. | 611 Register input_reg = this->source(); |
612 void IntegerConvert(MacroAssembler* masm, | 612 Register final_result_reg = this->destination(); |
613 Register result, | 613 ASSERT(is_truncating()); |
614 Register source) { | |
615 // Result may be rcx. If result and source are the same register, source will | |
616 // be overwritten. | |
617 ASSERT(!result.is(rdi) && !result.is(rbx)); | |
618 // TODO(lrn): When type info reaches here, if value is a 32-bit integer, use | |
619 // cvttsd2si (32-bit version) directly. | |
620 Register double_exponent = rbx; | |
621 Register double_value = rdi; | |
622 Label done, exponent_63_plus; | |
623 // Get double and extract exponent. | |
624 __ movq(double_value, FieldOperand(source, HeapNumber::kValueOffset)); | |
625 // Clear result preemptively, in case we need to return zero. | |
626 __ xorl(result, result); | |
627 __ movq(xmm0, double_value); // Save copy in xmm0 in case we need it there. | |
628 // Double to remove sign bit, shift exponent down to least significant bits. | |
629 // and subtract bias to get the unshifted, unbiased exponent. | |
630 __ lea(double_exponent, Operand(double_value, double_value, times_1, 0)); | |
631 __ shr(double_exponent, Immediate(64 - HeapNumber::kExponentBits)); | |
632 __ subl(double_exponent, Immediate(HeapNumber::kExponentBias)); | |
633 // Check whether the exponent is too big for a 63 bit unsigned integer. | |
634 __ cmpl(double_exponent, Immediate(63)); | |
635 __ j(above_equal, &exponent_63_plus, Label::kNear); | |
636 // Handle exponent range 0..62. | |
637 __ cvttsd2siq(result, xmm0); | |
638 __ jmp(&done, Label::kNear); | |
639 | 614 |
640 __ bind(&exponent_63_plus); | 615 Label check_negative, process_64_bits, done; |
641 // Exponent negative or 63+. | |
642 __ cmpl(double_exponent, Immediate(83)); | |
643 // If exponent negative or above 83, number contains no significant bits in | |
644 // the range 0..2^31, so result is zero, and rcx already holds zero. | |
645 __ j(above, &done, Label::kNear); | |
646 | 616 |
647 // Exponent in rage 63..83. | 617 int double_offset = offset(); |
648 // Mantissa * 2^exponent contains bits in the range 2^0..2^31, namely | |
649 // the least significant exponent-52 bits. | |
650 | 618 |
651 // Negate low bits of mantissa if value is negative. | 619 // Account for return address and saved regs if input is rsp. |
652 __ addq(double_value, double_value); // Move sign bit to carry. | 620 if (input_reg.is(rsp)) double_offset += 3 * kPointerSize; |
653 __ sbbl(result, result); // And convert carry to -1 in result register. | |
654 // if scratch2 is negative, do (scratch2-1)^-1, otherwise (scratch2-0)^0. | |
655 __ addl(double_value, result); | |
656 // Do xor in opposite directions depending on where we want the result | |
657 // (depending on whether result is rcx or not). | |
658 | 621 |
659 if (result.is(rcx)) { | 622 MemOperand mantissa_operand(MemOperand(input_reg, double_offset)); |
660 __ xorl(double_value, result); | 623 MemOperand exponent_operand(MemOperand(input_reg, |
661 // Left shift mantissa by (exponent - mantissabits - 1) to save the | 624 double_offset + kDoubleSize / 2)); |
662 // bits that have positional values below 2^32 (the extra -1 comes from the | |
663 // doubling done above to move the sign bit into the carry flag). | |
664 __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1)); | |
665 __ shll_cl(double_value); | |
666 __ movl(result, double_value); | |
667 } else { | |
668 // As the then-branch, but move double-value to result before shifting. | |
669 __ xorl(result, double_value); | |
670 __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1)); | |
671 __ shll_cl(result); | |
672 } | |
673 | 625 |
674 __ bind(&done); | 626 Register scratch1; |
| 627 Register scratch_candidates[3] = { rbx, rdx, rdi }; |
| 628 for (int i = 0; i < 3; i++) { |
| 629 scratch1 = scratch_candidates[i]; |
| 630 if (!final_result_reg.is(scratch1) && !input_reg.is(scratch1)) break; |
| 631 } |
| 632 |
| 633 // Since we must use rcx for shifts below, use some other register (rax) |
| 634 // to calculate the result if rcx is the requested return register. |
| 635 Register result_reg = final_result_reg.is(rcx) ? rax : final_result_reg; |
| 636 // Save rcx if it isn't the return register and therefore volatile, or if it |
| 637 // is the return register, then save the temp register we use in its stead |
| 638 // for the result. |
| 639 Register save_reg = final_result_reg.is(rcx) ? rax : rcx; |
| 640 __ push(scratch1); |
| 641 __ push(save_reg); |
| 642 |
| 643 bool stash_exponent_copy = !input_reg.is(rsp); |
| 644 __ movl(scratch1, mantissa_operand); |
| 645 __ movsd(xmm0, mantissa_operand); |
| 646 __ movl(rcx, exponent_operand); |
| 647 if (stash_exponent_copy) __ push(rcx); |
| 648 |
| 649 __ andl(rcx, Immediate(HeapNumber::kExponentMask)); |
| 650 __ shrl(rcx, Immediate(HeapNumber::kExponentShift)); |
| 651 __ leal(result_reg, MemOperand(rcx, -HeapNumber::kExponentBias)); |
| 652 __ cmpl(result_reg, Immediate(HeapNumber::kMantissaBits)); |
| 653 __ j(below, &process_64_bits); |
| 654 |
| 655 // Result is entirely in lower 32-bits of mantissa |
| 656 int delta = HeapNumber::kExponentBias + Double::kPhysicalSignificandSize; |
| 657 __ subl(rcx, Immediate(delta)); |
| 658 __ xorl(result_reg, result_reg); |
| 659 __ cmpl(rcx, Immediate(31)); |
| 660 __ j(above, &done); |
| 661 __ shll_cl(scratch1); |
| 662 __ jmp(&check_negative); |
| 663 |
| 664 __ bind(&process_64_bits); |
| 665 __ cvttsd2siq(result_reg, xmm0); |
| 666 __ jmp(&done, Label::kNear); |
| 667 |
| 668 // If the double was negative, negate the integer result. |
| 669 __ bind(&check_negative); |
| 670 __ movl(result_reg, scratch1); |
| 671 __ negl(result_reg); |
| 672 if (stash_exponent_copy) { |
| 673 __ cmpl(MemOperand(rsp, 0), Immediate(0)); |
| 674 } else { |
| 675 __ cmpl(exponent_operand, Immediate(0)); |
| 676 } |
| 677 __ cmovl(greater, result_reg, scratch1); |
| 678 |
| 679 // Restore registers |
| 680 __ bind(&done); |
| 681 if (stash_exponent_copy) { |
| 682 __ addq(rsp, Immediate(kDoubleSize)); |
| 683 } |
| 684 if (!final_result_reg.is(result_reg)) { |
| 685 ASSERT(final_result_reg.is(rcx)); |
| 686 __ movl(final_result_reg, result_reg); |
| 687 } |
| 688 __ pop(save_reg); |
| 689 __ pop(scratch1); |
| 690 __ ret(0); |
675 } | 691 } |
676 | 692 |
677 | 693 |
678 void BinaryOpStub::Initialize() {} | 694 void BinaryOpStub::Initialize() {} |
679 | 695 |
680 | 696 |
681 void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { | 697 void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { |
682 __ pop(rcx); // Save return address. | 698 __ pop(rcx); // Save return address. |
683 __ push(rdx); | 699 __ push(rdx); |
684 __ push(rax); | 700 __ push(rax); |
(...skipping 865 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1550 Label done; | 1566 Label done; |
1551 Label rax_is_smi; | 1567 Label rax_is_smi; |
1552 Label rax_is_object; | 1568 Label rax_is_object; |
1553 Label rdx_is_object; | 1569 Label rdx_is_object; |
1554 | 1570 |
1555 __ JumpIfNotSmi(rdx, &rdx_is_object); | 1571 __ JumpIfNotSmi(rdx, &rdx_is_object); |
1556 __ SmiToInteger32(rdx, rdx); | 1572 __ SmiToInteger32(rdx, rdx); |
1557 __ JumpIfSmi(rax, &rax_is_smi); | 1573 __ JumpIfSmi(rax, &rax_is_smi); |
1558 | 1574 |
1559 __ bind(&rax_is_object); | 1575 __ bind(&rax_is_object); |
1560 IntegerConvert(masm, rcx, rax); // Uses rdi, rcx and rbx. | 1576 DoubleToIStub stub1(rax, rcx, HeapNumber::kValueOffset - kHeapObjectTag, |
| 1577 true); |
| 1578 __ call(stub1.GetCode(masm->isolate()), RelocInfo::CODE_TARGET); |
| 1579 |
1561 __ jmp(&done); | 1580 __ jmp(&done); |
1562 | 1581 |
1563 __ bind(&rdx_is_object); | 1582 __ bind(&rdx_is_object); |
1564 IntegerConvert(masm, rdx, rdx); // Uses rdi, rcx and rbx. | 1583 DoubleToIStub stub2(rdx, rdx, HeapNumber::kValueOffset - kHeapObjectTag, |
| 1584 true); |
| 1585 __ call(stub2.GetCode(masm->isolate()), RelocInfo::CODE_TARGET); |
1565 __ JumpIfNotSmi(rax, &rax_is_object); | 1586 __ JumpIfNotSmi(rax, &rax_is_object); |
| 1587 |
1566 __ bind(&rax_is_smi); | 1588 __ bind(&rax_is_smi); |
1567 __ SmiToInteger32(rcx, rax); | 1589 __ SmiToInteger32(rcx, rax); |
1568 | 1590 |
1569 __ bind(&done); | 1591 __ bind(&done); |
1570 __ movl(rax, rdx); | 1592 __ movl(rax, rdx); |
1571 } | 1593 } |
1572 | 1594 |
1573 | 1595 |
1574 // Input: rdx, rax are the left and right objects of a bit op. | 1596 // Input: rdx, rax are the left and right objects of a bit op. |
1575 // Output: rax, rcx are left and right integers for a bit op. | 1597 // Output: rax, rcx are left and right integers for a bit op. |
(...skipping 14 matching lines...) Expand all Loading... |
1590 __ bind(&check_undefined_arg1); | 1612 __ bind(&check_undefined_arg1); |
1591 __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex); | 1613 __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex); |
1592 __ j(not_equal, conversion_failure); | 1614 __ j(not_equal, conversion_failure); |
1593 __ Set(r8, 0); | 1615 __ Set(r8, 0); |
1594 __ jmp(&load_arg2); | 1616 __ jmp(&load_arg2); |
1595 | 1617 |
1596 __ bind(&arg1_is_object); | 1618 __ bind(&arg1_is_object); |
1597 __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), heap_number_map); | 1619 __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), heap_number_map); |
1598 __ j(not_equal, &check_undefined_arg1); | 1620 __ j(not_equal, &check_undefined_arg1); |
1599 // Get the untagged integer version of the rdx heap number in rcx. | 1621 // Get the untagged integer version of the rdx heap number in rcx. |
1600 IntegerConvert(masm, r8, rdx); | 1622 DoubleToIStub stub1(rdx, r8, HeapNumber::kValueOffset - kHeapObjectTag, |
| 1623 true); |
| 1624 __ call(stub1.GetCode(masm->isolate()), RelocInfo::CODE_TARGET); |
1601 | 1625 |
1602 // Here r8 has the untagged integer, rax has a Smi or a heap number. | 1626 // Here r8 has the untagged integer, rax has a Smi or a heap number. |
1603 __ bind(&load_arg2); | 1627 __ bind(&load_arg2); |
1604 // Test if arg2 is a Smi. | 1628 // Test if arg2 is a Smi. |
1605 __ JumpIfNotSmi(rax, &arg2_is_object); | 1629 __ JumpIfNotSmi(rax, &arg2_is_object); |
1606 __ SmiToInteger32(rcx, rax); | 1630 __ SmiToInteger32(rcx, rax); |
1607 __ jmp(&done); | 1631 __ jmp(&done); |
1608 | 1632 |
1609 // If the argument is undefined it converts to zero (ECMA-262, section 9.5). | 1633 // If the argument is undefined it converts to zero (ECMA-262, section 9.5). |
1610 __ bind(&check_undefined_arg2); | 1634 __ bind(&check_undefined_arg2); |
1611 __ CompareRoot(rax, Heap::kUndefinedValueRootIndex); | 1635 __ CompareRoot(rax, Heap::kUndefinedValueRootIndex); |
1612 __ j(not_equal, conversion_failure); | 1636 __ j(not_equal, conversion_failure); |
1613 __ Set(rcx, 0); | 1637 __ Set(rcx, 0); |
1614 __ jmp(&done); | 1638 __ jmp(&done); |
1615 | 1639 |
1616 __ bind(&arg2_is_object); | 1640 __ bind(&arg2_is_object); |
1617 __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), heap_number_map); | 1641 __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), heap_number_map); |
1618 __ j(not_equal, &check_undefined_arg2); | 1642 __ j(not_equal, &check_undefined_arg2); |
1619 // Get the untagged integer version of the rax heap number in rcx. | 1643 // Get the untagged integer version of the rax heap number in rcx. |
1620 IntegerConvert(masm, rcx, rax); | 1644 DoubleToIStub stub2(rax, rcx, HeapNumber::kValueOffset - kHeapObjectTag, |
| 1645 true); |
| 1646 __ call(stub2.GetCode(masm->isolate()), RelocInfo::CODE_TARGET); |
| 1647 |
1621 __ bind(&done); | 1648 __ bind(&done); |
1622 __ movl(rax, r8); | 1649 __ movl(rax, r8); |
1623 } | 1650 } |
1624 | 1651 |
1625 | 1652 |
1626 void FloatingPointHelper::LoadSSE2SmiOperands(MacroAssembler* masm) { | 1653 void FloatingPointHelper::LoadSSE2SmiOperands(MacroAssembler* masm) { |
1627 __ SmiToInteger32(kScratchRegister, rdx); | 1654 __ SmiToInteger32(kScratchRegister, rdx); |
1628 __ cvtlsi2sd(xmm0, kScratchRegister); | 1655 __ cvtlsi2sd(xmm0, kScratchRegister); |
1629 __ SmiToInteger32(kScratchRegister, rax); | 1656 __ SmiToInteger32(kScratchRegister, rax); |
1630 __ cvtlsi2sd(xmm1, kScratchRegister); | 1657 __ cvtlsi2sd(xmm1, kScratchRegister); |
(...skipping 1698 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3329 | 3356 |
3330 // If either operand is a JSObject or an oddball value, then they are not | 3357 // If either operand is a JSObject or an oddball value, then they are not |
3331 // equal since their pointers are different | 3358 // equal since their pointers are different |
3332 // There is no test for undetectability in strict equality. | 3359 // There is no test for undetectability in strict equality. |
3333 | 3360 |
3334 // If the first object is a JS object, we have done pointer comparison. | 3361 // If the first object is a JS object, we have done pointer comparison. |
3335 STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE); | 3362 STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE); |
3336 Label first_non_object; | 3363 Label first_non_object; |
3337 __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx); | 3364 __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx); |
3338 __ j(below, &first_non_object, Label::kNear); | 3365 __ j(below, &first_non_object, Label::kNear); |
3339 // Return non-zero (eax (not rax) is not zero) | 3366 // Return non-zero (rax is not zero) |
3340 Label return_not_equal; | 3367 Label return_not_equal; |
3341 STATIC_ASSERT(kHeapObjectTag != 0); | 3368 STATIC_ASSERT(kHeapObjectTag != 0); |
3342 __ bind(&return_not_equal); | 3369 __ bind(&return_not_equal); |
3343 __ ret(0); | 3370 __ ret(0); |
3344 | 3371 |
3345 __ bind(&first_non_object); | 3372 __ bind(&first_non_object); |
3346 // Check for oddballs: true, false, null, undefined. | 3373 // Check for oddballs: true, false, null, undefined. |
3347 __ CmpInstanceType(rcx, ODDBALL_TYPE); | 3374 __ CmpInstanceType(rcx, ODDBALL_TYPE); |
3348 __ j(equal, &return_not_equal); | 3375 __ j(equal, &return_not_equal); |
3349 | 3376 |
(...skipping 41 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3391 | 3418 |
3392 // Fast negative check for internalized-to-internalized equality. | 3419 // Fast negative check for internalized-to-internalized equality. |
3393 Label check_for_strings; | 3420 Label check_for_strings; |
3394 if (cc == equal) { | 3421 if (cc == equal) { |
3395 BranchIfNotInternalizedString( | 3422 BranchIfNotInternalizedString( |
3396 masm, &check_for_strings, rax, kScratchRegister); | 3423 masm, &check_for_strings, rax, kScratchRegister); |
3397 BranchIfNotInternalizedString( | 3424 BranchIfNotInternalizedString( |
3398 masm, &check_for_strings, rdx, kScratchRegister); | 3425 masm, &check_for_strings, rdx, kScratchRegister); |
3399 | 3426 |
3400 // We've already checked for object identity, so if both operands are | 3427 // We've already checked for object identity, so if both operands are |
3401 // internalized strings they aren't equal. Register eax (not rax) already | 3428 // internalized strings they aren't equal. Register rax already |
3402 // holds a non-zero value, which indicates not equal, so just return. | 3429 // holds a non-zero value, which indicates not equal, so just return. |
3403 __ ret(0); | 3430 __ ret(0); |
3404 } | 3431 } |
3405 | 3432 |
3406 __ bind(&check_for_strings); | 3433 __ bind(&check_for_strings); |
3407 | 3434 |
3408 __ JumpIfNotBothSequentialAsciiStrings( | 3435 __ JumpIfNotBothSequentialAsciiStrings( |
3409 rdx, rax, rcx, rbx, &check_unequal_objects); | 3436 rdx, rax, rcx, rbx, &check_unequal_objects); |
3410 | 3437 |
3411 // Inline comparison of ASCII strings. | 3438 // Inline comparison of ASCII strings. |
(...skipping 3388 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
6800 __ bind(&fast_elements_case); | 6827 __ bind(&fast_elements_case); |
6801 GenerateCase(masm, FAST_ELEMENTS); | 6828 GenerateCase(masm, FAST_ELEMENTS); |
6802 } | 6829 } |
6803 | 6830 |
6804 | 6831 |
6805 #undef __ | 6832 #undef __ |
6806 | 6833 |
6807 } } // namespace v8::internal | 6834 } } // namespace v8::internal |
6808 | 6835 |
6809 #endif // V8_TARGET_ARCH_X64 | 6836 #endif // V8_TARGET_ARCH_X64 |
OLD | NEW |