Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(572)

Side by Side Diff: src/ia32/code-stubs-ia32.cc

Issue 18612005: Implement truncated d-to-i with a stub on x86 (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Add support for all register combinations Created 7 years, 5 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 601 matching lines...) Expand 10 before | Expand all | Expand 10 after
612 // from |scratch|, it will contain that int32 value. 612 // from |scratch|, it will contain that int32 value.
613 static void CheckSSE2OperandIsInt32(MacroAssembler* masm, 613 static void CheckSSE2OperandIsInt32(MacroAssembler* masm,
614 Label* non_int32, 614 Label* non_int32,
615 XMMRegister operand, 615 XMMRegister operand,
616 Register int32_result, 616 Register int32_result,
617 Register scratch, 617 Register scratch,
618 XMMRegister xmm_scratch); 618 XMMRegister xmm_scratch);
619 }; 619 };
620 620
621 621
622 // Get the integer part of a heap number. Surprisingly, all this bit twiddling 622 void DoubleToIStub::Generate(MacroAssembler* masm) {
623 // is faster than using the built-in instructions on floating point registers. 623 Register input_reg = this->source();
624 // Trashes edi and ebx. Dest is ecx. Source cannot be ecx or one of the 624 Register final_result_reg = this->destination();
625 // trashed registers. 625 ASSERT(is_truncating());
626 static void IntegerConvert(MacroAssembler* masm, 626
627 Register source, 627 Label check_negative, process_64_bits, done, done_no_stash;
628 bool use_sse3, 628
629 Label* conversion_failure) { 629 int double_offset = offset();
630 ASSERT(!source.is(ecx) && !source.is(edi) && !source.is(ebx)); 630
631 Label done, right_exponent, normal_exponent; 631 // Account for return address and saved regs if input is esp.
632 Register scratch = ebx; 632 if (input_reg.is(esp)) double_offset += 3 * kPointerSize;
633 Register scratch2 = edi; 633
634 // Get exponent word. 634 MemOperand mantissa_operand(MemOperand(input_reg, double_offset));
635 __ mov(scratch, FieldOperand(source, HeapNumber::kExponentOffset)); 635 MemOperand exponent_operand(MemOperand(input_reg,
636 // Get exponent alone in scratch2. 636 double_offset + kPointerSize));
637 __ mov(scratch2, scratch); 637
638 __ and_(scratch2, HeapNumber::kExponentMask); 638 Register scratch1 = ebx;
639 __ shr(scratch2, HeapNumber::kExponentShift); 639 if (final_result_reg.is(ebx)) {
640 __ sub(scratch2, Immediate(HeapNumber::kExponentBias)); 640 scratch1 = input_reg.is(edx) ? edi : edx;
641 // Load ecx with zero. We use this either for the final shift or 641 } else if (final_result_reg.is(edx)) {
642 // for the answer. 642 scratch1 = input_reg.is(ebx) ? edi : ebx;
643 __ xor_(ecx, ecx); 643 } else if (input_reg.is(ebx)) {
644 // If the exponent is above 83, the number contains no significant 644 scratch1 = edx;
645 // bits in the range 0..2^31, so the result is zero. 645 }
Yang 2013/07/11 12:00:27 Seems easy to introduce bugs in here. How about s
danno 2013/07/11 16:00:23 Done.
646 static const uint32_t kResultIsZeroExponent = 83; 646 Register result_reg = final_result_reg.is(ecx) ? eax : final_result_reg;
647 __ cmp(scratch2, Immediate(kResultIsZeroExponent)); 647 Register save_reg = final_result_reg.is(ecx) ? eax : ecx;
Yang 2013/07/11 12:00:27 why do we need to spill eax if the result is expected
danno 2013/07/11 16:00:23 No, it's because we use eax above to calculate the
648 __ push(scratch1);
649 __ push(save_reg);
650
651 bool stash_exponent_copy = !input_reg.is(esp);
652 if (!input_reg.is(ecx)) {
653 __ mov(ecx, exponent_operand);
654 if (stash_exponent_copy) __ push(ecx);
655 }
656 __ mov(scratch1, mantissa_operand);
657 if (CpuFeatures::IsSupported(SSE3)) {
658 CpuFeatureScope scope(masm, SSE3);
659 // Load x87 register with heap number.
660 __ fld_d(mantissa_operand);
661 }
662 if (input_reg.is(ecx)) {
663 __ mov(ecx, exponent_operand);
664 if (stash_exponent_copy) __ push(ecx);
665 }
Yang 2013/07/11 12:00:27 At this point, we have overwritten ecx regardless
danno 2013/07/11 16:00:23 Done.
666 __ shr(ecx, HeapNumber::kExponentShift);
667 __ and_(ecx,
668 Immediate(HeapNumber::kExponentMask >> HeapNumber::kExponentShift));
Yang 2013/07/11 12:00:27 Looks like we could first mask and then shift, lik
danno 2013/07/11 16:00:23 Done.
669 __ lea(result_reg, MemOperand(ecx, -HeapNumber::kExponentBias));
Yang 2013/07/11 12:00:27 As far as I can see the value in result_reg is not
danno 2013/07/11 16:00:23 This is actually a different test. The cmp and below
670 __ cmp(result_reg, Immediate(HeapNumber::kMantissaBits));
671 __ j(below, &process_64_bits);
672
673 // Result is entirely in lower 32-bits of mantissa
674 int delta = HeapNumber::kExponentBias + Double::kPhysicalSignificandSize;
675 if (CpuFeatures::IsSupported(SSE3)) {
676 __ fstp(0);
677 }
678 __ sub(ecx, Immediate(delta));
679 __ mov(result_reg, Immediate(0));
Yang 2013/07/11 12:00:27 Use Set instead of mov to use xor, which is shorter.
danno 2013/07/11 16:00:23 Done.
680 __ cmp(ecx, Immediate(31));
648 __ j(above, &done); 681 __ j(above, &done);
649 if (use_sse3) { 682 __ shl_cl(scratch1);
683 __ jmp(&check_negative);
684
685 __ bind(&process_64_bits);
686 if (CpuFeatures::IsSupported(SSE3)) {
650 CpuFeatureScope scope(masm, SSE3); 687 CpuFeatureScope scope(masm, SSE3);
651 // Check whether the exponent is too big for a 64 bit signed integer. 688 if (stash_exponent_copy) {
652 static const uint32_t kTooBigExponent = 63; 689 // Already a copy of the mantissa on the stack, overwrite it.
Yang 2013/07/11 12:00:27 Comment seems wrong. We pushed a copy of the exponent.
danno 2013/07/11 16:00:23 Done.
653 __ cmp(scratch2, Immediate(kTooBigExponent)); 690 __ sub(esp, Immediate(kDoubleSize / 2));
654 __ j(greater_equal, conversion_failure); 691 } else {
655 // Load x87 register with heap number. 692 // Reserve space for 64 bit answer.
656 __ fld_d(FieldOperand(source, HeapNumber::kValueOffset)); 693 __ sub(esp, Immediate(kDoubleSize)); // Nolint.
657 // Reserve space for 64 bit answer. 694 }
658 __ sub(esp, Immediate(sizeof(uint64_t))); // Nolint.
659 // Do conversion, which cannot fail because we checked the exponent. 695 // Do conversion, which cannot fail because we checked the exponent.
660 __ fisttp_d(Operand(esp, 0)); 696 __ fisttp_d(Operand(esp, 0));
661 __ mov(ecx, Operand(esp, 0)); // Load low word of answer into ecx. 697 __ mov(result_reg, Operand(esp, 0)); // Load low word of answer as result
662 __ add(esp, Immediate(sizeof(uint64_t))); // Nolint. 698 __ add(esp, Immediate(sizeof(uint64_t))); // Nolint.
Yang 2013/07/11 12:00:27 We should use kDoubleSize for consistency. Also, d
danno 2013/07/11 16:00:23 Done.
699 __ jmp(&done_no_stash);
663 } else { 700 } else {
664 // Check whether the exponent matches a 32 bit signed int that cannot be 701 // Result must be extracted from shifted 32-bit mantissa
665 // represented by a Smi. A non-smi 32 bit integer is 1.xxx * 2^30 so the 702 __ mov(result_reg, Immediate(delta));
666 // exponent is 30 (biased). This is the exponent that we are fastest at and 703 __ sub(result_reg, ecx);
667 // also the highest exponent we can handle here. 704 __ mov(ecx, result_reg);
Yang 2013/07/11 12:00:27 sub(ecx, Immediate(delta)); neg(ecx); should do t
danno 2013/07/11 16:00:23 Done.
668 const uint32_t non_smi_exponent = 30; 705 if (stash_exponent_copy) {
669 __ cmp(scratch2, Immediate(non_smi_exponent)); 706 __ mov(result_reg, MemOperand(esp, 0));
670 // If we have a match of the int32-but-not-Smi exponent then skip some 707 } else {
671 // logic. 708 __ mov(result_reg, exponent_operand);
672 __ j(equal, &right_exponent, Label::kNear); 709 }
673 // If the exponent is higher than that then go to slow case. This catches 710 __ and_(result_reg,
674 // numbers that don't fit in a signed int32, infinities and NaNs. 711 Immediate(static_cast<uint32_t>(Double::kSignificandMask >> 32)));
675 __ j(less, &normal_exponent, Label::kNear); 712 __ add(result_reg,
713 Immediate(static_cast<uint32_t>(Double::kHiddenBit >> 32)));
714 __ shrd(result_reg, scratch1);
715 __ shr_cl(result_reg);
716 __ test(ecx, Immediate(32));
717 if (CpuFeatures::IsSupported(CMOV)) {
718 CpuFeatureScope use_cmov(masm, CMOV);
719 __ cmov(not_equal, scratch1, result_reg);
720 } else {
721 Label skip_mov;
722 __ j(equal, &skip_mov);
Yang 2013/07/11 12:00:27 mark as near jump.
danno 2013/07/11 16:00:23 Done.
723 __ mov(scratch1, result_reg);
724 __ bind(&skip_mov);
725 }
726 }
676 727
677 { 728 // If the double was negative, negate the integer result.
678 // Handle a big exponent. The only reason we have this code is that the 729 __ bind(&check_negative);
679 // >>> operator has a tendency to generate numbers with an exponent of 31. 730 __ mov(result_reg, scratch1);
680 const uint32_t big_non_smi_exponent = 31; 731 __ neg(result_reg);
681 __ cmp(scratch2, Immediate(big_non_smi_exponent)); 732 if (stash_exponent_copy) {
682 __ j(not_equal, conversion_failure); 733 __ cmp(MemOperand(esp, 0), Immediate(0));
683 // We have the big exponent, typically from >>>. This means the number is 734 } else {
684 // in the range 2^31 to 2^32 - 1. Get the top bits of the mantissa. 735 __ cmp(exponent_operand, Immediate(0));
685 __ mov(scratch2, scratch); 736 }
686 __ and_(scratch2, HeapNumber::kMantissaMask); 737 if (CpuFeatures::IsSupported(CMOV)) {
687 // Put back the implicit 1. 738 CpuFeatureScope use_cmov(masm, CMOV);
688 __ or_(scratch2, 1 << HeapNumber::kExponentShift); 739 __ cmov(greater, result_reg, scratch1);
689 // Shift up the mantissa bits to take up the space the exponent used to 740 } else {
690 // take. We just orred in the implicit bit so that took care of one and 741 Label skip_mov;
691 // we want to use the full unsigned range so we subtract 1 bit from the 742 __ j(less_equal, &skip_mov);
Yang 2013/07/11 12:00:27 mark as near jump
danno 2013/07/11 16:00:23 Done.
692 // shift distance. 743 __ mov(result_reg, scratch1);
693 const int big_shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 1; 744 __ bind(&skip_mov);
694 __ shl(scratch2, big_shift_distance); 745 }
695 // Get the second half of the double.
696 __ mov(ecx, FieldOperand(source, HeapNumber::kMantissaOffset));
697 // Shift down 21 bits to get the most significant 11 bits or the low
698 // mantissa word.
699 __ shr(ecx, 32 - big_shift_distance);
700 __ or_(ecx, scratch2);
701 // We have the answer in ecx, but we may need to negate it.
702 __ test(scratch, scratch);
703 __ j(positive, &done, Label::kNear);
704 __ neg(ecx);
705 __ jmp(&done, Label::kNear);
706 }
707 746
708 __ bind(&normal_exponent); 747 // Restore registers
709 // Exponent word in scratch, exponent in scratch2. Zero in ecx. 748 __ bind(&done);
710 // We know that 0 <= exponent < 30. 749 if (stash_exponent_copy) {
711 __ mov(ecx, Immediate(30)); 750 __ add(esp, Immediate(kDoubleSize / 2));
712 __ sub(ecx, scratch2);
713
714 __ bind(&right_exponent);
715 // Here ecx is the shift, scratch is the exponent word.
716 // Get the top bits of the mantissa.
717 __ and_(scratch, HeapNumber::kMantissaMask);
718 // Put back the implicit 1.
719 __ or_(scratch, 1 << HeapNumber::kExponentShift);
720 // Shift up the mantissa bits to take up the space the exponent used to
721 // take. We have kExponentShift + 1 significant bits int he low end of the
722 // word. Shift them to the top bits.
723 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
724 __ shl(scratch, shift_distance);
725 // Get the second half of the double. For some exponents we don't
726 // actually need this because the bits get shifted out again, but
727 // it's probably slower to test than just to do it.
728 __ mov(scratch2, FieldOperand(source, HeapNumber::kMantissaOffset));
729 // Shift down 22 bits to get the most significant 10 bits or the low
730 // mantissa word.
731 __ shr(scratch2, 32 - shift_distance);
732 __ or_(scratch2, scratch);
733 // Move down according to the exponent.
734 __ shr_cl(scratch2);
735 // Now the unsigned answer is in scratch2. We need to move it to ecx and
736 // we may need to fix the sign.
737 Label negative;
738 __ xor_(ecx, ecx);
739 __ cmp(ecx, FieldOperand(source, HeapNumber::kExponentOffset));
740 __ j(greater, &negative, Label::kNear);
741 __ mov(ecx, scratch2);
742 __ jmp(&done, Label::kNear);
743 __ bind(&negative);
744 __ sub(ecx, scratch2);
745 } 751 }
746 __ bind(&done); 752 __ bind(&done_no_stash);
753 if (final_result_reg.is(ecx)) __ mov(ecx, save_reg);
Yang 2013/07/11 12:00:27 I think something like if (!final_result_reg.is(r
danno 2013/07/11 16:00:23 Done.
754 __ pop(save_reg);
755 __ pop(scratch1);
756 __ ret(0);
747 } 757 }
748 758
749 759
750 // Uses SSE2 to convert the heap number in |source| to an integer. Jumps to 760 // Uses SSE2 to convert the heap number in |source| to an integer. Jumps to
751 // |conversion_failure| if the heap number did not contain an int32 value. 761 // |conversion_failure| if the heap number did not contain an int32 value.
752 // Result is in ecx. Trashes ebx, xmm0, and xmm1. 762 // Result is in ecx. Trashes ebx, xmm0, and xmm1.
753 static void ConvertHeapNumberToInt32(MacroAssembler* masm, 763 static void ConvertHeapNumberToInt32(MacroAssembler* masm,
754 Register source, 764 Register source,
755 Label* conversion_failure) { 765 Label* conversion_failure) {
756 __ movdbl(xmm0, FieldOperand(source, HeapNumber::kValueOffset)); 766 __ movdbl(xmm0, FieldOperand(source, HeapNumber::kValueOffset));
(...skipping 206 matching lines...) Expand 10 before | Expand all | Expand 10 after
963 } 973 }
964 974
965 975
966 void UnaryOpStub::GenerateHeapNumberCodeBitNot(MacroAssembler* masm, 976 void UnaryOpStub::GenerateHeapNumberCodeBitNot(MacroAssembler* masm,
967 Label* slow) { 977 Label* slow) {
968 __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset)); 978 __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
969 __ cmp(edx, masm->isolate()->factory()->heap_number_map()); 979 __ cmp(edx, masm->isolate()->factory()->heap_number_map());
970 __ j(not_equal, slow); 980 __ j(not_equal, slow);
971 981
972 // Convert the heap number in eax to an untagged integer in ecx. 982 // Convert the heap number in eax to an untagged integer in ecx.
973 IntegerConvert(masm, eax, CpuFeatures::IsSupported(SSE3), slow); 983 DoubleToIStub stub(eax, ecx, HeapNumber::kValueOffset - kSmiTagSize, true);
Yang 2013/07/11 12:00:27 I think you want to use (HeapNumber::kValueOffset
danno 2013/07/11 16:00:23 Done.
984 __ call(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
974 985
975 // Do the bitwise operation and check if the result fits in a smi. 986 // Do the bitwise operation and check if the result fits in a smi.
976 Label try_float; 987 Label try_float;
977 __ not_(ecx); 988 __ not_(ecx);
978 __ cmp(ecx, 0xc0000000); 989 __ cmp(ecx, 0xc0000000);
979 __ j(sign, &try_float, Label::kNear); 990 __ j(sign, &try_float, Label::kNear);
980 991
981 // Tag the result as a smi and we're done. 992 // Tag the result as a smi and we're done.
982 STATIC_ASSERT(kSmiTagSize == 1); 993 STATIC_ASSERT(kSmiTagSize == 1);
983 __ lea(eax, Operand(ecx, times_2, kSmiTag)); 994 __ lea(eax, Operand(ecx, times_2, kSmiTag));
(...skipping 11 matching lines...) Expand all
995 { 1006 {
996 FrameScope scope(masm, StackFrame::INTERNAL); 1007 FrameScope scope(masm, StackFrame::INTERNAL);
997 // Push the original HeapNumber on the stack. The integer value can't 1008 // Push the original HeapNumber on the stack. The integer value can't
998 // be stored since it's untagged and not in the smi range (so we can't 1009 // be stored since it's untagged and not in the smi range (so we can't
999 // smi-tag it). We'll recalculate the value after the GC instead. 1010 // smi-tag it). We'll recalculate the value after the GC instead.
1000 __ push(ebx); 1011 __ push(ebx);
1001 __ CallRuntime(Runtime::kNumberAlloc, 0); 1012 __ CallRuntime(Runtime::kNumberAlloc, 0);
1002 // New HeapNumber is in eax. 1013 // New HeapNumber is in eax.
1003 __ pop(edx); 1014 __ pop(edx);
1004 } 1015 }
1005 // IntegerConvert uses ebx and edi as scratch registers. 1016 DoubleToIStub stub(edx, ecx, HeapNumber::kValueOffset - kSmiTagSize, true);
Yang 2013/07/11 12:00:27 Ditto.
danno 2013/07/11 16:00:23 Done.
1006 // This conversion won't go slow-case. 1017 __ call(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
1007 IntegerConvert(masm, edx, CpuFeatures::IsSupported(SSE3), slow); 1018
1008 __ not_(ecx); 1019 __ not_(ecx);
1009 1020
1010 __ bind(&heapnumber_allocated); 1021 __ bind(&heapnumber_allocated);
1011 } 1022 }
1012 if (CpuFeatures::IsSupported(SSE2)) { 1023 if (CpuFeatures::IsSupported(SSE2)) {
1013 CpuFeatureScope use_sse2(masm, SSE2); 1024 CpuFeatureScope use_sse2(masm, SSE2);
1014 __ cvtsi2sd(xmm0, ecx); 1025 __ cvtsi2sd(xmm0, ecx);
1015 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); 1026 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
1016 } else { 1027 } else {
1017 __ push(ecx); 1028 __ push(ecx);
(...skipping 1658 matching lines...) Expand 10 before | Expand all | Expand 10 after
2676 __ bind(&arg1_is_object); 2687 __ bind(&arg1_is_object);
2677 __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset)); 2688 __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
2678 __ cmp(ebx, factory->heap_number_map()); 2689 __ cmp(ebx, factory->heap_number_map());
2679 __ j(not_equal, &check_undefined_arg1); 2690 __ j(not_equal, &check_undefined_arg1);
2680 2691
2681 // Get the untagged integer version of the edx heap number in ecx. 2692 // Get the untagged integer version of the edx heap number in ecx.
2682 if (left_type == BinaryOpIC::INT32 && CpuFeatures::IsSupported(SSE2)) { 2693 if (left_type == BinaryOpIC::INT32 && CpuFeatures::IsSupported(SSE2)) {
2683 CpuFeatureScope use_sse2(masm, SSE2); 2694 CpuFeatureScope use_sse2(masm, SSE2);
2684 ConvertHeapNumberToInt32(masm, edx, conversion_failure); 2695 ConvertHeapNumberToInt32(masm, edx, conversion_failure);
2685 } else { 2696 } else {
2686 IntegerConvert(masm, edx, use_sse3, conversion_failure); 2697 DoubleToIStub stub(edx, ecx, HeapNumber::kValueOffset - kSmiTagSize, true);
Yang 2013/07/11 12:00:27 Ditto.
danno 2013/07/11 16:00:23 Done.
2698 __ call(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
2687 } 2699 }
2688 __ mov(edx, ecx); 2700 __ mov(edx, ecx);
2689 2701
2690 // Here edx has the untagged integer, eax has a Smi or a heap number. 2702 // Here edx has the untagged integer, eax has a Smi or a heap number.
2691 __ bind(&load_arg2); 2703 __ bind(&load_arg2);
2692 2704
2693 // Test if arg2 is a Smi. 2705 // Test if arg2 is a Smi.
2694 if (right_type == BinaryOpIC::SMI) { 2706 if (right_type == BinaryOpIC::SMI) {
2695 __ JumpIfNotSmi(eax, conversion_failure); 2707 __ JumpIfNotSmi(eax, conversion_failure);
2696 } else { 2708 } else {
(...skipping 14 matching lines...) Expand all
2711 __ bind(&arg2_is_object); 2723 __ bind(&arg2_is_object);
2712 __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset)); 2724 __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
2713 __ cmp(ebx, factory->heap_number_map()); 2725 __ cmp(ebx, factory->heap_number_map());
2714 __ j(not_equal, &check_undefined_arg2); 2726 __ j(not_equal, &check_undefined_arg2);
2715 // Get the untagged integer version of the eax heap number in ecx. 2727 // Get the untagged integer version of the eax heap number in ecx.
2716 2728
2717 if (right_type == BinaryOpIC::INT32 && CpuFeatures::IsSupported(SSE2)) { 2729 if (right_type == BinaryOpIC::INT32 && CpuFeatures::IsSupported(SSE2)) {
2718 CpuFeatureScope use_sse2(masm, SSE2); 2730 CpuFeatureScope use_sse2(masm, SSE2);
2719 ConvertHeapNumberToInt32(masm, eax, conversion_failure); 2731 ConvertHeapNumberToInt32(masm, eax, conversion_failure);
2720 } else { 2732 } else {
2721 IntegerConvert(masm, eax, use_sse3, conversion_failure); 2733 DoubleToIStub stub(eax, ecx, HeapNumber::kValueOffset - kSmiTagSize, true);
Yang 2013/07/11 12:00:27 Ditto.
danno 2013/07/11 16:00:23 Done.
2734 __ call(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
2722 } 2735 }
2723 2736
2724 __ bind(&done); 2737 __ bind(&done);
2725 __ mov(eax, edx); 2738 __ mov(eax, edx);
2726 } 2739 }
2727 2740
2728 2741
2729 void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm, 2742 void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
2730 Register number) { 2743 Register number) {
2731 Label load_smi, done; 2744 Label load_smi, done;
(...skipping 5287 matching lines...) Expand 10 before | Expand all | Expand 10 after
8019 __ bind(&fast_elements_case); 8032 __ bind(&fast_elements_case);
8020 GenerateCase(masm, FAST_ELEMENTS); 8033 GenerateCase(masm, FAST_ELEMENTS);
8021 } 8034 }
8022 8035
8023 8036
8024 #undef __ 8037 #undef __
8025 8038
8026 } } // namespace v8::internal 8039 } } // namespace v8::internal
8027 8040
8028 #endif // V8_TARGET_ARCH_IA32 8041 #endif // V8_TARGET_ARCH_IA32
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698