Chromium Code Reviews

Side by Side Diff: src/ia32/code-stubs-ia32.cc

Issue 6879081: Added type recording for unary minus and unary bitwise negation. Note that the ... (Closed)
Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: Hopefully final version of this patch... Created 9 years, 8 months ago
1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
(...skipping 356 matching lines...)
367 static void LoadSSE2Smis(MacroAssembler* masm, Register scratch);
368
369 // Checks that the two floating point numbers loaded into xmm0 and xmm1
370 // have int32 values.
371 static void CheckSSE2OperandsAreInt32(MacroAssembler* masm,
372 Label* non_int32,
373 Register scratch);
374 };
375
376
377 // Get the integer part of a heap number. Surprisingly, all this bit twiddling
378 // is faster than using the built-in instructions on floating point registers.
379 // Trashes edi and ebx. Dest is ecx. Source cannot be ecx or one of the
380 // trashed registers.
381 static void IntegerConvert(MacroAssembler* masm,
382 Register source,
383 TypeInfo type_info,
384 bool use_sse3,
385 Label* conversion_failure) {
386 ASSERT(!source.is(ecx) && !source.is(edi) && !source.is(ebx));
387 Label done, right_exponent, normal_exponent;
388 Register scratch = ebx;
389 Register scratch2 = edi;
390 if (type_info.IsInteger32() && CpuFeatures::IsSupported(SSE2)) {
391 CpuFeatures::Scope scope(SSE2);
392 __ cvttsd2si(ecx, FieldOperand(source, HeapNumber::kValueOffset));
393 return;
394 }
395 if (!type_info.IsInteger32() || !use_sse3) {
396 // Get exponent word.
397 __ mov(scratch, FieldOperand(source, HeapNumber::kExponentOffset));
398 // Get exponent alone in scratch2.
399 __ mov(scratch2, scratch);
400 __ and_(scratch2, HeapNumber::kExponentMask);
401 }
402 if (use_sse3) {
403 CpuFeatures::Scope scope(SSE3);
404 if (!type_info.IsInteger32()) {
405 // Check whether the exponent is too big for a 64 bit signed integer.
406 static const uint32_t kTooBigExponent =
407 (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
408 __ cmp(Operand(scratch2), Immediate(kTooBigExponent));
409 __ j(greater_equal, conversion_failure);
410 }
411 // Load x87 register with heap number.
412 __ fld_d(FieldOperand(source, HeapNumber::kValueOffset));
413 // Reserve space for 64 bit answer.
414 __ sub(Operand(esp), Immediate(sizeof(uint64_t))); // Nolint.
415 // Do conversion, which cannot fail because we checked the exponent.
416 __ fisttp_d(Operand(esp, 0));
417 __ mov(ecx, Operand(esp, 0)); // Load low word of answer into ecx.
418 __ add(Operand(esp), Immediate(sizeof(uint64_t))); // Nolint.
419 } else {
420 // Load ecx with zero. We use this either for the final shift or
421 // for the answer.
422 __ xor_(ecx, Operand(ecx));
423 // Check whether the exponent matches a 32 bit signed int that cannot be
424 // represented by a Smi. A non-smi 32 bit integer is 1.xxx * 2^30 so the
425 // exponent is 30 (biased). This is the exponent that we are fastest at and
426 // also the highest exponent we can handle here.
427 const uint32_t non_smi_exponent =
428 (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
429 __ cmp(Operand(scratch2), Immediate(non_smi_exponent));
430 // If we have a match of the int32-but-not-Smi exponent then skip some
431 // logic.
432 __ j(equal, &right_exponent);
433 // If the exponent is higher than that then go to slow case. This catches
434 // numbers that don't fit in a signed int32, infinities and NaNs.
435 __ j(less, &normal_exponent);
436
437 {
438 // Handle a big exponent. The only reason we have this code is that the
439 // >>> operator has a tendency to generate numbers with an exponent of 31.
440 const uint32_t big_non_smi_exponent =
441 (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift;
442 __ cmp(Operand(scratch2), Immediate(big_non_smi_exponent));
443 __ j(not_equal, conversion_failure);
444 // We have the big exponent, typically from >>>. This means the number is
445 // in the range 2^31 to 2^32 - 1. Get the top bits of the mantissa.
446 __ mov(scratch2, scratch);
447 __ and_(scratch2, HeapNumber::kMantissaMask);
448 // Put back the implicit 1.
449 __ or_(scratch2, 1 << HeapNumber::kExponentShift);
450 // Shift up the mantissa bits to take up the space the exponent used to
451 // take. We just orred in the implicit bit so that took care of one and
452 // we want to use the full unsigned range so we subtract 1 bit from the
453 // shift distance.
454 const int big_shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 1;
455 __ shl(scratch2, big_shift_distance);
456 // Get the second half of the double.
457 __ mov(ecx, FieldOperand(source, HeapNumber::kMantissaOffset));
458 // Shift down 21 bits to get the most significant 11 bits or the low
459 // mantissa word.
460 __ shr(ecx, 32 - big_shift_distance);
461 __ or_(ecx, Operand(scratch2));
462 // We have the answer in ecx, but we may need to negate it.
463 __ test(scratch, Operand(scratch));
464 __ j(positive, &done);
465 __ neg(ecx);
466 __ jmp(&done);
467 }
468
469 __ bind(&normal_exponent);
470 // Exponent word in scratch, exponent part of exponent word in scratch2.
471 // Zero in ecx.
472 // We know the exponent is smaller than 30 (biased). If it is less than
473 // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
474 // it rounds to zero.
475 const uint32_t zero_exponent =
476 (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
477 __ sub(Operand(scratch2), Immediate(zero_exponent));
478 // ecx already has a Smi zero.
479 __ j(less, &done);
480
481 // We have a shifted exponent between 0 and 30 in scratch2.
482 __ shr(scratch2, HeapNumber::kExponentShift);
483 __ mov(ecx, Immediate(30));
484 __ sub(ecx, Operand(scratch2));
485
486 __ bind(&right_exponent);
487 // Here ecx is the shift, scratch is the exponent word.
488 // Get the top bits of the mantissa.
489 __ and_(scratch, HeapNumber::kMantissaMask);
490 // Put back the implicit 1.
491 __ or_(scratch, 1 << HeapNumber::kExponentShift);
492 // Shift up the mantissa bits to take up the space the exponent used to
493 // take. We have kExponentShift + 1 significant bits in the low end of the
494 // word. Shift them to the top bits.
495 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
496 __ shl(scratch, shift_distance);
497 // Get the second half of the double. For some exponents we don't
498 // actually need this because the bits get shifted out again, but
499 // it's probably slower to test than just to do it.
500 __ mov(scratch2, FieldOperand(source, HeapNumber::kMantissaOffset));
501 // Shift down 22 bits to get the most significant 10 bits or the low
502 // mantissa word.
503 __ shr(scratch2, 32 - shift_distance);
504 __ or_(scratch2, Operand(scratch));
505 // Move down according to the exponent.
506 __ shr_cl(scratch2);
507 // Now the unsigned answer is in scratch2. We need to move it to ecx and
508 // we may need to fix the sign.
509 NearLabel negative;
510 __ xor_(ecx, Operand(ecx));
511 __ cmp(ecx, FieldOperand(source, HeapNumber::kExponentOffset));
512 __ j(greater, &negative);
513 __ mov(ecx, scratch2);
514 __ jmp(&done);
515 __ bind(&negative);
516 __ sub(ecx, Operand(scratch2));
517 __ bind(&done);
518 }
519 }
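
For readers who prefer not to trace the assembler, a rough C++ sketch of the bit twiddling above (illustrative only, not part of this patch; the SSE2/SSE3 fast paths are not modeled, and the function name and failure convention are invented):

#include <cstdint>
#include <cstring>

// Extracts the integer part of a double by manipulating its raw IEEE-754
// words, mirroring the normal/big exponent paths of IntegerConvert.
static int32_t IntegerConvertSketch(double value, bool* ok) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof bits);                 // raw IEEE-754 layout
  uint32_t hi = static_cast<uint32_t>(bits >> 32);         // sign | exponent | top mantissa bits
  uint32_t lo = static_cast<uint32_t>(bits);               // low mantissa word
  int exponent = static_cast<int>((hi >> 20) & 0x7FF) - 1023;  // remove the exponent bias
  *ok = true;
  if (exponent < 0) return 0;                    // |value| < 1.0 rounds to zero
  if (exponent > 31) { *ok = false; return 0; }  // too big, infinity or NaN: conversion_failure
  // Reassemble the 53-bit mantissa with the implicit leading 1 restored.
  uint64_t mantissa = ((static_cast<uint64_t>(hi & 0xFFFFF) | (1ULL << 20)) << 32) | lo;
  // value == mantissa * 2^(exponent - 52), so shift out the fractional bits.
  uint32_t magnitude = static_cast<uint32_t>(mantissa >> (52 - exponent));
  uint32_t result = (hi & 0x80000000u) ? 0u - magnitude : magnitude;  // apply the sign
  return static_cast<int32_t>(result);           // the stub leaves this value in ecx
}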
520
521
522 Handle<Code> GetTypeRecordingUnaryOpStub(int key,
523 TRUnaryOpIC::TypeInfo type_info) {
524 TypeRecordingUnaryOpStub stub(key, type_info);
525 return stub.GetCode();
526 }
527
528
529 const char* TypeRecordingUnaryOpStub::GetName() {
530 if (name_ != NULL) return name_;
531 const int kMaxNameLength = 100;
532 name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
533 kMaxNameLength);
534 if (name_ == NULL) return "OOM";
535 const char* op_name = Token::Name(op_);
536 const char* overwrite_name;
537 switch (mode_) {
538 case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
539 case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
540 }
541
542 OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
543 "TypeRecordingUnaryOpStub_%s_%s_%s",
544 op_name,
545 overwrite_name,
546 TRUnaryOpIC::GetName(operand_type_));
547 return name_;
548 }
549
550
551 // TODO(svenpanne): Use virtual functions instead of switch.
552 void TypeRecordingUnaryOpStub::Generate(MacroAssembler* masm) {
553 switch (operand_type_) {
554 case TRUnaryOpIC::UNINITIALIZED:
555 GenerateTypeTransition(masm);
556 break;
557 case TRUnaryOpIC::SMI:
558 GenerateSmiStub(masm);
559 break;
560 case TRUnaryOpIC::HEAP_NUMBER:
561 GenerateHeapNumberStub(masm);
562 break;
563 case TRUnaryOpIC::GENERIC:
564 GenerateGenericStub(masm);
565 break;
566 }
567 }
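
The dispatch above, together with GenerateTypeTransition below, forms the type-recording feedback loop: when a stub meets an operand type it was not specialized for, it calls back into the runtime, which records the type actually seen and patches the call site with a broader stub. A minimal illustrative sketch of that widening idea (the exact lattice is an assumption; the real policy lives in TRUnaryOpIC, not in this file):

// Hypothetical sketch only; state names follow TRUnaryOpIC::TypeInfo.
enum OperandType { UNINITIALIZED, SMI, HEAP_NUMBER, GENERIC };

static OperandType Widen(OperandType recorded, OperandType seen) {
  if (recorded == UNINITIALIZED) return seen;
  if (recorded == seen) return recorded;
  if (recorded == SMI && seen == HEAP_NUMBER) return HEAP_NUMBER;  // heap-number stubs also handle smis
  return GENERIC;  // anything else falls back to the generic stub
}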
568
569
570 void TypeRecordingUnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
571 __ pop(ecx); // Save return address.
572 __ push(eax);
573 // The argument is now on top of the stack.
574 // Push this stub's key. Although the operation and the type info are
575 // encoded into the key, the encoding is opaque, so push them too.
576 __ push(Immediate(Smi::FromInt(MinorKey())));
577 __ push(Immediate(Smi::FromInt(op_)));
578 __ push(Immediate(Smi::FromInt(operand_type_)));
579
580 __ push(ecx); // Push return address.
581
582 // Patch the caller to an appropriate specialized stub and return the
583 // operation result to the caller of the stub.
584 __ TailCallExternalReference(
585 ExternalReference(IC_Utility(IC::kTypeRecordingUnaryOp_Patch),
586 masm->isolate()),
587 4,
588 1);
589 }
590
591
592 // TODO(svenpanne): Use virtual functions instead of switch.
593 void TypeRecordingUnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
594 switch (op_) {
595 case Token::SUB:
596 GenerateSmiStubSub(masm);
597 break;
598 case Token::BIT_NOT:
599 GenerateSmiStubBitNot(masm);
600 break;
601 default:
602 UNREACHABLE();
603 }
604 }
605
606
607 void TypeRecordingUnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
608 NearLabel non_smi;
609 Label undo, slow;
610 GenerateSmiCodeSub(masm, &non_smi, &undo, &slow);
611 __ bind(&undo);
612 GenerateSmiCodeUndo(masm);
613 __ bind(&non_smi);
614 __ bind(&slow);
615 GenerateTypeTransition(masm);
616 }
617
618
619 void TypeRecordingUnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
620 NearLabel non_smi;
621 GenerateSmiCodeBitNot(masm, &non_smi);
622 __ bind(&non_smi);
623 GenerateTypeTransition(masm);
624 }
625
626
627 void TypeRecordingUnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
628 NearLabel* non_smi,
629 Label* undo,
630 Label* slow) {
631 // Check whether the value is a smi.
632 __ test(eax, Immediate(kSmiTagMask));
633 __ j(not_zero, non_smi);
634
635 // We can't handle -0 with smis, so use a type transition for that case.
636 __ test(eax, Operand(eax));
637 __ j(zero, slow);
638
639 // Try optimistic subtraction '0 - value', saving operand in eax for undo.
640 __ mov(edx, Operand(eax));
641 __ Set(eax, Immediate(0));
642 __ sub(eax, Operand(edx));
643 __ j(overflow, undo);
644 __ ret(0);
645 }
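
GenerateSmiCodeSub relies on two facts about ia32 smis (31-bit payload, tag bit 0, value stored as value << 1): the tagged encoding of -0 would be 0, which must instead become a heap number, and negating the most negative smi overflows. A plain C++ restatement of the fast path (illustrative sketch, not stub code):

#include <cstdint>

// Returns true and writes the negated tagged smi, or returns false where the
// stub would take the slow (value 0) or undo (overflow) path.
static bool SmiNegate(int32_t tagged, int32_t* result) {
  if (tagged & 1) return false;           // not a smi: kSmiTagMask is the low bit
  if (tagged == 0) return false;          // -0 is not representable as a smi
  if (tagged == INT32_MIN) return false;  // 0 - tagged would overflow: undo path
  *result = 0 - tagged;                   // negating the tagged word negates the payload
  return true;
}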
646
647
648 void TypeRecordingUnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
649 NearLabel* non_smi) {
650 // Check whether the value is a smi.
651 __ test(eax, Immediate(kSmiTagMask));
652 __ j(not_zero, non_smi);
653
654 // Flip bits and revert inverted smi-tag.
655 __ not_(eax);
656 __ and_(eax, ~kSmiTagMask);
657 __ ret(0);
658 }
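
The "flip bits and revert inverted smi-tag" sequence works because complementing a tagged smi (value << 1, tag bit 0) sets the tag bit, and masking the tag bit off again leaves exactly the tagged encoding of the complemented payload. A self-checking C++ sketch (illustrative, not part of the patch):

#include <cassert>
#include <cstdint>

// ~(v * 2) == (~v * 2) | 1, so clearing the tag bit yields ~v, still tagged.
static int32_t SmiBitNot(int32_t tagged_smi) {
  return ~tagged_smi & ~1;  // not_(eax); and_(eax, ~kSmiTagMask);
}

int main() {
  for (int32_t v = -5; v <= 5; ++v) {
    assert(SmiBitNot(v * 2) == ~v * 2);
  }
  return 0;
}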
659
660
661 void TypeRecordingUnaryOpStub::GenerateSmiCodeUndo(MacroAssembler* masm) {
662 __ mov(eax, Operand(edx));
663 }
664
665
666 // TODO(svenpanne): Use virtual functions instead of switch.
667 void TypeRecordingUnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
668 switch (op_) {
669 case Token::SUB:
670 GenerateHeapNumberStubSub(masm);
671 break;
672 case Token::BIT_NOT:
673 GenerateHeapNumberStubBitNot(masm);
674 break;
675 default:
676 UNREACHABLE();
677 }
678 }
679
680
681 void TypeRecordingUnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
682 NearLabel non_smi;
683 Label undo, slow;
684 GenerateSmiCodeSub(masm, &non_smi, &undo, &slow);
685 __ bind(&non_smi);
686 GenerateHeapNumberCodeSub(masm, &slow);
687 __ bind(&undo);
688 GenerateSmiCodeUndo(masm);
689 __ bind(&slow);
690 GenerateTypeTransition(masm);
691 }
692
693
694 void TypeRecordingUnaryOpStub::GenerateHeapNumberStubBitNot(
695 MacroAssembler* masm) {
696 NearLabel non_smi;
697 Label slow;
698 GenerateSmiCodeBitNot(masm, &non_smi);
699 __ bind(&non_smi);
700 GenerateHeapNumberCodeBitNot(masm, &slow);
701 __ bind(&slow);
702 GenerateTypeTransition(masm);
703 }
704
705
706 void TypeRecordingUnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
707 Label* slow) {
708 __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
709 __ cmp(edx, masm->isolate()->factory()->heap_number_map());
710 __ j(not_equal, slow);
711
712 if (mode_ == UNARY_OVERWRITE) {
713 __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset));
714 __ xor_(edx, HeapNumber::kSignMask); // Flip sign.
715 __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), edx);
716 } else {
717 __ mov(edx, Operand(eax));
718 // edx: operand
719
fschneider 2011/04/26 13:32:52 Remove one extra line.
Sven Panne 2011/04/27 00:45:54 Done.
720
721 Label slow_allocate_heapnumber, heapnumber_allocated;
722 __ AllocateHeapNumber(eax, ebx, ecx, &slow_allocate_heapnumber);
723 __ jmp(&heapnumber_allocated);
724
725 __ bind(&slow_allocate_heapnumber);
726 __ EnterInternalFrame();
727 __ push(edx);
728 __ CallRuntime(Runtime::kNumberAlloc, 0);
729 __ pop(edx);
730 __ LeaveInternalFrame();
731
732 __ bind(&heapnumber_allocated);
733 // eax: allocated 'empty' number
734 __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
735 __ xor_(ecx, HeapNumber::kSignMask); // Flip sign.
736 __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), ecx);
737 __ mov(ecx, FieldOperand(edx, HeapNumber::kMantissaOffset));
738 __ mov(FieldOperand(eax, HeapNumber::kMantissaOffset), ecx);
739 }
740 __ ret(0);
741 }
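
Unary minus on a heap number only has to toggle the IEEE-754 sign bit, which sits in the word the stub loads from HeapNumber::kExponentOffset (masked by HeapNumber::kSignMask); the mantissa word is copied unchanged when a fresh number is allocated. The same operation on a whole double in plain C++ (illustrative sketch):

#include <cstdint>
#include <cstring>

// Negate a double by flipping just its sign bit, as the xor_ above does.
static double FlipSign(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof bits);
  bits ^= 0x8000000000000000ULL;  // sign bit; also maps 0.0 <-> -0.0
  std::memcpy(&value, &bits, sizeof bits);
  return value;
}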
742
743
744 void TypeRecordingUnaryOpStub::GenerateHeapNumberCodeBitNot(
745 MacroAssembler* masm,
746 Label* slow) {
747 __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
748 __ cmp(edx, masm->isolate()->factory()->heap_number_map());
749 __ j(not_equal, slow);
750
751 // Convert the heap number in eax to an untagged integer in ecx.
752 IntegerConvert(masm, eax, TypeInfo::Unknown(), CpuFeatures::IsSupported(SSE3),
753 slow);
754
755 // Do the bitwise operation and check if the result fits in a smi.
756 NearLabel try_float;
757 __ not_(ecx);
758 __ cmp(ecx, 0xc0000000);
759 __ j(sign, &try_float);
760
761 // Tag the result as a smi and we're done.
762 STATIC_ASSERT(kSmiTagSize == 1);
763 __ lea(eax, Operand(ecx, times_2, kSmiTag));
764 __ ret(0);
765
766 // Try to store the result in a heap number.
767 __ bind(&try_float);
768 if (mode_ == UNARY_NO_OVERWRITE) {
769 Label slow_allocate_heapnumber, heapnumber_allocated;
770 __ AllocateHeapNumber(eax, edx, edi, &slow_allocate_heapnumber);
771 __ jmp(&heapnumber_allocated);
772
773 __ bind(&slow_allocate_heapnumber);
774 __ EnterInternalFrame();
775 __ push(ecx);
776 __ CallRuntime(Runtime::kNumberAlloc, 0);
777 __ pop(ecx);
778 __ LeaveInternalFrame();
779
780 __ bind(&heapnumber_allocated);
781 }
782 if (CpuFeatures::IsSupported(SSE2)) {
783 CpuFeatures::Scope use_sse2(SSE2);
784 __ cvtsi2sd(xmm0, Operand(ecx));
785 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
786 } else {
787 __ push(ecx);
788 __ fild_s(Operand(esp, 0));
789 __ pop(ecx);
790 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
791 }
792 __ ret(0);
793 }
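
The cmp(ecx, 0xc0000000) / j(sign, &try_float) pair above is a compact range check: the sign bit of ecx - 0xc0000000 is clear exactly when the 32-bit result lies in the ia32 smi range [-2^30, 2^30), and in that case lea(eax, Operand(ecx, times_2, kSmiTag)) tags it in a single instruction. An equivalent check in plain C++ (illustrative sketch, names invented):

#include <cstdint>

// Mirrors the smi-range test and the lea-based tagging used after BIT_NOT.
static bool TryTagAsSmi(int32_t value, int32_t* tagged_out) {
  if (value < -(1 << 30) || value >= (1 << 30)) {
    return false;           // the stub falls through to &try_float instead
  }
  *tagged_out = value * 2;  // times_2 with kSmiTag == 0
  return true;
}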
794
795
796 // TODO(svenpanne): Use virtual functions instead of switch.
797 void TypeRecordingUnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
798 switch (op_) {
799 case Token::SUB:
800 GenerateGenericStubSub(masm);
801 break;
802 case Token::BIT_NOT:
803 GenerateGenericStubBitNot(masm);
804 break;
805 default:
806 UNREACHABLE();
807 }
808 }
809
810
811 void TypeRecordingUnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
812 NearLabel non_smi;
813 Label undo, slow;
814 GenerateSmiCodeSub(masm, &non_smi, &undo, &slow);
815 __ bind(&non_smi);
816 GenerateHeapNumberCodeSub(masm, &slow);
817 __ bind(&undo);
818 GenerateSmiCodeUndo(masm);
819 __ bind(&slow);
820 GenerateGenericCodeFallback(masm);
821 }
822
823
824 void TypeRecordingUnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
825 NearLabel non_smi;
826 Label slow;
827 GenerateSmiCodeBitNot(masm, &non_smi);
828 __ bind(&non_smi);
829 GenerateHeapNumberCodeBitNot(masm, &slow);
830 __ bind(&slow);
831 GenerateGenericCodeFallback(masm);
832 }
833
834
835 void TypeRecordingUnaryOpStub::GenerateGenericCodeFallback(
836 MacroAssembler* masm) {
837 // Handle the slow case by jumping to the corresponding JavaScript builtin.
838 __ pop(ecx); // Pop return address.
839 __ push(eax);
840 __ push(ecx); // Push return address.
841 switch (op_) {
842 case Token::SUB:
843 __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
844 break;
845 case Token::BIT_NOT:
846 __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
847 break;
848 default:
849 UNREACHABLE();
850 }
851 }
852
853
854 Handle<Code> GetTypeRecordingBinaryOpStub(int key,
855 TRBinaryOpIC::TypeInfo type_info,
856 TRBinaryOpIC::TypeInfo result_type_info) {
857 TypeRecordingBinaryOpStub stub(key, type_info, result_type_info);
858 return stub.GetCode();
859 }
860
861
862 void TypeRecordingBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
863 __ pop(ecx); // Save return address.
(...skipping 1540 matching lines...)
2404 __ bind(&done);
2405 } else {
2406 ASSERT(type_ == TranscendentalCache::LOG);
2407 __ fldln2();
2408 __ fxch();
2409 __ fyl2x();
2410 }
2411 }
2412
2413
1937 // Get the integer part of a heap number. Surprisingly, all this bit twiddling
1938 // is faster than using the built-in instructions on floating point registers.
1939 // Trashes edi and ebx. Dest is ecx. Source cannot be ecx or one of the
1940 // trashed registers.
1941 void IntegerConvert(MacroAssembler* masm,
1942 Register source,
1943 TypeInfo type_info,
1944 bool use_sse3,
1945 Label* conversion_failure) {
1946 ASSERT(!source.is(ecx) && !source.is(edi) && !source.is(ebx));
1947 Label done, right_exponent, normal_exponent;
1948 Register scratch = ebx;
1949 Register scratch2 = edi;
1950 if (type_info.IsInteger32() && CpuFeatures::IsSupported(SSE2)) {
1951 CpuFeatures::Scope scope(SSE2);
1952 __ cvttsd2si(ecx, FieldOperand(source, HeapNumber::kValueOffset));
1953 return;
1954 }
1955 if (!type_info.IsInteger32() || !use_sse3) {
1956 // Get exponent word.
1957 __ mov(scratch, FieldOperand(source, HeapNumber::kExponentOffset));
1958 // Get exponent alone in scratch2.
1959 __ mov(scratch2, scratch);
1960 __ and_(scratch2, HeapNumber::kExponentMask);
1961 }
1962 if (use_sse3) {
1963 CpuFeatures::Scope scope(SSE3);
1964 if (!type_info.IsInteger32()) {
1965 // Check whether the exponent is too big for a 64 bit signed integer.
1966 static const uint32_t kTooBigExponent =
1967 (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
1968 __ cmp(Operand(scratch2), Immediate(kTooBigExponent));
1969 __ j(greater_equal, conversion_failure);
1970 }
1971 // Load x87 register with heap number.
1972 __ fld_d(FieldOperand(source, HeapNumber::kValueOffset));
1973 // Reserve space for 64 bit answer.
1974 __ sub(Operand(esp), Immediate(sizeof(uint64_t))); // Nolint.
1975 // Do conversion, which cannot fail because we checked the exponent.
1976 __ fisttp_d(Operand(esp, 0));
1977 __ mov(ecx, Operand(esp, 0)); // Load low word of answer into ecx.
1978 __ add(Operand(esp), Immediate(sizeof(uint64_t))); // Nolint.
1979 } else {
1980 // Load ecx with zero. We use this either for the final shift or
1981 // for the answer.
1982 __ xor_(ecx, Operand(ecx));
1983 // Check whether the exponent matches a 32 bit signed int that cannot be
1984 // represented by a Smi. A non-smi 32 bit integer is 1.xxx * 2^30 so the
1985 // exponent is 30 (biased). This is the exponent that we are fastest at and
1986 // also the highest exponent we can handle here.
1987 const uint32_t non_smi_exponent =
1988 (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
1989 __ cmp(Operand(scratch2), Immediate(non_smi_exponent));
1990 // If we have a match of the int32-but-not-Smi exponent then skip some
1991 // logic.
1992 __ j(equal, &right_exponent);
1993 // If the exponent is higher than that then go to slow case. This catches
1994 // numbers that don't fit in a signed int32, infinities and NaNs.
1995 __ j(less, &normal_exponent);
1996
1997 {
1998 // Handle a big exponent. The only reason we have this code is that the
1999 // >>> operator has a tendency to generate numbers with an exponent of 31.
2000 const uint32_t big_non_smi_exponent =
2001 (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift;
2002 __ cmp(Operand(scratch2), Immediate(big_non_smi_exponent));
2003 __ j(not_equal, conversion_failure);
2004 // We have the big exponent, typically from >>>. This means the number is
2005 // in the range 2^31 to 2^32 - 1. Get the top bits of the mantissa.
2006 __ mov(scratch2, scratch);
2007 __ and_(scratch2, HeapNumber::kMantissaMask);
2008 // Put back the implicit 1.
2009 __ or_(scratch2, 1 << HeapNumber::kExponentShift);
2010 // Shift up the mantissa bits to take up the space the exponent used to
2011 // take. We just orred in the implicit bit so that took care of one and
2012 // we want to use the full unsigned range so we subtract 1 bit from the
2013 // shift distance.
2014 const int big_shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 1;
2015 __ shl(scratch2, big_shift_distance);
2016 // Get the second half of the double.
2017 __ mov(ecx, FieldOperand(source, HeapNumber::kMantissaOffset));
2018 // Shift down 21 bits to get the most significant 11 bits or the low
2019 // mantissa word.
2020 __ shr(ecx, 32 - big_shift_distance);
2021 __ or_(ecx, Operand(scratch2));
2022 // We have the answer in ecx, but we may need to negate it.
2023 __ test(scratch, Operand(scratch));
2024 __ j(positive, &done);
2025 __ neg(ecx);
2026 __ jmp(&done);
2027 }
2028
2029 __ bind(&normal_exponent);
2030 // Exponent word in scratch, exponent part of exponent word in scratch2.
2031 // Zero in ecx.
2032 // We know the exponent is smaller than 30 (biased). If it is less than
2033 // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
2034 // it rounds to zero.
2035 const uint32_t zero_exponent =
2036 (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
2037 __ sub(Operand(scratch2), Immediate(zero_exponent));
2038 // ecx already has a Smi zero.
2039 __ j(less, &done);
2040
2041 // We have a shifted exponent between 0 and 30 in scratch2.
2042 __ shr(scratch2, HeapNumber::kExponentShift);
2043 __ mov(ecx, Immediate(30));
2044 __ sub(ecx, Operand(scratch2));
2045
2046 __ bind(&right_exponent);
2047 // Here ecx is the shift, scratch is the exponent word.
2048 // Get the top bits of the mantissa.
2049 __ and_(scratch, HeapNumber::kMantissaMask);
2050 // Put back the implicit 1.
2051 __ or_(scratch, 1 << HeapNumber::kExponentShift);
2052 // Shift up the mantissa bits to take up the space the exponent used to
2053 // take. We have kExponentShift + 1 significant bits in the low end of the
2054 // word. Shift them to the top bits.
2055 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
2056 __ shl(scratch, shift_distance);
2057 // Get the second half of the double. For some exponents we don't
2058 // actually need this because the bits get shifted out again, but
2059 // it's probably slower to test than just to do it.
2060 __ mov(scratch2, FieldOperand(source, HeapNumber::kMantissaOffset));
2061 // Shift down 22 bits to get the most significant 10 bits or the low
2062 // mantissa word.
2063 __ shr(scratch2, 32 - shift_distance);
2064 __ or_(scratch2, Operand(scratch));
2065 // Move down according to the exponent.
2066 __ shr_cl(scratch2);
2067 // Now the unsigned answer is in scratch2. We need to move it to ecx and
2068 // we may need to fix the sign.
2069 NearLabel negative;
2070 __ xor_(ecx, Operand(ecx));
2071 __ cmp(ecx, FieldOperand(source, HeapNumber::kExponentOffset));
2072 __ j(greater, &negative);
2073 __ mov(ecx, scratch2);
2074 __ jmp(&done);
2075 __ bind(&negative);
2076 __ sub(ecx, Operand(scratch2));
2077 __ bind(&done);
2078 }
2079 }
2080
2081
2414 // Input: edx, eax are the left and right objects of a bit op.
2415 // Output: eax, ecx are left and right integers for a bit op.
2416 void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm,
2417 TypeInfo type_info,
2418 bool use_sse3,
2419 Label* conversion_failure) {
2420 // Check float operands.
2421 Label arg1_is_object, check_undefined_arg1;
2422 Label arg2_is_object, check_undefined_arg2;
2423 Label load_arg2, done;
(...skipping 3569 matching lines...)
5993 // Do a tail call to the rewritten stub.
5994 __ jmp(Operand(edi));
5995 }
5996
5997
5998 #undef __
5999
6000 } } // namespace v8::internal
6001
6002 #endif // V8_TARGET_ARCH_IA32
