Chromium Code Reviews

Side by Side Diff: src/x64/macro-assembler-x64.cc

Issue 196077: X64: Extract all smi operations into MacroAssembler macros. (Closed)
Patch Set: Created 11 years, 3 months ago
1 // Copyright 2009 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 //     * Redistributions of source code must retain the above copyright
7 //       notice, this list of conditions and the following disclaimer.
8 //     * Redistributions in binary form must reproduce the above
9 //       copyright notice, this list of conditions and the following
10 //       disclaimer in the documentation and/or other materials provided
(...skipping 394 matching lines...)
405     movq(dst, Immediate(x));
406   } else if (is_uint32(x)) {
407     movl(dst, Immediate(x));
408   } else {
409     movq(kScratchRegister, x, RelocInfo::NONE);
410     movq(dst, kScratchRegister);
411   }
412 }
413
414
415 // ----------------------------------------------------------------------------
416 // Smi tagging, untagging and tag detection.
417
418
419 void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
420 ASSERT_EQ(1, kSmiTagSize);
421 ASSERT_EQ(0, kSmiTag);
422 #ifdef DEBUG
423 cmpq(src, Immediate(0xC0000000u));
424 Check(positive, "Smi conversion overflow");
425 #endif
426 if (dst.is(src)) {
427 addl(dst, src);
428 } else {
429 lea(dst, Operand(src, src, times_1, 0));
430 }
431 }
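
With kSmiTag == 0 and kSmiTagSize == 1, a smi is just the 31-bit value shifted left by one, so the tag bit is the low bit and tagging is a doubling: addl(dst, src) on an aliased register and lea(dst, [src + src]) both compute it in one instruction. For reference, a minimal host-side model of this encoding (TagSmi and UntagSmi are illustrative names, not V8 API):

    #include <cassert>
    #include <cstdint>

    // Tagged form of a smi: the value doubled, tag bit 0 in the low bit.
    int32_t TagSmi(int32_t value) {
      assert(value >= -(1 << 30) && value <= (1 << 30) - 1);  // 31-bit range
      return value * 2;  // what addl(dst, src) / lea(dst, [src + src]) compute
    }

    // Untagging is an arithmetic right shift, as in SmiToInteger32 below.
    int32_t UntagSmi(int32_t tagged) {
      return tagged >> 1;
    }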
432
433
434 void MacroAssembler::Integer32ToSmi(Register dst,
435 Register src,
436 Label* on_overflow) {
437 ASSERT_EQ(1, kSmiTagSize);
438 ASSERT_EQ(0, kSmiTag);
439 if (!dst.is(src)) {
440 movl(dst, src);
441 }
442 addl(dst, src);
443 j(overflow, on_overflow);
444 }
445
446
447 void MacroAssembler::Integer64AddToSmi(Register dst,
448 Register src,
449 int constant) {
450 #ifdef DEBUG
451 movl(kScratchRegister, src);
452 addl(kScratchRegister, Immediate(constant));
453 Check(no_overflow, "Add-and-smi-convert overflow");
454 Condition valid = CheckInteger32ValidSmiValue(kScratchRegister);
455 Check(valid, "Add-and-smi-convert overflow");
456 #endif
457 lea(dst, Operand(src, src, times_1, constant << kSmiTagSize));
458 }
459
460
461 void MacroAssembler::SmiToInteger32(Register dst, Register src) {
462 ASSERT_EQ(1, kSmiTagSize);
463 ASSERT_EQ(0, kSmiTag);
464 if (!dst.is(src)) {
465 movl(dst, src);
466 }
467 sarl(dst, Immediate(kSmiTagSize));
468 }
469
470
471 void MacroAssembler::SmiToInteger64(Register dst, Register src) {
472 ASSERT_EQ(1, kSmiTagSize);
473 ASSERT_EQ(0, kSmiTag);
474 movsxlq(dst, src);
475 sar(dst, Immediate(kSmiTagSize));
476 }
477
478
479 void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
480 Register src,
481 int power) {
482 ASSERT(power >= 0);
483 ASSERT(power < 64);
484 if (power == 0) {
485 SmiToInteger64(dst, src);
486 return;
487 }
488 if (!dst.is(src)) movq(dst, src); shl(dst, Immediate(power - 1));  // src holds value << 1, so this yields value << power.
489 }
490
491 void MacroAssembler::JumpIfSmi(Register src, Label* on_smi) {
492 ASSERT_EQ(0, kSmiTag);
493 testl(src, Immediate(kSmiTagMask));
494 j(zero, on_smi);
495 }
496
497
498 void MacroAssembler::JumpIfNotSmi(Register src, Label* on_not_smi) {
499 Condition not_smi = CheckNotSmi(src);
500 j(not_smi, on_not_smi);
501 }
502
503
504 void MacroAssembler::JumpIfNotPositiveSmi(Register src,
505 Label* on_not_positive_smi) {
506 Condition not_positive_smi = CheckNotPositiveSmi(src);
507 j(not_positive_smi, on_not_positive_smi);
508 }
509
510
511 void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
512 int constant,
513 Label* on_equals) {
514 if (Smi::IsValid(constant)) {
515 Condition are_equal = CheckSmiEqualsConstant(src, constant);
516 j(are_equal, on_equals);
517 }
518 }
519
520
521 void MacroAssembler::JumpIfNotValidSmiValue(Register src, Label* on_invalid) {
522 Condition is_valid = CheckInteger32ValidSmiValue(src);
523 j(ReverseCondition(is_valid), on_invalid);
524 }
525
526
527
528 void MacroAssembler::JumpIfNotBothSmi(Register src1,
529 Register src2,
530 Label* on_not_both_smi) {
531 Condition not_both_smi = CheckNotBothSmi(src1, src2);
532 j(not_both_smi, on_not_both_smi);
533 }
534
535 Condition MacroAssembler::CheckSmi(Register src) {
536 testb(src, Immediate(kSmiTagMask));
537 return zero;
538 }
539
540
541 Condition MacroAssembler::CheckNotSmi(Register src) {
542 ASSERT_EQ(0, kSmiTag);
543 testb(src, Immediate(kSmiTagMask));
544 return not_zero;
545 }
546
547
548 Condition MacroAssembler::CheckPositiveSmi(Register src) {
549 ASSERT_EQ(0, kSmiTag);
550 testl(src, Immediate(static_cast<uint32_t>(0x80000000u | kSmiTagMask)));
551 return zero;
552 }
553
554
555 Condition MacroAssembler::CheckNotPositiveSmi(Register src) {
556 ASSERT_EQ(0, kSmiTag);
557 testl(src, Immediate(static_cast<uint32_t>(0x80000000u | kSmiTagMask)));
558 return not_zero;
559 }
560
561
562 Condition MacroAssembler::CheckBothSmi(Register first, Register second) {
563 if (first.is(second)) {
564 return CheckSmi(first);
565 }
566 movl(kScratchRegister, first);
567 orl(kScratchRegister, second);
568 return CheckSmi(kScratchRegister);
569 }
570
571
572 Condition MacroAssembler::CheckNotBothSmi(Register first, Register second) {
573 ASSERT_EQ(0, kSmiTag);
574 if (first.is(second)) {
575 return CheckNotSmi(first);
576 }
577 movl(kScratchRegister, first);
578 or_(kScratchRegister, second);
579 return CheckNotSmi(kScratchRegister);
580 }
581
582
583 Condition MacroAssembler::CheckIsMinSmi(Register src) {
584 ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
585 cmpl(src, Immediate(0x40000000));
586 return equal;
587 }
588
589 Condition MacroAssembler::CheckSmiEqualsConstant(Register src, int constant) {
590 if (constant == 0) {
591 testl(src, src);
592 return zero;
593 }
594 if (Smi::IsValid(constant)) {
595 cmpl(src, Immediate(Smi::FromInt(constant)));
596 return zero;
597 }
598 // Can't be equal.
599 UNREACHABLE();
600 return no_condition;
601 }
602
603
604 Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
605 // A 32-bit integer value can be converted to a smi if it is in the
606 // range [-2^30 .. 2^30-1]. That is equivalent to having its 32-bit
607 // representation have bits 30 and 31 be equal.
608 cmpl(src, Immediate(0xC0000000u));
609 return positive;
610 }
611
612
613 void MacroAssembler::SmiNeg(Register dst,
614 Register src,
615 Label* on_not_smi_result) {
616 if (!dst.is(src)) {
617 movl(dst, src);
618 }
619 negl(dst);
620 testl(dst, Immediate(0x7fffffff));
621 // If the result is zero or 0x80000000, negation failed to create a smi.
622 j(equal, on_not_smi_result);
623 }
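
The test deserves a gloss: negl leaves exactly two tagged inputs with result & 0x7fffffff == 0. Negating tagged zero yields zero, but JavaScript requires -0 (a heap number), and negating the tagged minimum smi 0x80000000 yields 0x80000000 again, since 2^30 is one past the smi maximum. A host-side restatement of the check (illustrative name):

    #include <cstdint>

    // True iff negating the tagged smi stays in smi range; mirrors the
    // testl(dst, 0x7fffffff) / j(equal) pair above.
    bool NegationStaysSmi(int32_t tagged) {
      uint32_t negated = 0u - static_cast<uint32_t>(tagged);  // negl(dst)
      return (negated & 0x7fffffffu) != 0;  // rejects 0 and 0x80000000
    }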
624
625
626 void MacroAssembler::SmiAdd(Register dst,
627 Register src1,
628 Register src2,
629 Label* on_not_smi_result) {
630 ASSERT(!dst.is(src2));
631 if (!dst.is(src1)) {
632 movl(dst, src1);
633 }
634 addl(dst, src2);
635 if (!dst.is(src1)) {
636 j(overflow, on_not_smi_result);
637 } else {
638 Label smi_result;
639 j(no_overflow, &smi_result);
640 // Restore src1.
641 subl(src1, src2);
642 jmp(on_not_smi_result);
643 bind(&smi_result);
644 }
645 }
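
The aliased case uses an optimistic-update pattern worth noting: add first, and if the overflow flag fires, subtract src2 back so the slow path receives src1 unmodified. A host-side sketch of the same contract, assuming a compiler that provides __builtin_add_overflow (GCC/Clang):

    #include <cstdint>

    // Returns true and updates *dst on success; on overflow leaves *dst
    // unchanged, like the subl(src1, src2) rollback above.
    bool OptimisticSmiAdd(int32_t* dst, int32_t src2) {
      int32_t sum;
      if (__builtin_add_overflow(*dst, src2, &sum)) {
        return false;  // caller jumps to on_not_smi_result
      }
      *dst = sum;
      return true;
    }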
646
647
648
649 void MacroAssembler::SmiSub(Register dst,
650 Register src1,
651 Register src2,
652 Label* on_not_smi_result) {
653 ASSERT(!dst.is(src2));
654 if (!dst.is(src1)) {
655 movl(dst, src1);
656 }
657 subl(dst, src2);
658 if (!dst.is(src1)) {
659 j(overflow, on_not_smi_result);
660 } else {
661 Label smi_result;
662 j(no_overflow, &smi_result);
663 // Restore src1.
664 addl(src1, src2);
665 jmp(on_not_smi_result);
666 bind(&smi_result);
667 }
668 }
669
670
671 void MacroAssembler::SmiMul(Register dst,
672 Register src1,
673 Register src2,
674 Label* on_not_smi_result) {
675 ASSERT(!dst.is(src2));
676
677   // Always save src1; it is needed below for the negative-zero check,
678   // and dst (which may alias src1) is clobbered by the untag and multiply.
679   movq(kScratchRegister, src1);
680 SmiToInteger32(dst, src1);
681
682 imull(dst, src2);
683 j(overflow, on_not_smi_result);
684
685 // Check for negative zero result. If product is zero, and one
686 // argument is negative, go to slow case. The frame is unchanged
687 // in this block, so local control flow can use a Label rather
688 // than a JumpTarget.
689 Label non_zero_result;
690 testl(dst, dst);
691 j(not_zero, &non_zero_result);
692
693 // Test whether either operand is negative (the other must be zero).
694 orl(kScratchRegister, src2);
695 j(negative, on_not_smi_result);
696 bind(&non_zero_result);
697 }
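
The zero check exists because smis cannot represent JavaScript's -0: 0 * -5 must evaluate to -0, so a zero product whose operands include a negative value (the saved copy in kScratchRegister OR'd with src2) goes to the slow, heap-number path. A host-side restatement on untagged values (illustrative name; the 32-bit overflow test stands in for the imull overflow check):

    #include <cstdint>

    // True iff a * b cannot be returned as a smi: the product overflows
    // or it is the -0 case the code above tests for.
    bool MulNeedsSlowPath(int32_t a, int32_t b) {
      int64_t product = static_cast<int64_t>(a) * b;
      if (product != static_cast<int32_t>(product)) return true;  // overflow
      return product == 0 && (a | b) < 0;  // zero product, negative operand
    }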
698
699
700 void MacroAssembler::SmiTryAddConstant(Register dst,
701 Register src,
702 int32_t constant,
703 Label* on_not_smi_result) {
704 // Does not assume that src is a smi.
705 ASSERT_EQ(1, kSmiTagMask);
706 ASSERT_EQ(0, kSmiTag);
707 ASSERT(Smi::IsValid(constant));
708
709 Register tmp = (src.is(dst) ? kScratchRegister : dst);
710 movl(tmp, src);
711 addl(tmp, Immediate(Smi::FromInt(constant)));
712 if (tmp.is(kScratchRegister)) {
713 j(overflow, on_not_smi_result);
714 testl(tmp, Immediate(kSmiTagMask));
715 j(not_zero, on_not_smi_result);
716 movl(dst, tmp);
717 } else {
718 movl(kScratchRegister, Immediate(kSmiTagMask));
719 cmovl(overflow, dst, kScratchRegister);
720 testl(dst, kScratchRegister);
721 j(not_zero, on_not_smi_result);
722 }
723 }
724
725
726 void MacroAssembler::SmiAddConstant(Register dst,
727 Register src,
728 int32_t constant,
729 Label* on_not_smi_result) {
730 ASSERT(Smi::IsValid(constant));
731 if (on_not_smi_result == NULL) {
732 if (dst.is(src)) {
733 addl(dst, Immediate(Smi::FromInt(constant)));
734 } else {
735 lea(dst, Operand(src, constant << kSmiTagSize));
736 }
737 } else {
738 if (!dst.is(src)) {
739 movl(dst, src);
740 }
741 addl(dst, Immediate(Smi::FromInt(constant)));
742 if (!dst.is(src)) {
743 j(overflow, on_not_smi_result);
744 } else {
745 Label result_ok;
746 j(no_overflow, &result_ok);
747 subl(dst, Immediate(Smi::FromInt(constant)));
748 jmp(on_not_smi_result);
749 bind(&result_ok);
750 }
751 }
752 }
753
754
755 void MacroAssembler::SmiSubConstant(Register dst,
756 Register src,
757 int32_t constant,
758 Label* on_not_smi_result) {
759 ASSERT(Smi::IsValid(constant));
760 Smi* smi_value = Smi::FromInt(constant);
761 if (dst.is(src)) {
762 // Optimistic subtract - may change value of dst register,
763 // if it has garbage bits in the higher half, but will not change
764 // the value as a tagged smi.
765 subl(dst, Immediate(smi_value));
766 if (on_not_smi_result != NULL) {
767 Label add_success;
768 j(no_overflow, &add_success);
769 addl(dst, Immediate(smi_value));
770 jmp(on_not_smi_result);
771 bind(&add_success);
772 }
773 } else {
774 UNIMPLEMENTED(); // Not used yet.
775 }
776 }
777
778
779 void MacroAssembler::SmiDiv(Register dst,
780 Register src1,
781 Register src2,
782 Label* on_not_smi_result) {
783 ASSERT(!src2.is(rax));
784 ASSERT(!src2.is(rdx));
785 ASSERT(!src1.is(rdx));
786
787 // Check for 0 divisor (result is +/-Infinity).
788 Label positive_divisor;
789 testl(src2, src2);
790 j(zero, on_not_smi_result);
791 j(positive, &positive_divisor);
792 // Check for negative zero result. If the dividend is zero, and the
793 // divisor is negative, return a floating point negative zero.
794 testl(src1, src1);
795 j(zero, on_not_smi_result);
796 bind(&positive_divisor);
797
798 // Sign extend src1 into edx:eax.
799 if (!src1.is(rax)) {
800 movl(rax, src1);
801 }
802 cdq();
803
804 idivl(src2);
805 // Check for the corner case of dividing the most negative smi by
806 // -1. We cannot use the overflow flag, since it is not set by
807 // the idiv instruction.
808 ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
809 cmpl(rax, Immediate(0x40000000));
810 j(equal, on_not_smi_result);
811 // Check that the remainder is zero.
812 testl(rdx, rdx);
813 j(not_zero, on_not_smi_result);
814 // Tag the result and store it in the destination register.
815 Integer32ToSmi(dst, rax);
816 }
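
Because idivl raises a fault rather than setting the overflow flag, the one overflowing quotient has to be tested for explicitly: the smallest smi value is -2^30, and -2^30 / -1 = 2^30 = 0x40000000, one past the largest smi. (The comparison works directly on rax because dividing one tagged value by another, 2a / 2b, already yields the untagged quotient a / b.) A host-side restatement:

    #include <cstdint>

    // The single case where a smi division overflows the smi range.
    bool DivOverflowsSmi(int32_t a, int32_t b) {
      return a == -(1 << 30) && b == -1;  // quotient 2^30 is not a smi
    }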
817
818
819 void MacroAssembler::SmiMod(Register dst,
820 Register src1,
821 Register src2,
822 Label* on_not_smi_result) {
823 ASSERT(!dst.is(kScratchRegister));
824 ASSERT(!src1.is(kScratchRegister));
825 ASSERT(!src2.is(kScratchRegister));
826 ASSERT(!src2.is(rax));
827 ASSERT(!src2.is(rdx));
828 ASSERT(!src1.is(rdx));
829
830 testl(src2, src2);
831 j(zero, on_not_smi_result);
832
833 if (src1.is(rax)) {
834 // Must remember the value to see if a zero result should
835 // be a negative zero.
836 movl(kScratchRegister, rax);
837 } else {
838 movl(rax, src1);
839 }
840 // Sign extend eax into edx:eax.
841 cdq();
842 idivl(src2);
843 // Check for a negative zero result. If the result is zero, and the
844 // dividend is negative, return a floating point negative zero.
845 Label non_zero_result;
846 testl(rdx, rdx);
847 j(not_zero, &non_zero_result);
848 if (src1.is(rax)) {
849 testl(kScratchRegister, kScratchRegister);
850 } else {
851 testl(src1, src1);
852 }
853 j(negative, on_not_smi_result);
854 bind(&non_zero_result);
855 if (!dst.is(rdx)) {
856 movl(dst, rdx);
857 }
858 }
859
860
861 void MacroAssembler::SmiNot(Register dst, Register src) {
862 if (dst.is(src)) {
863 not_(dst);
864 // Remove inverted smi-tag. The mask is sign-extended to 64 bits.
865 xor_(src, Immediate(kSmiTagMask));
866 } else {
867 ASSERT_EQ(0, kSmiTag);
868 lea(dst, Operand(src, kSmiTagMask));
869 not_(dst);
870 }
871 }
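
Both branches lean on the same identity: for a tagged smi 2v, ~(2v) = -2v - 1 sets the tag bit, so the aliased branch clears it with an xor, while the non-aliased branch pre-adds the mask so the NOT lands directly on a tagged value: ~(2v + 1) = -2v - 2 = 2 * (~v). A host-side sketch of the second branch:

    #include <cstdint>

    // Tagged bitwise NOT: lea(dst, Operand(src, kSmiTagMask)), then not_(dst).
    int32_t SmiNotTagged(int32_t tagged) {
      return ~(tagged + 1);  // equals 2 * ~(tagged >> 1), i.e. tagged ~value
    }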
872
873
874 void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) {
875 if (!dst.is(src1)) {
876 movl(dst, src1);
877 }
878 and_(dst, src2);
879 }
880
881
882 void MacroAssembler::SmiAndConstant(Register dst, Register src, int constant) {
883 ASSERT(Smi::IsValid(constant));
884 if (!dst.is(src)) {
885 movl(dst, src);
886 }
887 and_(dst, Immediate(Smi::FromInt(constant)));
888 }
889
890
891 void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
892 if (!dst.is(src1)) {
893 movl(dst, src1);
894 }
895 or_(dst, src2);
896 }
897
898
899 void MacroAssembler::SmiOrConstant(Register dst, Register src, int constant) {
900 ASSERT(Smi::IsValid(constant));
901 if (!dst.is(src)) {
902 movl(dst, src);
903 }
904 or_(dst, Immediate(Smi::FromInt(constant)));
905 }
906
907 void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
908 if (!dst.is(src1)) {
909 movl(dst, src1);
910 }
911 xor_(dst, src2);
912 }
913
914
915 void MacroAssembler::SmiXorConstant(Register dst, Register src, int constant) {
916 ASSERT(Smi::IsValid(constant));
917 if (!dst.is(src)) {
918 movl(dst, src);
919 }
920 xor_(dst, Immediate(Smi::FromInt(constant)));
921 }
922
923
924
925 void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
926 Register src,
927 int shift_value) {
928 if (shift_value > 0) {
929 if (dst.is(src)) {
930 sarl(dst, Immediate(shift_value));
931 and_(dst, Immediate(~kSmiTagMask));
932 } else {
933 UNIMPLEMENTED(); // Not used.
934 }
935 }
936 }
937
938
939 void MacroAssembler::SmiShiftLogicRightConstant(Register dst,
940 Register src,
941 int shift_value,
942 Label* on_not_smi_result) {
943 // Logical right shift interprets its result as an *unsigned* number.
944 if (dst.is(src)) {
945 UNIMPLEMENTED(); // Not used.
946 } else {
947 movl(dst, src);
948 // Untag the smi.
949 sarl(dst, Immediate(kSmiTagSize));
950 if (shift_value < 2) {
951 // A negative smi shifted right by two or more always lands in the
952 // positive smi range; shifted by only zero or one, it never does.
953 j(negative, on_not_smi_result);
954 }
955 if (shift_value > 0) {
956 // Do the right shift on the integer value.
957 shrl(dst, Immediate(shift_value));
958 }
959 // Re-tag the result.
960 addl(dst, dst);
961 }
962 }
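
The shift-by-0-or-1 caveat can be checked directly: a logical right shift by two or more clears bits 30 and 31 of any 32-bit input, so the result always lands in the positive smi range [0, 2^30); a shift by zero or one of a negative value leaves a high bit set. A host-side restatement (illustrative name):

    #include <cstdint>

    // True iff the unsigned shift result is a valid (positive) smi value.
    bool LogicalShiftFitsSmi(int32_t value, int shift) {
      uint32_t shifted = static_cast<uint32_t>(value) >> shift;
      return shifted < (1u << 30);  // always true for shift >= 2
    }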
963
964
965 void MacroAssembler::SmiShiftLeftConstant(Register dst,
966 Register src,
967 int shift_value,
968 Label* on_not_smi_result) {
969 if (dst.is(src)) {
970 UNIMPLEMENTED(); // Not used.
971 } else {
972 movl(dst, src);
973 if (shift_value > 0) {
974 // Treat dst as an untagged integer value equal to two times the
975 // smi value of src, i.e., already shifted left by one.
976 if (shift_value > 1) {
977 shll(dst, Immediate(shift_value - 1));
978 }
979 // Convert int result to Smi, checking that it is in smi range.
980 ASSERT(kSmiTagSize == 1); // adjust code if not the case
981 Integer32ToSmi(dst, dst, on_not_smi_result);
982 }
983 }
984 }
985
986
987 void MacroAssembler::SmiShiftLeft(Register dst,
988 Register src1,
989 Register src2,
990 Label* on_not_smi_result) {
991 ASSERT(!dst.is(rcx));
992 Label result_ok;
993 // Untag both operands.
994 SmiToInteger32(dst, src1);
995 SmiToInteger32(rcx, src2);
996 shll(dst);
997 // Check that the *signed* result fits in a smi.
998 Condition is_valid = CheckInteger32ValidSmiValue(dst);
999 j(is_valid, &result_ok);
1000 // Restore the relevant bits of the source registers
1001 // and call the slow version.
1002 if (dst.is(src1)) {
1003 shrl(dst);
1004 Integer32ToSmi(dst, dst);
1005 }
1006 Integer32ToSmi(rcx, rcx);
1007 jmp(on_not_smi_result);
1008 bind(&result_ok);
1009 Integer32ToSmi(dst, dst);
1010 }
1011
1012
1013 void MacroAssembler::SmiShiftLogicRight(Register dst,
1014 Register src1,
1015 Register src2,
1016 Label* on_not_smi_result) {
1017 ASSERT(!dst.is(rcx));
1018 Label result_ok;
1019 // Untag both operands.
1020 SmiToInteger32(dst, src1);
1021 SmiToInteger32(rcx, src2);
1022
1023 shrl(dst);
1024 // Check that the *unsigned* result fits in a smi.
1025 // I.e., that it is a valid positive smi value. The positive smi
1026 // values are 0..0x3fffffff, i.e., neither of the top-most two
1027 // bits can be set.
1028 //
1029 // These two cases can only happen with shifts by 0 or 1 when
1030 // handed a valid smi. If the answer cannot be represented by a
1031 // smi, restore the left and right arguments, and jump to slow
1032 // case. The low bit of the left argument may be lost, but only
1033 // in a case where it is dropped anyway.
1034 testl(dst, Immediate(0xc0000000));
1035 j(zero, &result_ok);
1036 if (dst.is(src1)) {
1037 shll(dst);
1038 Integer32ToSmi(dst, dst);
1039 }
1040 Integer32ToSmi(rcx, rcx);
1041 jmp(on_not_smi_result);
1042 bind(&result_ok);
1043 // Smi-tag the result in answer.
1044 Integer32ToSmi(dst, dst);
1045 }
1046
1047
1048 void MacroAssembler::SmiShiftArithmeticRight(Register dst,
1049 Register src1,
1050 Register src2) {
1051 ASSERT(!dst.is(rcx));
1052 // Untag both operands.
1053 SmiToInteger32(dst, src1);
1054 SmiToInteger32(rcx, src2);
1055 // Shift as integer.
1056 sarl(dst);
1057 // Retag result.
1058 Integer32ToSmi(dst, dst);
1059 }
1060
1061
1062 void MacroAssembler::SelectNonSmi(Register dst,
1063 Register src1,
1064 Register src2,
1065 Label* on_not_smis) {
1066 ASSERT(!dst.is(src1));
1067 ASSERT(!dst.is(src2));
1068 // Both operands must not be smis.
1069 #ifdef DEBUG
1070 Condition not_both_smis = CheckNotBothSmi(src1, src2);
1071 Check(not_both_smis, "Both registers were smis.");
1072 #endif
1073 ASSERT_EQ(0, kSmiTag);
1074 ASSERT_EQ(0, Smi::FromInt(0));
1075 movq(kScratchRegister, Immediate(kSmiTagMask));
1076 and_(kScratchRegister, src1);
1077 testl(kScratchRegister, src2);
1078 j(not_zero, on_not_smis);
1079 // One operand is a smi.
1080
1081 ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
1082 // kScratchRegister still holds src1 & kSmiTagMask, which is either zero or one.
1083 subq(kScratchRegister, Immediate(1));
1084 // If src1 is a smi, the scratch register is all 1s, else it is all 0s.
1085 movq(dst, src1);
1086 xor_(dst, src2);
1087 and_(dst, kScratchRegister);
1088 // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
1089 xor_(dst, src1);
1090 // If src1 is a smi, dst is src2, else it is src1, i.e., a non-smi.
1091 }
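
The tail of SelectNonSmi is a branch-free select: after the subtract, kScratchRegister is all ones when src1 is a smi and all zeros otherwise, and ((src1 ^ src2) & mask) ^ src1 evaluates to src2 under an all-ones mask and to src1 under an all-zeros mask. A host-side restatement:

    #include <cstdint>

    // Picks the non-smi of two operands, at most one of which is a smi.
    uint64_t SelectNonSmiValue(uint64_t src1, uint64_t src2) {
      uint64_t mask = (src1 & 1) - 1;        // smi (tag bit 0): all ones
      return ((src1 ^ src2) & mask) ^ src1;  // mask ? src2 : src1
    }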
1092
1093
1094
1095 bool MacroAssembler::IsUnsafeSmi(Smi* value) {
1096   return false;
1097 }
1098
1099 void MacroAssembler::LoadUnsafeSmi(Register dst, Smi* source) {
1100   UNIMPLEMENTED();
1101 }
1102
1103
1104 void MacroAssembler::Move(Register dst, Handle<Object> source) {
(...skipping 975 matching lines...)
2080   movq(kScratchRegister, new_space_allocation_top);
2081 #ifdef DEBUG
2082   cmpq(object, Operand(kScratchRegister, 0));
2083   Check(below, "Undo allocation of non allocated memory");
2084 #endif
2085   movq(Operand(kScratchRegister, 0), object);
2086 }
2087
2088
2089 } }  // namespace v8::internal
