Chromium Code Reviews

Unified diff: src/x64/macro-assembler-x64.cc

Issue 2885018: X64: Added register holding Smi::FromInt(1). (Closed)
Patch Set: Addressed review comments (created 10 years, 5 months ago)
 // Copyright 2009 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 449 matching lines...)
     cmpq(target, Operand(rsp, 0));
     Assert(equal, "Builtin code object changed");
     pop(target);
   }
   lea(target, FieldOperand(target, Code::kHeaderSize));
 }
 
 
 void MacroAssembler::Set(Register dst, int64_t x) {
   if (x == 0) {
-    xor_(dst, dst);
+    xorl(dst, dst);
   } else if (is_int32(x)) {
     movq(dst, Immediate(static_cast<int32_t>(x)));
   } else if (is_uint32(x)) {
     movl(dst, Immediate(static_cast<uint32_t>(x)));
   } else {
     movq(dst, x, RelocInfo::NONE);
   }
 }
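
Note (illustrative, not part of the CL): the xor_ -> xorl change works because any write to a 32-bit register on x64 clears the upper 32 bits, so xorl zeroes the whole register with a shorter encoding. The remaining branches pick an immediate form by range: movq with a 32-bit immediate sign-extends, movl zero-extends. A minimal sketch of that distinction in plain C++:

    #include <cassert>
    #include <cstdint>

    // Rough models of what the two immediate forms leave in a 64-bit register.
    uint64_t MovqImm32(int32_t imm) {
      return static_cast<uint64_t>(static_cast<int64_t>(imm));  // sign-extended
    }
    uint64_t MovlImm32(uint32_t imm) {
      return static_cast<uint64_t>(imm);                        // zero-extended
    }

    int main() {
      assert(MovqImm32(-1) == UINT64_MAX);            // is_int32() path keeps negatives intact
      assert(MovlImm32(0x80000000u) == 0x80000000u);  // is_uint32() path avoids bogus sign extension
      return 0;
    }
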
 
-
 void MacroAssembler::Set(const Operand& dst, int64_t x) {
   if (is_int32(x)) {
     movq(dst, Immediate(static_cast<int32_t>(x)));
   } else {
     movq(kScratchRegister, x, RelocInfo::NONE);
     movq(dst, kScratchRegister);
   }
 }
 
 // ----------------------------------------------------------------------------
 // Smi tagging, untagging and tag detection.
 
 static int kSmiShift = kSmiTagSize + kSmiShiftSize;
 
+Register MacroAssembler::GetSmiConstant(Smi* source) {
+  int value = source->value();
+  if (value == 0) {
+    xorl(kScratchRegister, kScratchRegister);
+    return kScratchRegister;
+  }
+  if (value == 1) {
+    return kSmiConstantRegister;
+  }
+  LoadSmiConstant(kScratchRegister, source);
+  return kScratchRegister;
+}
+
+void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
+  if (FLAG_debug_code) {
+    movq(dst,
+         reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
+         RelocInfo::NONE);
+    cmpq(dst, kSmiConstantRegister);
+    if (allow_stub_calls()) {
+      Assert(equal, "Uninitialized kSmiConstantRegister");
+    } else {
+      Label ok;
+      j(equal, &ok);
+      int3();
+      bind(&ok);
+    }
+  }
+  if (source->value() == 0) {
+    xorl(dst, dst);
+    return;
+  }
+  int value = source->value();
+  bool negative = value < 0;
+  unsigned int uvalue = negative ? -value : value;
+
+  switch (uvalue) {
+    case 9:
+      lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_8, 0));
+      break;
+    case 8:
+      xorl(dst, dst);
+      lea(dst, Operand(dst, kSmiConstantRegister, times_8, 0));
+      break;
+    case 4:
+      xorl(dst, dst);
+      lea(dst, Operand(dst, kSmiConstantRegister, times_4, 0));
+      break;
+    case 5:
+      lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_4, 0));
+      break;
+    case 3:
+      lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_2, 0));
+      break;
+    case 2:
+      lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_1, 0));
+      break;
+    case 1:
+      movq(dst, kSmiConstantRegister);
+      break;
+    case 0:
+      UNREACHABLE();
+      return;
+    default:
+      movq(dst, reinterpret_cast<uint64_t>(source), RelocInfo::NONE);
+      return;
+  }
+  if (negative) {
+    neg(dst);
+  }
+}
+
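
Note (illustrative, not part of the CL): the new helpers lean on the x64 smi encoding, where the 32-bit payload sits in the upper half of the word, so Smi::FromInt(n) is numerically n << 32 (assuming kSmiShift == 32). Since Smi::FromInt(k) is just k * Smi::FromInt(1), LoadSmiConstant can build the constants with absolute value 1-5, 8 and 9 out of kSmiConstantRegister with a single lea (scale factor 1, 2, 4 or 8, optionally with the register itself as base) plus an optional neg; everything else falls back to a full movq. A standalone sketch of that arithmetic:

    #include <cassert>
    #include <cstdint>

    // Assumed encoding: payload shifted into the upper 32 bits (kSmiShift == 32).
    constexpr uint64_t SmiFromInt(int32_t n) {
      return static_cast<uint64_t>(static_cast<int64_t>(n)) << 32;
    }

    int main() {
      const uint64_t one = SmiFromInt(1);      // contents of kSmiConstantRegister
      assert(SmiFromInt(3) == one + one * 2);  // lea dst, [one + one*2]
      assert(SmiFromInt(5) == one + one * 4);  // lea dst, [one + one*4]
      assert(SmiFromInt(9) == one + one * 8);  // lea dst, [one + one*8]
      assert(SmiFromInt(8) == 0 + one * 8);    // xorl dst, dst; lea dst, [dst + one*8]
      assert(SmiFromInt(-3) ==
             static_cast<uint64_t>(-static_cast<int64_t>(SmiFromInt(3))));  // neg dst
      return 0;
    }
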
 void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
   ASSERT_EQ(0, kSmiTag);
   if (!dst.is(src)) {
     movl(dst, src);
   }
   shl(dst, Immediate(kSmiShift));
 }
 
 
 void MacroAssembler::Integer32ToSmi(Register dst,
(...skipping 140 matching lines...)
 
 Condition MacroAssembler::CheckSmi(Register src) {
   ASSERT_EQ(0, kSmiTag);
   testb(src, Immediate(kSmiTagMask));
   return zero;
 }
 
 
 Condition MacroAssembler::CheckPositiveSmi(Register src) {
   ASSERT_EQ(0, kSmiTag);
+  // Make mask 0x8000000000000001 and test that both bits are zero.
   movq(kScratchRegister, src);
   rol(kScratchRegister, Immediate(1));
-  testl(kScratchRegister, Immediate(0x03));
+  testb(kScratchRegister, Immediate(3));
   return zero;
 }
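
Note (illustrative, not part of the CL): a positive smi must have both the tag bit (bit 0) and the sign bit (bit 63) clear. rol by 1 rotates the sign bit into bit 0 and the tag bit into bit 1, so one test of the low two bits covers both; testb with 3 checks exactly that and only needs a byte operand. The same check in plain C++:

    #include <cassert>
    #include <cstdint>

    // Mirrors rol(reg, 1) followed by testb(reg, Immediate(3)).
    bool IsPositiveSmiWord(uint64_t word) {
      uint64_t rotated = (word << 1) | (word >> 63);  // rotate left by one
      return (rotated & 3) == 0;                      // sign bit and tag bit both clear
    }

    int main() {
      assert(IsPositiveSmiWord(uint64_t{42} << 32));                          // positive smi
      assert(!IsPositiveSmiWord(static_cast<uint64_t>(int64_t{-42}) << 32));  // negative smi
      assert(!IsPositiveSmiWord(0x12345671));  // heap pointer: tag bit set
      return 0;
    }
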
 
 
 Condition MacroAssembler::CheckBothSmi(Register first, Register second) {
   if (first.is(second)) {
     return CheckSmi(first);
   }
   ASSERT(kSmiTag == 0 && kHeapObjectTag == 1 && kHeapObjectTagMask == 3);
   leal(kScratchRegister, Operand(first, second, times_1, 0));
   testb(kScratchRegister, Immediate(0x03));
   return zero;
 }
 
 
 Condition MacroAssembler::CheckBothPositiveSmi(Register first,
                                                Register second) {
   if (first.is(second)) {
     return CheckPositiveSmi(first);
   }
   movq(kScratchRegister, first);
   or_(kScratchRegister, second);
   rol(kScratchRegister, Immediate(1));
   testl(kScratchRegister, Immediate(0x03));
   return zero;
 }
 
 
-
 Condition MacroAssembler::CheckEitherSmi(Register first, Register second) {
   if (first.is(second)) {
     return CheckSmi(first);
   }
   movl(kScratchRegister, first);
   andl(kScratchRegister, second);
   testb(kScratchRegister, Immediate(kSmiTagMask));
   return zero;
 }
 
 
 Condition MacroAssembler::CheckIsMinSmi(Register src) {
-  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
-  movq(kScratchRegister, src);
-  rol(kScratchRegister, Immediate(1));
-  cmpq(kScratchRegister, Immediate(1));
-  return equal;
+  ASSERT(!src.is(kScratchRegister));
+  // If we overflow by subtracting one, it's the minimal smi value.
+  cmpq(src, kSmiConstantRegister);
+  return overflow;
 }
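
Note (my reading, not from the CL description): cmpq sets flags for src - Smi::FromInt(1) without writing a result, and for a smi-encoded operand that subtraction overflows only when src is the encoding of Smi::kMinValue (the most negative 64-bit word), hence returning the overflow condition. A sketch of that property, assuming the 32-bit smi shift:

    #include <cassert>
    #include <cstdint>
    #include <limits>

    // Does (word - Smi::FromInt(1)) overflow in signed 64-bit arithmetic?
    // This is what cmpq against kSmiConstantRegister plus the overflow flag observe.
    bool SubtractingSmiOneOverflows(int64_t word) {
      const int64_t smi_one = int64_t{1} << 32;
      int64_t result;
      return __builtin_sub_overflow(word, smi_one, &result);  // GCC/Clang builtin
    }

    int main() {
      const int64_t min_smi = std::numeric_limits<int64_t>::min();  // Smi::kMinValue encoding
      assert(SubtractingSmiOneOverflows(min_smi));
      assert(!SubtractingSmiOneOverflows(0));                                 // Smi::FromInt(0)
      assert(!SubtractingSmiOneOverflows(int64_t{-5} * (int64_t{1} << 32)));  // ordinary negative smi
      return 0;
    }
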
 
 
 Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
   // A 32-bit integer value can always be converted to a smi.
   return always;
 }
 
 
 Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) {
   // An unsigned 32-bit integer value is valid as long as the high bit
   // is not set.
-  testq(src, Immediate(0x80000000));
-  return zero;
+  testl(src, src);
+  return positive;
 }
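
Note (illustrative, not part of the CL): both versions check that bit 31 of the 32-bit value is clear, which is what makes an unsigned 32-bit integer fit into a non-negative smi payload. testl(src, src) sets the sign flag from the low 32 bits, so returning the positive condition expresses the same check without a large immediate. Quick equivalence check:

    #include <cassert>
    #include <cstdint>

    // A uint32 fits in a non-negative smi iff bit 31 is clear.
    bool Uint32IsValidSmi(uint32_t value) {
      return static_cast<int32_t>(value) >= 0;  // what testl(src, src) + 'positive' observes
    }

    int main() {
      for (uint64_t v = 0; v <= UINT32_MAX; v += 0x10001) {
        const bool mask_form = (v & 0x80000000u) == 0;  // the old mask-style test
        assert(Uint32IsValidSmi(static_cast<uint32_t>(v)) == mask_form);
      }
      return 0;
    }
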
 
 
 void MacroAssembler::SmiNeg(Register dst, Register src, Label* on_smi_result) {
   if (dst.is(src)) {
     ASSERT(!dst.is(kScratchRegister));
     movq(kScratchRegister, src);
     neg(dst);  // Low 32 bits are retained as zero by negation.
     // Test if result is zero or Smi::kMinValue.
     cmpq(dst, kScratchRegister);
(...skipping 72 matching lines...)
     // No overflow checking. Use only when it's known that
     // overflowing is impossible (e.g., subtracting two positive smis).
     if (dst.is(src1)) {
       subq(dst, src2);
     } else {
       movq(dst, src1);
       subq(dst, src2);
     }
     Assert(no_overflow, "Smi subtraction overflow");
   } else if (dst.is(src1)) {
-    movq(kScratchRegister, src1);
-    subq(kScratchRegister, src2);
+    movq(kScratchRegister, src2);
+    cmpq(src1, kScratchRegister);
     j(overflow, on_not_smi_result);
-    movq(src1, kScratchRegister);
+    subq(src1, kScratchRegister);
   } else {
     movq(dst, src1);
     subq(dst, src2);
     j(overflow, on_not_smi_result);
   }
 }
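
Note (my reading of the dst.is(src1) case): cmpq performs the same subtraction as subq for flag purposes but discards the result, so the overflow bailout is now taken before src1 is written; the old sequence got the same effect by subtracting into kScratchRegister and copying back afterwards. The "check, then commit" pattern in C++ (names are mine):

    #include <cassert>
    #include <cstdint>

    // Subtract rhs from *dst, leaving *dst untouched when the result would
    // overflow (i.e. would not be a valid smi). Mirrors cmpq + j(overflow)
    // followed by the committing subq.
    bool SmiSubInPlace(int64_t* dst, int64_t rhs) {
      int64_t result;
      if (__builtin_sub_overflow(*dst, rhs, &result)) {  // GCC/Clang builtin
        return false;  // bailout path: *dst still holds its original value
      }
      *dst = result;
      return true;
    }

    int main() {
      int64_t a = int64_t{7} * (int64_t{1} << 32);   // Smi::FromInt(7)
      assert(SmiSubInPlace(&a, int64_t{5} * (int64_t{1} << 32)));
      assert(a == int64_t{2} * (int64_t{1} << 32));  // Smi::FromInt(2)

      int64_t min_smi = INT64_MIN;                   // Smi::kMinValue encoding
      assert(!SmiSubInPlace(&min_smi, int64_t{1} << 32));
      assert(min_smi == INT64_MIN);                  // operand preserved on the bailout path
      return 0;
    }
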
 
 void MacroAssembler::SmiMul(Register dst,
                             Register src1,
                             Register src2,
(...skipping 52 matching lines...)
                                        Smi* constant,
                                        Label* on_not_smi_result) {
   // Does not assume that src is a smi.
   ASSERT_EQ(static_cast<int>(1), static_cast<int>(kSmiTagMask));
   ASSERT_EQ(0, kSmiTag);
   ASSERT(!dst.is(kScratchRegister));
   ASSERT(!src.is(kScratchRegister));
 
   JumpIfNotSmi(src, on_not_smi_result);
   Register tmp = (dst.is(src) ? kScratchRegister : dst);
-  Move(tmp, constant);
+  LoadSmiConstant(tmp, constant);
   addq(tmp, src);
   j(overflow, on_not_smi_result);
   if (dst.is(src)) {
     movq(dst, tmp);
   }
 }
 
 
 void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
   if (constant->value() == 0) {
     if (!dst.is(src)) {
       movq(dst, src);
     }
+    return;
   } else if (dst.is(src)) {
     ASSERT(!dst.is(kScratchRegister));
-
-    Move(kScratchRegister, constant);
-    addq(dst, kScratchRegister);
+    switch (constant->value()) {
+      case 1:
+        addq(dst, kSmiConstantRegister);
+        return;
+      case 2:
+        lea(dst, Operand(src, kSmiConstantRegister, times_2, 0));
+        return;
+      case 4:
+        lea(dst, Operand(src, kSmiConstantRegister, times_4, 0));
+        return;
+      case 8:
+        lea(dst, Operand(src, kSmiConstantRegister, times_8, 0));
+        return;
+      default:
+        Register constant_reg = GetSmiConstant(constant);
+        addq(dst, constant_reg);
+        return;
+    }
   } else {
-    Move(dst, constant);
-    addq(dst, src);
+    switch (constant->value()) {
+      case 1:
+        lea(dst, Operand(src, kSmiConstantRegister, times_1, 0));
+        return;
+      case 2:
+        lea(dst, Operand(src, kSmiConstantRegister, times_2, 0));
+        return;
+      case 4:
+        lea(dst, Operand(src, kSmiConstantRegister, times_4, 0));
+        return;
+      case 8:
+        lea(dst, Operand(src, kSmiConstantRegister, times_8, 0));
+        return;
+      default:
+        LoadSmiConstant(dst, constant);
+        addq(dst, src);
+        return;
+    }
   }
 }
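
Note (illustrative, not part of the CL): the 1/2/4/8 cases exist because lea can compute base + index*scale for exactly those scale factors in a single instruction, and scale * Smi::FromInt(1) is Smi::FromInt(scale). So lea(dst, Operand(src, kSmiConstantRegister, times_k, 0)) adds Smi::FromInt(k) to src without touching kScratchRegister and without modifying the flags; when dst aliases src, adding Smi::FromInt(1) is just addq against the constant register. Arithmetic sketch:

    #include <cassert>
    #include <cstdint>

    // lea dst, [base + index*scale]: an address-style sum, no flags, no scratch register.
    uint64_t LeaAdd(uint64_t base, uint64_t index, uint64_t scale) {
      return base + index * scale;
    }

    int main() {
      const uint64_t smi_one = uint64_t{1} << 32;   // assumed kSmiConstantRegister contents
      const uint64_t smi_ten = uint64_t{10} << 32;  // Smi::FromInt(10)
      assert(LeaAdd(smi_ten, smi_one, 4) == (uint64_t{14} << 32));  // adds Smi::FromInt(4)
      assert(LeaAdd(smi_ten, smi_one, 8) == (uint64_t{18} << 32));  // adds Smi::FromInt(8)
      return 0;
    }
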
 
 
 void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) {
   if (constant->value() != 0) {
     addl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(constant->value()));
   }
 }
 
 
 void MacroAssembler::SmiAddConstant(Register dst,
                                     Register src,
                                     Smi* constant,
                                     Label* on_not_smi_result) {
   if (constant->value() == 0) {
     if (!dst.is(src)) {
       movq(dst, src);
     }
   } else if (dst.is(src)) {
     ASSERT(!dst.is(kScratchRegister));
 
-    Move(kScratchRegister, constant);
-    addq(kScratchRegister, dst);
+    LoadSmiConstant(kScratchRegister, constant);
+    addq(kScratchRegister, src);
     j(overflow, on_not_smi_result);
     movq(dst, kScratchRegister);
   } else {
-    Move(dst, constant);
+    LoadSmiConstant(dst, constant);
     addq(dst, src);
     j(overflow, on_not_smi_result);
   }
 }
 
 
 void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
   if (constant->value() == 0) {
     if (!dst.is(src)) {
       movq(dst, src);
     }
   } else if (dst.is(src)) {
     ASSERT(!dst.is(kScratchRegister));
-
-    Move(kScratchRegister, constant);
-    subq(dst, kScratchRegister);
+    Register constant_reg = GetSmiConstant(constant);
+    subq(dst, constant_reg);
   } else {
-    // Subtract by adding the negative, to do it in two operations.
     if (constant->value() == Smi::kMinValue) {
-      Move(dst, constant);
+      LoadSmiConstant(dst, constant);
       // Adding and subtracting the min-value gives the same result, it only
       // differs on the overflow bit, which we don't check here.
       addq(dst, src);
     } else {
       // Subtract by adding the negation.
-      Move(dst, Smi::FromInt(-constant->value()));
+      LoadSmiConstant(dst, Smi::FromInt(-constant->value()));
       addq(dst, src);
     }
   }
 }
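
Note on the Smi::kMinValue branch (pre-existing logic, just spelled out): the generic path subtracts by adding the negated constant, but -Smi::kMinValue is not representable in two's complement; negating the minimum value yields the same bit pattern, so adding Smi::kMinValue produces the same bits as subtracting it and differs only in the overflow flag, which this unchecked variant ignores. Sketch:

    #include <cassert>
    #include <cstdint>

    int main() {
      // Work on raw 64-bit words; unsigned wraparound is well defined.
      const uint64_t min_smi = uint64_t{1} << 63;  // encoding of Smi::kMinValue

      // Negating the minimum value gives back the same bit pattern...
      assert(~min_smi + 1 == min_smi);

      // ...so "subtract by adding the negation" still produces the right bits.
      const uint64_t some_smi = uint64_t{12} << 32;  // Smi::FromInt(12)
      assert(some_smi - min_smi == some_smi + min_smi);
      return 0;
    }
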
 
 
 void MacroAssembler::SmiSubConstant(Register dst,
                                     Register src,
                                     Smi* constant,
                                     Label* on_not_smi_result) {
   if (constant->value() == 0) {
     if (!dst.is(src)) {
       movq(dst, src);
     }
   } else if (dst.is(src)) {
     ASSERT(!dst.is(kScratchRegister));
     if (constant->value() == Smi::kMinValue) {
       // Subtracting min-value from any non-negative value will overflow.
       // We test the non-negativeness before doing the subtraction.
       testq(src, src);
       j(not_sign, on_not_smi_result);
-      Move(kScratchRegister, constant);
+      LoadSmiConstant(kScratchRegister, constant);
       subq(dst, kScratchRegister);
     } else {
       // Subtract by adding the negation.
-      Move(kScratchRegister, Smi::FromInt(-constant->value()));
+      LoadSmiConstant(kScratchRegister, Smi::FromInt(-constant->value()));
       addq(kScratchRegister, dst);
       j(overflow, on_not_smi_result);
       movq(dst, kScratchRegister);
     }
   } else {
     if (constant->value() == Smi::kMinValue) {
       // Subtracting min-value from any non-negative value will overflow.
       // We test the non-negativeness before doing the subtraction.
       testq(src, src);
       j(not_sign, on_not_smi_result);
-      Move(dst, constant);
+      LoadSmiConstant(dst, constant);
       // Adding and subtracting the min-value gives the same result, it only
       // differs on the overflow bit, which we don't check here.
       addq(dst, src);
     } else {
       // Subtract by adding the negation.
-      Move(dst, Smi::FromInt(-(constant->value())));
+      LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
       addq(dst, src);
       j(overflow, on_not_smi_result);
     }
   }
 }
 
 
 void MacroAssembler::SmiDiv(Register dst,
                             Register src1,
                             Register src2,
(...skipping 133 matching lines...)
   }
   and_(dst, src2);
 }
 
 
 void MacroAssembler::SmiAndConstant(Register dst, Register src, Smi* constant) {
   if (constant->value() == 0) {
     xor_(dst, dst);
   } else if (dst.is(src)) {
     ASSERT(!dst.is(kScratchRegister));
-    Move(kScratchRegister, constant);
-    and_(dst, kScratchRegister);
+    Register constant_reg = GetSmiConstant(constant);
+    and_(dst, constant_reg);
   } else {
-    Move(dst, constant);
+    LoadSmiConstant(dst, constant);
     and_(dst, src);
   }
 }
 
 
 void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
   if (!dst.is(src1)) {
     movq(dst, src1);
   }
   or_(dst, src2);
 }
 
 
 void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) {
   if (dst.is(src)) {
     ASSERT(!dst.is(kScratchRegister));
-    Move(kScratchRegister, constant);
-    or_(dst, kScratchRegister);
+    Register constant_reg = GetSmiConstant(constant);
+    or_(dst, constant_reg);
   } else {
-    Move(dst, constant);
+    LoadSmiConstant(dst, constant);
     or_(dst, src);
   }
 }
 
 
 void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
   if (!dst.is(src1)) {
     movq(dst, src1);
   }
   xor_(dst, src2);
 }
 
 
 void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) {
   if (dst.is(src)) {
     ASSERT(!dst.is(kScratchRegister));
-    Move(kScratchRegister, constant);
-    xor_(dst, kScratchRegister);
+    Register constant_reg = GetSmiConstant(constant);
+    xor_(dst, constant_reg);
   } else {
-    Move(dst, constant);
+    LoadSmiConstant(dst, constant);
     xor_(dst, src);
   }
 }
 
 
 void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
                                                      Register src,
                                                      int shift_value) {
   ASSERT(is_uint5(shift_value));
   if (shift_value > 0) {
(...skipping 147 matching lines...)
   subq(kScratchRegister, Immediate(1));
   // If src1 is a smi, then scratch register all 1s, else it is all 0s.
   movq(dst, src1);
   xor_(dst, src2);
   and_(dst, kScratchRegister);
   // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
   xor_(dst, src1);
   // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
 }
 
+
 SmiIndex MacroAssembler::SmiToIndex(Register dst,
                                     Register src,
                                     int shift) {
   ASSERT(is_uint6(shift));
   // There is a possible optimization if shift is in the range 60-63, but that
   // will (and must) never happen.
   if (!dst.is(src)) {
     movq(dst, src);
   }
   if (shift < kSmiShift) {
(...skipping 205 matching lines...)
     push(kScratchRegister);
   }
 }
 
 
 void MacroAssembler::Push(Smi* source) {
   intptr_t smi = reinterpret_cast<intptr_t>(source);
   if (is_int32(smi)) {
     push(Immediate(static_cast<int32_t>(smi)));
   } else {
-    Set(kScratchRegister, smi);
-    push(kScratchRegister);
+    Register constant = GetSmiConstant(source);
+    push(constant);
   }
 }
 
 
 void MacroAssembler::Drop(int stack_elements) {
   if (stack_elements > 0) {
     addq(rsp, Immediate(stack_elements * kPointerSize));
   }
 }
 
(...skipping 519 matching lines...)
   // Save the frame pointer and the context in top.
   ExternalReference c_entry_fp_address(Top::k_c_entry_fp_address);
   ExternalReference context_address(Top::k_context_address);
   movq(r14, rax);  // Backup rax before we use it.
 
   movq(rax, rbp);
   store_rax(c_entry_fp_address);
   movq(rax, rsi);
   store_rax(context_address);
 
-  // Setup argv in callee-saved register r15. It is reused in LeaveExitFrame,
+  // Setup argv in callee-saved register r12. It is reused in LeaveExitFrame,
   // so it must be retained across the C-call.
   int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
-  lea(r15, Operand(rbp, r14, times_pointer_size, offset));
+  lea(r12, Operand(rbp, r14, times_pointer_size, offset));
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // Save the state of all registers to the stack from the memory
   // location. This is needed to allow nested break points.
   if (mode == ExitFrame::MODE_DEBUG) {
     // TODO(1243899): This should be symmetric to
     // CopyRegistersFromStackToMemory() but it isn't! esp is assumed
     // correct here, but computed for the other call. Very error
     // prone! FIX THIS. Actually there are deeper problems with
     // register saving than this asymmetry (see the bug report
(...skipping 25 matching lines...)
     and_(rsp, kScratchRegister);
   }
 
   // Patch the saved entry sp.
   movq(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
 }
 
 
 void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode, int result_size) {
   // Registers:
-  // r15 : argv
+  // r12 : argv
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // Restore the memory copy of the registers by digging them out from
   // the stack. This is needed to allow nested break points.
   if (mode == ExitFrame::MODE_DEBUG) {
     // It's okay to clobber register rbx below because we don't need
     // the function pointer after this.
     const int kCallerSavedSize = kNumJSCallerSaved * kPointerSize;
     int kOffset = ExitFrameConstants::kCodeOffset - kCallerSavedSize;
     lea(rbx, Operand(rbp, kOffset));
     CopyRegistersFromStackToMemory(rbx, rcx, kJSCallerSaved);
   }
 #endif
 
   // Get the return address from the stack and restore the frame pointer.
   movq(rcx, Operand(rbp, 1 * kPointerSize));
   movq(rbp, Operand(rbp, 0 * kPointerSize));
 
   // Pop everything up to and including the arguments and the receiver
   // from the caller stack.
-  lea(rsp, Operand(r15, 1 * kPointerSize));
+  lea(rsp, Operand(r12, 1 * kPointerSize));
 
   // Restore current context from top and clear it in debug mode.
   ExternalReference context_address(Top::k_context_address);
   movq(kScratchRegister, context_address);
   movq(rsi, Operand(kScratchRegister, 0));
 #ifdef DEBUG
   movq(Operand(kScratchRegister, 0), Immediate(0));
 #endif
 
   // Push the return address to get ready to return.
(...skipping 563 matching lines...)
   CPU::FlushICache(address_, size_);
 
   // Check that the code was patched as expected.
   ASSERT(masm_.pc_ == address_ + size_);
   ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
 }
 
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_X64