Chromium Code Reviews

Side by Side Diff: src/mips64/assembler-mips64.h

Issue 1534183002: MIPS64: r6 compact branch optimization. (Closed)
Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Created 5 years ago
 // Copyright (c) 1994-2006 Sun Microsystems Inc.
 // All Rights Reserved.
 //
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 // - Redistributions of source code must retain the above copyright notice,
 // this list of conditions and the following disclaimer.
 //
(...skipping 393 matching lines...)
   //
   // Label L;    // unbound label
   // j(cc, &L);  // forward branch to unbound label
   // bind(&L);   // bind label to the current pc
   // j(cc, &L);  // backward branch to bound label
   // bind(&L);   // illegal: a label may be bound only once
   //
   // Note: The same Label can be used for forward and backward branches
   // but it may be bound only once.
   void bind(Label* L);  // Binds an unbound label L to current code position.
+
+  enum OffsetSize : int { kOffset26 = 26, kOffset21 = 21, kOffset16 = 16 };
+
   // Determines if Label is bound and near enough so that branch instruction
   // can be used to reach it, instead of jump instruction.
   bool is_near(Label* L);
+  bool is_near(Label* L, OffsetSize bits);
+  bool is_near_branch(Label* L);
+  inline bool is_near_pre_r6(Label* L) {
+    DCHECK(!(kArchVariant == kMips64r6));
+    return pc_offset() - L->pos() < kMaxBranchOffset - 4 * kInstrSize;
+  }
+  inline bool is_near_r6(Label* L) {
+    DCHECK(kArchVariant == kMips64r6);
+    return pc_offset() - L->pos() < kMaxCompactBranchOffset - 4 * kInstrSize;
+  }
+
+  int BranchOffset(Instr instr);
 
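Note (editorial, not part of the patch): the new OffsetSize enum names the width of a branch's offset field in instruction words, and it lines up with the byte-distance constants added near the bottom of this header. A quick sketch of that arithmetic, assuming an N-bit signed offset field counts 4-byte instruction words:

    // Editorial check only; both identities hold.
    static_assert((1 << (16 + 2 - 1)) - 1 == (1 << (18 - 1)) - 1,
                  "kOffset16 corresponds to kMaxBranchOffset");
    static_assert((1 << (26 + 2 - 1)) - 1 == (1 << (28 - 1)) - 1,
                  "kOffset26 corresponds to kMaxCompactBranchOffset");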
   // Returns the branch offset to the given label from the current code
   // position. Links the label to the current position if it is still unbound.
   // Manages the jump elimination optimization if the second parameter is true.
-  int32_t branch_offset(Label* L, bool jump_elimination_allowed);
-  int32_t branch_offset_compact(Label* L, bool jump_elimination_allowed);
-  int32_t branch_offset21(Label* L, bool jump_elimination_allowed);
-  int32_t branch_offset21_compact(Label* L, bool jump_elimination_allowed);
-  int32_t shifted_branch_offset(Label* L, bool jump_elimination_allowed) {
-    int32_t o = branch_offset(L, jump_elimination_allowed);
-    DCHECK((o & 3) == 0);  // Assert the offset is aligned.
-    return o >> 2;
-  }
-  int32_t shifted_branch_offset_compact(Label* L,
-                                        bool jump_elimination_allowed) {
-    int32_t o = branch_offset_compact(L, jump_elimination_allowed);
-    DCHECK((o & 3) == 0);  // Assert the offset is aligned.
-    return o >> 2;
-  }
+  int32_t branch_offset_helper(Label* L, OffsetSize bits);
+  inline int32_t branch_offset(Label* L) {
+    return branch_offset_helper(L, OffsetSize::kOffset16);
+  }
+  inline int32_t branch_offset21(Label* L) {
+    return branch_offset_helper(L, OffsetSize::kOffset21);
+  }
+  inline int32_t branch_offset26(Label* L) {
+    return branch_offset_helper(L, OffsetSize::kOffset26);
+  }
+  inline int32_t shifted_branch_offset(Label* L) {
+    return branch_offset(L) >> 2;
+  }
+  inline int32_t shifted_branch_offset21(Label* L) {
+    return branch_offset21(L) >> 2;
+  }
+  inline int32_t shifted_branch_offset26(Label* L) {
+    return branch_offset26(L) >> 2;
+  }
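The remaining hunks in this file apply the new helpers mechanically to every Label* branch overload; schematically, the pattern is (taken from the beq hunk further down):

    // before:
    void beq(Register rs, Register rt, Label* L) {
      beq(rs, rt, branch_offset(L, false) >> 2);
    }
    // after: branch_offset_helper() picks the field width and the shift is
    // done once, inside shifted_branch_offset().
    inline void beq(Register rs, Register rt, Label* L) {
      beq(rs, rt, shifted_branch_offset(L));
    }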
   uint64_t jump_address(Label* L);
   uint64_t jump_offset(Label* L);
 
   // Puts a labels target address at the given position.
   // The high 8 bits are set to zero.
   void label_at_put(Label* L, int at_offset);
 
   // Read/Modify the code target address in the branch/call instruction at pc.
   static Address target_address_at(Address pc);
(...skipping 120 matching lines...)
   void nop(unsigned int type = 0) {
     DCHECK(type < 32);
     Register nop_rt_reg = (type == 0) ? zero_reg : at;
     sll(zero_reg, nop_rt_reg, type, true);
   }
 
 
   // --------Branch-and-jump-instructions----------
   // We don't use likely variant of instructions.
   void b(int16_t offset);
-  void b(Label* L) { b(branch_offset(L, false)>>2); }
+  inline void b(Label* L) { b(shifted_branch_offset(L)); }
   void bal(int16_t offset);
-  void bal(Label* L) { bal(branch_offset(L, false)>>2); }
+  inline void bal(Label* L) { bal(shifted_branch_offset(L)); }
   void bc(int32_t offset);
-  void bc(Label* L) { bc(branch_offset(L, false) >> 2); }
+  inline void bc(Label* L) { bc(shifted_branch_offset26(L)); }
   void balc(int32_t offset);
-  void balc(Label* L) { balc(branch_offset(L, false) >> 2); }
+  inline void balc(Label* L) { balc(shifted_branch_offset26(L)); }
 
   void beq(Register rs, Register rt, int16_t offset);
-  void beq(Register rs, Register rt, Label* L) {
-    beq(rs, rt, branch_offset(L, false) >> 2);
+  inline void beq(Register rs, Register rt, Label* L) {
+    beq(rs, rt, shifted_branch_offset(L));
   }
   void bgez(Register rs, int16_t offset);
   void bgezc(Register rt, int16_t offset);
-  void bgezc(Register rt, Label* L) {
-    bgezc(rt, branch_offset_compact(L, false)>>2);
+  inline void bgezc(Register rt, Label* L) {
+    bgezc(rt, shifted_branch_offset(L));
   }
   void bgeuc(Register rs, Register rt, int16_t offset);
-  void bgeuc(Register rs, Register rt, Label* L) {
-    bgeuc(rs, rt, branch_offset_compact(L, false)>>2);
+  inline void bgeuc(Register rs, Register rt, Label* L) {
+    bgeuc(rs, rt, shifted_branch_offset(L));
   }
   void bgec(Register rs, Register rt, int16_t offset);
-  void bgec(Register rs, Register rt, Label* L) {
-    bgec(rs, rt, branch_offset_compact(L, false)>>2);
+  inline void bgec(Register rs, Register rt, Label* L) {
+    bgec(rs, rt, shifted_branch_offset(L));
   }
   void bgezal(Register rs, int16_t offset);
   void bgezalc(Register rt, int16_t offset);
-  void bgezalc(Register rt, Label* L) {
-    bgezalc(rt, branch_offset_compact(L, false)>>2);
+  inline void bgezalc(Register rt, Label* L) {
+    bgezalc(rt, shifted_branch_offset(L));
   }
   void bgezall(Register rs, int16_t offset);
-  void bgezall(Register rs, Label* L) {
-    bgezall(rs, branch_offset(L, false)>>2);
+  inline void bgezall(Register rs, Label* L) {
+    bgezall(rs, branch_offset(L) >> 2);
   }
   void bgtz(Register rs, int16_t offset);
   void bgtzc(Register rt, int16_t offset);
-  void bgtzc(Register rt, Label* L) {
-    bgtzc(rt, branch_offset_compact(L, false)>>2);
+  inline void bgtzc(Register rt, Label* L) {
+    bgtzc(rt, shifted_branch_offset(L));
   }
   void blez(Register rs, int16_t offset);
   void blezc(Register rt, int16_t offset);
-  void blezc(Register rt, Label* L) {
-    blezc(rt, branch_offset_compact(L, false)>>2);
+  inline void blezc(Register rt, Label* L) {
+    blezc(rt, shifted_branch_offset(L));
   }
   void bltz(Register rs, int16_t offset);
   void bltzc(Register rt, int16_t offset);
-  void bltzc(Register rt, Label* L) {
-    bltzc(rt, branch_offset_compact(L, false)>>2);
+  inline void bltzc(Register rt, Label* L) {
+    bltzc(rt, shifted_branch_offset(L));
   }
   void bltuc(Register rs, Register rt, int16_t offset);
-  void bltuc(Register rs, Register rt, Label* L) {
-    bltuc(rs, rt, branch_offset_compact(L, false)>>2);
+  inline void bltuc(Register rs, Register rt, Label* L) {
+    bltuc(rs, rt, shifted_branch_offset(L));
   }
   void bltc(Register rs, Register rt, int16_t offset);
-  void bltc(Register rs, Register rt, Label* L) {
-    bltc(rs, rt, branch_offset_compact(L, false)>>2);
+  inline void bltc(Register rs, Register rt, Label* L) {
+    bltc(rs, rt, shifted_branch_offset(L));
   }
-
   void bltzal(Register rs, int16_t offset);
   void blezalc(Register rt, int16_t offset);
-  void blezalc(Register rt, Label* L) {
-    blezalc(rt, branch_offset_compact(L, false)>>2);
+  inline void blezalc(Register rt, Label* L) {
+    blezalc(rt, shifted_branch_offset(L));
   }
   void bltzalc(Register rt, int16_t offset);
-  void bltzalc(Register rt, Label* L) {
-    bltzalc(rt, branch_offset_compact(L, false)>>2);
+  inline void bltzalc(Register rt, Label* L) {
+    bltzalc(rt, shifted_branch_offset(L));
   }
   void bgtzalc(Register rt, int16_t offset);
-  void bgtzalc(Register rt, Label* L) {
-    bgtzalc(rt, branch_offset_compact(L, false)>>2);
+  inline void bgtzalc(Register rt, Label* L) {
+    bgtzalc(rt, shifted_branch_offset(L));
   }
   void beqzalc(Register rt, int16_t offset);
-  void beqzalc(Register rt, Label* L) {
-    beqzalc(rt, branch_offset_compact(L, false)>>2);
+  inline void beqzalc(Register rt, Label* L) {
+    beqzalc(rt, shifted_branch_offset(L));
   }
   void beqc(Register rs, Register rt, int16_t offset);
-  void beqc(Register rs, Register rt, Label* L) {
-    beqc(rs, rt, branch_offset_compact(L, false)>>2);
+  inline void beqc(Register rs, Register rt, Label* L) {
+    beqc(rs, rt, shifted_branch_offset(L));
   }
   void beqzc(Register rs, int32_t offset);
-  void beqzc(Register rs, Label* L) {
-    beqzc(rs, branch_offset21_compact(L, false)>>2);
+  inline void beqzc(Register rs, Label* L) {
+    beqzc(rs, shifted_branch_offset21(L));
   }
   void bnezalc(Register rt, int16_t offset);
-  void bnezalc(Register rt, Label* L) {
-    bnezalc(rt, branch_offset_compact(L, false)>>2);
+  inline void bnezalc(Register rt, Label* L) {
+    bnezalc(rt, shifted_branch_offset(L));
   }
   void bnec(Register rs, Register rt, int16_t offset);
-  void bnec(Register rs, Register rt, Label* L) {
-    bnec(rs, rt, branch_offset_compact(L, false)>>2);
+  inline void bnec(Register rs, Register rt, Label* L) {
+    bnec(rs, rt, shifted_branch_offset(L));
   }
   void bnezc(Register rt, int32_t offset);
-  void bnezc(Register rt, Label* L) {
-    bnezc(rt, branch_offset21_compact(L, false)>>2);
+  inline void bnezc(Register rt, Label* L) {
+    bnezc(rt, shifted_branch_offset21(L));
   }
   void bne(Register rs, Register rt, int16_t offset);
-  void bne(Register rs, Register rt, Label* L) {
-    bne(rs, rt, branch_offset(L, false)>>2);
+  inline void bne(Register rs, Register rt, Label* L) {
+    bne(rs, rt, shifted_branch_offset(L));
   }
   void bovc(Register rs, Register rt, int16_t offset);
-  void bovc(Register rs, Register rt, Label* L) {
-    bovc(rs, rt, branch_offset_compact(L, false)>>2);
+  inline void bovc(Register rs, Register rt, Label* L) {
+    bovc(rs, rt, shifted_branch_offset(L));
   }
   void bnvc(Register rs, Register rt, int16_t offset);
-  void bnvc(Register rs, Register rt, Label* L) {
-    bnvc(rs, rt, branch_offset_compact(L, false)>>2);
+  inline void bnvc(Register rs, Register rt, Label* L) {
+    bnvc(rs, rt, shifted_branch_offset(L));
   }
 
   // Never use the int16_t b(l)cond version with a branch offset
   // instead of using the Label* version.
 
   // Jump targets must be in the current 256 MB-aligned region. i.e. 28 bits.
   void j(int64_t target);
   void jal(int64_t target);
   void j(Label* target);
   void jal(Label* target);
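Note on the 28-bit figure in the comment above (editorial): J and JAL carry a 26-bit index of instruction words, which covers 2^28 bytes once shifted left by two; the remaining upper bits of the target come from the current 256 MB region of the pc.

    static_assert((1ULL << (26 + 2)) == 256ULL * 1024 * 1024,
                  "a 26-bit word index spans a 256 MB region");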
(...skipping 274 matching lines...)
   void cvt_d_l(FPURegister fd, FPURegister fs);
   void cvt_d_s(FPURegister fd, FPURegister fs);
 
   // Conditions and branches for MIPSr6.
   void cmp(FPUCondition cond, SecondaryField fmt,
            FPURegister fd, FPURegister ft, FPURegister fs);
   void cmp_s(FPUCondition cond, FPURegister fd, FPURegister fs, FPURegister ft);
   void cmp_d(FPUCondition cond, FPURegister fd, FPURegister fs, FPURegister ft);
 
   void bc1eqz(int16_t offset, FPURegister ft);
-  void bc1eqz(Label* L, FPURegister ft) {
-    bc1eqz(branch_offset(L, false)>>2, ft);
+  inline void bc1eqz(Label* L, FPURegister ft) {
+    bc1eqz(shifted_branch_offset(L), ft);
   }
   void bc1nez(int16_t offset, FPURegister ft);
-  void bc1nez(Label* L, FPURegister ft) {
-    bc1nez(branch_offset(L, false)>>2, ft);
+  inline void bc1nez(Label* L, FPURegister ft) {
+    bc1nez(shifted_branch_offset(L), ft);
   }
 
   // Conditions and branches for non MIPSr6.
   void c(FPUCondition cond, SecondaryField fmt,
          FPURegister ft, FPURegister fs, uint16_t cc = 0);
   void c_s(FPUCondition cond, FPURegister ft, FPURegister fs, uint16_t cc = 0);
   void c_d(FPUCondition cond, FPURegister ft, FPURegister fs, uint16_t cc = 0);
 
   void bc1f(int16_t offset, uint16_t cc = 0);
-  void bc1f(Label* L, uint16_t cc = 0) {
-    bc1f(branch_offset(L, false)>>2, cc);
+  inline void bc1f(Label* L, uint16_t cc = 0) {
+    bc1f(shifted_branch_offset(L), cc);
   }
   void bc1t(int16_t offset, uint16_t cc = 0);
-  void bc1t(Label* L, uint16_t cc = 0) {
-    bc1t(branch_offset(L, false)>>2, cc);
+  inline void bc1t(Label* L, uint16_t cc = 0) {
+    bc1t(shifted_branch_offset(L), cc);
   }
   void fcmp(FPURegister src1, const double src2, FPUCondition cond);
 
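A hedged usage sketch of the two FPU branch styles above (hypothetical caller code; register choices arbitrary): on r6 the comparison result is written to an FPU register and bc1eqz/bc1nez branch on it, while pre-r6 code sets FPU condition code 0 with c_d() and branches with bc1t()/bc1f().

    Label equal;
    if (kArchVariant == kMips64r6) {
      cmp_d(EQ, f0, f2, f4);  // f0 <- all ones if f2 == f4, else all zeros
      bc1nez(&equal, f0);     // branch on the register, no condition codes
    } else {
      c_d(EQ, f2, f4);        // sets FPU condition code 0
      bc1t(&equal);           // branch if cc 0 is true
    }
    // ...
    bind(&equal);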
   // Check the code size generated from label to here.
   int SizeOfCodeGeneratedSince(Label* label) {
     return pc_offset() - label->pos();
   }
 
   // Check the number of instructions generated from label to here.
   int InstructionsGeneratedSince(Label* label) {
(...skipping 100 matching lines...)
   static void instr_at_put(byte* pc, Instr instr) {
     *reinterpret_cast<Instr*>(pc) = instr;
   }
   Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
   void instr_at_put(int pos, Instr instr) {
     *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
   }
 
   // Check if an instruction is a branch of some kind.
   static bool IsBranch(Instr instr);
+  static bool IsBc(Instr instr);
+  static bool IsBzc(Instr instr);
+
   static bool IsBeq(Instr instr);
   static bool IsBne(Instr instr);
+  static bool IsBeqzc(Instr instr);
+  static bool IsBnezc(Instr instr);
+  static bool IsBeqc(Instr instr);
+  static bool IsBnec(Instr instr);
+
 
   static bool IsJump(Instr instr);
   static bool IsJ(Instr instr);
   static bool IsLui(Instr instr);
   static bool IsOri(Instr instr);
 
   static bool IsJal(Instr instr);
   static bool IsJr(Instr instr);
   static bool IsJalr(Instr instr);
 
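The new predicates (IsBc, IsBzc, IsBeqzc and friends) are defined in assembler-mips64.cc, which is not part of this file. As a rough sketch of their general shape only, not the actual implementation: each inspects the primary opcode field, bits 31..26 of the 32-bit instruction word, and for the *zc forms also the register fields, comparing against the opcode constants from constants-mips64.h.

    // Sketch; the helper name is hypothetical.
    static inline uint32_t PrimaryOpcodeOf(Instr instr) {
      return (static_cast<uint32_t>(instr) >> 26) & 0x3f;  // bits 31..26
    }
    // e.g. IsBc(instr) amounts to comparing PrimaryOpcodeOf(instr) with the
    // BC opcode value, however that constant is encoded in constants-mips64.h.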
(...skipping 38 matching lines...)
 
   void CheckTrampolinePool();
 
   void PatchConstantPoolAccessInstruction(int pc_offset, int offset,
                                           ConstantPoolEntry::Access access,
                                           ConstantPoolEntry::Type type) {
     // No embedded constant pool support.
     UNREACHABLE();
   }
 
+  bool IsPrevInstrCompactBranch() { return prev_instr_compact_branch_; }
+
  protected:
   // Relocation for a type-recording IC has the AST id added to it. This
   // member variable is a way to pass the information from the call site to
   // the relocation info.
   TypeFeedbackId recorded_ast_id_;
 
   inline static void set_target_internal_reference_encoded_at(Address pc,
                                                               Address target);
 
   int64_t buffer_space() const { return reloc_info_writer.pos() - pc_; }
(...skipping 46 matching lines...)
 
   void EndBlockGrowBuffer() {
     DCHECK(block_buffer_growth_);
     block_buffer_growth_ = false;
   }
 
   bool is_buffer_growth_blocked() const {
     return block_buffer_growth_;
   }
 
+  void EmitPendingInstructions() {
balazs.kilvady 2015/12/18 19:18:05 Nice solution. Should be backported to 32-bit. Alt
ivica.bogosavljevic 2015/12/22 10:22:39 I was thinking the same thing as you did, to use t
balazs.kilvady 2015/12/22 11:01:56 I don't agree. Actually Assembler class is for dir
ivica.bogosavljevic 2015/12/22 12:23:41 Acknowledged.
+    if (IsPrevInstrCompactBranch()) {
+      nop();
+      ClearCompactBranchState();
+    }
+  }
+
+  inline void CheckTrampolinePoolQuick(int extra_instructions = 0);
+
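Background for the new bookkeeping (editorial): r6 compact branches have no delay slot but a forbidden slot; the instruction that follows must not itself be a control-transfer instruction, so a nop is used as padding when needed. A minimal sketch of how emit() could use the state declared in this patch; this is not the actual implementation, and CouldBeCti() is a hypothetical helper:

    void Assembler::emit(Instr x, CompactBranchType is_compact_branch) {
      if (IsPrevInstrCompactBranch()) {
        if (CouldBeCti(x)) {  // hypothetical: does x transfer control?
          nop();              // pad the forbidden slot
        }
        ClearCompactBranchState();
      }
      // ... write x into the buffer and advance pc_ ...
      if (is_compact_branch == CompactBranchType::COMPACT_BRANCH) {
        EmittedCompactBranchInstruction();
      }
    }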
  private:
   // Buffer size and constant pool distance are checked together at regular
   // intervals of kBufferCheckInterval emitted bytes.
   static const int kBufferCheckInterval = 1*KB/2;
 
   // Code generation.
   // The relocation writer's position is at least kGap bytes below the end of
   // the generated instructions. This is so that multi-instruction sequences do
   // not have to check for overflow. The same is true for writes of large
   // relocation info entries.
(...skipping 19 matching lines...)
   bool block_buffer_growth_;  // Block growth when true.
 
   // Relocation information generation.
   // Each relocation is encoded as a variable size value.
   static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
   RelocInfoWriter reloc_info_writer;
 
   // The bound position, before this we cannot do instruction elimination.
   int last_bound_pos_;
 
+  // Readable constants for compact branch handling in emit()
+  enum class CompactBranchType : bool { NO = false, COMPACT_BRANCH = true };
+
   // Code emission.
   inline void CheckBuffer();
   void GrowBuffer();
-  inline void emit(Instr x);
-  inline void emit(uint64_t x);
-  inline void CheckTrampolinePoolQuick(int extra_instructions = 0);
+  inline void emit(Instr x,
+                   CompactBranchType is_compact_branch = CompactBranchType::NO);
+  inline void emit(uint64_t x,
+                   CompactBranchType is_compact_branch = CompactBranchType::NO);
 
balazs.kilvady 2015/12/18 19:18:05 A 64-bit value cannot be a real instruction so it
ivica.bogosavljevic 2015/12/22 10:22:39 Acknowledged.
   // Instruction generation.
   // We have 3 different kind of encoding layout on MIPS.
   // However due to many different types of objects encoded in the same fields
   // we have quite a few aliases for each mode.
   // Using the same structure to refer to Register and FPURegister would spare a
   // few aliases, but mixing both does not look clean to me.
   // Anyway we could surely implement this differently.
 
   void GenInstrRegister(Opcode opcode,
                         Register rs,
(...skipping 30 matching lines...)
                         FPURegister fd,
                         SecondaryField func = NULLSF);
 
   void GenInstrRegister(Opcode opcode,
                         SecondaryField fmt,
                         Register rt,
                         FPUControlRegister fs,
                         SecondaryField func = NULLSF);
 
 
-  void GenInstrImmediate(Opcode opcode,
-                         Register rs,
-                         Register rt,
-                         int32_t j);
-  void GenInstrImmediate(Opcode opcode,
-                         Register rs,
-                         SecondaryField SF,
-                         int32_t j);
-  void GenInstrImmediate(Opcode opcode,
-                         Register r1,
-                         FPURegister r2,
-                         int32_t j);
-  void GenInstrImmediate(Opcode opcode, Register rs, int32_t j);
-  void GenInstrImmediate(Opcode opcode, int32_t offset26);
-
+  void GenInstrImmediate(
+      Opcode opcode, Register rs, Register rt, int32_t j,
+      CompactBranchType is_compact_branch = CompactBranchType::NO);
+  void GenInstrImmediate(
+      Opcode opcode, Register rs, SecondaryField SF, int32_t j,
+      CompactBranchType is_compact_branch = CompactBranchType::NO);
+  void GenInstrImmediate(
+      Opcode opcode, Register r1, FPURegister r2, int32_t j,
+      CompactBranchType is_compact_branch = CompactBranchType::NO);
+  void GenInstrImmediate(
+      Opcode opcode, Register rs, int32_t offset21,
+      CompactBranchType is_compact_branch = CompactBranchType::NO);
+  void GenInstrImmediate(Opcode opcode, Register rs, uint32_t offset21);
+  void GenInstrImmediate(
+      Opcode opcode, int32_t offset26,
+      CompactBranchType is_compact_branch = CompactBranchType::NO);
 
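A hedged sketch of how the compact-branch flag is expected to flow through these overloads (not the actual .cc code; BC is the r6 opcode constant from constants-mips64.h): a compact-branch emitter tags its immediate-format instruction so emit() can track the forbidden slot.

    void Assembler::bc(int32_t offset) {
      GenInstrImmediate(BC, offset, CompactBranchType::COMPACT_BRANCH);
    }
    // GenInstrImmediate then forwards the flag when it calls
    // emit(<encoded instruction>, is_compact_branch).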
   void GenInstrJump(Opcode opcode,
                     uint32_t address);
 
   // Helpers.
   void LoadRegPlusOffsetToAt(const MemOperand& src);
 
   // Labels.
   void print(Label* L);
   void bind_to(Label* L, int pos);
(...skipping 53 matching lines...)
   int32_t get_trampoline_entry(int32_t pos);
   int unbound_labels_count_;
   // After trampoline is emitted, long branches are used in generated code for
   // the forward branches whose target offsets could be beyond reach of branch
   // instruction. We use this information to trigger different mode of
   // branch instruction generation, where we use jump instructions rather
   // than regular branch instructions.
   bool trampoline_emitted_;
   static const int kTrampolineSlotsSize = 2 * kInstrSize;
   static const int kMaxBranchOffset = (1 << (18 - 1)) - 1;
+  static const int kMaxCompactBranchOffset = (1 << (28 - 1)) - 1;
   static const int kInvalidSlotPos = -1;
 
   // Internal reference positions, required for unbounded internal reference
   // labels.
   std::set<int64_t> internal_reference_positions_;
 
+  void EmittedCompactBranchInstruction() { prev_instr_compact_branch_ = true; }
+  void ClearCompactBranchState() { prev_instr_compact_branch_ = false; }
+  bool prev_instr_compact_branch_ = false;
+
   Trampoline trampoline_;
   bool internal_trampoline_exception_;
 
   friend class RegExpMacroAssemblerMIPS;
   friend class RelocInfo;
   friend class CodePatcher;
   friend class BlockTrampolinePoolScope;
 
   PositionsRecorder positions_recorder_;
   friend class PositionsRecorder;
   friend class EnsureSpace;
 };
 
 
 class EnsureSpace BASE_EMBEDDED {
  public:
   explicit EnsureSpace(Assembler* assembler) {
     assembler->CheckBuffer();
   }
 };
 
 }  // namespace internal
 }  // namespace v8
 
 #endif  // V8_ARM_ASSEMBLER_MIPS_H_