| OLD | NEW |
| 1 // Copyright (c) 1994-2006 Sun Microsystems Inc. | 1 // Copyright (c) 1994-2006 Sun Microsystems Inc. |
| 2 // All Rights Reserved. | 2 // All Rights Reserved. |
| 3 // | 3 // |
| 4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
| 5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
| 6 // met: | 6 // met: |
| 7 // | 7 // |
| 8 // - Redistributions of source code must retain the above copyright notice, | 8 // - Redistributions of source code must retain the above copyright notice, |
| 9 // this list of conditions and the following disclaimer. | 9 // this list of conditions and the following disclaimer. |
| 10 // | 10 // |
| (...skipping 390 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 401 // | 401 // |
| 402 // Label L; // unbound label | 402 // Label L; // unbound label |
| 403 // j(cc, &L); // forward branch to unbound label | 403 // j(cc, &L); // forward branch to unbound label |
| 404 // bind(&L); // bind label to the current pc | 404 // bind(&L); // bind label to the current pc |
| 405 // j(cc, &L); // backward branch to bound label | 405 // j(cc, &L); // backward branch to bound label |
| 406 // bind(&L); // illegal: a label may be bound only once | 406 // bind(&L); // illegal: a label may be bound only once |
| 407 // | 407 // |
| 408 // Note: The same Label can be used for forward and backward branches | 408 // Note: The same Label can be used for forward and backward branches |
| 409 // but it may be bound only once. | 409 // but it may be bound only once. |
| 410 void bind(Label* L); // Binds an unbound label L to current code position. | 410 void bind(Label* L); // Binds an unbound label L to current code position. |
| 411 |
| 412 enum OffsetSize : int { kOffset26 = 26, kOffset21 = 21, kOffset16 = 16 }; |
| 413 |
| 411 // Determines if Label is bound and near enough so that branch instruction | 414 // Determines if Label is bound and near enough so that branch instruction |
| 412 // can be used to reach it, instead of jump instruction. | 415 // can be used to reach it, instead of jump instruction. |
| 413 bool is_near(Label* L); | 416 bool is_near(Label* L); |
| 417 bool is_near(Label* L, OffsetSize bits); |
| 418 bool is_near_branch(Label* L); |
| 414 | 419 |
| 415 // Returns the branch offset to the given label from the current code | 420 // Returns the branch offset to the given label from the current code |
| 416 // position. Links the label to the current position if it is still unbound. | 421 // position. Links the label to the current position if it is still unbound. |
| 417 // Manages the jump elimination optimization if the second parameter is true. | 422 // Manages the jump elimination optimization if the second parameter is true. |
| 418 int32_t branch_offset(Label* L, bool jump_elimination_allowed); | 423 int32_t branch_offset_helper(Label* L, OffsetSize bits); |
| 419 int32_t branch_offset_compact(Label* L, bool jump_elimination_allowed); | 424 inline int32_t branch_offset(Label* L) { |
| 420 int32_t branch_offset21(Label* L, bool jump_elimination_allowed); | 425 return branch_offset_helper(L, OffsetSize::kOffset16); |
| 421 int32_t branch_offset21_compact(Label* L, bool jump_elimination_allowed); | |
| 422 int32_t shifted_branch_offset(Label* L, bool jump_elimination_allowed) { | |
| 423 int32_t o = branch_offset(L, jump_elimination_allowed); | |
| 424 DCHECK((o & 3) == 0); // Assert the offset is aligned. | |
| 425 return o >> 2; | |
| 426 } | 426 } |
| 427 int32_t shifted_branch_offset_compact(Label* L, | 427 inline int32_t branch_offset21(Label* L) { |
| 428 bool jump_elimination_allowed) { | 428 return branch_offset_helper(L, OffsetSize::kOffset21); |
| 429 int32_t o = branch_offset_compact(L, jump_elimination_allowed); | 429 } |
| 430 DCHECK((o & 3) == 0); // Assert the offset is aligned. | 430 inline int32_t branch_offset26(Label* L) { |
| 431 return o >> 2; | 431 return branch_offset_helper(L, OffsetSize::kOffset26); |
| 432 } |
| 433 inline int32_t shifted_branch_offset(Label* L) { |
| 434 return branch_offset(L) >> 2; |
| 435 } |
| 436 inline int32_t shifted_branch_offset21(Label* L) { |
| 437 return branch_offset21(L) >> 2; |
| 438 } |
| 439 inline int32_t shifted_branch_offset26(Label* L) { |
| 440 return branch_offset26(L) >> 2; |
| 432 } | 441 } |
| 433 uint32_t jump_address(Label* L); | 442 uint32_t jump_address(Label* L); |
| 434 | 443 |
| 435 // Puts a label's target address at the given position. | 444 // Puts a label's target address at the given position. |
| 436 // The high 8 bits are set to zero. | 445 // The high 8 bits are set to zero. |
| 437 void label_at_put(Label* L, int at_offset); | 446 void label_at_put(Label* L, int at_offset); |
| 438 | 447 |
| 439 // Read/Modify the code target address in the branch/call instruction at pc. | 448 // Read/Modify the code target address in the branch/call instruction at pc. |
| 440 static Address target_address_at(Address pc); | 449 static Address target_address_at(Address pc); |
| 441 static void set_target_address_at(Address pc, | 450 static void set_target_address_at(Address pc, |
| (...skipping 122 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 564 void nop(unsigned int type = 0) { | 573 void nop(unsigned int type = 0) { |
| 565 DCHECK(type < 32); | 574 DCHECK(type < 32); |
| 566 Register nop_rt_reg = (type == 0) ? zero_reg : at; | 575 Register nop_rt_reg = (type == 0) ? zero_reg : at; |
| 567 sll(zero_reg, nop_rt_reg, type, true); | 576 sll(zero_reg, nop_rt_reg, type, true); |
| 568 } | 577 } |
| 569 | 578 |
| 570 | 579 |
| 571 // --------Branch-and-jump-instructions---------- | 580 // --------Branch-and-jump-instructions---------- |
| 572 // We don't use likely variant of instructions. | 581 // We don't use likely variant of instructions. |
| 573 void b(int16_t offset); | 582 void b(int16_t offset); |
| 574 void b(Label* L) { b(branch_offset(L, false)>>2); } | 583 inline void b(Label* L) { b(shifted_branch_offset(L)); } |
| 575 void bal(int16_t offset); | 584 void bal(int16_t offset); |
| 576 void bal(Label* L) { bal(branch_offset(L, false)>>2); } | 585 inline void bal(Label* L) { bal(shifted_branch_offset(L)); } |
| 577 void bc(int32_t offset); | 586 void bc(int32_t offset); |
| 578 void bc(Label* L) { bc(branch_offset(L, false) >> 2); } | 587 inline void bc(Label* L) { bc(shifted_branch_offset26(L)); } |
| 579 void balc(int32_t offset); | 588 void balc(int32_t offset); |
| 580 void balc(Label* L) { balc(branch_offset(L, false) >> 2); } | 589 inline void balc(Label* L) { balc(shifted_branch_offset26(L)); } |
| 581 | 590 |
| 582 void beq(Register rs, Register rt, int16_t offset); | 591 void beq(Register rs, Register rt, int16_t offset); |
| 583 void beq(Register rs, Register rt, Label* L) { | 592 inline void beq(Register rs, Register rt, Label* L) { |
| 584 beq(rs, rt, branch_offset(L, false) >> 2); | 593 beq(rs, rt, shifted_branch_offset(L)); |
| 585 } | 594 } |
| 586 void bgez(Register rs, int16_t offset); | 595 void bgez(Register rs, int16_t offset); |
| 587 void bgezc(Register rt, int16_t offset); | 596 void bgezc(Register rt, int16_t offset); |
| 588 void bgezc(Register rt, Label* L) { | 597 inline void bgezc(Register rt, Label* L) { |
| 589 bgezc(rt, branch_offset_compact(L, false)>>2); | 598 bgezc(rt, shifted_branch_offset(L)); |
| 590 } | 599 } |
| 591 void bgeuc(Register rs, Register rt, int16_t offset); | 600 void bgeuc(Register rs, Register rt, int16_t offset); |
| 592 void bgeuc(Register rs, Register rt, Label* L) { | 601 inline void bgeuc(Register rs, Register rt, Label* L) { |
| 593 bgeuc(rs, rt, branch_offset_compact(L, false)>>2); | 602 bgeuc(rs, rt, shifted_branch_offset(L)); |
| 594 } | 603 } |
| 595 void bgec(Register rs, Register rt, int16_t offset); | 604 void bgec(Register rs, Register rt, int16_t offset); |
| 596 void bgec(Register rs, Register rt, Label* L) { | 605 inline void bgec(Register rs, Register rt, Label* L) { |
| 597 bgec(rs, rt, branch_offset_compact(L, false)>>2); | 606 bgec(rs, rt, shifted_branch_offset(L)); |
| 598 } | 607 } |
| 599 void bgezal(Register rs, int16_t offset); | 608 void bgezal(Register rs, int16_t offset); |
| 600 void bgezalc(Register rt, int16_t offset); | 609 void bgezalc(Register rt, int16_t offset); |
| 601 void bgezalc(Register rt, Label* L) { | 610 inline void bgezalc(Register rt, Label* L) { |
| 602 bgezalc(rt, branch_offset_compact(L, false)>>2); | 611 bgezalc(rt, shifted_branch_offset(L)); |
| 603 } | 612 } |
| 604 void bgezall(Register rs, int16_t offset); | 613 void bgezall(Register rs, int16_t offset); |
| 605 void bgezall(Register rs, Label* L) { | 614 inline void bgezall(Register rs, Label* L) { |
| 606 bgezall(rs, branch_offset(L, false)>>2); | 615 bgezall(rs, branch_offset(L) >> 2); |
| 607 } | 616 } |
| 608 void bgtz(Register rs, int16_t offset); | 617 void bgtz(Register rs, int16_t offset); |
| 609 void bgtzc(Register rt, int16_t offset); | 618 void bgtzc(Register rt, int16_t offset); |
| 610 void bgtzc(Register rt, Label* L) { | 619 inline void bgtzc(Register rt, Label* L) { |
| 611 bgtzc(rt, branch_offset_compact(L, false)>>2); | 620 bgtzc(rt, shifted_branch_offset(L)); |
| 612 } | 621 } |
| 613 void blez(Register rs, int16_t offset); | 622 void blez(Register rs, int16_t offset); |
| 614 void blezc(Register rt, int16_t offset); | 623 void blezc(Register rt, int16_t offset); |
| 615 void blezc(Register rt, Label* L) { | 624 inline void blezc(Register rt, Label* L) { |
| 616 blezc(rt, branch_offset_compact(L, false)>>2); | 625 blezc(rt, shifted_branch_offset(L)); |
| 617 } | 626 } |
| 618 void bltz(Register rs, int16_t offset); | 627 void bltz(Register rs, int16_t offset); |
| 619 void bltzc(Register rt, int16_t offset); | 628 void bltzc(Register rt, int16_t offset); |
| 620 void bltzc(Register rt, Label* L) { | 629 inline void bltzc(Register rt, Label* L) { |
| 621 bltzc(rt, branch_offset_compact(L, false)>>2); | 630 bltzc(rt, shifted_branch_offset(L)); |
| 622 } | 631 } |
| 623 void bltuc(Register rs, Register rt, int16_t offset); | 632 void bltuc(Register rs, Register rt, int16_t offset); |
| 624 void bltuc(Register rs, Register rt, Label* L) { | 633 inline void bltuc(Register rs, Register rt, Label* L) { |
| 625 bltuc(rs, rt, branch_offset_compact(L, false)>>2); | 634 bltuc(rs, rt, shifted_branch_offset(L)); |
| 626 } | 635 } |
| 627 void bltc(Register rs, Register rt, int16_t offset); | 636 void bltc(Register rs, Register rt, int16_t offset); |
| 628 void bltc(Register rs, Register rt, Label* L) { | 637 inline void bltc(Register rs, Register rt, Label* L) { |
| 629 bltc(rs, rt, branch_offset_compact(L, false)>>2); | 638 bltc(rs, rt, shifted_branch_offset(L)); |
| 630 } | 639 } |
| 631 void bltzal(Register rs, int16_t offset); | 640 void bltzal(Register rs, int16_t offset); |
| 632 void blezalc(Register rt, int16_t offset); | 641 void blezalc(Register rt, int16_t offset); |
| 633 void blezalc(Register rt, Label* L) { | 642 inline void blezalc(Register rt, Label* L) { |
| 634 blezalc(rt, branch_offset_compact(L, false)>>2); | 643 blezalc(rt, shifted_branch_offset(L)); |
| 635 } | 644 } |
| 636 void bltzalc(Register rt, int16_t offset); | 645 void bltzalc(Register rt, int16_t offset); |
| 637 void bltzalc(Register rt, Label* L) { | 646 inline void bltzalc(Register rt, Label* L) { |
| 638 bltzalc(rt, branch_offset_compact(L, false)>>2); | 647 bltzalc(rt, shifted_branch_offset(L)); |
| 639 } | 648 } |
| 640 void bgtzalc(Register rt, int16_t offset); | 649 void bgtzalc(Register rt, int16_t offset); |
| 641 void bgtzalc(Register rt, Label* L) { | 650 inline void bgtzalc(Register rt, Label* L) { |
| 642 bgtzalc(rt, branch_offset_compact(L, false)>>2); | 651 bgtzalc(rt, shifted_branch_offset(L)); |
| 643 } | 652 } |
| 644 void beqzalc(Register rt, int16_t offset); | 653 void beqzalc(Register rt, int16_t offset); |
| 645 void beqzalc(Register rt, Label* L) { | 654 inline void beqzalc(Register rt, Label* L) { |
| 646 beqzalc(rt, branch_offset_compact(L, false)>>2); | 655 beqzalc(rt, shifted_branch_offset(L)); |
| 647 } | 656 } |
| 648 void beqc(Register rs, Register rt, int16_t offset); | 657 void beqc(Register rs, Register rt, int16_t offset); |
| 649 void beqc(Register rs, Register rt, Label* L) { | 658 inline void beqc(Register rs, Register rt, Label* L) { |
| 650 beqc(rs, rt, branch_offset_compact(L, false)>>2); | 659 beqc(rs, rt, shifted_branch_offset(L)); |
| 651 } | 660 } |
| 652 void beqzc(Register rs, int32_t offset); | 661 void beqzc(Register rs, int32_t offset); |
| 653 void beqzc(Register rs, Label* L) { | 662 inline void beqzc(Register rs, Label* L) { |
| 654 beqzc(rs, branch_offset21_compact(L, false)>>2); | 663 beqzc(rs, shifted_branch_offset21(L)); |
| 655 } | 664 } |
| 656 void bnezalc(Register rt, int16_t offset); | 665 void bnezalc(Register rt, int16_t offset); |
| 657 void bnezalc(Register rt, Label* L) { | 666 inline void bnezalc(Register rt, Label* L) { |
| 658 bnezalc(rt, branch_offset_compact(L, false)>>2); | 667 bnezalc(rt, shifted_branch_offset(L)); |
| 659 } | 668 } |
| 660 void bnec(Register rs, Register rt, int16_t offset); | 669 void bnec(Register rs, Register rt, int16_t offset); |
| 661 void bnec(Register rs, Register rt, Label* L) { | 670 inline void bnec(Register rs, Register rt, Label* L) { |
| 662 bnec(rs, rt, branch_offset_compact(L, false)>>2); | 671 bnec(rs, rt, shifted_branch_offset(L)); |
| 663 } | 672 } |
| 664 void bnezc(Register rt, int32_t offset); | 673 void bnezc(Register rt, int32_t offset); |
| 665 void bnezc(Register rt, Label* L) { | 674 inline void bnezc(Register rt, Label* L) { |
| 666 bnezc(rt, branch_offset21_compact(L, false)>>2); | 675 bnezc(rt, shifted_branch_offset21(L)); |
| 667 } | 676 } |
| 668 void bne(Register rs, Register rt, int16_t offset); | 677 void bne(Register rs, Register rt, int16_t offset); |
| 669 void bne(Register rs, Register rt, Label* L) { | 678 inline void bne(Register rs, Register rt, Label* L) { |
| 670 bne(rs, rt, branch_offset(L, false)>>2); | 679 bne(rs, rt, shifted_branch_offset(L)); |
| 671 } | 680 } |
| 672 void bovc(Register rs, Register rt, int16_t offset); | 681 void bovc(Register rs, Register rt, int16_t offset); |
| 673 void bovc(Register rs, Register rt, Label* L) { | 682 inline void bovc(Register rs, Register rt, Label* L) { |
| 674 bovc(rs, rt, branch_offset_compact(L, false)>>2); | 683 bovc(rs, rt, shifted_branch_offset(L)); |
| 675 } | 684 } |
| 676 void bnvc(Register rs, Register rt, int16_t offset); | 685 void bnvc(Register rs, Register rt, int16_t offset); |
| 677 void bnvc(Register rs, Register rt, Label* L) { | 686 inline void bnvc(Register rs, Register rt, Label* L) { |
| 678 bnvc(rs, rt, branch_offset_compact(L, false)>>2); | 687 bnvc(rs, rt, shifted_branch_offset(L)); |
| 679 } | 688 } |
| 680 | 689 |
| 681 // Never use the int16_t b(l)cond version with a branch offset | 690 // Never use the int16_t b(l)cond version with a branch offset |
| 682 // instead of using the Label* version. | 691 // instead of using the Label* version. |
| 683 | 692 |
| 684 // Jump targets must be in the current 256 MB-aligned region. i.e. 28 bits. | 693 // Jump targets must be in the current 256 MB-aligned region. i.e. 28 bits. |
| 685 void j(int32_t target); | 694 void j(int32_t target); |
| 686 void jal(int32_t target); | 695 void jal(int32_t target); |
| 687 void jalr(Register rs, Register rd = ra); | 696 void jalr(Register rs, Register rd = ra); |
| 688 void jr(Register target); | 697 void jr(Register target); |
| (...skipping 224 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 913 void cvt_d_l(FPURegister fd, FPURegister fs); | 922 void cvt_d_l(FPURegister fd, FPURegister fs); |
| 914 void cvt_d_s(FPURegister fd, FPURegister fs); | 923 void cvt_d_s(FPURegister fd, FPURegister fs); |
| 915 | 924 |
| 916 // Conditions and branches for MIPSr6. | 925 // Conditions and branches for MIPSr6. |
| 917 void cmp(FPUCondition cond, SecondaryField fmt, | 926 void cmp(FPUCondition cond, SecondaryField fmt, |
| 918 FPURegister fd, FPURegister ft, FPURegister fs); | 927 FPURegister fd, FPURegister ft, FPURegister fs); |
| 919 void cmp_s(FPUCondition cond, FPURegister fd, FPURegister fs, FPURegister ft); | 928 void cmp_s(FPUCondition cond, FPURegister fd, FPURegister fs, FPURegister ft); |
| 920 void cmp_d(FPUCondition cond, FPURegister fd, FPURegister fs, FPURegister ft); | 929 void cmp_d(FPUCondition cond, FPURegister fd, FPURegister fs, FPURegister ft); |
| 921 | 930 |
| 922 void bc1eqz(int16_t offset, FPURegister ft); | 931 void bc1eqz(int16_t offset, FPURegister ft); |
| 923 void bc1eqz(Label* L, FPURegister ft) { | 932 inline void bc1eqz(Label* L, FPURegister ft) { |
| 924 bc1eqz(branch_offset(L, false)>>2, ft); | 933 bc1eqz(shifted_branch_offset(L), ft); |
| 925 } | 934 } |
| 926 void bc1nez(int16_t offset, FPURegister ft); | 935 void bc1nez(int16_t offset, FPURegister ft); |
| 927 void bc1nez(Label* L, FPURegister ft) { | 936 inline void bc1nez(Label* L, FPURegister ft) { |
| 928 bc1nez(branch_offset(L, false)>>2, ft); | 937 bc1nez(shifted_branch_offset(L), ft); |
| 929 } | 938 } |
| 930 | 939 |
| 931 // Conditions and branches for non MIPSr6. | 940 // Conditions and branches for non MIPSr6. |
| 932 void c(FPUCondition cond, SecondaryField fmt, | 941 void c(FPUCondition cond, SecondaryField fmt, |
| 933 FPURegister ft, FPURegister fs, uint16_t cc = 0); | 942 FPURegister ft, FPURegister fs, uint16_t cc = 0); |
| 934 void c_s(FPUCondition cond, FPURegister ft, FPURegister fs, uint16_t cc = 0); | 943 void c_s(FPUCondition cond, FPURegister ft, FPURegister fs, uint16_t cc = 0); |
| 935 void c_d(FPUCondition cond, FPURegister ft, FPURegister fs, uint16_t cc = 0); | 944 void c_d(FPUCondition cond, FPURegister ft, FPURegister fs, uint16_t cc = 0); |
| 936 | 945 |
| 937 void bc1f(int16_t offset, uint16_t cc = 0); | 946 void bc1f(int16_t offset, uint16_t cc = 0); |
| 938 void bc1f(Label* L, uint16_t cc = 0) { bc1f(branch_offset(L, false)>>2, cc); } | 947 inline void bc1f(Label* L, uint16_t cc = 0) { |
| 948 bc1f(shifted_branch_offset(L), cc); |
| 949 } |
| 939 void bc1t(int16_t offset, uint16_t cc = 0); | 950 void bc1t(int16_t offset, uint16_t cc = 0); |
| 940 void bc1t(Label* L, uint16_t cc = 0) { bc1t(branch_offset(L, false)>>2, cc); } | 951 inline void bc1t(Label* L, uint16_t cc = 0) { |
| 952 bc1t(shifted_branch_offset(L), cc); |
| 953 } |
| 941 void fcmp(FPURegister src1, const double src2, FPUCondition cond); | 954 void fcmp(FPURegister src1, const double src2, FPUCondition cond); |
| 942 | 955 |
| 943 // Check the code size generated from label to here. | 956 // Check the code size generated from label to here. |
| 944 int SizeOfCodeGeneratedSince(Label* label) { | 957 int SizeOfCodeGeneratedSince(Label* label) { |
| 945 return pc_offset() - label->pos(); | 958 return pc_offset() - label->pos(); |
| 946 } | 959 } |
| 947 | 960 |
| 948 // Check the number of instructions generated from label to here. | 961 // Check the number of instructions generated from label to here. |
| 949 int InstructionsGeneratedSince(Label* label) { | 962 int InstructionsGeneratedSince(Label* label) { |
| 950 return SizeOfCodeGeneratedSince(label) / kInstrSize; | 963 return SizeOfCodeGeneratedSince(label) / kInstrSize; |
| (...skipping 98 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1049 static void instr_at_put(byte* pc, Instr instr) { | 1062 static void instr_at_put(byte* pc, Instr instr) { |
| 1050 *reinterpret_cast<Instr*>(pc) = instr; | 1063 *reinterpret_cast<Instr*>(pc) = instr; |
| 1051 } | 1064 } |
| 1052 Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); } | 1065 Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); } |
| 1053 void instr_at_put(int pos, Instr instr) { | 1066 void instr_at_put(int pos, Instr instr) { |
| 1054 *reinterpret_cast<Instr*>(buffer_ + pos) = instr; | 1067 *reinterpret_cast<Instr*>(buffer_ + pos) = instr; |
| 1055 } | 1068 } |
| 1056 | 1069 |
| 1057 // Check if an instruction is a branch of some kind. | 1070 // Check if an instruction is a branch of some kind. |
| 1058 static bool IsBranch(Instr instr); | 1071 static bool IsBranch(Instr instr); |
| 1072 static bool IsBc(Instr instr); |
| 1073 static bool IsBzc(Instr instr); |
| 1059 static bool IsBeq(Instr instr); | 1074 static bool IsBeq(Instr instr); |
| 1060 static bool IsBne(Instr instr); | 1075 static bool IsBne(Instr instr); |
| 1076 static bool IsBeqzc(Instr instr); |
| 1077 static bool IsBnezc(Instr instr); |
| 1078 static bool IsBeqc(Instr instr); |
| 1079 static bool IsBnec(Instr instr); |
| 1061 | 1080 |
| 1062 static bool IsJump(Instr instr); | 1081 static bool IsJump(Instr instr); |
| 1063 static bool IsJ(Instr instr); | 1082 static bool IsJ(Instr instr); |
| 1064 static bool IsLui(Instr instr); | 1083 static bool IsLui(Instr instr); |
| 1065 static bool IsOri(Instr instr); | 1084 static bool IsOri(Instr instr); |
| 1066 | 1085 |
| 1067 static bool IsJal(Instr instr); | 1086 static bool IsJal(Instr instr); |
| 1068 static bool IsJr(Instr instr); | 1087 static bool IsJr(Instr instr); |
| 1069 static bool IsJalr(Instr instr); | 1088 static bool IsJalr(Instr instr); |
| 1070 | 1089 |
| (...skipping 101 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1172 | 1191 |
| 1173 void EndBlockGrowBuffer() { | 1192 void EndBlockGrowBuffer() { |
| 1174 DCHECK(block_buffer_growth_); | 1193 DCHECK(block_buffer_growth_); |
| 1175 block_buffer_growth_ = false; | 1194 block_buffer_growth_ = false; |
| 1176 } | 1195 } |
| 1177 | 1196 |
| 1178 bool is_buffer_growth_blocked() const { | 1197 bool is_buffer_growth_blocked() const { |
| 1179 return block_buffer_growth_; | 1198 return block_buffer_growth_; |
| 1180 } | 1199 } |
| 1181 | 1200 |
| 1201 bool IsPrevInstrCompactBranch() { return prev_instr_compact_branch_; } |
| 1202 |
| 1182 private: | 1203 private: |
| 1183 inline static void set_target_internal_reference_encoded_at(Address pc, | 1204 inline static void set_target_internal_reference_encoded_at(Address pc, |
| 1184 Address target); | 1205 Address target); |
| 1185 | 1206 |
| 1186 // Buffer size and constant pool distance are checked together at regular | 1207 // Buffer size and constant pool distance are checked together at regular |
| 1187 // intervals of kBufferCheckInterval emitted bytes. | 1208 // intervals of kBufferCheckInterval emitted bytes. |
| 1188 static const int kBufferCheckInterval = 1*KB/2; | 1209 static const int kBufferCheckInterval = 1*KB/2; |
| 1189 | 1210 |
| 1190 // Code generation. | 1211 // Code generation. |
| 1191 // The relocation writer's position is at least kGap bytes below the end of | 1212 // The relocation writer's position is at least kGap bytes below the end of |
| (...skipping 22 matching lines...) Expand all Loading... |
| 1214 bool block_buffer_growth_; // Block growth when true. | 1235 bool block_buffer_growth_; // Block growth when true. |
| 1215 | 1236 |
| 1216 // Relocation information generation. | 1237 // Relocation information generation. |
| 1217 // Each relocation is encoded as a variable size value. | 1238 // Each relocation is encoded as a variable size value. |
| 1218 static const int kMaxRelocSize = RelocInfoWriter::kMaxSize; | 1239 static const int kMaxRelocSize = RelocInfoWriter::kMaxSize; |
| 1219 RelocInfoWriter reloc_info_writer; | 1240 RelocInfoWriter reloc_info_writer; |
| 1220 | 1241 |
| 1221 // The bound position, before this we cannot do instruction elimination. | 1242 // The bound position, before this we cannot do instruction elimination. |
| 1222 int last_bound_pos_; | 1243 int last_bound_pos_; |
| 1223 | 1244 |
| 1245 // Readable constants for compact branch handling in emit() |
| 1246 enum class CompactBranchType : bool { NO = false, COMPACT_BRANCH = true }; |
| 1247 |
| 1224 // Code emission. | 1248 // Code emission. |
| 1225 inline void CheckBuffer(); | 1249 inline void CheckBuffer(); |
| 1226 void GrowBuffer(); | 1250 void GrowBuffer(); |
| 1227 inline void emit(Instr x); | 1251 inline void emit(Instr x, |
| 1252 CompactBranchType is_compact_branch = CompactBranchType::NO); |
| 1228 inline void CheckTrampolinePoolQuick(int extra_instructions = 0); | 1253 inline void CheckTrampolinePoolQuick(int extra_instructions = 0); |
| 1229 | 1254 |
| 1230 // Instruction generation. | 1255 // Instruction generation. |
| 1231 // We have 3 different kind of encoding layout on MIPS. | 1256 // We have 3 different kind of encoding layout on MIPS. |
| 1232 // However due to many different types of objects encoded in the same fields | 1257 // However due to many different types of objects encoded in the same fields |
| 1233 // we have quite a few aliases for each mode. | 1258 // we have quite a few aliases for each mode. |
| 1234 // Using the same structure to refer to Register and FPURegister would spare a | 1259 // Using the same structure to refer to Register and FPURegister would spare a |
| 1235 // few aliases, but mixing both does not look clean to me. | 1260 // few aliases, but mixing both does not look clean to me. |
| 1236 // Anyway we could surely implement this differently. | 1261 // Anyway we could surely implement this differently. |
| 1237 | 1262 |
| (...skipping 31 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1269 FPURegister fs, | 1294 FPURegister fs, |
| 1270 FPURegister fd, | 1295 FPURegister fd, |
| 1271 SecondaryField func = NULLSF); | 1296 SecondaryField func = NULLSF); |
| 1272 | 1297 |
| 1273 void GenInstrRegister(Opcode opcode, | 1298 void GenInstrRegister(Opcode opcode, |
| 1274 SecondaryField fmt, | 1299 SecondaryField fmt, |
| 1275 Register rt, | 1300 Register rt, |
| 1276 FPUControlRegister fs, | 1301 FPUControlRegister fs, |
| 1277 SecondaryField func = NULLSF); | 1302 SecondaryField func = NULLSF); |
| 1278 | 1303 |
| 1279 | 1304 void GenInstrImmediate( |
| 1280 void GenInstrImmediate(Opcode opcode, | 1305 Opcode opcode, Register rs, Register rt, int32_t j, |
| 1281 Register rs, | 1306 CompactBranchType is_compact_branch = CompactBranchType::NO); |
| 1282 Register rt, | 1307 void GenInstrImmediate( |
| 1283 int32_t j); | 1308 Opcode opcode, Register rs, SecondaryField SF, int32_t j, |
| 1284 void GenInstrImmediate(Opcode opcode, | 1309 CompactBranchType is_compact_branch = CompactBranchType::NO); |
| 1285 Register rs, | 1310 void GenInstrImmediate( |
| 1286 SecondaryField SF, | 1311 Opcode opcode, Register r1, FPURegister r2, int32_t j, |
| 1287 int32_t j); | 1312 CompactBranchType is_compact_branch = CompactBranchType::NO); |
| 1288 void GenInstrImmediate(Opcode opcode, | 1313 void GenInstrImmediate( |
| 1289 Register r1, | 1314 Opcode opcode, Register rs, int32_t offset21, |
| 1290 FPURegister r2, | 1315 CompactBranchType is_compact_branch = CompactBranchType::NO); |
| 1291 int32_t j); | 1316 void GenInstrImmediate(Opcode opcode, Register rs, uint32_t offset21); |
| 1292 void GenInstrImmediate(Opcode opcode, Register rs, int32_t j); | 1317 void GenInstrImmediate( |
| 1293 void GenInstrImmediate(Opcode opcode, int32_t offset26); | 1318 Opcode opcode, int32_t offset26, |
| 1319 CompactBranchType is_compact_branch = CompactBranchType::NO); |
| 1294 | 1320 |
| 1295 | 1321 |
| 1296 void GenInstrJump(Opcode opcode, | 1322 void GenInstrJump(Opcode opcode, |
| 1297 uint32_t address); | 1323 uint32_t address); |
| 1298 | 1324 |
| 1299 // Helpers. | 1325 // Helpers. |
| 1300 void LoadRegPlusOffsetToAt(const MemOperand& src); | 1326 void LoadRegPlusOffsetToAt(const MemOperand& src); |
| 1301 | 1327 |
| 1302 // Labels. | 1328 // Labels. |
| 1303 void print(Label* L); | 1329 void print(Label* L); |
| (...skipping 54 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1358 int32_t get_trampoline_entry(int32_t pos); | 1384 int32_t get_trampoline_entry(int32_t pos); |
| 1359 int unbound_labels_count_; | 1385 int unbound_labels_count_; |
| 1360 // If trampoline is emitted, generated code is becoming large. As this is | 1386 // If trampoline is emitted, generated code is becoming large. As this is |
| 1361 // already a slow case which can possibly break our code generation for the | 1387 // already a slow case which can possibly break our code generation for the |
| 1362 // extreme case, we use this information to trigger different mode of | 1388 // extreme case, we use this information to trigger different mode of |
| 1363 // branch instruction generation, where we use jump instructions rather | 1389 // branch instruction generation, where we use jump instructions rather |
| 1364 // than regular branch instructions. | 1390 // than regular branch instructions. |
| 1365 bool trampoline_emitted_; | 1391 bool trampoline_emitted_; |
| 1366 static const int kTrampolineSlotsSize = 4 * kInstrSize; | 1392 static const int kTrampolineSlotsSize = 4 * kInstrSize; |
| 1367 static const int kMaxBranchOffset = (1 << (18 - 1)) - 1; | 1393 static const int kMaxBranchOffset = (1 << (18 - 1)) - 1; |
| 1394 static const int kMaxCompactBranchOffset = (1 << (28 - 1)) - 1; |
| 1368 static const int kInvalidSlotPos = -1; | 1395 static const int kInvalidSlotPos = -1; |
| 1369 | 1396 |
| 1370 // Internal reference positions, required for unbounded internal reference | 1397 // Internal reference positions, required for unbounded internal reference |
| 1371 // labels. | 1398 // labels. |
| 1372 std::set<int> internal_reference_positions_; | 1399 std::set<int> internal_reference_positions_; |
| 1373 | 1400 |
| 1401 void EmittedCompactBranchInstruction() { prev_instr_compact_branch_ = true; } |
| 1402 void ClearCompactBranchState() { prev_instr_compact_branch_ = false; } |
| 1403 bool prev_instr_compact_branch_ = false; |
| 1404 |
| 1374 Trampoline trampoline_; | 1405 Trampoline trampoline_; |
| 1375 bool internal_trampoline_exception_; | 1406 bool internal_trampoline_exception_; |
| 1376 | 1407 |
| 1377 friend class RegExpMacroAssemblerMIPS; | 1408 friend class RegExpMacroAssemblerMIPS; |
| 1378 friend class RelocInfo; | 1409 friend class RelocInfo; |
| 1379 friend class CodePatcher; | 1410 friend class CodePatcher; |
| 1380 friend class BlockTrampolinePoolScope; | 1411 friend class BlockTrampolinePoolScope; |
| 1381 | 1412 |
| 1382 PositionsRecorder positions_recorder_; | 1413 PositionsRecorder positions_recorder_; |
| 1383 friend class PositionsRecorder; | 1414 friend class PositionsRecorder; |
| 1384 friend class EnsureSpace; | 1415 friend class EnsureSpace; |
| 1385 }; | 1416 }; |
| 1386 | 1417 |
| 1387 | 1418 |
| 1388 class EnsureSpace BASE_EMBEDDED { | 1419 class EnsureSpace BASE_EMBEDDED { |
| 1389 public: | 1420 public: |
| 1390 explicit EnsureSpace(Assembler* assembler) { | 1421 explicit EnsureSpace(Assembler* assembler) { |
| 1391 assembler->CheckBuffer(); | 1422 assembler->CheckBuffer(); |
| 1392 } | 1423 } |
| 1393 }; | 1424 }; |
| 1394 | 1425 |
| 1395 } // namespace internal | 1426 } // namespace internal |
| 1396 } // namespace v8 | 1427 } // namespace v8 |
| 1397 | 1428 |
| 1398 #endif // V8_ARM_ASSEMBLER_MIPS_H_ | 1429 #endif // V8_ARM_ASSEMBLER_MIPS_H_ |
| OLD | NEW |