| OLD | NEW |
| 1 // Copyright (c) 1994-2006 Sun Microsystems Inc. | 1 // Copyright (c) 1994-2006 Sun Microsystems Inc. |
| 2 // All Rights Reserved. | 2 // All Rights Reserved. |
| 3 // | 3 // |
| 4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
| 5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
| 6 // met: | 6 // met: |
| 7 // | 7 // |
| 8 // - Redistributions of source code must retain the above copyright notice, | 8 // - Redistributions of source code must retain the above copyright notice, |
| 9 // this list of conditions and the following disclaimer. | 9 // this list of conditions and the following disclaimer. |
| 10 // | 10 // |
| (...skipping 49 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 60 // compilation. | 60 // compilation. |
| 61 #if defined(__mips__) && defined(__mips_hard_float) && __mips_hard_float != 0 | 61 #if defined(__mips__) && defined(__mips_hard_float) && __mips_hard_float != 0 |
| 62 answer |= 1u << FPU; | 62 answer |= 1u << FPU; |
| 63 #endif | 63 #endif |
| 64 | 64 |
| 65 return answer; | 65 return answer; |
| 66 } | 66 } |
| 67 | 67 |
| 68 | 68 |
| 69 const char* DoubleRegister::AllocationIndexToString(int index) { | 69 const char* DoubleRegister::AllocationIndexToString(int index) { |
| 70 ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters); | 70 DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters); |
| 71 const char* const names[] = { | 71 const char* const names[] = { |
| 72 "f0", | 72 "f0", |
| 73 "f2", | 73 "f2", |
| 74 "f4", | 74 "f4", |
| 75 "f6", | 75 "f6", |
| 76 "f8", | 76 "f8", |
| 77 "f10", | 77 "f10", |
| 78 "f12", | 78 "f12", |
| 79 "f14", | 79 "f14", |
| 80 "f16", | 80 "f16", |
| (...skipping 24 matching lines...) Expand all Loading... |
| 105 if (cpu.has_fpu()) supported_ |= 1u << FPU; | 105 if (cpu.has_fpu()) supported_ |= 1u << FPU; |
| 106 #endif | 106 #endif |
| 107 } | 107 } |
| 108 | 108 |
| 109 | 109 |
| 110 void CpuFeatures::PrintTarget() { } | 110 void CpuFeatures::PrintTarget() { } |
| 111 void CpuFeatures::PrintFeatures() { } | 111 void CpuFeatures::PrintFeatures() { } |
| 112 | 112 |
| 113 | 113 |
| 114 int ToNumber(Register reg) { | 114 int ToNumber(Register reg) { |
| 115 ASSERT(reg.is_valid()); | 115 DCHECK(reg.is_valid()); |
| 116 const int kNumbers[] = { | 116 const int kNumbers[] = { |
| 117 0, // zero_reg | 117 0, // zero_reg |
| 118 1, // at | 118 1, // at |
| 119 2, // v0 | 119 2, // v0 |
| 120 3, // v1 | 120 3, // v1 |
| 121 4, // a0 | 121 4, // a0 |
| 122 5, // a1 | 122 5, // a1 |
| 123 6, // a2 | 123 6, // a2 |
| 124 7, // a3 | 124 7, // a3 |
| 125 8, // a4 | 125 8, // a4 |
| (...skipping 19 matching lines...) Expand all Loading... |
| 145 28, // gp | 145 28, // gp |
| 146 29, // sp | 146 29, // sp |
| 147 30, // fp | 147 30, // fp |
| 148 31, // ra | 148 31, // ra |
| 149 }; | 149 }; |
| 150 return kNumbers[reg.code()]; | 150 return kNumbers[reg.code()]; |
| 151 } | 151 } |
| 152 | 152 |
| 153 | 153 |
| 154 Register ToRegister(int num) { | 154 Register ToRegister(int num) { |
| 155 ASSERT(num >= 0 && num < kNumRegisters); | 155 DCHECK(num >= 0 && num < kNumRegisters); |
| 156 const Register kRegisters[] = { | 156 const Register kRegisters[] = { |
| 157 zero_reg, | 157 zero_reg, |
| 158 at, | 158 at, |
| 159 v0, v1, | 159 v0, v1, |
| 160 a0, a1, a2, a3, a4, a5, a6, a7, | 160 a0, a1, a2, a3, a4, a5, a6, a7, |
| 161 t0, t1, t2, t3, | 161 t0, t1, t2, t3, |
| 162 s0, s1, s2, s3, s4, s5, s6, s7, | 162 s0, s1, s2, s3, s4, s5, s6, s7, |
| 163 t8, t9, | 163 t8, t9, |
| 164 k0, k1, | 164 k0, k1, |
| 165 gp, | 165 gp, |
| (...skipping 49 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 215 // ----------------------------------------------------------------------------- | 215 // ----------------------------------------------------------------------------- |
| 216 // Implementation of Operand and MemOperand. | 216 // Implementation of Operand and MemOperand. |
| 217 // See assembler-mips-inl.h for inlined constructors. | 217 // See assembler-mips-inl.h for inlined constructors. |
| 218 | 218 |
| 219 Operand::Operand(Handle<Object> handle) { | 219 Operand::Operand(Handle<Object> handle) { |
| 220 AllowDeferredHandleDereference using_raw_address; | 220 AllowDeferredHandleDereference using_raw_address; |
| 221 rm_ = no_reg; | 221 rm_ = no_reg; |
| 222 // Verify all Objects referred by code are NOT in new space. | 222 // Verify all Objects referred by code are NOT in new space. |
| 223 Object* obj = *handle; | 223 Object* obj = *handle; |
| 224 if (obj->IsHeapObject()) { | 224 if (obj->IsHeapObject()) { |
| 225 ASSERT(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj)); | 225 DCHECK(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj)); |
| 226 imm64_ = reinterpret_cast<intptr_t>(handle.location()); | 226 imm64_ = reinterpret_cast<intptr_t>(handle.location()); |
| 227 rmode_ = RelocInfo::EMBEDDED_OBJECT; | 227 rmode_ = RelocInfo::EMBEDDED_OBJECT; |
| 228 } else { | 228 } else { |
| 229 // No relocation needed. | 229 // No relocation needed. |
| 230 imm64_ = reinterpret_cast<intptr_t>(obj); | 230 imm64_ = reinterpret_cast<intptr_t>(obj); |
| 231 rmode_ = RelocInfo::NONE64; | 231 rmode_ = RelocInfo::NONE64; |
| 232 } | 232 } |
| 233 } | 233 } |
| 234 | 234 |
| 235 | 235 |
| (...skipping 64 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 300 | 300 |
| 301 trampoline_emitted_ = FLAG_force_long_branches; | 301 trampoline_emitted_ = FLAG_force_long_branches; |
| 302 unbound_labels_count_ = 0; | 302 unbound_labels_count_ = 0; |
| 303 block_buffer_growth_ = false; | 303 block_buffer_growth_ = false; |
| 304 | 304 |
| 305 ClearRecordedAstId(); | 305 ClearRecordedAstId(); |
| 306 } | 306 } |
| 307 | 307 |
| 308 | 308 |
| 309 void Assembler::GetCode(CodeDesc* desc) { | 309 void Assembler::GetCode(CodeDesc* desc) { |
| 310 ASSERT(pc_ <= reloc_info_writer.pos()); // No overlap. | 310 DCHECK(pc_ <= reloc_info_writer.pos()); // No overlap. |
| 311 // Set up code descriptor. | 311 // Set up code descriptor. |
| 312 desc->buffer = buffer_; | 312 desc->buffer = buffer_; |
| 313 desc->buffer_size = buffer_size_; | 313 desc->buffer_size = buffer_size_; |
| 314 desc->instr_size = pc_offset(); | 314 desc->instr_size = pc_offset(); |
| 315 desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos(); | 315 desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos(); |
| 316 desc->origin = this; | 316 desc->origin = this; |
| 317 } | 317 } |
| 318 | 318 |
| 319 | 319 |
| 320 void Assembler::Align(int m) { | 320 void Assembler::Align(int m) { |
| 321 ASSERT(m >= 4 && IsPowerOf2(m)); | 321 DCHECK(m >= 4 && IsPowerOf2(m)); |
| 322 while ((pc_offset() & (m - 1)) != 0) { | 322 while ((pc_offset() & (m - 1)) != 0) { |
| 323 nop(); | 323 nop(); |
| 324 } | 324 } |
| 325 } | 325 } |
| 326 | 326 |
| 327 | 327 |
| 328 void Assembler::CodeTargetAlign() { | 328 void Assembler::CodeTargetAlign() { |
| 329 // No advantage to aligning branch/call targets to more than | 329 // No advantage to aligning branch/call targets to more than |
| 330 // single instruction, that I am aware of. | 330 // single instruction, that I am aware of. |
| 331 Align(4); | 331 Align(4); |
| (...skipping 218 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 550 | 550 |
| 551 bool Assembler::IsOri(Instr instr) { | 551 bool Assembler::IsOri(Instr instr) { |
| 552 uint32_t opcode = GetOpcodeField(instr); | 552 uint32_t opcode = GetOpcodeField(instr); |
| 553 // Checks if the instruction is a load upper immediate. | 553 // Checks if the instruction is a load upper immediate. |
| 554 return opcode == ORI; | 554 return opcode == ORI; |
| 555 } | 555 } |
| 556 | 556 |
| 557 | 557 |
| 558 bool Assembler::IsNop(Instr instr, unsigned int type) { | 558 bool Assembler::IsNop(Instr instr, unsigned int type) { |
| 559 // See Assembler::nop(type). | 559 // See Assembler::nop(type). |
| 560 ASSERT(type < 32); | 560 DCHECK(type < 32); |
| 561 uint32_t opcode = GetOpcodeField(instr); | 561 uint32_t opcode = GetOpcodeField(instr); |
| 562 uint32_t function = GetFunctionField(instr); | 562 uint32_t function = GetFunctionField(instr); |
| 563 uint32_t rt = GetRt(instr); | 563 uint32_t rt = GetRt(instr); |
| 564 uint32_t rd = GetRd(instr); | 564 uint32_t rd = GetRd(instr); |
| 565 uint32_t sa = GetSa(instr); | 565 uint32_t sa = GetSa(instr); |
| 566 | 566 |
| 567 // Traditional mips nop == sll(zero_reg, zero_reg, 0) | 567 // Traditional mips nop == sll(zero_reg, zero_reg, 0) |
| 568 // When marking non-zero type, use sll(zero_reg, at, type) | 568 // When marking non-zero type, use sll(zero_reg, at, type) |
| 569 // to avoid use of mips ssnop and ehb special encodings | 569 // to avoid use of mips ssnop and ehb special encodings |
| 570 // of the sll instruction. | 570 // of the sll instruction. |
| 571 | 571 |
| 572 Register nop_rt_reg = (type == 0) ? zero_reg : at; | 572 Register nop_rt_reg = (type == 0) ? zero_reg : at; |
| 573 bool ret = (opcode == SPECIAL && function == SLL && | 573 bool ret = (opcode == SPECIAL && function == SLL && |
| 574 rd == static_cast<uint32_t>(ToNumber(zero_reg)) && | 574 rd == static_cast<uint32_t>(ToNumber(zero_reg)) && |
| 575 rt == static_cast<uint32_t>(ToNumber(nop_rt_reg)) && | 575 rt == static_cast<uint32_t>(ToNumber(nop_rt_reg)) && |
| 576 sa == type); | 576 sa == type); |
| 577 | 577 |
| 578 return ret; | 578 return ret; |
| 579 } | 579 } |
| 580 | 580 |
| 581 | 581 |
| 582 int32_t Assembler::GetBranchOffset(Instr instr) { | 582 int32_t Assembler::GetBranchOffset(Instr instr) { |
| 583 ASSERT(IsBranch(instr)); | 583 DCHECK(IsBranch(instr)); |
| 584 return (static_cast<int16_t>(instr & kImm16Mask)) << 2; | 584 return (static_cast<int16_t>(instr & kImm16Mask)) << 2; |
| 585 } | 585 } |
| 586 | 586 |
| 587 | 587 |
| 588 bool Assembler::IsLw(Instr instr) { | 588 bool Assembler::IsLw(Instr instr) { |
| 589 return ((instr & kOpcodeMask) == LW); | 589 return ((instr & kOpcodeMask) == LW); |
| 590 } | 590 } |
| 591 | 591 |
| 592 | 592 |
| 593 int16_t Assembler::GetLwOffset(Instr instr) { | 593 int16_t Assembler::GetLwOffset(Instr instr) { |
| 594 ASSERT(IsLw(instr)); | 594 DCHECK(IsLw(instr)); |
| 595 return ((instr & kImm16Mask)); | 595 return ((instr & kImm16Mask)); |
| 596 } | 596 } |
| 597 | 597 |
| 598 | 598 |
| 599 Instr Assembler::SetLwOffset(Instr instr, int16_t offset) { | 599 Instr Assembler::SetLwOffset(Instr instr, int16_t offset) { |
| 600 ASSERT(IsLw(instr)); | 600 DCHECK(IsLw(instr)); |
| 601 | 601 |
| 602 // We actually create a new lw instruction based on the original one. | 602 // We actually create a new lw instruction based on the original one. |
| 603 Instr temp_instr = LW | (instr & kRsFieldMask) | (instr & kRtFieldMask) | 603 Instr temp_instr = LW | (instr & kRsFieldMask) | (instr & kRtFieldMask) |
| 604 | (offset & kImm16Mask); | 604 | (offset & kImm16Mask); |
| 605 | 605 |
| 606 return temp_instr; | 606 return temp_instr; |
| 607 } | 607 } |
| 608 | 608 |
| 609 | 609 |
| 610 bool Assembler::IsSw(Instr instr) { | 610 bool Assembler::IsSw(Instr instr) { |
| 611 return ((instr & kOpcodeMask) == SW); | 611 return ((instr & kOpcodeMask) == SW); |
| 612 } | 612 } |
| 613 | 613 |
| 614 | 614 |
| 615 Instr Assembler::SetSwOffset(Instr instr, int16_t offset) { | 615 Instr Assembler::SetSwOffset(Instr instr, int16_t offset) { |
| 616 ASSERT(IsSw(instr)); | 616 DCHECK(IsSw(instr)); |
| 617 return ((instr & ~kImm16Mask) | (offset & kImm16Mask)); | 617 return ((instr & ~kImm16Mask) | (offset & kImm16Mask)); |
| 618 } | 618 } |
| 619 | 619 |
| 620 | 620 |
| 621 bool Assembler::IsAddImmediate(Instr instr) { | 621 bool Assembler::IsAddImmediate(Instr instr) { |
| 622 return ((instr & kOpcodeMask) == ADDIU || (instr & kOpcodeMask) == DADDIU); | 622 return ((instr & kOpcodeMask) == ADDIU || (instr & kOpcodeMask) == DADDIU); |
| 623 } | 623 } |
| 624 | 624 |
| 625 | 625 |
| 626 Instr Assembler::SetAddImmediateOffset(Instr instr, int16_t offset) { | 626 Instr Assembler::SetAddImmediateOffset(Instr instr, int16_t offset) { |
| 627 ASSERT(IsAddImmediate(instr)); | 627 DCHECK(IsAddImmediate(instr)); |
| 628 return ((instr & ~kImm16Mask) | (offset & kImm16Mask)); | 628 return ((instr & ~kImm16Mask) | (offset & kImm16Mask)); |
| 629 } | 629 } |
| 630 | 630 |
| 631 | 631 |
| 632 bool Assembler::IsAndImmediate(Instr instr) { | 632 bool Assembler::IsAndImmediate(Instr instr) { |
| 633 return GetOpcodeField(instr) == ANDI; | 633 return GetOpcodeField(instr) == ANDI; |
| 634 } | 634 } |
| 635 | 635 |
| 636 | 636 |
| 637 int64_t Assembler::target_at(int64_t pos) { | 637 int64_t Assembler::target_at(int64_t pos) { |
| 638 Instr instr = instr_at(pos); | 638 Instr instr = instr_at(pos); |
| 639 if ((instr & ~kImm16Mask) == 0) { | 639 if ((instr & ~kImm16Mask) == 0) { |
| 640 // Emitted label constant, not part of a branch. | 640 // Emitted label constant, not part of a branch. |
| 641 if (instr == 0) { | 641 if (instr == 0) { |
| 642 return kEndOfChain; | 642 return kEndOfChain; |
| 643 } else { | 643 } else { |
| 644 int32_t imm18 =((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14; | 644 int32_t imm18 =((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14; |
| 645 return (imm18 + pos); | 645 return (imm18 + pos); |
| 646 } | 646 } |
| 647 } | 647 } |
| 648 // Check we have a branch or jump instruction. | 648 // Check we have a branch or jump instruction. |
| 649 ASSERT(IsBranch(instr) || IsJ(instr) || IsLui(instr)); | 649 DCHECK(IsBranch(instr) || IsJ(instr) || IsLui(instr)); |
| 650 // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming | 650 // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming |
| 651 // the compiler uses arithmetic shifts for signed integers. | 651 // the compiler uses arithmetic shifts for signed integers. |
| 652 if (IsBranch(instr)) { | 652 if (IsBranch(instr)) { |
| 653 int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14; | 653 int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14; |
| 654 if (imm18 == kEndOfChain) { | 654 if (imm18 == kEndOfChain) { |
| 655 // EndOfChain sentinel is returned directly, not relative to pc or pos. | 655 // EndOfChain sentinel is returned directly, not relative to pc or pos. |
| 656 return kEndOfChain; | 656 return kEndOfChain; |
| 657 } else { | 657 } else { |
| 658 return pos + kBranchPCOffset + imm18; | 658 return pos + kBranchPCOffset + imm18; |
| 659 } | 659 } |
| 660 } else if (IsLui(instr)) { | 660 } else if (IsLui(instr)) { |
| 661 Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize); | 661 Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize); |
| 662 Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize); | 662 Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize); |
| 663 Instr instr_ori2 = instr_at(pos + 3 * Assembler::kInstrSize); | 663 Instr instr_ori2 = instr_at(pos + 3 * Assembler::kInstrSize); |
| 664 ASSERT(IsOri(instr_ori)); | 664 DCHECK(IsOri(instr_ori)); |
| 665 ASSERT(IsOri(instr_ori2)); | 665 DCHECK(IsOri(instr_ori2)); |
| 666 | 666 |
| 667 // TODO(plind) create named constants for shift values. | 667 // TODO(plind) create named constants for shift values. |
| 668 int64_t imm = static_cast<int64_t>(instr_lui & kImm16Mask) << 48; | 668 int64_t imm = static_cast<int64_t>(instr_lui & kImm16Mask) << 48; |
| 669 imm |= static_cast<int64_t>(instr_ori & kImm16Mask) << 32; | 669 imm |= static_cast<int64_t>(instr_ori & kImm16Mask) << 32; |
| 670 imm |= static_cast<int64_t>(instr_ori2 & kImm16Mask) << 16; | 670 imm |= static_cast<int64_t>(instr_ori2 & kImm16Mask) << 16; |
| 671 // Sign extend address; | 671 // Sign extend address; |
| 672 imm >>= 16; | 672 imm >>= 16; |
| 673 | 673 |
| 674 if (imm == kEndOfJumpChain) { | 674 if (imm == kEndOfJumpChain) { |
| 675 // EndOfChain sentinel is returned directly, not relative to pc or pos. | 675 // EndOfChain sentinel is returned directly, not relative to pc or pos. |
| 676 return kEndOfChain; | 676 return kEndOfChain; |
| 677 } else { | 677 } else { |
| 678 uint64_t instr_address = reinterpret_cast<int64_t>(buffer_ + pos); | 678 uint64_t instr_address = reinterpret_cast<int64_t>(buffer_ + pos); |
| 679 int64_t delta = instr_address - imm; | 679 int64_t delta = instr_address - imm; |
| 680 ASSERT(pos > delta); | 680 DCHECK(pos > delta); |
| 681 return pos - delta; | 681 return pos - delta; |
| 682 } | 682 } |
| 683 } else { | 683 } else { |
| 684 int32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2; | 684 int32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2; |
| 685 if (imm28 == kEndOfJumpChain) { | 685 if (imm28 == kEndOfJumpChain) { |
| 686 // EndOfChain sentinel is returned directly, not relative to pc or pos. | 686 // EndOfChain sentinel is returned directly, not relative to pc or pos. |
| 687 return kEndOfChain; | 687 return kEndOfChain; |
| 688 } else { | 688 } else { |
| 689 uint64_t instr_address = reinterpret_cast<int64_t>(buffer_ + pos); | 689 uint64_t instr_address = reinterpret_cast<int64_t>(buffer_ + pos); |
| 690 instr_address &= kImm28Mask; | 690 instr_address &= kImm28Mask; |
| 691 int64_t delta = instr_address - imm28; | 691 int64_t delta = instr_address - imm28; |
| 692 ASSERT(pos > delta); | 692 DCHECK(pos > delta); |
| 693 return pos - delta; | 693 return pos - delta; |
| 694 } | 694 } |
| 695 } | 695 } |
| 696 } | 696 } |
| 697 | 697 |
| 698 | 698 |
| 699 void Assembler::target_at_put(int64_t pos, int64_t target_pos) { | 699 void Assembler::target_at_put(int64_t pos, int64_t target_pos) { |
| 700 Instr instr = instr_at(pos); | 700 Instr instr = instr_at(pos); |
| 701 if ((instr & ~kImm16Mask) == 0) { | 701 if ((instr & ~kImm16Mask) == 0) { |
| 702 ASSERT(target_pos == kEndOfChain || target_pos >= 0); | 702 DCHECK(target_pos == kEndOfChain || target_pos >= 0); |
| 703 // Emitted label constant, not part of a branch. | 703 // Emitted label constant, not part of a branch. |
| 704 // Make label relative to Code* of generated Code object. | 704 // Make label relative to Code* of generated Code object. |
| 705 instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag)); | 705 instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag)); |
| 706 return; | 706 return; |
| 707 } | 707 } |
| 708 | 708 |
| 709 ASSERT(IsBranch(instr) || IsJ(instr) || IsLui(instr)); | 709 DCHECK(IsBranch(instr) || IsJ(instr) || IsLui(instr)); |
| 710 if (IsBranch(instr)) { | 710 if (IsBranch(instr)) { |
| 711 int32_t imm18 = target_pos - (pos + kBranchPCOffset); | 711 int32_t imm18 = target_pos - (pos + kBranchPCOffset); |
| 712 ASSERT((imm18 & 3) == 0); | 712 DCHECK((imm18 & 3) == 0); |
| 713 | 713 |
| 714 instr &= ~kImm16Mask; | 714 instr &= ~kImm16Mask; |
| 715 int32_t imm16 = imm18 >> 2; | 715 int32_t imm16 = imm18 >> 2; |
| 716 ASSERT(is_int16(imm16)); | 716 DCHECK(is_int16(imm16)); |
| 717 | 717 |
| 718 instr_at_put(pos, instr | (imm16 & kImm16Mask)); | 718 instr_at_put(pos, instr | (imm16 & kImm16Mask)); |
| 719 } else if (IsLui(instr)) { | 719 } else if (IsLui(instr)) { |
| 720 Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize); | 720 Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize); |
| 721 Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize); | 721 Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize); |
| 722 Instr instr_ori2 = instr_at(pos + 3 * Assembler::kInstrSize); | 722 Instr instr_ori2 = instr_at(pos + 3 * Assembler::kInstrSize); |
| 723 ASSERT(IsOri(instr_ori)); | 723 DCHECK(IsOri(instr_ori)); |
| 724 ASSERT(IsOri(instr_ori2)); | 724 DCHECK(IsOri(instr_ori2)); |
| 725 | 725 |
| 726 uint64_t imm = reinterpret_cast<uint64_t>(buffer_) + target_pos; | 726 uint64_t imm = reinterpret_cast<uint64_t>(buffer_) + target_pos; |
| 727 ASSERT((imm & 3) == 0); | 727 DCHECK((imm & 3) == 0); |
| 728 | 728 |
| 729 instr_lui &= ~kImm16Mask; | 729 instr_lui &= ~kImm16Mask; |
| 730 instr_ori &= ~kImm16Mask; | 730 instr_ori &= ~kImm16Mask; |
| 731 instr_ori2 &= ~kImm16Mask; | 731 instr_ori2 &= ~kImm16Mask; |
| 732 | 732 |
| 733 instr_at_put(pos + 0 * Assembler::kInstrSize, | 733 instr_at_put(pos + 0 * Assembler::kInstrSize, |
| 734 instr_lui | ((imm >> 32) & kImm16Mask)); | 734 instr_lui | ((imm >> 32) & kImm16Mask)); |
| 735 instr_at_put(pos + 1 * Assembler::kInstrSize, | 735 instr_at_put(pos + 1 * Assembler::kInstrSize, |
| 736 instr_ori | ((imm >> 16) & kImm16Mask)); | 736 instr_ori | ((imm >> 16) & kImm16Mask)); |
| 737 instr_at_put(pos + 3 * Assembler::kInstrSize, | 737 instr_at_put(pos + 3 * Assembler::kInstrSize, |
| 738 instr_ori2 | (imm & kImm16Mask)); | 738 instr_ori2 | (imm & kImm16Mask)); |
| 739 } else { | 739 } else { |
| 740 uint64_t imm28 = reinterpret_cast<uint64_t>(buffer_) + target_pos; | 740 uint64_t imm28 = reinterpret_cast<uint64_t>(buffer_) + target_pos; |
| 741 imm28 &= kImm28Mask; | 741 imm28 &= kImm28Mask; |
| 742 ASSERT((imm28 & 3) == 0); | 742 DCHECK((imm28 & 3) == 0); |
| 743 | 743 |
| 744 instr &= ~kImm26Mask; | 744 instr &= ~kImm26Mask; |
| 745 uint32_t imm26 = imm28 >> 2; | 745 uint32_t imm26 = imm28 >> 2; |
| 746 ASSERT(is_uint26(imm26)); | 746 DCHECK(is_uint26(imm26)); |
| 747 | 747 |
| 748 instr_at_put(pos, instr | (imm26 & kImm26Mask)); | 748 instr_at_put(pos, instr | (imm26 & kImm26Mask)); |
| 749 } | 749 } |
| 750 } | 750 } |
| 751 | 751 |
| 752 | 752 |
| 753 void Assembler::print(Label* L) { | 753 void Assembler::print(Label* L) { |
| 754 if (L->is_unused()) { | 754 if (L->is_unused()) { |
| 755 PrintF("unused label\n"); | 755 PrintF("unused label\n"); |
| 756 } else if (L->is_bound()) { | 756 } else if (L->is_bound()) { |
| (...skipping 11 matching lines...) Expand all Loading... |
| 768 } | 768 } |
| 769 next(&l); | 769 next(&l); |
| 770 } | 770 } |
| 771 } else { | 771 } else { |
| 772 PrintF("label in inconsistent state (pos = %d)\n", L->pos_); | 772 PrintF("label in inconsistent state (pos = %d)\n", L->pos_); |
| 773 } | 773 } |
| 774 } | 774 } |
| 775 | 775 |
| 776 | 776 |
| 777 void Assembler::bind_to(Label* L, int pos) { | 777 void Assembler::bind_to(Label* L, int pos) { |
| 778 ASSERT(0 <= pos && pos <= pc_offset()); // Must have valid binding position. | 778 DCHECK(0 <= pos && pos <= pc_offset()); // Must have valid binding position. |
| 779 int32_t trampoline_pos = kInvalidSlotPos; | 779 int32_t trampoline_pos = kInvalidSlotPos; |
| 780 if (L->is_linked() && !trampoline_emitted_) { | 780 if (L->is_linked() && !trampoline_emitted_) { |
| 781 unbound_labels_count_--; | 781 unbound_labels_count_--; |
| 782 next_buffer_check_ += kTrampolineSlotsSize; | 782 next_buffer_check_ += kTrampolineSlotsSize; |
| 783 } | 783 } |
| 784 | 784 |
| 785 while (L->is_linked()) { | 785 while (L->is_linked()) { |
| 786 int32_t fixup_pos = L->pos(); | 786 int32_t fixup_pos = L->pos(); |
| 787 int32_t dist = pos - fixup_pos; | 787 int32_t dist = pos - fixup_pos; |
| 788 next(L); // Call next before overwriting link with target at fixup_pos. | 788 next(L); // Call next before overwriting link with target at fixup_pos. |
| 789 Instr instr = instr_at(fixup_pos); | 789 Instr instr = instr_at(fixup_pos); |
| 790 if (IsBranch(instr)) { | 790 if (IsBranch(instr)) { |
| 791 if (dist > kMaxBranchOffset) { | 791 if (dist > kMaxBranchOffset) { |
| 792 if (trampoline_pos == kInvalidSlotPos) { | 792 if (trampoline_pos == kInvalidSlotPos) { |
| 793 trampoline_pos = get_trampoline_entry(fixup_pos); | 793 trampoline_pos = get_trampoline_entry(fixup_pos); |
| 794 CHECK(trampoline_pos != kInvalidSlotPos); | 794 CHECK(trampoline_pos != kInvalidSlotPos); |
| 795 } | 795 } |
| 796 ASSERT((trampoline_pos - fixup_pos) <= kMaxBranchOffset); | 796 DCHECK((trampoline_pos - fixup_pos) <= kMaxBranchOffset); |
| 797 target_at_put(fixup_pos, trampoline_pos); | 797 target_at_put(fixup_pos, trampoline_pos); |
| 798 fixup_pos = trampoline_pos; | 798 fixup_pos = trampoline_pos; |
| 799 dist = pos - fixup_pos; | 799 dist = pos - fixup_pos; |
| 800 } | 800 } |
| 801 target_at_put(fixup_pos, pos); | 801 target_at_put(fixup_pos, pos); |
| 802 } else { | 802 } else { |
| 803 ASSERT(IsJ(instr) || IsLui(instr) || IsEmittedConstant(instr)); | 803 DCHECK(IsJ(instr) || IsLui(instr) || IsEmittedConstant(instr)); |
| 804 target_at_put(fixup_pos, pos); | 804 target_at_put(fixup_pos, pos); |
| 805 } | 805 } |
| 806 } | 806 } |
| 807 L->bind_to(pos); | 807 L->bind_to(pos); |
| 808 | 808 |
| 809 // Keep track of the last bound label so we don't eliminate any instructions | 809 // Keep track of the last bound label so we don't eliminate any instructions |
| 810 // before a bound label. | 810 // before a bound label. |
| 811 if (pos > last_bound_pos_) | 811 if (pos > last_bound_pos_) |
| 812 last_bound_pos_ = pos; | 812 last_bound_pos_ = pos; |
| 813 } | 813 } |
| 814 | 814 |
| 815 | 815 |
| 816 void Assembler::bind(Label* L) { | 816 void Assembler::bind(Label* L) { |
| 817 ASSERT(!L->is_bound()); // Label can only be bound once. | 817 DCHECK(!L->is_bound()); // Label can only be bound once. |
| 818 bind_to(L, pc_offset()); | 818 bind_to(L, pc_offset()); |
| 819 } | 819 } |
| 820 | 820 |
| 821 | 821 |
| 822 void Assembler::next(Label* L) { | 822 void Assembler::next(Label* L) { |
| 823 ASSERT(L->is_linked()); | 823 DCHECK(L->is_linked()); |
| 824 int link = target_at(L->pos()); | 824 int link = target_at(L->pos()); |
| 825 if (link == kEndOfChain) { | 825 if (link == kEndOfChain) { |
| 826 L->Unuse(); | 826 L->Unuse(); |
| 827 } else { | 827 } else { |
| 828 ASSERT(link >= 0); | 828 DCHECK(link >= 0); |
| 829 L->link_to(link); | 829 L->link_to(link); |
| 830 } | 830 } |
| 831 } | 831 } |
| 832 | 832 |
| 833 | 833 |
| 834 bool Assembler::is_near(Label* L) { | 834 bool Assembler::is_near(Label* L) { |
| 835 if (L->is_bound()) { | 835 if (L->is_bound()) { |
| 836 return ((pc_offset() - L->pos()) < kMaxBranchOffset - 4 * kInstrSize); | 836 return ((pc_offset() - L->pos()) < kMaxBranchOffset - 4 * kInstrSize); |
| 837 } | 837 } |
| 838 return false; | 838 return false; |
| 839 } | 839 } |
| 840 | 840 |
| 841 | 841 |
| 842 // We have to use a temporary register for things that can be relocated even | 842 // We have to use a temporary register for things that can be relocated even |
| 843 // if they can be encoded in the MIPS's 16 bits of immediate-offset instruction | 843 // if they can be encoded in the MIPS's 16 bits of immediate-offset instruction |
| 844 // space. There is no guarantee that the relocated location can be similarly | 844 // space. There is no guarantee that the relocated location can be similarly |
| 845 // encoded. | 845 // encoded. |
| 846 bool Assembler::MustUseReg(RelocInfo::Mode rmode) { | 846 bool Assembler::MustUseReg(RelocInfo::Mode rmode) { |
| 847 return !RelocInfo::IsNone(rmode); | 847 return !RelocInfo::IsNone(rmode); |
| 848 } | 848 } |
| 849 | 849 |
| 850 void Assembler::GenInstrRegister(Opcode opcode, | 850 void Assembler::GenInstrRegister(Opcode opcode, |
| 851 Register rs, | 851 Register rs, |
| 852 Register rt, | 852 Register rt, |
| 853 Register rd, | 853 Register rd, |
| 854 uint16_t sa, | 854 uint16_t sa, |
| 855 SecondaryField func) { | 855 SecondaryField func) { |
| 856 ASSERT(rd.is_valid() && rs.is_valid() && rt.is_valid() && is_uint5(sa)); | 856 DCHECK(rd.is_valid() && rs.is_valid() && rt.is_valid() && is_uint5(sa)); |
| 857 Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift) | 857 Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift) |
| 858 | (rd.code() << kRdShift) | (sa << kSaShift) | func; | 858 | (rd.code() << kRdShift) | (sa << kSaShift) | func; |
| 859 emit(instr); | 859 emit(instr); |
| 860 } | 860 } |
| 861 | 861 |
| 862 | 862 |
| 863 void Assembler::GenInstrRegister(Opcode opcode, | 863 void Assembler::GenInstrRegister(Opcode opcode, |
| 864 Register rs, | 864 Register rs, |
| 865 Register rt, | 865 Register rt, |
| 866 uint16_t msb, | 866 uint16_t msb, |
| 867 uint16_t lsb, | 867 uint16_t lsb, |
| 868 SecondaryField func) { | 868 SecondaryField func) { |
| 869 ASSERT(rs.is_valid() && rt.is_valid() && is_uint5(msb) && is_uint5(lsb)); | 869 DCHECK(rs.is_valid() && rt.is_valid() && is_uint5(msb) && is_uint5(lsb)); |
| 870 Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift) | 870 Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift) |
| 871 | (msb << kRdShift) | (lsb << kSaShift) | func; | 871 | (msb << kRdShift) | (lsb << kSaShift) | func; |
| 872 emit(instr); | 872 emit(instr); |
| 873 } | 873 } |
| 874 | 874 |
| 875 | 875 |
| 876 void Assembler::GenInstrRegister(Opcode opcode, | 876 void Assembler::GenInstrRegister(Opcode opcode, |
| 877 SecondaryField fmt, | 877 SecondaryField fmt, |
| 878 FPURegister ft, | 878 FPURegister ft, |
| 879 FPURegister fs, | 879 FPURegister fs, |
| 880 FPURegister fd, | 880 FPURegister fd, |
| 881 SecondaryField func) { | 881 SecondaryField func) { |
| 882 ASSERT(fd.is_valid() && fs.is_valid() && ft.is_valid()); | 882 DCHECK(fd.is_valid() && fs.is_valid() && ft.is_valid()); |
| 883 Instr instr = opcode | fmt | (ft.code() << kFtShift) | (fs.code() << kFsShift) | 883 Instr instr = opcode | fmt | (ft.code() << kFtShift) | (fs.code() << kFsShift) |
| 884 | (fd.code() << kFdShift) | func; | 884 | (fd.code() << kFdShift) | func; |
| 885 emit(instr); | 885 emit(instr); |
| 886 } | 886 } |
| 887 | 887 |
| 888 | 888 |
| 889 void Assembler::GenInstrRegister(Opcode opcode, | 889 void Assembler::GenInstrRegister(Opcode opcode, |
| 890 FPURegister fr, | 890 FPURegister fr, |
| 891 FPURegister ft, | 891 FPURegister ft, |
| 892 FPURegister fs, | 892 FPURegister fs, |
| 893 FPURegister fd, | 893 FPURegister fd, |
| 894 SecondaryField func) { | 894 SecondaryField func) { |
| 895 ASSERT(fd.is_valid() && fr.is_valid() && fs.is_valid() && ft.is_valid()); | 895 DCHECK(fd.is_valid() && fr.is_valid() && fs.is_valid() && ft.is_valid()); |
| 896 Instr instr = opcode | (fr.code() << kFrShift) | (ft.code() << kFtShift) | 896 Instr instr = opcode | (fr.code() << kFrShift) | (ft.code() << kFtShift) |
| 897 | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func; | 897 | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func; |
| 898 emit(instr); | 898 emit(instr); |
| 899 } | 899 } |
| 900 | 900 |
| 901 | 901 |
| 902 void Assembler::GenInstrRegister(Opcode opcode, | 902 void Assembler::GenInstrRegister(Opcode opcode, |
| 903 SecondaryField fmt, | 903 SecondaryField fmt, |
| 904 Register rt, | 904 Register rt, |
| 905 FPURegister fs, | 905 FPURegister fs, |
| 906 FPURegister fd, | 906 FPURegister fd, |
| 907 SecondaryField func) { | 907 SecondaryField func) { |
| 908 ASSERT(fd.is_valid() && fs.is_valid() && rt.is_valid()); | 908 DCHECK(fd.is_valid() && fs.is_valid() && rt.is_valid()); |
| 909 Instr instr = opcode | fmt | (rt.code() << kRtShift) | 909 Instr instr = opcode | fmt | (rt.code() << kRtShift) |
| 910 | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func; | 910 | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func; |
| 911 emit(instr); | 911 emit(instr); |
| 912 } | 912 } |
| 913 | 913 |
| 914 | 914 |
| 915 void Assembler::GenInstrRegister(Opcode opcode, | 915 void Assembler::GenInstrRegister(Opcode opcode, |
| 916 SecondaryField fmt, | 916 SecondaryField fmt, |
| 917 Register rt, | 917 Register rt, |
| 918 FPUControlRegister fs, | 918 FPUControlRegister fs, |
| 919 SecondaryField func) { | 919 SecondaryField func) { |
| 920 ASSERT(fs.is_valid() && rt.is_valid()); | 920 DCHECK(fs.is_valid() && rt.is_valid()); |
| 921 Instr instr = | 921 Instr instr = |
| 922 opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func; | 922 opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func; |
| 923 emit(instr); | 923 emit(instr); |
| 924 } | 924 } |
| 925 | 925 |
| 926 | 926 |
| 927 // Instructions with immediate value. | 927 // Instructions with immediate value. |
| 928 // Registers are in the order of the instruction encoding, from left to right. | 928 // Registers are in the order of the instruction encoding, from left to right. |
| 929 void Assembler::GenInstrImmediate(Opcode opcode, | 929 void Assembler::GenInstrImmediate(Opcode opcode, |
| 930 Register rs, | 930 Register rs, |
| 931 Register rt, | 931 Register rt, |
| 932 int32_t j) { | 932 int32_t j) { |
| 933 ASSERT(rs.is_valid() && rt.is_valid() && (is_int16(j) || is_uint16(j))); | 933 DCHECK(rs.is_valid() && rt.is_valid() && (is_int16(j) || is_uint16(j))); |
| 934 Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift) | 934 Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift) |
| 935 | (j & kImm16Mask); | 935 | (j & kImm16Mask); |
| 936 emit(instr); | 936 emit(instr); |
| 937 } | 937 } |
| 938 | 938 |
| 939 | 939 |
| 940 void Assembler::GenInstrImmediate(Opcode opcode, | 940 void Assembler::GenInstrImmediate(Opcode opcode, |
| 941 Register rs, | 941 Register rs, |
| 942 SecondaryField SF, | 942 SecondaryField SF, |
| 943 int32_t j) { | 943 int32_t j) { |
| 944 ASSERT(rs.is_valid() && (is_int16(j) || is_uint16(j))); | 944 DCHECK(rs.is_valid() && (is_int16(j) || is_uint16(j))); |
| 945 Instr instr = opcode | (rs.code() << kRsShift) | SF | (j & kImm16Mask); | 945 Instr instr = opcode | (rs.code() << kRsShift) | SF | (j & kImm16Mask); |
| 946 emit(instr); | 946 emit(instr); |
| 947 } | 947 } |
| 948 | 948 |
| 949 | 949 |
| 950 void Assembler::GenInstrImmediate(Opcode opcode, | 950 void Assembler::GenInstrImmediate(Opcode opcode, |
| 951 Register rs, | 951 Register rs, |
| 952 FPURegister ft, | 952 FPURegister ft, |
| 953 int32_t j) { | 953 int32_t j) { |
| 954 ASSERT(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j))); | 954 DCHECK(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j))); |
| 955 Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift) | 955 Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift) |
| 956 | (j & kImm16Mask); | 956 | (j & kImm16Mask); |
| 957 emit(instr); | 957 emit(instr); |
| 958 } | 958 } |
| 959 | 959 |
| 960 | 960 |
| 961 void Assembler::GenInstrJump(Opcode opcode, | 961 void Assembler::GenInstrJump(Opcode opcode, |
| 962 uint32_t address) { | 962 uint32_t address) { |
| 963 BlockTrampolinePoolScope block_trampoline_pool(this); | 963 BlockTrampolinePoolScope block_trampoline_pool(this); |
| 964 ASSERT(is_uint26(address)); | 964 DCHECK(is_uint26(address)); |
| 965 Instr instr = opcode | address; | 965 Instr instr = opcode | address; |
| 966 emit(instr); | 966 emit(instr); |
| 967 BlockTrampolinePoolFor(1); // For associated delay slot. | 967 BlockTrampolinePoolFor(1); // For associated delay slot. |
| 968 } | 968 } |
| 969 | 969 |
| 970 | 970 |
| 971 // Returns the next free trampoline entry. | 971 // Returns the next free trampoline entry. |
| 972 int32_t Assembler::get_trampoline_entry(int32_t pos) { | 972 int32_t Assembler::get_trampoline_entry(int32_t pos) { |
| 973 int32_t trampoline_entry = kInvalidSlotPos; | 973 int32_t trampoline_entry = kInvalidSlotPos; |
| 974 if (!internal_trampoline_exception_) { | 974 if (!internal_trampoline_exception_) { |
| (...skipping 17 matching lines...) Expand all Loading... |
| 992 if (L->is_linked()) { | 992 if (L->is_linked()) { |
| 993 target_pos = L->pos(); // L's link. | 993 target_pos = L->pos(); // L's link. |
| 994 L->link_to(pc_offset()); | 994 L->link_to(pc_offset()); |
| 995 } else { | 995 } else { |
| 996 L->link_to(pc_offset()); | 996 L->link_to(pc_offset()); |
| 997 return kEndOfJumpChain; | 997 return kEndOfJumpChain; |
| 998 } | 998 } |
| 999 } | 999 } |
| 1000 | 1000 |
| 1001 uint64_t imm = reinterpret_cast<uint64_t>(buffer_) + target_pos; | 1001 uint64_t imm = reinterpret_cast<uint64_t>(buffer_) + target_pos; |
| 1002 ASSERT((imm & 3) == 0); | 1002 DCHECK((imm & 3) == 0); |
| 1003 | 1003 |
| 1004 return imm; | 1004 return imm; |
| 1005 } | 1005 } |
| 1006 | 1006 |
| 1007 | 1007 |
| 1008 int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) { | 1008 int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) { |
| 1009 int32_t target_pos; | 1009 int32_t target_pos; |
| 1010 if (L->is_bound()) { | 1010 if (L->is_bound()) { |
| 1011 target_pos = L->pos(); | 1011 target_pos = L->pos(); |
| 1012 } else { | 1012 } else { |
| 1013 if (L->is_linked()) { | 1013 if (L->is_linked()) { |
| 1014 target_pos = L->pos(); | 1014 target_pos = L->pos(); |
| 1015 L->link_to(pc_offset()); | 1015 L->link_to(pc_offset()); |
| 1016 } else { | 1016 } else { |
| 1017 L->link_to(pc_offset()); | 1017 L->link_to(pc_offset()); |
| 1018 if (!trampoline_emitted_) { | 1018 if (!trampoline_emitted_) { |
| 1019 unbound_labels_count_++; | 1019 unbound_labels_count_++; |
| 1020 next_buffer_check_ -= kTrampolineSlotsSize; | 1020 next_buffer_check_ -= kTrampolineSlotsSize; |
| 1021 } | 1021 } |
| 1022 return kEndOfChain; | 1022 return kEndOfChain; |
| 1023 } | 1023 } |
| 1024 } | 1024 } |
| 1025 | 1025 |
| 1026 int32_t offset = target_pos - (pc_offset() + kBranchPCOffset); | 1026 int32_t offset = target_pos - (pc_offset() + kBranchPCOffset); |
| 1027 ASSERT((offset & 3) == 0); | 1027 DCHECK((offset & 3) == 0); |
| 1028 ASSERT(is_int16(offset >> 2)); | 1028 DCHECK(is_int16(offset >> 2)); |
| 1029 | 1029 |
| 1030 return offset; | 1030 return offset; |
| 1031 } | 1031 } |
| 1032 | 1032 |
| 1033 | 1033 |
| 1034 int32_t Assembler::branch_offset_compact(Label* L, | 1034 int32_t Assembler::branch_offset_compact(Label* L, |
| 1035 bool jump_elimination_allowed) { | 1035 bool jump_elimination_allowed) { |
| 1036 int32_t target_pos; | 1036 int32_t target_pos; |
| 1037 if (L->is_bound()) { | 1037 if (L->is_bound()) { |
| 1038 target_pos = L->pos(); | 1038 target_pos = L->pos(); |
| 1039 } else { | 1039 } else { |
| 1040 if (L->is_linked()) { | 1040 if (L->is_linked()) { |
| 1041 target_pos = L->pos(); | 1041 target_pos = L->pos(); |
| 1042 L->link_to(pc_offset()); | 1042 L->link_to(pc_offset()); |
| 1043 } else { | 1043 } else { |
| 1044 L->link_to(pc_offset()); | 1044 L->link_to(pc_offset()); |
| 1045 if (!trampoline_emitted_) { | 1045 if (!trampoline_emitted_) { |
| 1046 unbound_labels_count_++; | 1046 unbound_labels_count_++; |
| 1047 next_buffer_check_ -= kTrampolineSlotsSize; | 1047 next_buffer_check_ -= kTrampolineSlotsSize; |
| 1048 } | 1048 } |
| 1049 return kEndOfChain; | 1049 return kEndOfChain; |
| 1050 } | 1050 } |
| 1051 } | 1051 } |
| 1052 | 1052 |
| 1053 int32_t offset = target_pos - pc_offset(); | 1053 int32_t offset = target_pos - pc_offset(); |
| 1054 ASSERT((offset & 3) == 0); | 1054 DCHECK((offset & 3) == 0); |
| 1055 ASSERT(is_int16(offset >> 2)); | 1055 DCHECK(is_int16(offset >> 2)); |
| 1056 | 1056 |
| 1057 return offset; | 1057 return offset; |
| 1058 } | 1058 } |
| 1059 | 1059 |
| 1060 | 1060 |
| 1061 int32_t Assembler::branch_offset21(Label* L, bool jump_elimination_allowed) { | 1061 int32_t Assembler::branch_offset21(Label* L, bool jump_elimination_allowed) { |
| 1062 int32_t target_pos; | 1062 int32_t target_pos; |
| 1063 if (L->is_bound()) { | 1063 if (L->is_bound()) { |
| 1064 target_pos = L->pos(); | 1064 target_pos = L->pos(); |
| 1065 } else { | 1065 } else { |
| 1066 if (L->is_linked()) { | 1066 if (L->is_linked()) { |
| 1067 target_pos = L->pos(); | 1067 target_pos = L->pos(); |
| 1068 L->link_to(pc_offset()); | 1068 L->link_to(pc_offset()); |
| 1069 } else { | 1069 } else { |
| 1070 L->link_to(pc_offset()); | 1070 L->link_to(pc_offset()); |
| 1071 if (!trampoline_emitted_) { | 1071 if (!trampoline_emitted_) { |
| 1072 unbound_labels_count_++; | 1072 unbound_labels_count_++; |
| 1073 next_buffer_check_ -= kTrampolineSlotsSize; | 1073 next_buffer_check_ -= kTrampolineSlotsSize; |
| 1074 } | 1074 } |
| 1075 return kEndOfChain; | 1075 return kEndOfChain; |
| 1076 } | 1076 } |
| 1077 } | 1077 } |
| 1078 | 1078 |
| 1079 int32_t offset = target_pos - (pc_offset() + kBranchPCOffset); | 1079 int32_t offset = target_pos - (pc_offset() + kBranchPCOffset); |
| 1080 ASSERT((offset & 3) == 0); | 1080 DCHECK((offset & 3) == 0); |
| 1081 ASSERT(((offset >> 2) & 0xFFE00000) == 0); // Offset is 21bit width. | 1081 DCHECK(((offset >> 2) & 0xFFE00000) == 0); // Offset is 21bit width. |
| 1082 | 1082 |
| 1083 return offset; | 1083 return offset; |
| 1084 } | 1084 } |
| 1085 | 1085 |
| 1086 | 1086 |
| 1087 int32_t Assembler::branch_offset21_compact(Label* L, | 1087 int32_t Assembler::branch_offset21_compact(Label* L, |
| 1088 bool jump_elimination_allowed) { | 1088 bool jump_elimination_allowed) { |
| 1089 int32_t target_pos; | 1089 int32_t target_pos; |
| 1090 if (L->is_bound()) { | 1090 if (L->is_bound()) { |
| 1091 target_pos = L->pos(); | 1091 target_pos = L->pos(); |
| 1092 } else { | 1092 } else { |
| 1093 if (L->is_linked()) { | 1093 if (L->is_linked()) { |
| 1094 target_pos = L->pos(); | 1094 target_pos = L->pos(); |
| 1095 L->link_to(pc_offset()); | 1095 L->link_to(pc_offset()); |
| 1096 } else { | 1096 } else { |
| 1097 L->link_to(pc_offset()); | 1097 L->link_to(pc_offset()); |
| 1098 if (!trampoline_emitted_) { | 1098 if (!trampoline_emitted_) { |
| 1099 unbound_labels_count_++; | 1099 unbound_labels_count_++; |
| 1100 next_buffer_check_ -= kTrampolineSlotsSize; | 1100 next_buffer_check_ -= kTrampolineSlotsSize; |
| 1101 } | 1101 } |
| 1102 return kEndOfChain; | 1102 return kEndOfChain; |
| 1103 } | 1103 } |
| 1104 } | 1104 } |
| 1105 | 1105 |
| 1106 int32_t offset = target_pos - pc_offset(); | 1106 int32_t offset = target_pos - pc_offset(); |
| 1107 ASSERT((offset & 3) == 0); | 1107 DCHECK((offset & 3) == 0); |
| 1108 ASSERT(((offset >> 2) & 0xFFE00000) == 0); // Offset is 21bit width. | 1108 DCHECK(((offset >> 2) & 0xFFE00000) == 0); // Offset is 21bit width. |
| 1109 | 1109 |
| 1110 return offset; | 1110 return offset; |
| 1111 } | 1111 } |
| 1112 | 1112 |
| 1113 | 1113 |
| 1114 void Assembler::label_at_put(Label* L, int at_offset) { | 1114 void Assembler::label_at_put(Label* L, int at_offset) { |
| 1115 int target_pos; | 1115 int target_pos; |
| 1116 if (L->is_bound()) { | 1116 if (L->is_bound()) { |
| 1117 target_pos = L->pos(); | 1117 target_pos = L->pos(); |
| 1118 instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag)); | 1118 instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag)); |
| 1119 } else { | 1119 } else { |
| 1120 if (L->is_linked()) { | 1120 if (L->is_linked()) { |
| 1121 target_pos = L->pos(); // L's link. | 1121 target_pos = L->pos(); // L's link. |
| 1122 int32_t imm18 = target_pos - at_offset; | 1122 int32_t imm18 = target_pos - at_offset; |
| 1123 ASSERT((imm18 & 3) == 0); | 1123 DCHECK((imm18 & 3) == 0); |
| 1124 int32_t imm16 = imm18 >> 2; | 1124 int32_t imm16 = imm18 >> 2; |
| 1125 ASSERT(is_int16(imm16)); | 1125 DCHECK(is_int16(imm16)); |
| 1126 instr_at_put(at_offset, (imm16 & kImm16Mask)); | 1126 instr_at_put(at_offset, (imm16 & kImm16Mask)); |
| 1127 } else { | 1127 } else { |
| 1128 target_pos = kEndOfChain; | 1128 target_pos = kEndOfChain; |
| 1129 instr_at_put(at_offset, 0); | 1129 instr_at_put(at_offset, 0); |
| 1130 if (!trampoline_emitted_) { | 1130 if (!trampoline_emitted_) { |
| 1131 unbound_labels_count_++; | 1131 unbound_labels_count_++; |
| 1132 next_buffer_check_ -= kTrampolineSlotsSize; | 1132 next_buffer_check_ -= kTrampolineSlotsSize; |
| 1133 } | 1133 } |
| 1134 } | 1134 } |
| 1135 L->link_to(at_offset); | 1135 L->link_to(at_offset); |
| (...skipping 22 matching lines...) Expand all Loading... |
| 1158 | 1158 |
| 1159 | 1159 |
| 1160 void Assembler::bgez(Register rs, int16_t offset) { | 1160 void Assembler::bgez(Register rs, int16_t offset) { |
| 1161 BlockTrampolinePoolScope block_trampoline_pool(this); | 1161 BlockTrampolinePoolScope block_trampoline_pool(this); |
| 1162 GenInstrImmediate(REGIMM, rs, BGEZ, offset); | 1162 GenInstrImmediate(REGIMM, rs, BGEZ, offset); |
| 1163 BlockTrampolinePoolFor(1); // For associated delay slot. | 1163 BlockTrampolinePoolFor(1); // For associated delay slot. |
| 1164 } | 1164 } |
| 1165 | 1165 |
| 1166 | 1166 |
| 1167 void Assembler::bgezc(Register rt, int16_t offset) { | 1167 void Assembler::bgezc(Register rt, int16_t offset) { |
| 1168 ASSERT(kArchVariant == kMips64r6); | 1168 DCHECK(kArchVariant == kMips64r6); |
| 1169 ASSERT(!(rt.is(zero_reg))); | 1169 DCHECK(!(rt.is(zero_reg))); |
| 1170 GenInstrImmediate(BLEZL, rt, rt, offset); | 1170 GenInstrImmediate(BLEZL, rt, rt, offset); |
| 1171 } | 1171 } |
| 1172 | 1172 |
| 1173 | 1173 |
| 1174 void Assembler::bgeuc(Register rs, Register rt, int16_t offset) { | 1174 void Assembler::bgeuc(Register rs, Register rt, int16_t offset) { |
| 1175 ASSERT(kArchVariant == kMips64r6); | 1175 DCHECK(kArchVariant == kMips64r6); |
| 1176 ASSERT(!(rs.is(zero_reg))); | 1176 DCHECK(!(rs.is(zero_reg))); |
| 1177 ASSERT(!(rt.is(zero_reg))); | 1177 DCHECK(!(rt.is(zero_reg))); |
| 1178 ASSERT(rs.code() != rt.code()); | 1178 DCHECK(rs.code() != rt.code()); |
| 1179 GenInstrImmediate(BLEZ, rs, rt, offset); | 1179 GenInstrImmediate(BLEZ, rs, rt, offset); |
| 1180 } | 1180 } |
| 1181 | 1181 |
| 1182 | 1182 |
| 1183 void Assembler::bgec(Register rs, Register rt, int16_t offset) { | 1183 void Assembler::bgec(Register rs, Register rt, int16_t offset) { |
| 1184 ASSERT(kArchVariant == kMips64r6); | 1184 DCHECK(kArchVariant == kMips64r6); |
| 1185 ASSERT(!(rs.is(zero_reg))); | 1185 DCHECK(!(rs.is(zero_reg))); |
| 1186 ASSERT(!(rt.is(zero_reg))); | 1186 DCHECK(!(rt.is(zero_reg))); |
| 1187 ASSERT(rs.code() != rt.code()); | 1187 DCHECK(rs.code() != rt.code()); |
| 1188 GenInstrImmediate(BLEZL, rs, rt, offset); | 1188 GenInstrImmediate(BLEZL, rs, rt, offset); |
| 1189 } | 1189 } |
| 1190 | 1190 |
| 1191 | 1191 |
| 1192 void Assembler::bgezal(Register rs, int16_t offset) { | 1192 void Assembler::bgezal(Register rs, int16_t offset) { |
| 1193 ASSERT(kArchVariant != kMips64r6 || rs.is(zero_reg)); | 1193 DCHECK(kArchVariant != kMips64r6 || rs.is(zero_reg)); |
| 1194 BlockTrampolinePoolScope block_trampoline_pool(this); | 1194 BlockTrampolinePoolScope block_trampoline_pool(this); |
| 1195 positions_recorder()->WriteRecordedPositions(); | 1195 positions_recorder()->WriteRecordedPositions(); |
| 1196 GenInstrImmediate(REGIMM, rs, BGEZAL, offset); | 1196 GenInstrImmediate(REGIMM, rs, BGEZAL, offset); |
| 1197 BlockTrampolinePoolFor(1); // For associated delay slot. | 1197 BlockTrampolinePoolFor(1); // For associated delay slot. |
| 1198 } | 1198 } |
| 1199 | 1199 |
| 1200 | 1200 |
| 1201 void Assembler::bgtz(Register rs, int16_t offset) { | 1201 void Assembler::bgtz(Register rs, int16_t offset) { |
| 1202 BlockTrampolinePoolScope block_trampoline_pool(this); | 1202 BlockTrampolinePoolScope block_trampoline_pool(this); |
| 1203 GenInstrImmediate(BGTZ, rs, zero_reg, offset); | 1203 GenInstrImmediate(BGTZ, rs, zero_reg, offset); |
| 1204 BlockTrampolinePoolFor(1); // For associated delay slot. | 1204 BlockTrampolinePoolFor(1); // For associated delay slot. |
| 1205 } | 1205 } |
| 1206 | 1206 |
| 1207 | 1207 |
| 1208 void Assembler::bgtzc(Register rt, int16_t offset) { | 1208 void Assembler::bgtzc(Register rt, int16_t offset) { |
| 1209 ASSERT(kArchVariant == kMips64r6); | 1209 DCHECK(kArchVariant == kMips64r6); |
| 1210 ASSERT(!(rt.is(zero_reg))); | 1210 DCHECK(!(rt.is(zero_reg))); |
| 1211 GenInstrImmediate(BGTZL, zero_reg, rt, offset); | 1211 GenInstrImmediate(BGTZL, zero_reg, rt, offset); |
| 1212 } | 1212 } |
| 1213 | 1213 |
| 1214 | 1214 |
| 1215 void Assembler::blez(Register rs, int16_t offset) { | 1215 void Assembler::blez(Register rs, int16_t offset) { |
| 1216 BlockTrampolinePoolScope block_trampoline_pool(this); | 1216 BlockTrampolinePoolScope block_trampoline_pool(this); |
| 1217 GenInstrImmediate(BLEZ, rs, zero_reg, offset); | 1217 GenInstrImmediate(BLEZ, rs, zero_reg, offset); |
| 1218 BlockTrampolinePoolFor(1); // For associated delay slot. | 1218 BlockTrampolinePoolFor(1); // For associated delay slot. |
| 1219 } | 1219 } |
| 1220 | 1220 |
| 1221 | 1221 |
| 1222 void Assembler::blezc(Register rt, int16_t offset) { | 1222 void Assembler::blezc(Register rt, int16_t offset) { |
| 1223 ASSERT(kArchVariant == kMips64r6); | 1223 DCHECK(kArchVariant == kMips64r6); |
| 1224 ASSERT(!(rt.is(zero_reg))); | 1224 DCHECK(!(rt.is(zero_reg))); |
| 1225 GenInstrImmediate(BLEZL, zero_reg, rt, offset); | 1225 GenInstrImmediate(BLEZL, zero_reg, rt, offset); |
| 1226 } | 1226 } |
| 1227 | 1227 |
| 1228 | 1228 |
| 1229 void Assembler::bltzc(Register rt, int16_t offset) { | 1229 void Assembler::bltzc(Register rt, int16_t offset) { |
| 1230 ASSERT(kArchVariant == kMips64r6); | 1230 DCHECK(kArchVariant == kMips64r6); |
| 1231 ASSERT(!(rt.is(zero_reg))); | 1231 DCHECK(!(rt.is(zero_reg))); |
| 1232 GenInstrImmediate(BGTZL, rt, rt, offset); | 1232 GenInstrImmediate(BGTZL, rt, rt, offset); |
| 1233 } | 1233 } |
| 1234 | 1234 |
| 1235 | 1235 |
| 1236 void Assembler::bltuc(Register rs, Register rt, int16_t offset) { | 1236 void Assembler::bltuc(Register rs, Register rt, int16_t offset) { |
| 1237 ASSERT(kArchVariant == kMips64r6); | 1237 DCHECK(kArchVariant == kMips64r6); |
| 1238 ASSERT(!(rs.is(zero_reg))); | 1238 DCHECK(!(rs.is(zero_reg))); |
| 1239 ASSERT(!(rt.is(zero_reg))); | 1239 DCHECK(!(rt.is(zero_reg))); |
| 1240 ASSERT(rs.code() != rt.code()); | 1240 DCHECK(rs.code() != rt.code()); |
| 1241 GenInstrImmediate(BGTZ, rs, rt, offset); | 1241 GenInstrImmediate(BGTZ, rs, rt, offset); |
| 1242 } | 1242 } |
| 1243 | 1243 |
| 1244 | 1244 |
| 1245 void Assembler::bltc(Register rs, Register rt, int16_t offset) { | 1245 void Assembler::bltc(Register rs, Register rt, int16_t offset) { |
| 1246 ASSERT(kArchVariant == kMips64r6); | 1246 DCHECK(kArchVariant == kMips64r6); |
| 1247 ASSERT(!(rs.is(zero_reg))); | 1247 DCHECK(!(rs.is(zero_reg))); |
| 1248 ASSERT(!(rt.is(zero_reg))); | 1248 DCHECK(!(rt.is(zero_reg))); |
| 1249 ASSERT(rs.code() != rt.code()); | 1249 DCHECK(rs.code() != rt.code()); |
| 1250 GenInstrImmediate(BGTZL, rs, rt, offset); | 1250 GenInstrImmediate(BGTZL, rs, rt, offset); |
| 1251 } | 1251 } |
| 1252 | 1252 |
| 1253 | 1253 |
| 1254 void Assembler::bltz(Register rs, int16_t offset) { | 1254 void Assembler::bltz(Register rs, int16_t offset) { |
| 1255 BlockTrampolinePoolScope block_trampoline_pool(this); | 1255 BlockTrampolinePoolScope block_trampoline_pool(this); |
| 1256 GenInstrImmediate(REGIMM, rs, BLTZ, offset); | 1256 GenInstrImmediate(REGIMM, rs, BLTZ, offset); |
| 1257 BlockTrampolinePoolFor(1); // For associated delay slot. | 1257 BlockTrampolinePoolFor(1); // For associated delay slot. |
| 1258 } | 1258 } |
| 1259 | 1259 |
| 1260 | 1260 |
| 1261 void Assembler::bltzal(Register rs, int16_t offset) { | 1261 void Assembler::bltzal(Register rs, int16_t offset) { |
| 1262 ASSERT(kArchVariant != kMips64r6 || rs.is(zero_reg)); | 1262 DCHECK(kArchVariant != kMips64r6 || rs.is(zero_reg)); |
| 1263 BlockTrampolinePoolScope block_trampoline_pool(this); | 1263 BlockTrampolinePoolScope block_trampoline_pool(this); |
| 1264 positions_recorder()->WriteRecordedPositions(); | 1264 positions_recorder()->WriteRecordedPositions(); |
| 1265 GenInstrImmediate(REGIMM, rs, BLTZAL, offset); | 1265 GenInstrImmediate(REGIMM, rs, BLTZAL, offset); |
| 1266 BlockTrampolinePoolFor(1); // For associated delay slot. | 1266 BlockTrampolinePoolFor(1); // For associated delay slot. |
| 1267 } | 1267 } |
| 1268 | 1268 |
| 1269 | 1269 |
| 1270 void Assembler::bne(Register rs, Register rt, int16_t offset) { | 1270 void Assembler::bne(Register rs, Register rt, int16_t offset) { |
| 1271 BlockTrampolinePoolScope block_trampoline_pool(this); | 1271 BlockTrampolinePoolScope block_trampoline_pool(this); |
| 1272 GenInstrImmediate(BNE, rs, rt, offset); | 1272 GenInstrImmediate(BNE, rs, rt, offset); |
| 1273 BlockTrampolinePoolFor(1); // For associated delay slot. | 1273 BlockTrampolinePoolFor(1); // For associated delay slot. |
| 1274 } | 1274 } |
| 1275 | 1275 |
| 1276 | 1276 |
| 1277 void Assembler::bovc(Register rs, Register rt, int16_t offset) { | 1277 void Assembler::bovc(Register rs, Register rt, int16_t offset) { |
| 1278 ASSERT(kArchVariant == kMips64r6); | 1278 DCHECK(kArchVariant == kMips64r6); |
| 1279 ASSERT(!(rs.is(zero_reg))); | 1279 DCHECK(!(rs.is(zero_reg))); |
| 1280 ASSERT(rs.code() >= rt.code()); | 1280 DCHECK(rs.code() >= rt.code()); |
| 1281 GenInstrImmediate(ADDI, rs, rt, offset); | 1281 GenInstrImmediate(ADDI, rs, rt, offset); |
| 1282 } | 1282 } |
| 1283 | 1283 |
| 1284 | 1284 |
| 1285 void Assembler::bnvc(Register rs, Register rt, int16_t offset) { | 1285 void Assembler::bnvc(Register rs, Register rt, int16_t offset) { |
| 1286 ASSERT(kArchVariant == kMips64r6); | 1286 DCHECK(kArchVariant == kMips64r6); |
| 1287 ASSERT(!(rs.is(zero_reg))); | 1287 DCHECK(!(rs.is(zero_reg))); |
| 1288 ASSERT(rs.code() >= rt.code()); | 1288 DCHECK(rs.code() >= rt.code()); |
| 1289 GenInstrImmediate(DADDI, rs, rt, offset); | 1289 GenInstrImmediate(DADDI, rs, rt, offset); |
| 1290 } | 1290 } |
| 1291 | 1291 |
| 1292 | 1292 |
| 1293 void Assembler::blezalc(Register rt, int16_t offset) { | 1293 void Assembler::blezalc(Register rt, int16_t offset) { |
| 1294 ASSERT(kArchVariant == kMips64r6); | 1294 DCHECK(kArchVariant == kMips64r6); |
| 1295 ASSERT(!(rt.is(zero_reg))); | 1295 DCHECK(!(rt.is(zero_reg))); |
| 1296 GenInstrImmediate(BLEZ, zero_reg, rt, offset); | 1296 GenInstrImmediate(BLEZ, zero_reg, rt, offset); |
| 1297 } | 1297 } |
| 1298 | 1298 |
| 1299 | 1299 |
| 1300 void Assembler::bgezalc(Register rt, int16_t offset) { | 1300 void Assembler::bgezalc(Register rt, int16_t offset) { |
| 1301 ASSERT(kArchVariant == kMips64r6); | 1301 DCHECK(kArchVariant == kMips64r6); |
| 1302 ASSERT(!(rt.is(zero_reg))); | 1302 DCHECK(!(rt.is(zero_reg))); |
| 1303 GenInstrImmediate(BLEZ, rt, rt, offset); | 1303 GenInstrImmediate(BLEZ, rt, rt, offset); |
| 1304 } | 1304 } |
| 1305 | 1305 |
| 1306 | 1306 |
| 1307 void Assembler::bgezall(Register rs, int16_t offset) { | 1307 void Assembler::bgezall(Register rs, int16_t offset) { |
| 1308 ASSERT(kArchVariant == kMips64r6); | 1308 DCHECK(kArchVariant != kMips64r6); |
| 1309 ASSERT(!(rs.is(zero_reg))); | 1309 DCHECK(!(rs.is(zero_reg))); |
| 1310 GenInstrImmediate(REGIMM, rs, BGEZALL, offset); | 1310 GenInstrImmediate(REGIMM, rs, BGEZALL, offset); |
| 1311 } | 1311 } |
| 1312 | 1312 |
| 1313 | 1313 |
| 1314 void Assembler::bltzalc(Register rt, int16_t offset) { | 1314 void Assembler::bltzalc(Register rt, int16_t offset) { |
| 1315 ASSERT(kArchVariant == kMips64r6); | 1315 DCHECK(kArchVariant == kMips64r6); |
| 1316 ASSERT(!(rt.is(zero_reg))); | 1316 DCHECK(!(rt.is(zero_reg))); |
| 1317 GenInstrImmediate(BGTZ, rt, rt, offset); | 1317 GenInstrImmediate(BGTZ, rt, rt, offset); |
| 1318 } | 1318 } |
| 1319 | 1319 |
| 1320 | 1320 |
| 1321 void Assembler::bgtzalc(Register rt, int16_t offset) { | 1321 void Assembler::bgtzalc(Register rt, int16_t offset) { |
| 1322 ASSERT(kArchVariant == kMips64r6); | 1322 DCHECK(kArchVariant == kMips64r6); |
| 1323 ASSERT(!(rt.is(zero_reg))); | 1323 DCHECK(!(rt.is(zero_reg))); |
| 1324 GenInstrImmediate(BGTZ, zero_reg, rt, offset); | 1324 GenInstrImmediate(BGTZ, zero_reg, rt, offset); |
| 1325 } | 1325 } |
| 1326 | 1326 |
| 1327 | 1327 |
| 1328 void Assembler::beqzalc(Register rt, int16_t offset) { | 1328 void Assembler::beqzalc(Register rt, int16_t offset) { |
| 1329 ASSERT(kArchVariant == kMips64r6); | 1329 DCHECK(kArchVariant == kMips64r6); |
| 1330 ASSERT(!(rt.is(zero_reg))); | 1330 DCHECK(!(rt.is(zero_reg))); |
| 1331 GenInstrImmediate(ADDI, zero_reg, rt, offset); | 1331 GenInstrImmediate(ADDI, zero_reg, rt, offset); |
| 1332 } | 1332 } |
| 1333 | 1333 |
| 1334 | 1334 |
| 1335 void Assembler::bnezalc(Register rt, int16_t offset) { | 1335 void Assembler::bnezalc(Register rt, int16_t offset) { |
| 1336 ASSERT(kArchVariant == kMips64r6); | 1336 DCHECK(kArchVariant == kMips64r6); |
| 1337 ASSERT(!(rt.is(zero_reg))); | 1337 DCHECK(!(rt.is(zero_reg))); |
| 1338 GenInstrImmediate(DADDI, zero_reg, rt, offset); | 1338 GenInstrImmediate(DADDI, zero_reg, rt, offset); |
| 1339 } | 1339 } |
| 1340 | 1340 |
| 1341 | 1341 |
| 1342 void Assembler::beqc(Register rs, Register rt, int16_t offset) { | 1342 void Assembler::beqc(Register rs, Register rt, int16_t offset) { |
| 1343 ASSERT(kArchVariant == kMips64r6); | 1343 DCHECK(kArchVariant == kMips64r6); |
| 1344 ASSERT(rs.code() < rt.code()); | 1344 DCHECK(rs.code() < rt.code()); |
| 1345 GenInstrImmediate(ADDI, rs, rt, offset); | 1345 GenInstrImmediate(ADDI, rs, rt, offset); |
| 1346 } | 1346 } |
| 1347 | 1347 |
| 1348 | 1348 |
| 1349 void Assembler::beqzc(Register rs, int32_t offset) { | 1349 void Assembler::beqzc(Register rs, int32_t offset) { |
| 1350 ASSERT(kArchVariant == kMips64r6); | 1350 DCHECK(kArchVariant == kMips64r6); |
| 1351 ASSERT(!(rs.is(zero_reg))); | 1351 DCHECK(!(rs.is(zero_reg))); |
| 1352 Instr instr = BEQZC | (rs.code() << kRsShift) | offset; | 1352 Instr instr = BEQZC | (rs.code() << kRsShift) | offset; |
| 1353 emit(instr); | 1353 emit(instr); |
| 1354 } | 1354 } |
| 1355 | 1355 |
| 1356 | 1356 |
| 1357 void Assembler::bnec(Register rs, Register rt, int16_t offset) { | 1357 void Assembler::bnec(Register rs, Register rt, int16_t offset) { |
| 1358 ASSERT(kArchVariant == kMips64r6); | 1358 DCHECK(kArchVariant == kMips64r6); |
| 1359 ASSERT(rs.code() < rt.code()); | 1359 DCHECK(rs.code() < rt.code()); |
| 1360 GenInstrImmediate(DADDI, rs, rt, offset); | 1360 GenInstrImmediate(DADDI, rs, rt, offset); |
| 1361 } | 1361 } |
| 1362 | 1362 |
| 1363 | 1363 |
| 1364 void Assembler::bnezc(Register rs, int32_t offset) { | 1364 void Assembler::bnezc(Register rs, int32_t offset) { |
| 1365 ASSERT(kArchVariant == kMips64r6); | 1365 DCHECK(kArchVariant == kMips64r6); |
| 1366 ASSERT(!(rs.is(zero_reg))); | 1366 DCHECK(!(rs.is(zero_reg))); |
| 1367 Instr instr = BNEZC | (rs.code() << kRsShift) | offset; | 1367 Instr instr = BNEZC | (rs.code() << kRsShift) | offset; |
| 1368 emit(instr); | 1368 emit(instr); |
| 1369 } | 1369 } |
| 1370 | 1370 |
| 1371 | 1371 |
| 1372 void Assembler::j(int64_t target) { | 1372 void Assembler::j(int64_t target) { |
| 1373 #if DEBUG | 1373 #ifdef DEBUG |
| 1374 // Get pc of delay slot. | 1374 // Get pc of delay slot. |
| 1375 uint64_t ipc = reinterpret_cast<uint64_t>(pc_ + 1 * kInstrSize); | 1375 uint64_t ipc = reinterpret_cast<uint64_t>(pc_ + 1 * kInstrSize); |
| 1376 bool in_range = (ipc ^ static_cast<uint64_t>(target) >> | 1376 bool in_range = ((ipc ^ static_cast<uint64_t>(target)) >> |
| 1377 (kImm26Bits + kImmFieldShift)) == 0; | 1377 (kImm26Bits + kImmFieldShift)) == 0; |
| 1378 ASSERT(in_range && ((target & 3) == 0)); | 1378 DCHECK(in_range && ((target & 3) == 0)); |
| 1379 #endif | 1379 #endif |
| 1380 GenInstrJump(J, target >> 2); | 1380 GenInstrJump(J, target >> 2); |
| 1381 } | 1381 } |
| 1382 | 1382 |
| 1383 | 1383 |
| 1384 void Assembler::jr(Register rs) { | 1384 void Assembler::jr(Register rs) { |
| 1385 if (kArchVariant != kMips64r6) { | 1385 if (kArchVariant != kMips64r6) { |
| 1386 BlockTrampolinePoolScope block_trampoline_pool(this); | 1386 BlockTrampolinePoolScope block_trampoline_pool(this); |
| 1387 if (rs.is(ra)) { | 1387 if (rs.is(ra)) { |
| 1388 positions_recorder()->WriteRecordedPositions(); | 1388 positions_recorder()->WriteRecordedPositions(); |
| 1389 } | 1389 } |
| 1390 GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR); | 1390 GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR); |
| 1391 BlockTrampolinePoolFor(1); // For associated delay slot. | 1391 BlockTrampolinePoolFor(1); // For associated delay slot. |
| 1392 } else { | 1392 } else { |
| 1393 jalr(rs, zero_reg); | 1393 jalr(rs, zero_reg); |
| 1394 } | 1394 } |
| 1395 } | 1395 } |
| 1396 | 1396 |
| 1397 | 1397 |
| 1398 void Assembler::jal(int64_t target) { | 1398 void Assembler::jal(int64_t target) { |
| 1399 #ifdef DEBUG | 1399 #ifdef DEBUG |
| 1400 // Get pc of delay slot. | 1400 // Get pc of delay slot. |
| 1401 uint64_t ipc = reinterpret_cast<uint64_t>(pc_ + 1 * kInstrSize); | 1401 uint64_t ipc = reinterpret_cast<uint64_t>(pc_ + 1 * kInstrSize); |
| 1402 bool in_range = (ipc ^ static_cast<uint64_t>(target) >> | 1402 bool in_range = ((ipc ^ static_cast<uint64_t>(target)) >> |
| 1403 (kImm26Bits + kImmFieldShift)) == 0; | 1403 (kImm26Bits + kImmFieldShift)) == 0; |
| 1404 ASSERT(in_range && ((target & 3) == 0)); | 1404 DCHECK(in_range && ((target & 3) == 0)); |
| 1405 #endif | 1405 #endif |
| 1406 positions_recorder()->WriteRecordedPositions(); | 1406 positions_recorder()->WriteRecordedPositions(); |
| 1407 GenInstrJump(JAL, target >> 2); | 1407 GenInstrJump(JAL, target >> 2); |
| 1408 } | 1408 } |
| 1409 | 1409 |
| 1410 | 1410 |
| 1411 void Assembler::jalr(Register rs, Register rd) { | 1411 void Assembler::jalr(Register rs, Register rd) { |
| 1412 BlockTrampolinePoolScope block_trampoline_pool(this); | 1412 BlockTrampolinePoolScope block_trampoline_pool(this); |
| 1413 positions_recorder()->WriteRecordedPositions(); | 1413 positions_recorder()->WriteRecordedPositions(); |
| 1414 GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR); | 1414 GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR); |
| (...skipping 49 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1464 void Assembler::mul(Register rd, Register rs, Register rt) { | 1464 void Assembler::mul(Register rd, Register rs, Register rt) { |
| 1465 if (kArchVariant == kMips64r6) { | 1465 if (kArchVariant == kMips64r6) { |
| 1466 GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH); | 1466 GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH); |
| 1467 } else { | 1467 } else { |
| 1468 GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL); | 1468 GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL); |
| 1469 } | 1469 } |
| 1470 } | 1470 } |
| 1471 | 1471 |
| 1472 | 1472 |
| 1473 void Assembler::muh(Register rd, Register rs, Register rt) { | 1473 void Assembler::muh(Register rd, Register rs, Register rt) { |
| 1474 ASSERT(kArchVariant == kMips64r6); | 1474 DCHECK(kArchVariant == kMips64r6); |
| 1475 GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH); | 1475 GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH); |
| 1476 } | 1476 } |
| 1477 | 1477 |
| 1478 | 1478 |
| 1479 void Assembler::mulu(Register rd, Register rs, Register rt) { | 1479 void Assembler::mulu(Register rd, Register rs, Register rt) { |
| 1480 ASSERT(kArchVariant == kMips64r6); | 1480 DCHECK(kArchVariant == kMips64r6); |
| 1481 GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH_U); | 1481 GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH_U); |
| 1482 } | 1482 } |
| 1483 | 1483 |
| 1484 | 1484 |
| 1485 void Assembler::muhu(Register rd, Register rs, Register rt) { | 1485 void Assembler::muhu(Register rd, Register rs, Register rt) { |
| 1486 ASSERT(kArchVariant == kMips64r6); | 1486 DCHECK(kArchVariant == kMips64r6); |
| 1487 GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH_U); | 1487 GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH_U); |
| 1488 } | 1488 } |
| 1489 | 1489 |
| 1490 | 1490 |
| 1491 void Assembler::dmul(Register rd, Register rs, Register rt) { | 1491 void Assembler::dmul(Register rd, Register rs, Register rt) { |
| 1492 ASSERT(kArchVariant == kMips64r6); | 1492 DCHECK(kArchVariant == kMips64r6); |
| 1493 GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, D_MUL_MUH); | 1493 GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, D_MUL_MUH); |
| 1494 } | 1494 } |
| 1495 | 1495 |
| 1496 | 1496 |
// DMUH: 64-bit multiply-high (signed), MIPS64r6-only encoding.
void Assembler::dmuh(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, D_MUL_MUH);
}
| 1501 | 1501 |
| 1502 | 1502 |
// DMULU: 64-bit multiply (unsigned, low word), MIPS64r6-only encoding.
void Assembler::dmulu(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, D_MUL_MUH_U);
}
| 1507 | 1507 |
| 1508 | 1508 |
// DMUHU: 64-bit multiply-high (unsigned), MIPS64r6-only encoding.
void Assembler::dmuhu(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, D_MUL_MUH_U);
}
| 1513 | 1513 |
| 1514 | 1514 |
// MULT: pre-r6 multiply; result goes to HI/LO (rd field is zero_reg).
void Assembler::mult(Register rs, Register rt) {
  DCHECK(kArchVariant != kMips64r6);  // Removed in r6 (replaced by mul/muh).
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULT);
}
| 1519 | 1519 |
| 1520 | 1520 |
// MULTU: pre-r6 unsigned multiply; result goes to HI/LO.
void Assembler::multu(Register rs, Register rt) {
  DCHECK(kArchVariant != kMips64r6);  // Removed in r6.
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULTU);
}
| 1525 | 1525 |
| 1526 | 1526 |
// DADDIU: 64-bit add immediate (no overflow trap); immediate range is
// checked by GenInstrImmediate.
void Assembler::daddiu(Register rd, Register rs, int32_t j) {
  GenInstrImmediate(DADDIU, rs, rd, j);
}
| 1530 | 1530 |
| 1531 | 1531 |
// DIV (2-operand, pre-r6 form): quotient/remainder go to LO/HI.
void Assembler::div(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIV);
}
| 1535 | 1535 |
| 1536 | 1536 |
// DIV (3-operand, r6 form): quotient written directly to rd.
void Assembler::div(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD);
}
| 1541 | 1541 |
| 1542 | 1542 |
// MOD: r6 signed remainder, written directly to rd.
void Assembler::mod(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD);
}
| 1547 | 1547 |
| 1548 | 1548 |
// DIVU (2-operand, pre-r6 form): unsigned divide to LO/HI.
void Assembler::divu(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU);
}
| 1552 | 1552 |
| 1553 | 1553 |
// DIVU (3-operand, r6 form): unsigned quotient to rd.
void Assembler::divu(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD_U);
}
| 1558 | 1558 |
| 1559 | 1559 |
// MODU: r6 unsigned remainder to rd.
void Assembler::modu(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD_U);
}
| 1564 | 1564 |
| 1565 | 1565 |
// DADDU: 64-bit register add (no overflow trap).
void Assembler::daddu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, DADDU);
}
| 1569 | 1569 |
| 1570 | 1570 |
| 1571 void Assembler::dsubu(Register rd, Register rs, Register rt) { | 1571 void Assembler::dsubu(Register rd, Register rs, Register rt) { |
| (...skipping 10 matching lines...) Expand all Loading... |
| 1582 GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DMULTU); | 1582 GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DMULTU); |
| 1583 } | 1583 } |
| 1584 | 1584 |
| 1585 | 1585 |
// DDIV (2-operand, pre-r6 form): 64-bit signed divide to LO/HI.
void Assembler::ddiv(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DDIV);
}
| 1589 | 1589 |
| 1590 | 1590 |
// DDIV (3-operand, r6 form): 64-bit signed quotient to rd.
void Assembler::ddiv(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, D_DIV_MOD);
}
| 1595 | 1595 |
| 1596 | 1596 |
// DMOD: r6 64-bit signed remainder to rd.
void Assembler::dmod(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, D_DIV_MOD);
}
| 1601 | 1601 |
| 1602 | 1602 |
// DDIVU (2-operand, pre-r6 form): 64-bit unsigned divide to LO/HI.
void Assembler::ddivu(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DDIVU);
}
| 1606 | 1606 |
| 1607 | 1607 |
// DDIVU (3-operand, r6 form): 64-bit unsigned quotient to rd.
void Assembler::ddivu(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, D_DIV_MOD_U);
}
| 1612 | 1612 |
| 1613 | 1613 |
// DMODU: r6 64-bit unsigned remainder to rd.
void Assembler::dmodu(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, D_DIV_MOD_U);
}
| 1618 | 1618 |
| 1619 | 1619 |
| 1620 // Logical. | 1620 // Logical. |
| 1621 | 1621 |
// AND: bitwise and. Trailing underscore avoids the C++ `and` keyword.
void Assembler::and_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, AND);
}
| 1625 | 1625 |
| 1626 | 1626 |
// ANDI: bitwise and with 16-bit zero-extended immediate.
void Assembler::andi(Register rt, Register rs, int32_t j) {
  DCHECK(is_uint16(j));  // Immediate field is 16 bits, unsigned.
  GenInstrImmediate(ANDI, rs, rt, j);
}
| 1631 | 1631 |
| 1632 | 1632 |
// OR: bitwise or. Trailing underscore avoids the C++ `or` keyword.
void Assembler::or_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, OR);
}
| 1636 | 1636 |
| 1637 | 1637 |
// ORI: bitwise or with 16-bit zero-extended immediate.
void Assembler::ori(Register rt, Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(ORI, rs, rt, j);
}
| 1642 | 1642 |
| 1643 | 1643 |
// XOR: bitwise exclusive or. Trailing underscore avoids the C++ `xor` keyword.
void Assembler::xor_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, XOR);
}
| 1647 | 1647 |
| 1648 | 1648 |
// XORI: bitwise exclusive or with 16-bit zero-extended immediate.
void Assembler::xori(Register rt, Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(XORI, rs, rt, j);
}
| 1653 | 1653 |
| 1654 | 1654 |
// NOR: bitwise not-or (rd = ~(rs | rt)).
void Assembler::nor(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, NOR);
}
| 1658 | 1658 |
| 1659 | 1659 |
| 1660 // Shifts. | 1660 // Shifts. |
// SLL: shift left logical by immediate sa. `coming_from_nop` is set only by
// the canonical nop() pseudo-instruction emitter (see DCHECK rationale below).
void Assembler::sll(Register rd,
                    Register rt,
                    uint16_t sa,
                    bool coming_from_nop) {
  // Don't allow nop instructions in the form sll zero_reg, zero_reg to be
  // generated using the sll instruction. They must be generated using
  // nop(int/NopMarkerTypes) or MarkCode(int/NopMarkerTypes) pseudo
  // instructions.
  DCHECK(coming_from_nop || !(rd.is(zero_reg) && rt.is(zero_reg)));
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SLL);
}
| 1672 | 1672 |
| 1673 | 1673 |
// SLLV: shift left logical by register amount (rs).
void Assembler::sllv(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLLV);
}
| 1677 | 1677 |
| 1678 | 1678 |
| 1679 void Assembler::srl(Register rd, Register rt, uint16_t sa) { | 1679 void Assembler::srl(Register rd, Register rt, uint16_t sa) { |
| (...skipping 11 matching lines...) Expand all Loading... |
| 1691 } | 1691 } |
| 1692 | 1692 |
| 1693 | 1693 |
// SRAV: shift right arithmetic by register amount (rs).
void Assembler::srav(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRAV);
}
| 1697 | 1697 |
| 1698 | 1698 |
// ROTR: rotate right by immediate sa. Encoded as SRL with bit 21 (rs field
// = 1) set, hence the hand-built instruction word rather than
// GenInstrRegister.
void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
  // Should be called via MacroAssembler::Ror.
  DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa));
  DCHECK(kArchVariant == kMips64r2);
  Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (sa << kSaShift) | SRL;
  emit(instr);
}
| 1707 | 1707 |
| 1708 | 1708 |
// ROTRV: rotate right by register amount. Encoded as SRLV with sa field = 1.
void Assembler::rotrv(Register rd, Register rt, Register rs) {
  // Should be called via MacroAssembler::Ror.
  DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid() );
  DCHECK(kArchVariant == kMips64r2);
  Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
  emit(instr);
}
| 1717 | 1717 |
| 1718 | 1718 |
// DSLL: 64-bit shift left logical by immediate sa (0-31).
void Assembler::dsll(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, DSLL);
}
| 1722 | 1722 |
| 1723 | 1723 |
// DSLLV: 64-bit shift left logical by register amount (rs).
void Assembler::dsllv(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSLLV);
}
| 1727 | 1727 |
| 1728 | 1728 |
// DSRL: 64-bit shift right logical by immediate sa (0-31).
void Assembler::dsrl(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, DSRL);
}
| 1732 | 1732 |
| 1733 | 1733 |
// DSRLV: 64-bit shift right logical by register amount (rs).
void Assembler::dsrlv(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSRLV);
}
| 1737 | 1737 |
| 1738 | 1738 |
// DROTR: 64-bit rotate right by immediate sa. Encoded as DSRL with the rs
// field set to 1, hence the hand-built instruction word.
void Assembler::drotr(Register rd, Register rt, uint16_t sa) {
  DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa));
  Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (sa << kSaShift) | DSRL;
  emit(instr);
}
| 1745 | 1745 |
| 1746 | 1746 |
// DROTRV: 64-bit rotate right by register amount. Encoded as DSRLV with the
// sa field set to 1.
void Assembler::drotrv(Register rd, Register rt, Register rs) {
  DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid() );
  Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (1 << kSaShift) | DSRLV;
  emit(instr);
}
| 1753 | 1753 |
| 1754 | 1754 |
// DSRA: 64-bit shift right arithmetic by immediate sa (0-31).
void Assembler::dsra(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, DSRA);
}
| 1758 | 1758 |
| (...skipping 15 matching lines...) Expand all Loading... |
| 1774 | 1774 |
// DSRA32: 64-bit shift right arithmetic by (sa + 32).
void Assembler::dsra32(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, DSRA32);
}
| 1778 | 1778 |
| 1779 | 1779 |
| 1780 // ------------Memory-instructions------------- | 1780 // ------------Memory-instructions------------- |
| 1781 | 1781 |
| 1782 // Helper for base-reg + offset, when offset is larger than int16. | 1782 // Helper for base-reg + offset, when offset is larger than int16. |
// Materializes src.rm() + src.offset_ into the `at` scratch register, for
// memory operands whose offset does not fit the 16-bit immediate field.
// Clobbers `at`; the offset must fit in 32 bits.
void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) {
  DCHECK(!src.rm().is(at));   // `at` is about to be clobbered.
  DCHECK(is_int32(src.offset_));
  daddiu(at, zero_reg, (src.offset_ >> kLuiShift) & kImm16Mask);
  dsll(at, at, kLuiShift);
  ori(at, at, src.offset_ & kImm16Mask);  // Load 32-bit offset.
  daddu(at, at, src.rm());  // Add base register.
}
| 1791 | 1791 |
| 1792 | 1792 |
| 1793 void Assembler::lb(Register rd, const MemOperand& rs) { | 1793 void Assembler::lb(Register rd, const MemOperand& rs) { |
| 1794 if (is_int16(rs.offset_)) { | 1794 if (is_int16(rs.offset_)) { |
| 1795 GenInstrImmediate(LB, rs.rm(), rd, rs.offset_); | 1795 GenInstrImmediate(LB, rs.rm(), rd, rs.offset_); |
| (...skipping 98 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1894 GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_); | 1894 GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_); |
| 1895 } | 1895 } |
| 1896 | 1896 |
| 1897 | 1897 |
// SWR: store word right (unaligned-store helper instruction).
void Assembler::swr(Register rd, const MemOperand& rs) {
  GenInstrImmediate(SWR, rs.rm(), rd, rs.offset_);
}
| 1901 | 1901 |
| 1902 | 1902 |
// LUI: load upper immediate (rs field is zero_reg; cf. aui below).
void Assembler::lui(Register rd, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(LUI, zero_reg, rd, j);
}
| 1907 | 1907 |
| 1908 | 1908 |
// AUI: add upper immediate.
void Assembler::aui(Register rs, Register rt, int32_t j) {
  // This instruction uses same opcode as 'lui'. The difference in encoding is
  // 'lui' has zero reg. for rs field.
  DCHECK(is_uint16(j));
  GenInstrImmediate(LUI, rs, rt, j);
}
| 1915 | 1915 |
| 1916 | 1916 |
// DAUI: 64-bit add upper immediate.
void Assembler::daui(Register rs, Register rt, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(DAUI, rs, rt, j);
}
| 1921 | 1921 |
| 1922 | 1922 |
// DAHI: add immediate to upper-middle bits (REGIMM encoding, rt field = DAHI).
void Assembler::dahi(Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(REGIMM, rs, DAHI, j);
}
| 1927 | 1927 |
| 1928 | 1928 |
// DATI: add immediate to top bits (REGIMM encoding, rt field = DATI).
void Assembler::dati(Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(REGIMM, rs, DATI, j);
}
| 1933 | 1933 |
| 1934 | 1934 |
// LDL: load doubleword left (unaligned-load helper instruction).
void Assembler::ldl(Register rd, const MemOperand& rs) {
  GenInstrImmediate(LDL, rs.rm(), rd, rs.offset_);
}
| 1938 | 1938 |
| 1939 | 1939 |
| 1940 void Assembler::ldr(Register rd, const MemOperand& rs) { | 1940 void Assembler::ldr(Register rd, const MemOperand& rs) { |
| (...skipping 28 matching lines...) Expand all Loading... |
| 1969 LoadRegPlusOffsetToAt(rs); | 1969 LoadRegPlusOffsetToAt(rs); |
| 1970 GenInstrImmediate(SD, at, rd, 0); // Equiv to sw(rd, MemOperand(at, 0)); | 1970 GenInstrImmediate(SD, at, rd, 0); // Equiv to sw(rd, MemOperand(at, 0)); |
| 1971 } | 1971 } |
| 1972 } | 1972 } |
| 1973 | 1973 |
| 1974 | 1974 |
| 1975 // -------------Misc-instructions-------------- | 1975 // -------------Misc-instructions-------------- |
| 1976 | 1976 |
| 1977 // Break / Trap instructions. | 1977 // Break / Trap instructions. |
// BREAK: software breakpoint carrying a 20-bit code. When `break_as_stop` is
// true the code must lie in the stop-code range; otherwise it must not, so a
// plain break can never be mistaken for a simulator stop (see below).
void Assembler::break_(uint32_t code, bool break_as_stop) {
  DCHECK((code & ~0xfffff) == 0);  // Code must fit the 20-bit field.
  // We need to invalidate breaks that could be stops as well because the
  // simulator expects a char pointer after the stop instruction.
  // See constants-mips.h for explanation.
  DCHECK((break_as_stop &&
          code <= kMaxStopCode &&
          code > kMaxWatchpointCode) ||
         (!break_as_stop &&
          (code > kMaxStopCode ||
           code <= kMaxWatchpointCode)));
  Instr break_instr = SPECIAL | BREAK | (code << 6);
  emit(break_instr);
}
| 1992 | 1992 |
| 1993 | 1993 |
// Emits a simulator stop with an attached message pointer. On real MIPS
// hardware this degenerates to a plain break; under the simulator the
// message address is embedded in the instruction stream right after the
// break, so the trampoline pool must not intervene.
void Assembler::stop(const char* msg, uint32_t code) {
  DCHECK(code > kMaxWatchpointCode);  // Stop codes live above watchpoints...
  DCHECK(code <= kMaxStopCode);       // ...and below the stop-code ceiling.
#if defined(V8_HOST_ARCH_MIPS) || defined(V8_HOST_ARCH_MIPS64)
  break_(0x54321);
#else  // V8_HOST_ARCH_MIPS
  BlockTrampolinePoolFor(3);
  // The Simulator will handle the stop instruction and get the message address.
  // On MIPS stop() is just a special kind of break_().
  break_(code, true);
  emit(reinterpret_cast<uint64_t>(msg));
#endif
}
| 2007 | 2007 |
| 2008 | 2008 |
// TGE: trap if greater-or-equal (signed), with a 10-bit trap code.
void Assembler::tge(Register rs, Register rt, uint16_t code) {
  DCHECK(is_uint10(code));
  Instr instr = SPECIAL | TGE | rs.code() << kRsShift
      | rt.code() << kRtShift | code << 6;
  emit(instr);
}
| 2015 | 2015 |
| 2016 | 2016 |
// TGEU: trap if greater-or-equal (unsigned), with a 10-bit trap code.
void Assembler::tgeu(Register rs, Register rt, uint16_t code) {
  DCHECK(is_uint10(code));
  Instr instr = SPECIAL | TGEU | rs.code() << kRsShift
      | rt.code() << kRtShift | code << 6;
  emit(instr);
}
| 2023 | 2023 |
| 2024 | 2024 |
// TLT: trap if less-than (signed), with a 10-bit trap code.
void Assembler::tlt(Register rs, Register rt, uint16_t code) {
  DCHECK(is_uint10(code));
  Instr instr =
      SPECIAL | TLT | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
  emit(instr);
}
| 2031 | 2031 |
| 2032 | 2032 |
// TLTU: trap if less-than (unsigned), with a 10-bit trap code.
void Assembler::tltu(Register rs, Register rt, uint16_t code) {
  DCHECK(is_uint10(code));
  Instr instr =
      SPECIAL | TLTU | rs.code() << kRsShift
      | rt.code() << kRtShift | code << 6;
  emit(instr);
}
| 2040 | 2040 |
| 2041 | 2041 |
// TEQ: trap if equal, with a 10-bit trap code.
void Assembler::teq(Register rs, Register rt, uint16_t code) {
  DCHECK(is_uint10(code));
  Instr instr =
      SPECIAL | TEQ | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
  emit(instr);
}
| 2048 | 2048 |
| 2049 | 2049 |
// TNE: trap if not-equal, with a 10-bit trap code.
void Assembler::tne(Register rs, Register rt, uint16_t code) {
  DCHECK(is_uint10(code));
  Instr instr =
      SPECIAL | TNE | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
  emit(instr);
}
| 2056 | 2056 |
| 2057 | 2057 |
| 2058 // Move from HI/LO register. | 2058 // Move from HI/LO register. |
| 2059 | 2059 |
| 2060 void Assembler::mfhi(Register rd) { | 2060 void Assembler::mfhi(Register rd) { |
| 2061 GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFHI); | 2061 GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFHI); |
| (...skipping 46 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2108 | 2108 |
// MOVF: conditional move on FP condition flag `cc` false. The cc number is
// packed into the rt field (bits 2..4; bit 0 = 0 selects the "false" sense).
void Assembler::movf(Register rd, Register rs, uint16_t cc) {
  Register rt;
  rt.code_ = (cc & 0x0007) << 2 | 0;
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
}
| 2114 | 2114 |
| 2115 | 2115 |
| 2116 void Assembler::sel(SecondaryField fmt, FPURegister fd, | 2116 void Assembler::sel(SecondaryField fmt, FPURegister fd, |
| 2117 FPURegister ft, FPURegister fs, uint8_t sel) { | 2117 FPURegister ft, FPURegister fs, uint8_t sel) { |
| 2118 ASSERT(kArchVariant == kMips64r6); | 2118 DCHECK(kArchVariant == kMips64r6); |
| 2119 ASSERT(fmt == D); | 2119 DCHECK(fmt == D); |
| 2120 ASSERT(fmt == S); | 2120 DCHECK(fmt == S); |
| 2121 | 2121 |
| 2122 Instr instr = COP1 | fmt << kRsShift | ft.code() << kFtShift | | 2122 Instr instr = COP1 | fmt << kRsShift | ft.code() << kFtShift | |
| 2123 fs.code() << kFsShift | fd.code() << kFdShift | SEL; | 2123 fs.code() << kFsShift | fd.code() << kFdShift | SEL; |
| 2124 emit(instr); | 2124 emit(instr); |
| 2125 } | 2125 } |
| 2126 | 2126 |
| 2127 | 2127 |
// GPR.
// SELEQZ (r6): rd-field operand is selected/zeroed based on rt == 0
// (fields passed straight through to the SPECIAL-format encoder).
void Assembler::seleqz(Register rs, Register rt, Register rd) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELEQZ_S);
}
| 2133 | 2133 |
| 2134 | 2134 |
| 2135 // FPR. | 2135 // FPR. |
| 2136 void Assembler::seleqz(SecondaryField fmt, FPURegister fd, | 2136 void Assembler::seleqz(SecondaryField fmt, FPURegister fd, |
| 2137 FPURegister ft, FPURegister fs) { | 2137 FPURegister ft, FPURegister fs) { |
| 2138 ASSERT(kArchVariant == kMips64r6); | 2138 DCHECK(kArchVariant == kMips64r6); |
| 2139 ASSERT(fmt == D); | 2139 DCHECK(fmt == D); |
| 2140 ASSERT(fmt == S); | 2140 DCHECK(fmt == S); |
| 2141 | 2141 |
| 2142 Instr instr = COP1 | fmt << kRsShift | ft.code() << kFtShift | | 2142 Instr instr = COP1 | fmt << kRsShift | ft.code() << kFtShift | |
| 2143 fs.code() << kFsShift | fd.code() << kFdShift | SELEQZ_C; | 2143 fs.code() << kFsShift | fd.code() << kFdShift | SELEQZ_C; |
| 2144 emit(instr); | 2144 emit(instr); |
| 2145 } | 2145 } |
| 2146 | 2146 |
| 2147 | 2147 |
// GPR.
// SELNEZ (r6): rd-field operand is selected/zeroed based on rt != 0
// (fields passed straight through to the SPECIAL-format encoder).
void Assembler::selnez(Register rs, Register rt, Register rd) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELNEZ_S);
}
| 2153 | 2153 |
| 2154 | 2154 |
| 2155 // FPR. | 2155 // FPR. |
| 2156 void Assembler::selnez(SecondaryField fmt, FPURegister fd, | 2156 void Assembler::selnez(SecondaryField fmt, FPURegister fd, |
| 2157 FPURegister ft, FPURegister fs) { | 2157 FPURegister ft, FPURegister fs) { |
| 2158 ASSERT(kArchVariant == kMips64r6); | 2158 DCHECK(kArchVariant == kMips64r6); |
| 2159 ASSERT(fmt == D); | 2159 DCHECK(fmt == D); |
| 2160 ASSERT(fmt == S); | 2160 DCHECK(fmt == S); |
| 2161 | 2161 |
| 2162 Instr instr = COP1 | fmt << kRsShift | ft.code() << kFtShift | | 2162 Instr instr = COP1 | fmt << kRsShift | ft.code() << kFtShift | |
| 2163 fs.code() << kFsShift | fd.code() << kFdShift | SELNEZ_C; | 2163 fs.code() << kFsShift | fd.code() << kFdShift | SELNEZ_C; |
| 2164 emit(instr); | 2164 emit(instr); |
| 2165 } | 2165 } |
| 2166 | 2166 |
| 2167 | 2167 |
| 2168 // Bit twiddling. | 2168 // Bit twiddling. |
// CLZ: count leading zeros. Pre-r6 uses the SPECIAL2 encoding (rd duplicated
// into the rt field, as required); r6 uses the SPECIAL encoding with sa = 1.
void Assembler::clz(Register rd, Register rs) {
  if (kArchVariant != kMips64r6) {
    // Clz instr requires same GPR number in 'rd' and 'rt' fields.
    GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ);
  } else {
    GenInstrRegister(SPECIAL, rs, zero_reg, rd, 1, CLZ_R6);
  }
}
| 2177 | 2177 |
| 2178 | 2178 |
// INS: insert `size` bits of rs into rt starting at bit `pos`. The msb/lsb
// fields of the encoding are derived as (pos + size - 1) and pos.
void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Ins.
  // Ins instr has 'rt' field as dest, and two uint5: msb, lsb.
  DCHECK((kArchVariant == kMips64r2) || (kArchVariant == kMips64r6));
  GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS);
}
| 2185 | 2185 |
| 2186 | 2186 |
| 2187 void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) { | 2187 void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) { |
| 2188 // Should be called via MacroAssembler::Ext. | 2188 // Should be called via MacroAssembler::Ext. |
| 2189 // Ext instr has 'rt' field as dest, and two uint5: msb, lsb. | 2189 // Ext instr has 'rt' field as dest, and two uint5: msb, lsb. |
| 2190 ASSERT(kArchVariant == kMips64r2 || kArchVariant == kMips64r6); | 2190 DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6); |
| 2191 GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT); | 2191 GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT); |
| 2192 } | 2192 } |
| 2193 | 2193 |
| 2194 | 2194 |
| 2195 void Assembler::pref(int32_t hint, const MemOperand& rs) { | 2195 void Assembler::pref(int32_t hint, const MemOperand& rs) { |
| 2196 ASSERT(is_uint5(hint) && is_uint16(rs.offset_)); | 2196 DCHECK(is_uint5(hint) && is_uint16(rs.offset_)); |
| 2197 Instr instr = PREF | (rs.rm().code() << kRsShift) | (hint << kRtShift) | 2197 Instr instr = PREF | (rs.rm().code() << kRsShift) | (hint << kRtShift) |
| 2198 | (rs.offset_); | 2198 | (rs.offset_); |
| 2199 emit(instr); | 2199 emit(instr); |
| 2200 } | 2200 } |
| 2201 | 2201 |
| 2202 | 2202 |
| 2203 // --------Coprocessor-instructions---------------- | 2203 // --------Coprocessor-instructions---------------- |
| 2204 | 2204 |
| 2205 // Load, store, move. | 2205 // Load, store, move. |
| 2206 void Assembler::lwc1(FPURegister fd, const MemOperand& src) { | 2206 void Assembler::lwc1(FPURegister fd, const MemOperand& src) { |
| (...skipping 159 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2366 GenInstrRegister(COP1, S, f0, fs, fd, CEIL_W_S); | 2366 GenInstrRegister(COP1, S, f0, fs, fd, CEIL_W_S); |
| 2367 } | 2367 } |
| 2368 | 2368 |
| 2369 | 2369 |
| 2370 void Assembler::ceil_w_d(FPURegister fd, FPURegister fs) { | 2370 void Assembler::ceil_w_d(FPURegister fd, FPURegister fs) { |
| 2371 GenInstrRegister(COP1, D, f0, fs, fd, CEIL_W_D); | 2371 GenInstrRegister(COP1, D, f0, fs, fd, CEIL_W_D); |
| 2372 } | 2372 } |
| 2373 | 2373 |
| 2374 | 2374 |
| 2375 void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) { | 2375 void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) { |
| 2376 ASSERT(kArchVariant == kMips64r2); | 2376 DCHECK(kArchVariant == kMips64r2); |
| 2377 GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S); | 2377 GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S); |
| 2378 } | 2378 } |
| 2379 | 2379 |
| 2380 | 2380 |
| 2381 void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) { | 2381 void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) { |
| 2382 ASSERT(kArchVariant == kMips64r2); | 2382 DCHECK(kArchVariant == kMips64r2); |
| 2383 GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D); | 2383 GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D); |
| 2384 } | 2384 } |
| 2385 | 2385 |
| 2386 | 2386 |
| 2387 void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) { | 2387 void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) { |
| 2388 ASSERT(kArchVariant == kMips64r2); | 2388 DCHECK(kArchVariant == kMips64r2); |
| 2389 GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S); | 2389 GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S); |
| 2390 } | 2390 } |
| 2391 | 2391 |
| 2392 | 2392 |
| 2393 void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) { | 2393 void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) { |
| 2394 ASSERT(kArchVariant == kMips64r2); | 2394 DCHECK(kArchVariant == kMips64r2); |
| 2395 GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D); | 2395 GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D); |
| 2396 } | 2396 } |
| 2397 | 2397 |
| 2398 | 2398 |
| 2399 void Assembler::round_l_s(FPURegister fd, FPURegister fs) { | 2399 void Assembler::round_l_s(FPURegister fd, FPURegister fs) { |
| 2400 GenInstrRegister(COP1, S, f0, fs, fd, ROUND_L_S); | 2400 GenInstrRegister(COP1, S, f0, fs, fd, ROUND_L_S); |
| 2401 } | 2401 } |
| 2402 | 2402 |
| 2403 | 2403 |
| 2404 void Assembler::round_l_d(FPURegister fd, FPURegister fs) { | 2404 void Assembler::round_l_d(FPURegister fd, FPURegister fs) { |
| (...skipping 16 matching lines...) Expand all Loading... |
| 2421 } | 2421 } |
| 2422 | 2422 |
| 2423 | 2423 |
| 2424 void Assembler::ceil_l_d(FPURegister fd, FPURegister fs) { | 2424 void Assembler::ceil_l_d(FPURegister fd, FPURegister fs) { |
| 2425 GenInstrRegister(COP1, D, f0, fs, fd, CEIL_L_D); | 2425 GenInstrRegister(COP1, D, f0, fs, fd, CEIL_L_D); |
| 2426 } | 2426 } |
| 2427 | 2427 |
| 2428 | 2428 |
| 2429 void Assembler::min(SecondaryField fmt, FPURegister fd, FPURegister ft, | 2429 void Assembler::min(SecondaryField fmt, FPURegister fd, FPURegister ft, |
| 2430 FPURegister fs) { | 2430 FPURegister fs) { |
| 2431 ASSERT(kArchVariant == kMips64r6); | 2431 DCHECK(kArchVariant == kMips64r6); |
| 2432 ASSERT((fmt == D) || (fmt == S)); | 2432 DCHECK((fmt == D) || (fmt == S)); |
| 2433 GenInstrRegister(COP1, fmt, ft, fs, fd, MIN); | 2433 GenInstrRegister(COP1, fmt, ft, fs, fd, MIN); |
| 2434 } | 2434 } |
| 2435 | 2435 |
| 2436 | 2436 |
| 2437 void Assembler::mina(SecondaryField fmt, FPURegister fd, FPURegister ft, | 2437 void Assembler::mina(SecondaryField fmt, FPURegister fd, FPURegister ft, |
| 2438 FPURegister fs) { | 2438 FPURegister fs) { |
| 2439 ASSERT(kArchVariant == kMips64r6); | 2439 DCHECK(kArchVariant == kMips64r6); |
| 2440 ASSERT((fmt == D) || (fmt == S)); | 2440 DCHECK((fmt == D) || (fmt == S)); |
| 2441 GenInstrRegister(COP1, fmt, ft, fs, fd, MINA); | 2441 GenInstrRegister(COP1, fmt, ft, fs, fd, MINA); |
| 2442 } | 2442 } |
| 2443 | 2443 |
| 2444 | 2444 |
| 2445 void Assembler::max(SecondaryField fmt, FPURegister fd, FPURegister ft, | 2445 void Assembler::max(SecondaryField fmt, FPURegister fd, FPURegister ft, |
| 2446 FPURegister fs) { | 2446 FPURegister fs) { |
| 2447 ASSERT(kArchVariant == kMips64r6); | 2447 DCHECK(kArchVariant == kMips64r6); |
| 2448 ASSERT((fmt == D) || (fmt == S)); | 2448 DCHECK((fmt == D) || (fmt == S)); |
| 2449 GenInstrRegister(COP1, fmt, ft, fs, fd, MAX); | 2449 GenInstrRegister(COP1, fmt, ft, fs, fd, MAX); |
| 2450 } | 2450 } |
| 2451 | 2451 |
| 2452 | 2452 |
| 2453 void Assembler::maxa(SecondaryField fmt, FPURegister fd, FPURegister ft, | 2453 void Assembler::maxa(SecondaryField fmt, FPURegister fd, FPURegister ft, |
| 2454 FPURegister fs) { | 2454 FPURegister fs) { |
| 2455 ASSERT(kArchVariant == kMips64r6); | 2455 DCHECK(kArchVariant == kMips64r6); |
| 2456 ASSERT((fmt == D) || (fmt == S)); | 2456 DCHECK((fmt == D) || (fmt == S)); |
| 2457 GenInstrRegister(COP1, fmt, ft, fs, fd, MAXA); | 2457 GenInstrRegister(COP1, fmt, ft, fs, fd, MAXA); |
| 2458 } | 2458 } |
| 2459 | 2459 |
| 2460 | 2460 |
| 2461 void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) { | 2461 void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) { |
| 2462 GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W); | 2462 GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W); |
| 2463 } | 2463 } |
| 2464 | 2464 |
| 2465 | 2465 |
| 2466 void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) { | 2466 void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) { |
| 2467 ASSERT(kArchVariant == kMips64r2); | 2467 DCHECK(kArchVariant == kMips64r2); |
| 2468 GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L); | 2468 GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L); |
| 2469 } | 2469 } |
| 2470 | 2470 |
| 2471 | 2471 |
| 2472 void Assembler::cvt_s_d(FPURegister fd, FPURegister fs) { | 2472 void Assembler::cvt_s_d(FPURegister fd, FPURegister fs) { |
| 2473 GenInstrRegister(COP1, D, f0, fs, fd, CVT_S_D); | 2473 GenInstrRegister(COP1, D, f0, fs, fd, CVT_S_D); |
| 2474 } | 2474 } |
| 2475 | 2475 |
| 2476 | 2476 |
| 2477 void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) { | 2477 void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) { |
| 2478 GenInstrRegister(COP1, W, f0, fs, fd, CVT_D_W); | 2478 GenInstrRegister(COP1, W, f0, fs, fd, CVT_D_W); |
| 2479 } | 2479 } |
| 2480 | 2480 |
| 2481 | 2481 |
| 2482 void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) { | 2482 void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) { |
| 2483 ASSERT(kArchVariant == kMips64r2); | 2483 DCHECK(kArchVariant == kMips64r2); |
| 2484 GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L); | 2484 GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L); |
| 2485 } | 2485 } |
| 2486 | 2486 |
| 2487 | 2487 |
| 2488 void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) { | 2488 void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) { |
| 2489 GenInstrRegister(COP1, S, f0, fs, fd, CVT_D_S); | 2489 GenInstrRegister(COP1, S, f0, fs, fd, CVT_D_S); |
| 2490 } | 2490 } |
| 2491 | 2491 |
| 2492 | 2492 |
| 2493 // Conditions for >= MIPSr6. | 2493 // Conditions for >= MIPSr6. |
| 2494 void Assembler::cmp(FPUCondition cond, SecondaryField fmt, | 2494 void Assembler::cmp(FPUCondition cond, SecondaryField fmt, |
| 2495 FPURegister fd, FPURegister fs, FPURegister ft) { | 2495 FPURegister fd, FPURegister fs, FPURegister ft) { |
| 2496 ASSERT(kArchVariant == kMips64r6); | 2496 DCHECK(kArchVariant == kMips64r6); |
| 2497 ASSERT((fmt & ~(31 << kRsShift)) == 0); | 2497 DCHECK((fmt & ~(31 << kRsShift)) == 0); |
| 2498 Instr instr = COP1 | fmt | ft.code() << kFtShift | | 2498 Instr instr = COP1 | fmt | ft.code() << kFtShift | |
| 2499 fs.code() << kFsShift | fd.code() << kFdShift | (0 << 5) | cond; | 2499 fs.code() << kFsShift | fd.code() << kFdShift | (0 << 5) | cond; |
| 2500 emit(instr); | 2500 emit(instr); |
| 2501 } | 2501 } |
| 2502 | 2502 |
| 2503 | 2503 |
| 2504 void Assembler::bc1eqz(int16_t offset, FPURegister ft) { | 2504 void Assembler::bc1eqz(int16_t offset, FPURegister ft) { |
| 2505 ASSERT(kArchVariant == kMips64r6); | 2505 DCHECK(kArchVariant == kMips64r6); |
| 2506 Instr instr = COP1 | BC1EQZ | ft.code() << kFtShift | (offset & kImm16Mask); | 2506 Instr instr = COP1 | BC1EQZ | ft.code() << kFtShift | (offset & kImm16Mask); |
| 2507 emit(instr); | 2507 emit(instr); |
| 2508 } | 2508 } |
| 2509 | 2509 |
| 2510 | 2510 |
| 2511 void Assembler::bc1nez(int16_t offset, FPURegister ft) { | 2511 void Assembler::bc1nez(int16_t offset, FPURegister ft) { |
| 2512 ASSERT(kArchVariant == kMips64r6); | 2512 DCHECK(kArchVariant == kMips64r6); |
| 2513 Instr instr = COP1 | BC1NEZ | ft.code() << kFtShift | (offset & kImm16Mask); | 2513 Instr instr = COP1 | BC1NEZ | ft.code() << kFtShift | (offset & kImm16Mask); |
| 2514 emit(instr); | 2514 emit(instr); |
| 2515 } | 2515 } |
| 2516 | 2516 |
| 2517 | 2517 |
| 2518 // Conditions for < MIPSr6. | 2518 // Conditions for < MIPSr6. |
| 2519 void Assembler::c(FPUCondition cond, SecondaryField fmt, | 2519 void Assembler::c(FPUCondition cond, SecondaryField fmt, |
| 2520 FPURegister fs, FPURegister ft, uint16_t cc) { | 2520 FPURegister fs, FPURegister ft, uint16_t cc) { |
| 2521 ASSERT(kArchVariant != kMips64r6); | 2521 DCHECK(kArchVariant != kMips64r6); |
| 2522 ASSERT(is_uint3(cc)); | 2522 DCHECK(is_uint3(cc)); |
| 2523 ASSERT((fmt & ~(31 << kRsShift)) == 0); | 2523 DCHECK((fmt & ~(31 << kRsShift)) == 0); |
| 2524 Instr instr = COP1 | fmt | ft.code() << kFtShift | fs.code() << kFsShift | 2524 Instr instr = COP1 | fmt | ft.code() << kFtShift | fs.code() << kFsShift |
| 2525 | cc << 8 | 3 << 4 | cond; | 2525 | cc << 8 | 3 << 4 | cond; |
| 2526 emit(instr); | 2526 emit(instr); |
| 2527 } | 2527 } |
| 2528 | 2528 |
| 2529 | 2529 |
| 2530 void Assembler::fcmp(FPURegister src1, const double src2, | 2530 void Assembler::fcmp(FPURegister src1, const double src2, |
| 2531 FPUCondition cond) { | 2531 FPUCondition cond) { |
| 2532 ASSERT(src2 == 0.0); | 2532 DCHECK(src2 == 0.0); |
| 2533 mtc1(zero_reg, f14); | 2533 mtc1(zero_reg, f14); |
| 2534 cvt_d_w(f14, f14); | 2534 cvt_d_w(f14, f14); |
| 2535 c(cond, D, src1, f14, 0); | 2535 c(cond, D, src1, f14, 0); |
| 2536 } | 2536 } |
| 2537 | 2537 |
| 2538 | 2538 |
| 2539 void Assembler::bc1f(int16_t offset, uint16_t cc) { | 2539 void Assembler::bc1f(int16_t offset, uint16_t cc) { |
| 2540 ASSERT(is_uint3(cc)); | 2540 DCHECK(is_uint3(cc)); |
| 2541 Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask); | 2541 Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask); |
| 2542 emit(instr); | 2542 emit(instr); |
| 2543 } | 2543 } |
| 2544 | 2544 |
| 2545 | 2545 |
| 2546 void Assembler::bc1t(int16_t offset, uint16_t cc) { | 2546 void Assembler::bc1t(int16_t offset, uint16_t cc) { |
| 2547 ASSERT(is_uint3(cc)); | 2547 DCHECK(is_uint3(cc)); |
| 2548 Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask); | 2548 Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask); |
| 2549 emit(instr); | 2549 emit(instr); |
| 2550 } | 2550 } |
| 2551 | 2551 |
| 2552 | 2552 |
| 2553 // Debugging. | 2553 // Debugging. |
| 2554 void Assembler::RecordJSReturn() { | 2554 void Assembler::RecordJSReturn() { |
| 2555 positions_recorder()->WriteRecordedPositions(); | 2555 positions_recorder()->WriteRecordedPositions(); |
| 2556 CheckBuffer(); | 2556 CheckBuffer(); |
| 2557 RecordRelocInfo(RelocInfo::JS_RETURN); | 2557 RecordRelocInfo(RelocInfo::JS_RETURN); |
| (...skipping 10 matching lines...) Expand all Loading... |
| 2568 void Assembler::RecordComment(const char* msg) { | 2568 void Assembler::RecordComment(const char* msg) { |
| 2569 if (FLAG_code_comments) { | 2569 if (FLAG_code_comments) { |
| 2570 CheckBuffer(); | 2570 CheckBuffer(); |
| 2571 RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg)); | 2571 RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg)); |
| 2572 } | 2572 } |
| 2573 } | 2573 } |
| 2574 | 2574 |
| 2575 | 2575 |
| 2576 int Assembler::RelocateInternalReference(byte* pc, intptr_t pc_delta) { | 2576 int Assembler::RelocateInternalReference(byte* pc, intptr_t pc_delta) { |
| 2577 Instr instr = instr_at(pc); | 2577 Instr instr = instr_at(pc); |
| 2578 ASSERT(IsJ(instr) || IsLui(instr)); | 2578 DCHECK(IsJ(instr) || IsLui(instr)); |
| 2579 if (IsLui(instr)) { | 2579 if (IsLui(instr)) { |
| 2580 Instr instr_lui = instr_at(pc + 0 * Assembler::kInstrSize); | 2580 Instr instr_lui = instr_at(pc + 0 * Assembler::kInstrSize); |
| 2581 Instr instr_ori = instr_at(pc + 1 * Assembler::kInstrSize); | 2581 Instr instr_ori = instr_at(pc + 1 * Assembler::kInstrSize); |
| 2582 Instr instr_ori2 = instr_at(pc + 3 * Assembler::kInstrSize); | 2582 Instr instr_ori2 = instr_at(pc + 3 * Assembler::kInstrSize); |
| 2583 ASSERT(IsOri(instr_ori)); | 2583 DCHECK(IsOri(instr_ori)); |
| 2584 ASSERT(IsOri(instr_ori2)); | 2584 DCHECK(IsOri(instr_ori2)); |
| 2585 // TODO(plind): symbolic names for the shifts. | 2585 // TODO(plind): symbolic names for the shifts. |
| 2586 int64_t imm = (instr_lui & static_cast<int64_t>(kImm16Mask)) << 48; | 2586 int64_t imm = (instr_lui & static_cast<int64_t>(kImm16Mask)) << 48; |
| 2587 imm |= (instr_ori & static_cast<int64_t>(kImm16Mask)) << 32; | 2587 imm |= (instr_ori & static_cast<int64_t>(kImm16Mask)) << 32; |
| 2588 imm |= (instr_ori2 & static_cast<int64_t>(kImm16Mask)) << 16; | 2588 imm |= (instr_ori2 & static_cast<int64_t>(kImm16Mask)) << 16; |
| 2589 // Sign extend address. | 2589 // Sign extend address. |
| 2590 imm >>= 16; | 2590 imm >>= 16; |
| 2591 | 2591 |
| 2592 if (imm == kEndOfJumpChain) { | 2592 if (imm == kEndOfJumpChain) { |
| 2593 return 0; // Number of instructions patched. | 2593 return 0; // Number of instructions patched. |
| 2594 } | 2594 } |
| 2595 imm += pc_delta; | 2595 imm += pc_delta; |
| 2596 ASSERT((imm & 3) == 0); | 2596 DCHECK((imm & 3) == 0); |
| 2597 | 2597 |
| 2598 instr_lui &= ~kImm16Mask; | 2598 instr_lui &= ~kImm16Mask; |
| 2599 instr_ori &= ~kImm16Mask; | 2599 instr_ori &= ~kImm16Mask; |
| 2600 instr_ori2 &= ~kImm16Mask; | 2600 instr_ori2 &= ~kImm16Mask; |
| 2601 | 2601 |
| 2602 instr_at_put(pc + 0 * Assembler::kInstrSize, | 2602 instr_at_put(pc + 0 * Assembler::kInstrSize, |
| 2603 instr_lui | ((imm >> 32) & kImm16Mask)); | 2603 instr_lui | ((imm >> 32) & kImm16Mask)); |
| 2604 instr_at_put(pc + 1 * Assembler::kInstrSize, | 2604 instr_at_put(pc + 1 * Assembler::kInstrSize, |
| 2605 instr_ori | (imm >> 16 & kImm16Mask)); | 2605 instr_ori | (imm >> 16 & kImm16Mask)); |
| 2606 instr_at_put(pc + 3 * Assembler::kInstrSize, | 2606 instr_at_put(pc + 3 * Assembler::kInstrSize, |
| 2607 instr_ori2 | (imm & kImm16Mask)); | 2607 instr_ori2 | (imm & kImm16Mask)); |
| 2608 return 4; // Number of instructions patched. | 2608 return 4; // Number of instructions patched. |
| 2609 } else { | 2609 } else { |
| 2610 uint32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2; | 2610 uint32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2; |
| 2611 if (static_cast<int32_t>(imm28) == kEndOfJumpChain) { | 2611 if (static_cast<int32_t>(imm28) == kEndOfJumpChain) { |
| 2612 return 0; // Number of instructions patched. | 2612 return 0; // Number of instructions patched. |
| 2613 } | 2613 } |
| 2614 | 2614 |
| 2615 imm28 += pc_delta; | 2615 imm28 += pc_delta; |
| 2616 imm28 &= kImm28Mask; | 2616 imm28 &= kImm28Mask; |
| 2617 ASSERT((imm28 & 3) == 0); | 2617 DCHECK((imm28 & 3) == 0); |
| 2618 | 2618 |
| 2619 instr &= ~kImm26Mask; | 2619 instr &= ~kImm26Mask; |
| 2620 uint32_t imm26 = imm28 >> 2; | 2620 uint32_t imm26 = imm28 >> 2; |
| 2621 ASSERT(is_uint26(imm26)); | 2621 DCHECK(is_uint26(imm26)); |
| 2622 | 2622 |
| 2623 instr_at_put(pc, instr | (imm26 & kImm26Mask)); | 2623 instr_at_put(pc, instr | (imm26 & kImm26Mask)); |
| 2624 return 1; // Number of instructions patched. | 2624 return 1; // Number of instructions patched. |
| 2625 } | 2625 } |
| 2626 } | 2626 } |
| 2627 | 2627 |
| 2628 | 2628 |
| 2629 void Assembler::GrowBuffer() { | 2629 void Assembler::GrowBuffer() { |
| 2630 if (!own_buffer_) FATAL("external code buffer is too small"); | 2630 if (!own_buffer_) FATAL("external code buffer is too small"); |
| 2631 | 2631 |
| (...skipping 32 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2664 | 2664 |
| 2665 // Relocate runtime entries. | 2665 // Relocate runtime entries. |
| 2666 for (RelocIterator it(desc); !it.done(); it.next()) { | 2666 for (RelocIterator it(desc); !it.done(); it.next()) { |
| 2667 RelocInfo::Mode rmode = it.rinfo()->rmode(); | 2667 RelocInfo::Mode rmode = it.rinfo()->rmode(); |
| 2668 if (rmode == RelocInfo::INTERNAL_REFERENCE) { | 2668 if (rmode == RelocInfo::INTERNAL_REFERENCE) { |
| 2669 byte* p = reinterpret_cast<byte*>(it.rinfo()->pc()); | 2669 byte* p = reinterpret_cast<byte*>(it.rinfo()->pc()); |
| 2670 RelocateInternalReference(p, pc_delta); | 2670 RelocateInternalReference(p, pc_delta); |
| 2671 } | 2671 } |
| 2672 } | 2672 } |
| 2673 | 2673 |
| 2674 ASSERT(!overflow()); | 2674 DCHECK(!overflow()); |
| 2675 } | 2675 } |
| 2676 | 2676 |
| 2677 | 2677 |
| 2678 void Assembler::db(uint8_t data) { | 2678 void Assembler::db(uint8_t data) { |
| 2679 CheckBuffer(); | 2679 CheckBuffer(); |
| 2680 *reinterpret_cast<uint8_t*>(pc_) = data; | 2680 *reinterpret_cast<uint8_t*>(pc_) = data; |
| 2681 pc_ += sizeof(uint8_t); | 2681 pc_ += sizeof(uint8_t); |
| 2682 } | 2682 } |
| 2683 | 2683 |
| 2684 | 2684 |
| (...skipping 10 matching lines...) Expand all Loading... |
| 2695 reinterpret_cast<uint64_t>(stub->instruction_start()); | 2695 reinterpret_cast<uint64_t>(stub->instruction_start()); |
| 2696 pc_ += sizeof(uint64_t); | 2696 pc_ += sizeof(uint64_t); |
| 2697 } | 2697 } |
| 2698 | 2698 |
| 2699 | 2699 |
| 2700 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { | 2700 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { |
| 2701 // We do not try to reuse pool constants. | 2701 // We do not try to reuse pool constants. |
| 2702 RelocInfo rinfo(pc_, rmode, data, NULL); | 2702 RelocInfo rinfo(pc_, rmode, data, NULL); |
| 2703 if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) { | 2703 if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) { |
| 2704 // Adjust code for new modes. | 2704 // Adjust code for new modes. |
| 2705 ASSERT(RelocInfo::IsDebugBreakSlot(rmode) | 2705 DCHECK(RelocInfo::IsDebugBreakSlot(rmode) |
| 2706 || RelocInfo::IsJSReturn(rmode) | 2706 || RelocInfo::IsJSReturn(rmode) |
| 2707 || RelocInfo::IsComment(rmode) | 2707 || RelocInfo::IsComment(rmode) |
| 2708 || RelocInfo::IsPosition(rmode)); | 2708 || RelocInfo::IsPosition(rmode)); |
| 2709 // These modes do not need an entry in the constant pool. | 2709 // These modes do not need an entry in the constant pool. |
| 2710 } | 2710 } |
| 2711 if (!RelocInfo::IsNone(rinfo.rmode())) { | 2711 if (!RelocInfo::IsNone(rinfo.rmode())) { |
| 2712 // Don't record external references unless the heap will be serialized. | 2712 // Don't record external references unless the heap will be serialized. |
| 2713 if (rmode == RelocInfo::EXTERNAL_REFERENCE && | 2713 if (rmode == RelocInfo::EXTERNAL_REFERENCE && |
| 2714 !serializer_enabled() && !emit_debug_code()) { | 2714 !serializer_enabled() && !emit_debug_code()) { |
| 2715 return; | 2715 return; |
| 2716 } | 2716 } |
| 2717 ASSERT(buffer_space() >= kMaxRelocSize); // Too late to grow buffer here. | 2717 DCHECK(buffer_space() >= kMaxRelocSize); // Too late to grow buffer here. |
| 2718 if (rmode == RelocInfo::CODE_TARGET_WITH_ID) { | 2718 if (rmode == RelocInfo::CODE_TARGET_WITH_ID) { |
| 2719 RelocInfo reloc_info_with_ast_id(pc_, | 2719 RelocInfo reloc_info_with_ast_id(pc_, |
| 2720 rmode, | 2720 rmode, |
| 2721 RecordedAstId().ToInt(), | 2721 RecordedAstId().ToInt(), |
| 2722 NULL); | 2722 NULL); |
| 2723 ClearRecordedAstId(); | 2723 ClearRecordedAstId(); |
| 2724 reloc_info_writer.Write(&reloc_info_with_ast_id); | 2724 reloc_info_writer.Write(&reloc_info_with_ast_id); |
| 2725 } else { | 2725 } else { |
| 2726 reloc_info_writer.Write(&rinfo); | 2726 reloc_info_writer.Write(&rinfo); |
| 2727 } | 2727 } |
| (...skipping 17 matching lines...) Expand all Loading... |
| 2745 // Emission is currently blocked; make sure we try again as soon as | 2745 // Emission is currently blocked; make sure we try again as soon as |
| 2746 // possible. | 2746 // possible. |
| 2747 if (trampoline_pool_blocked_nesting_ > 0) { | 2747 if (trampoline_pool_blocked_nesting_ > 0) { |
| 2748 next_buffer_check_ = pc_offset() + kInstrSize; | 2748 next_buffer_check_ = pc_offset() + kInstrSize; |
| 2749 } else { | 2749 } else { |
| 2750 next_buffer_check_ = no_trampoline_pool_before_; | 2750 next_buffer_check_ = no_trampoline_pool_before_; |
| 2751 } | 2751 } |
| 2752 return; | 2752 return; |
| 2753 } | 2753 } |
| 2754 | 2754 |
| 2755 ASSERT(!trampoline_emitted_); | 2755 DCHECK(!trampoline_emitted_); |
| 2756 ASSERT(unbound_labels_count_ >= 0); | 2756 DCHECK(unbound_labels_count_ >= 0); |
| 2757 if (unbound_labels_count_ > 0) { | 2757 if (unbound_labels_count_ > 0) { |
| 2758 // First we emit jump (2 instructions), then we emit trampoline pool. | 2758 // First we emit jump (2 instructions), then we emit trampoline pool. |
| 2759 { BlockTrampolinePoolScope block_trampoline_pool(this); | 2759 { BlockTrampolinePoolScope block_trampoline_pool(this); |
| 2760 Label after_pool; | 2760 Label after_pool; |
| 2761 b(&after_pool); | 2761 b(&after_pool); |
| 2762 nop(); | 2762 nop(); |
| 2763 | 2763 |
| 2764 int pool_start = pc_offset(); | 2764 int pool_start = pc_offset(); |
| 2765 for (int i = 0; i < unbound_labels_count_; i++) { | 2765 for (int i = 0; i < unbound_labels_count_; i++) { |
| 2766 uint64_t imm64; | 2766 uint64_t imm64; |
| (...skipping 120 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2887 // Jump to label may follow at pc + 2 * kInstrSize. | 2887 // Jump to label may follow at pc + 2 * kInstrSize. |
| 2888 uint32_t* p = reinterpret_cast<uint32_t*>(pc); | 2888 uint32_t* p = reinterpret_cast<uint32_t*>(pc); |
| 2889 #ifdef DEBUG | 2889 #ifdef DEBUG |
| 2890 Instr instr1 = instr_at(pc); | 2890 Instr instr1 = instr_at(pc); |
| 2891 #endif | 2891 #endif |
| 2892 Instr instr2 = instr_at(pc + 1 * kInstrSize); | 2892 Instr instr2 = instr_at(pc + 1 * kInstrSize); |
| 2893 Instr instr3 = instr_at(pc + 6 * kInstrSize); | 2893 Instr instr3 = instr_at(pc + 6 * kInstrSize); |
| 2894 bool patched = false; | 2894 bool patched = false; |
| 2895 | 2895 |
| 2896 if (IsJal(instr3)) { | 2896 if (IsJal(instr3)) { |
| 2897 ASSERT(GetOpcodeField(instr1) == LUI); | 2897 DCHECK(GetOpcodeField(instr1) == LUI); |
| 2898 ASSERT(GetOpcodeField(instr2) == ORI); | 2898 DCHECK(GetOpcodeField(instr2) == ORI); |
| 2899 | 2899 |
| 2900 uint32_t rs_field = GetRt(instr2) << kRsShift; | 2900 uint32_t rs_field = GetRt(instr2) << kRsShift; |
| 2901 uint32_t rd_field = ra.code() << kRdShift; // Return-address (ra) reg. | 2901 uint32_t rd_field = ra.code() << kRdShift; // Return-address (ra) reg. |
| 2902 *(p+6) = SPECIAL | rs_field | rd_field | JALR; | 2902 *(p+6) = SPECIAL | rs_field | rd_field | JALR; |
| 2903 patched = true; | 2903 patched = true; |
| 2904 } else if (IsJ(instr3)) { | 2904 } else if (IsJ(instr3)) { |
| 2905 ASSERT(GetOpcodeField(instr1) == LUI); | 2905 DCHECK(GetOpcodeField(instr1) == LUI); |
| 2906 ASSERT(GetOpcodeField(instr2) == ORI); | 2906 DCHECK(GetOpcodeField(instr2) == ORI); |
| 2907 | 2907 |
| 2908 uint32_t rs_field = GetRt(instr2) << kRsShift; | 2908 uint32_t rs_field = GetRt(instr2) << kRsShift; |
| 2909 *(p+6) = SPECIAL | rs_field | JR; | 2909 *(p+6) = SPECIAL | rs_field | JR; |
| 2910 patched = true; | 2910 patched = true; |
| 2911 } | 2911 } |
| 2912 | 2912 |
| 2913 if (patched) { | 2913 if (patched) { |
| 2914 CpuFeatures::FlushICache(pc+6, sizeof(int32_t)); | 2914 CpuFeatures::FlushICache(pc+6, sizeof(int32_t)); |
| 2915 } | 2915 } |
| 2916 } | 2916 } |
| 2917 | 2917 |
| 2918 | 2918 |
| 2919 Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) { | 2919 Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) { |
| 2920 // No out-of-line constant pool support. | 2920 // No out-of-line constant pool support. |
| 2921 ASSERT(!FLAG_enable_ool_constant_pool); | 2921 DCHECK(!FLAG_enable_ool_constant_pool); |
| 2922 return isolate->factory()->empty_constant_pool_array(); | 2922 return isolate->factory()->empty_constant_pool_array(); |
| 2923 } | 2923 } |
| 2924 | 2924 |
| 2925 | 2925 |
| 2926 void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) { | 2926 void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) { |
| 2927 // No out-of-line constant pool support. | 2927 // No out-of-line constant pool support. |
| 2928 ASSERT(!FLAG_enable_ool_constant_pool); | 2928 DCHECK(!FLAG_enable_ool_constant_pool); |
| 2929 return; | 2929 return; |
| 2930 } | 2930 } |
| 2931 | 2931 |
| 2932 | 2932 |
| 2933 } } // namespace v8::internal | 2933 } } // namespace v8::internal |
| 2934 | 2934 |
| 2935 #endif // V8_TARGET_ARCH_MIPS64 | 2935 #endif // V8_TARGET_ARCH_MIPS64 |
| OLD | NEW |