Chromium Code Reviews

Side by Side Diff: src/mips64/assembler-mips64.cc

Issue 371923006: Add mips64 port. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Rebase (created 6 years, 5 months ago)
OLD | NEW
1 // Copyright (c) 1994-2006 Sun Microsystems Inc. 1 // Copyright (c) 1994-2006 Sun Microsystems Inc.
2 // All Rights Reserved. 2 // All Rights Reserved.
3 // 3 //
4 // Redistribution and use in source and binary forms, with or without 4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions are 5 // modification, are permitted provided that the following conditions are
6 // met: 6 // met:
7 // 7 //
8 // - Redistributions of source code must retain the above copyright notice, 8 // - Redistributions of source code must retain the above copyright notice,
9 // this list of conditions and the following disclaimer. 9 // this list of conditions and the following disclaimer.
10 // 10 //
(...skipping 17 matching lines...)
28 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 28 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
29 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 30
31 // The original source code covered by the above license has been 31 // The original source code covered by the above license has been
32 // modified significantly by Google Inc. 32 // modified significantly by Google Inc.
33 // Copyright 2012 the V8 project authors. All rights reserved. 33 // Copyright 2012 the V8 project authors. All rights reserved.
34 34
35 35
36 #include "src/v8.h" 36 #include "src/v8.h"
37 37
38 #if V8_TARGET_ARCH_MIPS 38 #if V8_TARGET_ARCH_MIPS64
39 39
40 #include "src/base/cpu.h" 40 #include "src/base/cpu.h"
41 #include "src/mips/assembler-mips-inl.h" 41 #include "src/mips64/assembler-mips64-inl.h"
42 #include "src/serialize.h" 42 #include "src/serialize.h"
43 43
44 namespace v8 { 44 namespace v8 {
45 namespace internal { 45 namespace internal {
46 46
47
47 // Get the CPU features enabled by the build. For cross compilation the 48 // Get the CPU features enabled by the build. For cross compilation the
48 // preprocessor symbols CAN_USE_FPU_INSTRUCTIONS 49 // preprocessor symbols CAN_USE_FPU_INSTRUCTIONS
49 // can be defined to enable FPU instructions when building the 50 // can be defined to enable FPU instructions when building the
50 // snapshot. 51 // snapshot.
51 static unsigned CpuFeaturesImpliedByCompiler() { 52 static unsigned CpuFeaturesImpliedByCompiler() {
52 unsigned answer = 0; 53 unsigned answer = 0;
53 #ifdef CAN_USE_FPU_INSTRUCTIONS 54 #ifdef CAN_USE_FPU_INSTRUCTIONS
54 answer |= 1u << FPU; 55 answer |= 1u << FPU;
55 #endif // def CAN_USE_FPU_INSTRUCTIONS 56 #endif // def CAN_USE_FPU_INSTRUCTIONS
56 57
(...skipping 57 matching lines...)
114 ASSERT(reg.is_valid()); 115 ASSERT(reg.is_valid());
115 const int kNumbers[] = { 116 const int kNumbers[] = {
116 0, // zero_reg 117 0, // zero_reg
117 1, // at 118 1, // at
118 2, // v0 119 2, // v0
119 3, // v1 120 3, // v1
120 4, // a0 121 4, // a0
121 5, // a1 122 5, // a1
122 6, // a2 123 6, // a2
123 7, // a3 124 7, // a3
124 8, // t0 125 8, // a4
125 9, // t1 126 9, // a5
126 10, // t2 127 10, // a6
127 11, // t3 128 11, // a7
128 12, // t4 129 12, // t0
129 13, // t5 130 13, // t1
130 14, // t6 131 14, // t2
131 15, // t7 132 15, // t3
132 16, // s0 133 16, // s0
133 17, // s1 134 17, // s1
134 18, // s2 135 18, // s2
135 19, // s3 136 19, // s3
136 20, // s4 137 20, // s4
137 21, // s5 138 21, // s5
138 22, // s6 139 22, // s6
139 23, // s7 140 23, // s7
140 24, // t8 141 24, // t8
141 25, // t9 142 25, // t9
142 26, // k0 143 26, // k0
143 27, // k1 144 27, // k1
144 28, // gp 145 28, // gp
145 29, // sp 146 29, // sp
146 30, // fp 147 30, // fp
147 31, // ra 148 31, // ra
148 }; 149 };
149 return kNumbers[reg.code()]; 150 return kNumbers[reg.code()];
150 } 151 }
151 152
152 153
153 Register ToRegister(int num) { 154 Register ToRegister(int num) {
154 ASSERT(num >= 0 && num < kNumRegisters); 155 ASSERT(num >= 0 && num < kNumRegisters);
155 const Register kRegisters[] = { 156 const Register kRegisters[] = {
156 zero_reg, 157 zero_reg,
157 at, 158 at,
158 v0, v1, 159 v0, v1,
159 a0, a1, a2, a3, 160 a0, a1, a2, a3, a4, a5, a6, a7,
160 t0, t1, t2, t3, t4, t5, t6, t7, 161 t0, t1, t2, t3,
161 s0, s1, s2, s3, s4, s5, s6, s7, 162 s0, s1, s2, s3, s4, s5, s6, s7,
162 t8, t9, 163 t8, t9,
163 k0, k1, 164 k0, k1,
164 gp, 165 gp,
165 sp, 166 sp,
166 fp, 167 fp,
167 ra 168 ra
168 }; 169 };
169 return kRegisters[num]; 170 return kRegisters[num];
170 } 171 }
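Note on the renumbering above: MIPS64 follows the n64 calling convention, where encodings 8..11 are the extra argument registers a4..a7 and encodings 12..15 become t0..t3 (o32 named all eight of them t0..t7). A minimal standalone sketch of that mapping (plain C++ with illustrative name strings, not V8's Register objects):

  // Standalone sketch of the o32 -> n64 GPR renaming for encodings 8..15.
  #include <cstdio>

  int main() {
    const char* o32[] = {"t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7"};
    const char* n64[] = {"a4", "a5", "a6", "a7", "t0", "t1", "t2", "t3"};
    for (int code = 8; code <= 15; ++code) {
      std::printf("encoding %2d: o32 %-2s -> n64 %s\n",
                  code, o32[code - 8], n64[code - 8]);
    }
    return 0;
  }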
(...skipping 44 matching lines...)
215 // Implementation of Operand and MemOperand. 216 // Implementation of Operand and MemOperand.
216 // See assembler-mips-inl.h for inlined constructors. 217 // See assembler-mips-inl.h for inlined constructors.
217 218
218 Operand::Operand(Handle<Object> handle) { 219 Operand::Operand(Handle<Object> handle) {
219 AllowDeferredHandleDereference using_raw_address; 220 AllowDeferredHandleDereference using_raw_address;
220 rm_ = no_reg; 221 rm_ = no_reg;
221 // Verify all Objects referred by code are NOT in new space. 222 // Verify all Objects referred by code are NOT in new space.
222 Object* obj = *handle; 223 Object* obj = *handle;
223 if (obj->IsHeapObject()) { 224 if (obj->IsHeapObject()) {
224 ASSERT(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj)); 225 ASSERT(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
225 imm32_ = reinterpret_cast<intptr_t>(handle.location()); 226 imm64_ = reinterpret_cast<intptr_t>(handle.location());
226 rmode_ = RelocInfo::EMBEDDED_OBJECT; 227 rmode_ = RelocInfo::EMBEDDED_OBJECT;
227 } else { 228 } else {
228 // No relocation needed. 229 // No relocation needed.
229 imm32_ = reinterpret_cast<intptr_t>(obj); 230 imm64_ = reinterpret_cast<intptr_t>(obj);
230 rmode_ = RelocInfo::NONE32; 231 rmode_ = RelocInfo::NONE64;
231 } 232 }
232 } 233 }
233 234
234 235
235 MemOperand::MemOperand(Register rm, int32_t offset) : Operand(rm) { 236 MemOperand::MemOperand(Register rm, int64_t offset) : Operand(rm) {
236 offset_ = offset; 237 offset_ = offset;
237 } 238 }
238 239
239 240
240 MemOperand::MemOperand(Register rm, int32_t unit, int32_t multiplier, 241 MemOperand::MemOperand(Register rm, int64_t unit, int64_t multiplier,
241 OffsetAddend offset_addend) : Operand(rm) { 242 OffsetAddend offset_addend) : Operand(rm) {
242 offset_ = unit * multiplier + offset_addend; 243 offset_ = unit * multiplier + offset_addend;
243 } 244 }
244 245
245 246
246 // ----------------------------------------------------------------------------- 247 // -----------------------------------------------------------------------------
247 // Specific instructions, constants, and masks. 248 // Specific instructions, constants, and masks.
248 249
249 static const int kNegOffset = 0x00008000; 250 static const int kNegOffset = 0x00008000;
250 // addiu(sp, sp, 4) aka Pop() operation or part of Pop(r) 251 // daddiu(sp, sp, 8) aka Pop() operation or part of Pop(r)
251 // operations as post-increment of sp. 252 // operations as post-increment of sp.
252 const Instr kPopInstruction = ADDIU | (kRegister_sp_Code << kRsShift) 253 const Instr kPopInstruction = DADDIU | (kRegister_sp_Code << kRsShift)
253 | (kRegister_sp_Code << kRtShift) 254 | (kRegister_sp_Code << kRtShift)
254 | (kPointerSize & kImm16Mask); // NOLINT 255 | (kPointerSize & kImm16Mask); // NOLINT
255 // addiu(sp, sp, -4) part of Push(r) operation as pre-decrement of sp. 256 // daddiu(sp, sp, -8) part of Push(r) operation as pre-decrement of sp.
256 const Instr kPushInstruction = ADDIU | (kRegister_sp_Code << kRsShift) 257 const Instr kPushInstruction = DADDIU | (kRegister_sp_Code << kRsShift)
257 | (kRegister_sp_Code << kRtShift) 258 | (kRegister_sp_Code << kRtShift)
258 | (-kPointerSize & kImm16Mask); // NOLINT 259 | (-kPointerSize & kImm16Mask); // NOLINT
259 // sw(r, MemOperand(sp, 0)) 260 // sd(r, MemOperand(sp, 0))
260 const Instr kPushRegPattern = SW | (kRegister_sp_Code << kRsShift) 261 const Instr kPushRegPattern = SD | (kRegister_sp_Code << kRsShift)
261 | (0 & kImm16Mask); // NOLINT 262 | (0 & kImm16Mask); // NOLINT
262 // lw(r, MemOperand(sp, 0)) 263 // ld(r, MemOperand(sp, 0))
263 const Instr kPopRegPattern = LW | (kRegister_sp_Code << kRsShift) 264 const Instr kPopRegPattern = LD | (kRegister_sp_Code << kRsShift)
264 | (0 & kImm16Mask); // NOLINT 265 | (0 & kImm16Mask); // NOLINT
265 266
266 const Instr kLwRegFpOffsetPattern = LW | (kRegister_fp_Code << kRsShift) 267 const Instr kLwRegFpOffsetPattern = LW | (kRegister_fp_Code << kRsShift)
267 | (0 & kImm16Mask); // NOLINT 268 | (0 & kImm16Mask); // NOLINT
268 269
269 const Instr kSwRegFpOffsetPattern = SW | (kRegister_fp_Code << kRsShift) 270 const Instr kSwRegFpOffsetPattern = SW | (kRegister_fp_Code << kRsShift)
270 | (0 & kImm16Mask); // NOLINT 271 | (0 & kImm16Mask); // NOLINT
271 272
272 const Instr kLwRegFpNegOffsetPattern = LW | (kRegister_fp_Code << kRsShift) 273 const Instr kLwRegFpNegOffsetPattern = LW | (kRegister_fp_Code << kRsShift)
273 | (kNegOffset & kImm16Mask); // NOLINT 274 | (kNegOffset & kImm16Mask); // NOLINT
274 275
275 const Instr kSwRegFpNegOffsetPattern = SW | (kRegister_fp_Code << kRsShift) 276 const Instr kSwRegFpNegOffsetPattern = SW | (kRegister_fp_Code << kRsShift)
276 | (kNegOffset & kImm16Mask); // NOLINT 277 | (kNegOffset & kImm16Mask); // NOLINT
277 // A mask for the Rt register for push, pop, lw, sw instructions. 278 // A mask for the Rt register for push, pop, lw, sw instructions.
278 const Instr kRtMask = kRtFieldMask; 279 const Instr kRtMask = kRtFieldMask;
279 const Instr kLwSwInstrTypeMask = 0xffe00000; 280 const Instr kLwSwInstrTypeMask = 0xffe00000;
280 const Instr kLwSwInstrArgumentMask = ~kLwSwInstrTypeMask; 281 const Instr kLwSwInstrArgumentMask = ~kLwSwInstrTypeMask;
281 const Instr kLwSwOffsetMask = kImm16Mask; 282 const Instr kLwSwOffsetMask = kImm16Mask;
282 283
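The push/pop patterns above are ordinary MIPS I-type words: opcode(6) | rs(5) | rt(5) | imm(16). A small sketch of the packing, assuming the standard field positions (rs at bit 21, rt at bit 16) and the MIPS64 DADDIU opcode value 0x19; these stand in for V8's actual constants:

  #include <cstdint>
  #include <cstdio>

  int main() {
    const uint32_t kOpShift = 26, kRsShift = 21, kRtShift = 16;
    const uint32_t kDaddiu = 0x19u << kOpShift;  // MIPS64 daddiu opcode.
    const uint32_t sp = 29;                      // Stack pointer register code.
    const int32_t kPointerSize = 8;              // 64-bit pointers.

    // daddiu(sp, sp, -8): the pre-decrement half of Push(r).
    uint32_t push = kDaddiu | (sp << kRsShift) | (sp << kRtShift) |
                    (static_cast<uint32_t>(-kPointerSize) & 0xffff);
    std::printf("kPushInstruction word: 0x%08x\n", push);  // 0x67bdfff8
    return 0;
  }

With kPointerSize = 8, the low half-word is 0xfff8, i.e. daddiu sp, sp, -8.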
283 284
284 Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size) 285 Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
285 : AssemblerBase(isolate, buffer, buffer_size), 286 : AssemblerBase(isolate, buffer, buffer_size),
286 recorded_ast_id_(TypeFeedbackId::None()), 287 recorded_ast_id_(TypeFeedbackId::None()),
(...skipping 322 matching lines...)
609 } 610 }
610 611
611 612
612 Instr Assembler::SetSwOffset(Instr instr, int16_t offset) { 613 Instr Assembler::SetSwOffset(Instr instr, int16_t offset) {
613 ASSERT(IsSw(instr)); 614 ASSERT(IsSw(instr));
614 return ((instr & ~kImm16Mask) | (offset & kImm16Mask)); 615 return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
615 } 616 }
616 617
617 618
618 bool Assembler::IsAddImmediate(Instr instr) { 619 bool Assembler::IsAddImmediate(Instr instr) {
619 return ((instr & kOpcodeMask) == ADDIU); 620 return ((instr & kOpcodeMask) == ADDIU || (instr & kOpcodeMask) == DADDIU);
620 } 621 }
621 622
622 623
623 Instr Assembler::SetAddImmediateOffset(Instr instr, int16_t offset) { 624 Instr Assembler::SetAddImmediateOffset(Instr instr, int16_t offset) {
624 ASSERT(IsAddImmediate(instr)); 625 ASSERT(IsAddImmediate(instr));
625 return ((instr & ~kImm16Mask) | (offset & kImm16Mask)); 626 return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
626 } 627 }
627 628
628 629
629 bool Assembler::IsAndImmediate(Instr instr) { 630 bool Assembler::IsAndImmediate(Instr instr) {
630 return GetOpcodeField(instr) == ANDI; 631 return GetOpcodeField(instr) == ANDI;
631 } 632 }
632 633
633 634
634 int Assembler::target_at(int32_t pos) { 635 int64_t Assembler::target_at(int64_t pos) {
635 Instr instr = instr_at(pos); 636 Instr instr = instr_at(pos);
636 if ((instr & ~kImm16Mask) == 0) { 637 if ((instr & ~kImm16Mask) == 0) {
637 // Emitted label constant, not part of a branch. 638 // Emitted label constant, not part of a branch.
638 if (instr == 0) { 639 if (instr == 0) {
639 return kEndOfChain; 640 return kEndOfChain;
640 } else { 641 } else {
641 int32_t imm18 =((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14; 642 int32_t imm18 =((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
642 return (imm18 + pos); 643 return (imm18 + pos);
643 } 644 }
644 } 645 }
645 // Check we have a branch or jump instruction. 646 // Check we have a branch or jump instruction.
646 ASSERT(IsBranch(instr) || IsJ(instr) || IsLui(instr)); 647 ASSERT(IsBranch(instr) || IsJ(instr) || IsLui(instr));
647 // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming 648 // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
648 // the compiler uses arithmectic shifts for signed integers. 649 // the compiler uses arithmetic shifts for signed integers.
649 if (IsBranch(instr)) { 650 if (IsBranch(instr)) {
650 int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14; 651 int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
651
652 if (imm18 == kEndOfChain) { 652 if (imm18 == kEndOfChain) {
653 // EndOfChain sentinel is returned directly, not relative to pc or pos. 653 // EndOfChain sentinel is returned directly, not relative to pc or pos.
654 return kEndOfChain; 654 return kEndOfChain;
655 } else { 655 } else {
656 return pos + kBranchPCOffset + imm18; 656 return pos + kBranchPCOffset + imm18;
657 } 657 }
658 } else if (IsLui(instr)) { 658 } else if (IsLui(instr)) {
659 Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize); 659 Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
660 Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize); 660 Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
661 Instr instr_ori2 = instr_at(pos + 3 * Assembler::kInstrSize);
661 ASSERT(IsOri(instr_ori)); 662 ASSERT(IsOri(instr_ori));
662 int32_t imm = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift; 663 ASSERT(IsOri(instr_ori2));
663 imm |= (instr_ori & static_cast<int32_t>(kImm16Mask)); 664
665 // TODO(plind) create named constants for shift values.
666 int64_t imm = static_cast<int64_t>(instr_lui & kImm16Mask) << 48;
667 imm |= static_cast<int64_t>(instr_ori & kImm16Mask) << 32;
668 imm |= static_cast<int64_t>(instr_ori2 & kImm16Mask) << 16;
669 // Sign extend address.
670 imm >>= 16;
664 671
665 if (imm == kEndOfJumpChain) { 672 if (imm == kEndOfJumpChain) {
666 // EndOfChain sentinel is returned directly, not relative to pc or pos. 673 // EndOfChain sentinel is returned directly, not relative to pc or pos.
667 return kEndOfChain; 674 return kEndOfChain;
668 } else { 675 } else {
669 uint32_t instr_address = reinterpret_cast<int32_t>(buffer_ + pos); 676 uint64_t instr_address = reinterpret_cast<int64_t>(buffer_ + pos);
670 int32_t delta = instr_address - imm; 677 int64_t delta = instr_address - imm;
671 ASSERT(pos > delta); 678 ASSERT(pos > delta);
672 return pos - delta; 679 return pos - delta;
673 } 680 }
674 } else { 681 } else {
675 int32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2; 682 int32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
676 if (imm28 == kEndOfJumpChain) { 683 if (imm28 == kEndOfJumpChain) {
677 // EndOfChain sentinel is returned directly, not relative to pc or pos. 684 // EndOfChain sentinel is returned directly, not relative to pc or pos.
678 return kEndOfChain; 685 return kEndOfChain;
679 } else { 686 } else {
680 uint32_t instr_address = reinterpret_cast<int32_t>(buffer_ + pos); 687 uint64_t instr_address = reinterpret_cast<int64_t>(buffer_ + pos);
681 instr_address &= kImm28Mask; 688 instr_address &= kImm28Mask;
682 int32_t delta = instr_address - imm28; 689 int64_t delta = instr_address - imm28;
683 ASSERT(pos > delta); 690 ASSERT(pos > delta);
684 return pos - delta; 691 return pos - delta;
685 } 692 }
686 } 693 }
687 } 694 }
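The lui/ori/ori branch of target_at() above reconstructs a 48-bit address from three 16-bit immediates: they are packed into the top of an int64_t and the arithmetic shift right by 16 sign-extends bit 47 into the upper bits. A standalone sketch of just that arithmetic (assuming, as the code itself does, that the compiler shifts signed values arithmetically):

  #include <cstdint>
  #include <cstdio>

  int64_t Reconstruct(uint16_t lui16, uint16_t ori16, uint16_t ori2_16) {
    int64_t imm = static_cast<int64_t>(lui16) << 48;
    imm |= static_cast<int64_t>(ori16) << 32;
    imm |= static_cast<int64_t>(ori2_16) << 16;
    return imm >> 16;  // Arithmetic shift: sign-extend the 48-bit address.
  }

  int main() {
    // A 0xffff top chunk yields a negative (sign-extended) address.
    std::printf("%llx\n", static_cast<unsigned long long>(
        Reconstruct(0xffff, 0x1234, 0x5678)));  // Prints ffffffff12345678.
    return 0;
  }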
688 695
689 696
690 void Assembler::target_at_put(int32_t pos, int32_t target_pos) { 697 void Assembler::target_at_put(int64_t pos, int64_t target_pos) {
691 Instr instr = instr_at(pos); 698 Instr instr = instr_at(pos);
692 if ((instr & ~kImm16Mask) == 0) { 699 if ((instr & ~kImm16Mask) == 0) {
693 ASSERT(target_pos == kEndOfChain || target_pos >= 0); 700 ASSERT(target_pos == kEndOfChain || target_pos >= 0);
694 // Emitted label constant, not part of a branch. 701 // Emitted label constant, not part of a branch.
695 // Make label relative to Code* of generated Code object. 702 // Make label relative to Code* of generated Code object.
696 instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag)); 703 instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
697 return; 704 return;
698 } 705 }
699 706
700 ASSERT(IsBranch(instr) || IsJ(instr) || IsLui(instr)); 707 ASSERT(IsBranch(instr) || IsJ(instr) || IsLui(instr));
701 if (IsBranch(instr)) { 708 if (IsBranch(instr)) {
702 int32_t imm18 = target_pos - (pos + kBranchPCOffset); 709 int32_t imm18 = target_pos - (pos + kBranchPCOffset);
703 ASSERT((imm18 & 3) == 0); 710 ASSERT((imm18 & 3) == 0);
704 711
705 instr &= ~kImm16Mask; 712 instr &= ~kImm16Mask;
706 int32_t imm16 = imm18 >> 2; 713 int32_t imm16 = imm18 >> 2;
707 ASSERT(is_int16(imm16)); 714 ASSERT(is_int16(imm16));
708 715
709 instr_at_put(pos, instr | (imm16 & kImm16Mask)); 716 instr_at_put(pos, instr | (imm16 & kImm16Mask));
710 } else if (IsLui(instr)) { 717 } else if (IsLui(instr)) {
711 Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize); 718 Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
712 Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize); 719 Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
720 Instr instr_ori2 = instr_at(pos + 3 * Assembler::kInstrSize);
713 ASSERT(IsOri(instr_ori)); 721 ASSERT(IsOri(instr_ori));
714 uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos; 722 ASSERT(IsOri(instr_ori2));
723
724 uint64_t imm = reinterpret_cast<uint64_t>(buffer_) + target_pos;
715 ASSERT((imm & 3) == 0); 725 ASSERT((imm & 3) == 0);
716 726
717 instr_lui &= ~kImm16Mask; 727 instr_lui &= ~kImm16Mask;
718 instr_ori &= ~kImm16Mask; 728 instr_ori &= ~kImm16Mask;
729 instr_ori2 &= ~kImm16Mask;
719 730
720 instr_at_put(pos + 0 * Assembler::kInstrSize, 731 instr_at_put(pos + 0 * Assembler::kInstrSize,
721 instr_lui | ((imm & kHiMask) >> kLuiShift)); 732 instr_lui | ((imm >> 32) & kImm16Mask));
722 instr_at_put(pos + 1 * Assembler::kInstrSize, 733 instr_at_put(pos + 1 * Assembler::kInstrSize,
723 instr_ori | (imm & kImm16Mask)); 734 instr_ori | ((imm >> 16) & kImm16Mask));
735 instr_at_put(pos + 3 * Assembler::kInstrSize,
736 instr_ori2 | (imm & kImm16Mask));
724 } else { 737 } else {
725 uint32_t imm28 = reinterpret_cast<uint32_t>(buffer_) + target_pos; 738 uint64_t imm28 = reinterpret_cast<uint64_t>(buffer_) + target_pos;
726 imm28 &= kImm28Mask; 739 imm28 &= kImm28Mask;
727 ASSERT((imm28 & 3) == 0); 740 ASSERT((imm28 & 3) == 0);
728 741
729 instr &= ~kImm26Mask; 742 instr &= ~kImm26Mask;
730 uint32_t imm26 = imm28 >> 2; 743 uint32_t imm26 = imm28 >> 2;
731 ASSERT(is_uint26(imm26)); 744 ASSERT(is_uint26(imm26));
732 745
733 instr_at_put(pos, instr | (imm26 & kImm26Mask)); 746 instr_at_put(pos, instr | (imm26 & kImm26Mask));
734 } 747 }
735 } 748 }
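target_at_put() performs the inverse split: bits 47..32 of the absolute address go to the lui immediate, bits 31..16 to the first ori, and bits 15..0 to the second. A sketch with a made-up, 4-byte-aligned example address:

  #include <cstdint>
  #include <cstdio>

  int main() {
    uint64_t imm = 0x0000123456789abcULL;                 // Example address.
    uint16_t lui16   = static_cast<uint16_t>(imm >> 32);  // Bits 47..32 -> lui.
    uint16_t ori16   = static_cast<uint16_t>(imm >> 16);  // Bits 31..16 -> first ori.
    uint16_t ori2_16 = static_cast<uint16_t>(imm);        // Bits 15..0 -> second ori.
    std::printf("lui 0x%04x  ori 0x%04x  ori 0x%04x\n", lui16, ori16, ori2_16);
    return 0;
  }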
(...skipping 227 matching lines...)
963 } 976 }
964 977
965 if (kInvalidSlotPos == trampoline_entry) { 978 if (kInvalidSlotPos == trampoline_entry) {
966 internal_trampoline_exception_ = true; 979 internal_trampoline_exception_ = true;
967 } 980 }
968 } 981 }
969 return trampoline_entry; 982 return trampoline_entry;
970 } 983 }
971 984
972 985
973 uint32_t Assembler::jump_address(Label* L) { 986 uint64_t Assembler::jump_address(Label* L) {
974 int32_t target_pos; 987 int64_t target_pos;
975 988
976 if (L->is_bound()) { 989 if (L->is_bound()) {
977 target_pos = L->pos(); 990 target_pos = L->pos();
978 } else { 991 } else {
979 if (L->is_linked()) { 992 if (L->is_linked()) {
980 target_pos = L->pos(); // L's link. 993 target_pos = L->pos(); // L's link.
981 L->link_to(pc_offset()); 994 L->link_to(pc_offset());
982 } else { 995 } else {
983 L->link_to(pc_offset()); 996 L->link_to(pc_offset());
984 return kEndOfJumpChain; 997 return kEndOfJumpChain;
985 } 998 }
986 } 999 }
987 1000
988 uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos; 1001 uint64_t imm = reinterpret_cast<uint64_t>(buffer_) + target_pos;
989 ASSERT((imm & 3) == 0); 1002 ASSERT((imm & 3) == 0);
990 1003
991 return imm; 1004 return imm;
992 } 1005 }
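jump_address() is one half of the usual unbound-label scheme: each forward reference stores the position of the previous reference (or a sentinel) in its own instruction slot, and the label then points at the newest site, so binding can walk the chain and patch every site. A toy model of that chain, simplified to raw offsets instead of encoded instructions (the sentinel value is illustrative):

  #include <cstdint>
  #include <cstdio>
  #include <vector>

  const int64_t kEndOfChainToy = -4;  // Sentinel; V8 uses a similar marker.

  int main() {
    std::vector<int64_t> slots(12, 0);   // Stand-in for the code buffer.
    int64_t label_pos = kEndOfChainToy;  // An unbound label.

    // Two forward references at offsets 0 and 4 link themselves in.
    for (int64_t site : {0, 4}) {
      slots[site] = label_pos;  // Slot remembers the previous link.
      label_pos = site;         // Label now heads the chain.
    }

    // Binding the label at offset 8 walks the chain, patching each site.
    const int64_t target = 8;
    for (int64_t p = label_pos; p != kEndOfChainToy; ) {
      int64_t next = slots[p];
      slots[p] = target;  // The real code re-encodes a branch/jump here.
      p = next;
    }
    std::printf("slot0=%lld slot4=%lld\n",
                static_cast<long long>(slots[0]),
                static_cast<long long>(slots[4]));  // Both patched to 8.
    return 0;
  }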
993 1006
994 1007
995 int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) { 1008 int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
996 int32_t target_pos; 1009 int32_t target_pos;
997 1010
998 if (L->is_bound()) { 1011 if (L->is_bound()) {
(...skipping 110 matching lines...)
1109 } 1122 }
1110 1123
1111 1124
1112 void Assembler::bne(Register rs, Register rt, int16_t offset) { 1125 void Assembler::bne(Register rs, Register rt, int16_t offset) {
1113 BlockTrampolinePoolScope block_trampoline_pool(this); 1126 BlockTrampolinePoolScope block_trampoline_pool(this);
1114 GenInstrImmediate(BNE, rs, rt, offset); 1127 GenInstrImmediate(BNE, rs, rt, offset);
1115 BlockTrampolinePoolFor(1); // For associated delay slot. 1128 BlockTrampolinePoolFor(1); // For associated delay slot.
1116 } 1129 }
1117 1130
1118 1131
1119 void Assembler::j(int32_t target) { 1132 void Assembler::j(int64_t target) {
1120 #ifdef DEBUG 1133 #ifdef DEBUG
1121 // Get pc of delay slot. 1134 // Get pc of delay slot.
1122 uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize); 1135 uint64_t ipc = reinterpret_cast<uint64_t>(pc_ + 1 * kInstrSize);
1123 bool in_range = (ipc ^ static_cast<uint32_t>(target) >> 1136 bool in_range = (ipc ^ static_cast<uint64_t>(target) >>
1124 (kImm26Bits + kImmFieldShift)) == 0; 1137 (kImm26Bits + kImmFieldShift)) == 0;
1125 ASSERT(in_range && ((target & 3) == 0)); 1138 ASSERT(in_range && ((target & 3) == 0));
1126 #endif 1139 #endif
1127 GenInstrJump(J, target >> 2); 1140 GenInstrJump(J, target >> 2);
1128 } 1141 }
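The DEBUG check in j() guards an architectural limit: a j/jal immediate supplies only the low 28 bits of the target (26 bits shifted left by 2), so the remaining upper bits must match the pc of the delay slot, i.e. both addresses must sit in the same 256 MB segment. A sketch of that segment test:

  #include <cstdint>
  #include <cstdio>

  bool SameJumpSegment(uint64_t delay_slot_pc, uint64_t target) {
    const unsigned kImm26Bits = 26, kImmFieldShift = 2;
    // Equal iff bits 63..28 match, i.e. same 256 MB segment.
    return ((delay_slot_pc ^ target) >> (kImm26Bits + kImmFieldShift)) == 0;
  }

  int main() {
    std::printf("%d\n", SameJumpSegment(0x10000000, 0x1ffffffc));  // 1: in range.
    std::printf("%d\n", SameJumpSegment(0x10000000, 0x20000000));  // 0: out of range.
    return 0;
  }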
1129 1142
1130 1143
1131 void Assembler::jr(Register rs) { 1144 void Assembler::jr(Register rs) {
1132 BlockTrampolinePoolScope block_trampoline_pool(this); 1145 BlockTrampolinePoolScope block_trampoline_pool(this);
1133 if (rs.is(ra)) { 1146 if (rs.is(ra)) {
1134 positions_recorder()->WriteRecordedPositions(); 1147 positions_recorder()->WriteRecordedPositions();
1135 } 1148 }
1136 GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR); 1149 GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
1137 BlockTrampolinePoolFor(1); // For associated delay slot. 1150 BlockTrampolinePoolFor(1); // For associated delay slot.
1138 } 1151 }
1139 1152
1140 1153
1141 void Assembler::jal(int32_t target) { 1154 void Assembler::jal(int64_t target) {
1142 #ifdef DEBUG 1155 #ifdef DEBUG
1143 // Get pc of delay slot. 1156 // Get pc of delay slot.
1144 uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize); 1157 uint64_t ipc = reinterpret_cast<uint64_t>(pc_ + 1 * kInstrSize);
1145 bool in_range = (ipc ^ static_cast<uint32_t>(target) >> 1158 bool in_range = (ipc ^ static_cast<uint64_t>(target) >>
1146 (kImm26Bits + kImmFieldShift)) == 0; 1159 (kImm26Bits + kImmFieldShift)) == 0;
1147 ASSERT(in_range && ((target & 3) == 0)); 1160 ASSERT(in_range && ((target & 3) == 0));
1148 #endif 1161 #endif
1149 positions_recorder()->WriteRecordedPositions(); 1162 positions_recorder()->WriteRecordedPositions();
1150 GenInstrJump(JAL, target >> 2); 1163 GenInstrJump(JAL, target >> 2);
1151 } 1164 }
1152 1165
1153 1166
1154 void Assembler::jalr(Register rs, Register rd) { 1167 void Assembler::jalr(Register rs, Register rd) {
1155 BlockTrampolinePoolScope block_trampoline_pool(this); 1168 BlockTrampolinePoolScope block_trampoline_pool(this);
1156 positions_recorder()->WriteRecordedPositions(); 1169 positions_recorder()->WriteRecordedPositions();
1157 GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR); 1170 GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR);
1158 BlockTrampolinePoolFor(1); // For associated delay slot. 1171 BlockTrampolinePoolFor(1); // For associated delay slot.
1159 } 1172 }
1160 1173
1161 1174
1162 void Assembler::j_or_jr(int32_t target, Register rs) { 1175 void Assembler::j_or_jr(int64_t target, Register rs) {
1163 // Get pc of delay slot. 1176 // Get pc of delay slot.
1164 uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize); 1177 uint64_t ipc = reinterpret_cast<uint64_t>(pc_ + 1 * kInstrSize);
1165 bool in_range = (ipc ^ static_cast<uint32_t>(target) >> 1178 bool in_range = (ipc ^ static_cast<uint64_t>(target) >>
1166 (kImm26Bits + kImmFieldShift)) == 0; 1179 (kImm26Bits + kImmFieldShift)) == 0;
1167 if (in_range) { 1180 if (in_range) {
1168 j(target); 1181 j(target);
1169 } else { 1182 } else {
1170 jr(t9); 1183 jr(t9);
1171 } 1184 }
1172 } 1185 }
1173 1186
1174 1187
1175 void Assembler::jal_or_jalr(int32_t target, Register rs) { 1188 void Assembler::jal_or_jalr(int64_t target, Register rs) {
1176 // Get pc of delay slot. 1189 // Get pc of delay slot.
1177 uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize); 1190 uint64_t ipc = reinterpret_cast<uint64_t>(pc_ + 1 * kInstrSize);
1178 bool in_range = (ipc ^ static_cast<uint32_t>(target) >> 1191 bool in_range = (ipc ^ static_cast<uint64_t>(target) >>
1179 (kImm26Bits+kImmFieldShift)) == 0; 1192 (kImm26Bits+kImmFieldShift)) == 0;
1180 if (in_range) { 1193 if (in_range) {
1181 jal(target); 1194 jal(target);
1182 } else { 1195 } else {
1183 jalr(t9); 1196 jalr(t9);
1184 } 1197 }
1185 } 1198 }
1186 1199
1187 1200
1188 // -------Data-processing-instructions--------- 1201 // -------Data-processing-instructions---------
(...skipping 23 matching lines...)
1212 void Assembler::mult(Register rs, Register rt) { 1225 void Assembler::mult(Register rs, Register rt) {
1213 GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULT); 1226 GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULT);
1214 } 1227 }
1215 1228
1216 1229
1217 void Assembler::multu(Register rs, Register rt) { 1230 void Assembler::multu(Register rs, Register rt) {
1218 GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULTU); 1231 GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULTU);
1219 } 1232 }
1220 1233
1221 1234
1235 void Assembler::daddiu(Register rd, Register rs, int32_t j) {
1236 GenInstrImmediate(DADDIU, rs, rd, j);
1237 }
1238
1239
1222 void Assembler::div(Register rs, Register rt) { 1240 void Assembler::div(Register rs, Register rt) {
1223 GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIV); 1241 GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIV);
1224 } 1242 }
1225 1243
1226 1244
1227 void Assembler::divu(Register rs, Register rt) { 1245 void Assembler::divu(Register rs, Register rt) {
1228 GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU); 1246 GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU);
1229 } 1247 }
1230 1248
1231 1249
1250 void Assembler::daddu(Register rd, Register rs, Register rt) {
1251 GenInstrRegister(SPECIAL, rs, rt, rd, 0, DADDU);
1252 }
1253
1254
1255 void Assembler::dsubu(Register rd, Register rs, Register rt) {
1256 GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSUBU);
1257 }
1258
1259
1260 void Assembler::dmult(Register rs, Register rt) {
1261 GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DMULT);
1262 }
1263
1264
1265 void Assembler::dmultu(Register rs, Register rt) {
1266 GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DMULTU);
1267 }
1268
1269
1270 void Assembler::ddiv(Register rs, Register rt) {
1271 GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DDIV);
1272 }
1273
1274
1275 void Assembler::ddivu(Register rs, Register rt) {
1276 GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DDIVU);
1277 }
1278
1279
1232 // Logical. 1280 // Logical.
1233 1281
1234 void Assembler::and_(Register rd, Register rs, Register rt) { 1282 void Assembler::and_(Register rd, Register rs, Register rt) {
1235 GenInstrRegister(SPECIAL, rs, rt, rd, 0, AND); 1283 GenInstrRegister(SPECIAL, rs, rt, rd, 0, AND);
1236 } 1284 }
1237 1285
1238 1286
1239 void Assembler::andi(Register rt, Register rs, int32_t j) { 1287 void Assembler::andi(Register rt, Register rs, int32_t j) {
1240 ASSERT(is_uint16(j)); 1288 ASSERT(is_uint16(j));
1241 GenInstrImmediate(ANDI, rs, rt, j); 1289 GenInstrImmediate(ANDI, rs, rt, j);
(...skipping 62 matching lines...)
1304 1352
1305 1353
1306 void Assembler::srav(Register rd, Register rt, Register rs) { 1354 void Assembler::srav(Register rd, Register rt, Register rs) {
1307 GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRAV); 1355 GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRAV);
1308 } 1356 }
1309 1357
1310 1358
1311 void Assembler::rotr(Register rd, Register rt, uint16_t sa) { 1359 void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
1312 // Should be called via MacroAssembler::Ror. 1360 // Should be called via MacroAssembler::Ror.
1313 ASSERT(rd.is_valid() && rt.is_valid() && is_uint5(sa)); 1361 ASSERT(rd.is_valid() && rt.is_valid() && is_uint5(sa));
1314 ASSERT(kArchVariant == kMips32r2); 1362 ASSERT(kArchVariant == kMips64r2);
1315 Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift) 1363 Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
1316 | (rd.code() << kRdShift) | (sa << kSaShift) | SRL; 1364 | (rd.code() << kRdShift) | (sa << kSaShift) | SRL;
1317 emit(instr); 1365 emit(instr);
1318 } 1366 }
1319 1367
1320 1368
1321 void Assembler::rotrv(Register rd, Register rt, Register rs) { 1369 void Assembler::rotrv(Register rd, Register rt, Register rs) {
1322 // Should be called via MacroAssembler::Ror. 1370 // Should be called via MacroAssembler::Ror.
1323 ASSERT(rd.is_valid() && rt.is_valid() && rs.is_valid() ); 1371 ASSERT(rd.is_valid() && rt.is_valid() && rs.is_valid() );
1324 ASSERT(kArchVariant == kMips32r2); 1372 ASSERT(kArchVariant == kMips64r2);
1325 Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift) 1373 Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
1326 | (rd.code() << kRdShift) | (1 << kSaShift) | SRLV; 1374 | (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
1327 emit(instr); 1375 emit(instr);
1328 } 1376 }
1329 1377
1330 1378
1379 void Assembler::dsll(Register rd, Register rt, uint16_t sa) {
1380 GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, DSLL);
1381 }
1382
1383
1384 void Assembler::dsllv(Register rd, Register rt, Register rs) {
1385 GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSLLV);
1386 }
1387
1388
1389 void Assembler::dsrl(Register rd, Register rt, uint16_t sa) {
1390 GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, DSRL);
1391 }
1392
1393
1394 void Assembler::dsrlv(Register rd, Register rt, Register rs) {
1395 GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSRLV);
1396 }
1397
1398
1399 void Assembler::drotr(Register rd, Register rt, uint16_t sa) {
1400 ASSERT(rd.is_valid() && rt.is_valid() && is_uint5(sa));
1401 Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
1402 | (rd.code() << kRdShift) | (sa << kSaShift) | DSRL;
1403 emit(instr);
1404 }
1405
1406
1407 void Assembler::drotrv(Register rd, Register rt, Register rs) {
1408 ASSERT(rd.is_valid() && rt.is_valid() && rs.is_valid() );
1409 Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
1410 | (rd.code() << kRdShift) | (1 << kSaShift) | DSRLV;
1411 emit(instr);
1412 }
1413
1414
1415 void Assembler::dsra(Register rd, Register rt, uint16_t sa) {
1416 GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, DSRA);
1417 }
1418
1419
1420 void Assembler::dsrav(Register rd, Register rt, Register rs) {
1421 GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSRAV);
1422 }
1423
1424
1425 void Assembler::dsll32(Register rd, Register rt, uint16_t sa) {
1426 GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, DSLL32);
1427 }
1428
1429
1430 void Assembler::dsrl32(Register rd, Register rt, uint16_t sa) {
1431 GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, DSRL32);
1432 }
1433
1434
1435 void Assembler::dsra32(Register rd, Register rt, uint16_t sa) {
1436 GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, DSRA32);
1437 }
1438
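The *32 variants above exist because the sa field in the encoding is only 5 bits: 64-bit shift amounts 32..63 are expressed as DSLL32/DSRL32/DSRA32 with sa = amount - 32. A sketch of how a caller (e.g. a macro-assembler layer; Shift64Left is a hypothetical helper, not V8 API) would dispatch:

  #include <cassert>
  #include <cstdio>

  enum ShiftOp { kDsll, kDsll32 };

  ShiftOp Shift64Left(unsigned amount, unsigned* sa) {
    assert(amount < 64);
    if (amount < 32) { *sa = amount; return kDsll; }
    *sa = amount - 32;  // dsll32 shifts by 32 + sa.
    return kDsll32;
  }

  int main() {
    unsigned sa;
    ShiftOp op = Shift64Left(40, &sa);
    std::printf("%s sa=%u\n", op == kDsll32 ? "dsll32" : "dsll", sa);  // dsll32 sa=8
    return 0;
  }

So dsll(rd, rt, s) covers s in 0..31 and dsll32(rd, rt, s - 32) covers 32..63.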
1439
1331 // ------------Memory-instructions------------- 1440 // ------------Memory-instructions-------------
1332 1441
1333 // Helper for base-reg + offset, when offset is larger than int16. 1442 // Helper for base-reg + offset, when offset is larger than int16.
1334 void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) { 1443 void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) {
1335 ASSERT(!src.rm().is(at)); 1444 ASSERT(!src.rm().is(at));
1336 lui(at, (src.offset_ >> kLuiShift) & kImm16Mask); 1445 ASSERT(is_int32(src.offset_));
1446 daddiu(at, zero_reg, (src.offset_ >> kLuiShift) & kImm16Mask);
1447 dsll(at, at, kLuiShift);
1337 ori(at, at, src.offset_ & kImm16Mask); // Load 32-bit offset. 1448 ori(at, at, src.offset_ & kImm16Mask); // Load 32-bit offset.
1338 addu(at, at, src.rm()); // Add base register. 1449 daddu(at, at, src.rm()); // Add base register.
1339 } 1450 }
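The rewritten LoadRegPlusOffsetToAt builds a sign-extended 32-bit offset in 'at' via daddiu (whose 16-bit immediate is sign-extended) + dsll + ori, then adds the base register with daddu. A standalone sketch of the value the first three instructions leave in 'at':

  #include <cstdint>
  #include <cstdio>

  int64_t SynthesizeOffset(int32_t offset) {
    // daddiu(at, zero_reg, hi16): the 16-bit immediate is sign-extended.
    int64_t at = static_cast<int16_t>(static_cast<uint32_t>(offset) >> 16);
    // dsll(at, at, 16): shift the high half into place.
    at = static_cast<int64_t>(static_cast<uint64_t>(at) << 16);
    // ori(at, at, lo16): merge the low half (zero-extended).
    at |= static_cast<uint32_t>(offset) & 0xffff;
    return at;  // Sign-extended 32-bit offset; daddu then adds the base.
  }

  int main() {
    std::printf("%d %d\n",
                SynthesizeOffset(0x12345678) == 0x12345678,
                SynthesizeOffset(-0x123456) == -0x123456);  // Prints: 1 1
    return 0;
  }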
1340 1451
1341 1452
1342 void Assembler::lb(Register rd, const MemOperand& rs) { 1453 void Assembler::lb(Register rd, const MemOperand& rs) {
1343 if (is_int16(rs.offset_)) { 1454 if (is_int16(rs.offset_)) {
1344 GenInstrImmediate(LB, rs.rm(), rd, rs.offset_); 1455 GenInstrImmediate(LB, rs.rm(), rd, rs.offset_);
1345 } else { // Offset > 16 bits, use multiple instructions to load. 1456 } else { // Offset > 16 bits, use multiple instructions to load.
1346 LoadRegPlusOffsetToAt(rs); 1457 LoadRegPlusOffsetToAt(rs);
1347 GenInstrImmediate(LB, at, rd, 0); // Equiv to lb(rd, MemOperand(at, 0)); 1458 GenInstrImmediate(LB, at, rd, 0); // Equiv to lb(rd, MemOperand(at, 0));
1348 } 1459 }
(...skipping 33 matching lines...)
1382 void Assembler::lw(Register rd, const MemOperand& rs) { 1493 void Assembler::lw(Register rd, const MemOperand& rs) {
1383 if (is_int16(rs.offset_)) { 1494 if (is_int16(rs.offset_)) {
1384 GenInstrImmediate(LW, rs.rm(), rd, rs.offset_); 1495 GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);
1385 } else { // Offset > 16 bits, use multiple instructions to load. 1496 } else { // Offset > 16 bits, use multiple instructions to load.
1386 LoadRegPlusOffsetToAt(rs); 1497 LoadRegPlusOffsetToAt(rs);
1387 GenInstrImmediate(LW, at, rd, 0); // Equiv to lw(rd, MemOperand(at, 0)); 1498 GenInstrImmediate(LW, at, rd, 0); // Equiv to lw(rd, MemOperand(at, 0));
1388 } 1499 }
1389 } 1500 }
1390 1501
1391 1502
1503 void Assembler::lwu(Register rd, const MemOperand& rs) {
1504 if (is_int16(rs.offset_)) {
1505 GenInstrImmediate(LWU, rs.rm(), rd, rs.offset_);
1506 } else { // Offset > 16 bits, use multiple instructions to load.
1507 LoadRegPlusOffsetToAt(rs);
1508 GenInstrImmediate(LWU, at, rd, 0); // Equiv to lwu(rd, MemOperand(at, 0));
1509 }
1510 }
1511
1512
1392 void Assembler::lwl(Register rd, const MemOperand& rs) { 1513 void Assembler::lwl(Register rd, const MemOperand& rs) {
1393 GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_); 1514 GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_);
1394 } 1515 }
1395 1516
1396 1517
1397 void Assembler::lwr(Register rd, const MemOperand& rs) { 1518 void Assembler::lwr(Register rd, const MemOperand& rs) {
1398 GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_); 1519 GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_);
1399 } 1520 }
1400 1521
1401 1522
(...skipping 36 matching lines...)
1438 GenInstrImmediate(SWR, rs.rm(), rd, rs.offset_); 1559 GenInstrImmediate(SWR, rs.rm(), rd, rs.offset_);
1439 } 1560 }
1440 1561
1441 1562
1442 void Assembler::lui(Register rd, int32_t j) { 1563 void Assembler::lui(Register rd, int32_t j) {
1443 ASSERT(is_uint16(j)); 1564 ASSERT(is_uint16(j));
1444 GenInstrImmediate(LUI, zero_reg, rd, j); 1565 GenInstrImmediate(LUI, zero_reg, rd, j);
1445 } 1566 }
1446 1567
1447 1568
1569 void Assembler::ldl(Register rd, const MemOperand& rs) {
1570 GenInstrImmediate(LDL, rs.rm(), rd, rs.offset_);
1571 }
1572
1573
1574 void Assembler::ldr(Register rd, const MemOperand& rs) {
1575 GenInstrImmediate(LDR, rs.rm(), rd, rs.offset_);
1576 }
1577
1578
1579 void Assembler::sdl(Register rd, const MemOperand& rs) {
1580 GenInstrImmediate(SDL, rs.rm(), rd, rs.offset_);
1581 }
1582
1583
1584 void Assembler::sdr(Register rd, const MemOperand& rs) {
1585 GenInstrImmediate(SDR, rs.rm(), rd, rs.offset_);
1586 }
1587
1588
1589 void Assembler::ld(Register rd, const MemOperand& rs) {
1590 if (is_int16(rs.offset_)) {
1591 GenInstrImmediate(LD, rs.rm(), rd, rs.offset_);
1592 } else { // Offset > 16 bits, use multiple instructions to load.
1593 LoadRegPlusOffsetToAt(rs);
1594 GenInstrImmediate(LD, at, rd, 0); // Equiv to ld(rd, MemOperand(at, 0));
1595 }
1596 }
1597
1598
1599 void Assembler::sd(Register rd, const MemOperand& rs) {
1600 if (is_int16(rs.offset_)) {
1601 GenInstrImmediate(SD, rs.rm(), rd, rs.offset_);
1602 } else { // Offset > 16 bits, use multiple instructions to store.
1603 LoadRegPlusOffsetToAt(rs);
1604 GenInstrImmediate(SD, at, rd, 0); // Equiv to sd(rd, MemOperand(at, 0));
1605 }
1606 }
1607
1608
1448 // -------------Misc-instructions-------------- 1609 // -------------Misc-instructions--------------
1449 1610
1450 // Break / Trap instructions. 1611 // Break / Trap instructions.
1451 void Assembler::break_(uint32_t code, bool break_as_stop) { 1612 void Assembler::break_(uint32_t code, bool break_as_stop) {
1452 ASSERT((code & ~0xfffff) == 0); 1613 ASSERT((code & ~0xfffff) == 0);
1453 // We need to invalidate breaks that could be stops as well because the 1614 // We need to invalidate breaks that could be stops as well because the
1454 // simulator expects a char pointer after the stop instruction. 1615 // simulator expects a char pointer after the stop instruction.
1455 // See constants-mips.h for explanation. 1616 // See constants-mips.h for explanation.
1456 ASSERT((break_as_stop && 1617 ASSERT((break_as_stop &&
1457 code <= kMaxStopCode && 1618 code <= kMaxStopCode &&
1458 code > kMaxWatchpointCode) || 1619 code > kMaxWatchpointCode) ||
1459 (!break_as_stop && 1620 (!break_as_stop &&
1460 (code > kMaxStopCode || 1621 (code > kMaxStopCode ||
1461 code <= kMaxWatchpointCode))); 1622 code <= kMaxWatchpointCode)));
1462 Instr break_instr = SPECIAL | BREAK | (code << 6); 1623 Instr break_instr = SPECIAL | BREAK | (code << 6);
1463 emit(break_instr); 1624 emit(break_instr);
1464 } 1625 }
1465 1626
1466 1627
1467 void Assembler::stop(const char* msg, uint32_t code) { 1628 void Assembler::stop(const char* msg, uint32_t code) {
1468 ASSERT(code > kMaxWatchpointCode); 1629 ASSERT(code > kMaxWatchpointCode);
1469 ASSERT(code <= kMaxStopCode); 1630 ASSERT(code <= kMaxStopCode);
1470 #if V8_HOST_ARCH_MIPS 1631 #if defined(V8_HOST_ARCH_MIPS) || defined(V8_HOST_ARCH_MIPS64)
1471 break_(0x54321); 1632 break_(0x54321);
1472 #else // V8_HOST_ARCH_MIPS 1633 #else // V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
1473 BlockTrampolinePoolFor(2); 1634 BlockTrampolinePoolFor(3);
1474 // The Simulator will handle the stop instruction and get the message address. 1635 // The Simulator will handle the stop instruction and get the message address.
1475 // On MIPS stop() is just a special kind of break_(). 1636 // On MIPS stop() is just a special kind of break_().
1476 break_(code, true); 1637 break_(code, true);
1477 emit(reinterpret_cast<Instr>(msg)); 1638 emit(reinterpret_cast<uint64_t>(msg));
1478 #endif 1639 #endif
1479 } 1640 }
1480 1641
1481 1642
1482 void Assembler::tge(Register rs, Register rt, uint16_t code) { 1643 void Assembler::tge(Register rs, Register rt, uint16_t code) {
1483 ASSERT(is_uint10(code)); 1644 ASSERT(is_uint10(code));
1484 Instr instr = SPECIAL | TGE | rs.code() << kRsShift 1645 Instr instr = SPECIAL | TGE | rs.code() << kRsShift
1485 | rt.code() << kRtShift | code << 6; 1646 | rt.code() << kRtShift | code << 6;
1486 emit(instr); 1647 emit(instr);
1487 } 1648 }
(...skipping 101 matching lines...)
1589 // Bit twiddling. 1750 // Bit twiddling.
1590 void Assembler::clz(Register rd, Register rs) { 1751 void Assembler::clz(Register rd, Register rs) {
1591 // Clz instr requires same GPR number in 'rd' and 'rt' fields. 1752 // Clz instr requires same GPR number in 'rd' and 'rt' fields.
1592 GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ); 1753 GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ);
1593 } 1754 }
1594 1755
1595 1756
1596 void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) { 1757 void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
1597 // Should be called via MacroAssembler::Ins. 1758 // Should be called via MacroAssembler::Ins.
1598 // Ins instr has 'rt' field as dest, and two uint5: msb, lsb. 1759 // Ins instr has 'rt' field as dest, and two uint5: msb, lsb.
1599 ASSERT(kArchVariant == kMips32r2); 1760 ASSERT(kArchVariant == kMips64r2);
1600 GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS); 1761 GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS);
1601 } 1762 }
1602 1763
1603 1764
1604 void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) { 1765 void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
1605 // Should be called via MacroAssembler::Ext. 1766 // Should be called via MacroAssembler::Ext.
1606 // Ext instr has 'rt' field as dest, and two uint5: msb, lsb. 1767 // Ext instr has 'rt' field as dest, and two uint5: msb, lsb.
1607 ASSERT(kArchVariant == kMips32r2); 1768 ASSERT(kArchVariant == kMips64r2);
1608 GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT); 1769 GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT);
1609 } 1770 }
1610 1771
1611 1772
1612 void Assembler::pref(int32_t hint, const MemOperand& rs) { 1773 void Assembler::pref(int32_t hint, const MemOperand& rs) {
1613 ASSERT(kArchVariant != kLoongson); 1774 ASSERT(kArchVariant != kLoongson);
1614 ASSERT(is_uint5(hint) && is_uint16(rs.offset_)); 1775 ASSERT(is_uint5(hint) && is_uint16(rs.offset_));
1615 Instr instr = PREF | (rs.rm().code() << kRsShift) | (hint << kRtShift) 1776 Instr instr = PREF | (rs.rm().code() << kRsShift) | (hint << kRtShift)
1616 | (rs.offset_); 1777 | (rs.offset_);
1617 emit(instr); 1778 emit(instr);
1618 } 1779 }
1619 1780
1620 1781
1621 // --------Coprocessor-instructions---------------- 1782 // --------Coprocessor-instructions----------------
1622 1783
1623 // Load, store, move. 1784 // Load, store, move.
1624 void Assembler::lwc1(FPURegister fd, const MemOperand& src) { 1785 void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
1625 GenInstrImmediate(LWC1, src.rm(), fd, src.offset_); 1786 GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
1626 } 1787 }
1627 1788
1628 1789
1629 void Assembler::ldc1(FPURegister fd, const MemOperand& src) { 1790 void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
1630 // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit 1791 GenInstrImmediate(LDC1, src.rm(), fd, src.offset_);
1631 // load to two 32-bit loads.
1632 GenInstrImmediate(LWC1, src.rm(), fd, src.offset_ +
1633 Register::kMantissaOffset);
1634 FPURegister nextfpreg;
1635 nextfpreg.setcode(fd.code() + 1);
1636 GenInstrImmediate(LWC1, src.rm(), nextfpreg, src.offset_ +
1637 Register::kExponentOffset);
1638 } 1792 }
1639 1793
1640 1794
1641 void Assembler::swc1(FPURegister fd, const MemOperand& src) { 1795 void Assembler::swc1(FPURegister fd, const MemOperand& src) {
1642 GenInstrImmediate(SWC1, src.rm(), fd, src.offset_); 1796 GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
1643 } 1797 }
1644 1798
1645 1799
1646 void Assembler::sdc1(FPURegister fd, const MemOperand& src) { 1800 void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
1647 // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit 1801 GenInstrImmediate(SDC1, src.rm(), fd, src.offset_);
1648 // store to two 32-bit stores.
1649 GenInstrImmediate(SWC1, src.rm(), fd, src.offset_ +
1650 Register::kMantissaOffset);
1651 FPURegister nextfpreg;
1652 nextfpreg.setcode(fd.code() + 1);
1653 GenInstrImmediate(SWC1, src.rm(), nextfpreg, src.offset_ +
1654 Register::kExponentOffset);
1655 } 1802 }
1656 1803
1657 1804
1658 void Assembler::mtc1(Register rt, FPURegister fs) { 1805 void Assembler::mtc1(Register rt, FPURegister fs) {
1659 GenInstrRegister(COP1, MTC1, rt, fs, f0); 1806 GenInstrRegister(COP1, MTC1, rt, fs, f0);
1660 } 1807 }
1661 1808
1662 1809
1810 void Assembler::mthc1(Register rt, FPURegister fs) {
1811 GenInstrRegister(COP1, MTHC1, rt, fs, f0);
1812 }
1813
1814
1815 void Assembler::dmtc1(Register rt, FPURegister fs) {
1816 GenInstrRegister(COP1, DMTC1, rt, fs, f0);
1817 }
1818
1819
1663 void Assembler::mfc1(Register rt, FPURegister fs) { 1820 void Assembler::mfc1(Register rt, FPURegister fs) {
1664 GenInstrRegister(COP1, MFC1, rt, fs, f0); 1821 GenInstrRegister(COP1, MFC1, rt, fs, f0);
1665 } 1822 }
1666 1823
1667 1824
1825 void Assembler::mfhc1(Register rt, FPURegister fs) {
1826 GenInstrRegister(COP1, MFHC1, rt, fs, f0);
1827 }
1828
1829
1830 void Assembler::dmfc1(Register rt, FPURegister fs) {
1831 GenInstrRegister(COP1, DMFC1, rt, fs, f0);
1832 }
1833
1834
1668 void Assembler::ctc1(Register rt, FPUControlRegister fs) { 1835 void Assembler::ctc1(Register rt, FPUControlRegister fs) {
1669 GenInstrRegister(COP1, CTC1, rt, fs); 1836 GenInstrRegister(COP1, CTC1, rt, fs);
1670 } 1837 }
1671 1838
1672 1839
1673 void Assembler::cfc1(Register rt, FPUControlRegister fs) { 1840 void Assembler::cfc1(Register rt, FPUControlRegister fs) {
1674 GenInstrRegister(COP1, CFC1, rt, fs); 1841 GenInstrRegister(COP1, CFC1, rt, fs);
1675 } 1842 }
1676 1843
1677 1844
(...skipping 18 matching lines...)
1696 } 1863 }
1697 1864
1698 1865
1699 void Assembler::mul_d(FPURegister fd, FPURegister fs, FPURegister ft) { 1866 void Assembler::mul_d(FPURegister fd, FPURegister fs, FPURegister ft) {
1700 GenInstrRegister(COP1, D, ft, fs, fd, MUL_D); 1867 GenInstrRegister(COP1, D, ft, fs, fd, MUL_D);
1701 } 1868 }
1702 1869
1703 1870
1704 void Assembler::madd_d(FPURegister fd, FPURegister fr, FPURegister fs, 1871 void Assembler::madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
1705 FPURegister ft) { 1872 FPURegister ft) {
1873 ASSERT(kArchVariant != kLoongson);
1706 GenInstrRegister(COP1X, fr, ft, fs, fd, MADD_D); 1874 GenInstrRegister(COP1X, fr, ft, fs, fd, MADD_D);
1707 } 1875 }
1708 1876
1709 1877
1710 void Assembler::div_d(FPURegister fd, FPURegister fs, FPURegister ft) { 1878 void Assembler::div_d(FPURegister fd, FPURegister fs, FPURegister ft) {
1711 GenInstrRegister(COP1, D, ft, fs, fd, DIV_D); 1879 GenInstrRegister(COP1, D, ft, fs, fd, DIV_D);
1712 } 1880 }
1713 1881
1714 1882
1715 void Assembler::abs_d(FPURegister fd, FPURegister fs) { 1883 void Assembler::abs_d(FPURegister fd, FPURegister fs) {
(...skipping 62 matching lines...)
1778 GenInstrRegister(COP1, S, f0, fs, fd, CEIL_W_S); 1946 GenInstrRegister(COP1, S, f0, fs, fd, CEIL_W_S);
1779 } 1947 }
1780 1948
1781 1949
1782 void Assembler::ceil_w_d(FPURegister fd, FPURegister fs) { 1950 void Assembler::ceil_w_d(FPURegister fd, FPURegister fs) {
1783 GenInstrRegister(COP1, D, f0, fs, fd, CEIL_W_D); 1951 GenInstrRegister(COP1, D, f0, fs, fd, CEIL_W_D);
1784 } 1952 }
1785 1953
1786 1954
1787 void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) { 1955 void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
1788 ASSERT(kArchVariant == kMips32r2); 1956 ASSERT(kArchVariant == kMips64r2);
1789 GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S); 1957 GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
1790 } 1958 }
1791 1959
1792 1960
1793 void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) { 1961 void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
1794 ASSERT(kArchVariant == kMips32r2); 1962 ASSERT(kArchVariant == kMips64r2);
1795 GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D); 1963 GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
1796 } 1964 }
1797 1965
1798 1966
1799 void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) { 1967 void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) {
1800 ASSERT(kArchVariant == kMips32r2); 1968 ASSERT(kArchVariant == kMips64r2);
1801 GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S); 1969 GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S);
1802 } 1970 }
1803 1971
1804 1972
1805 void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) { 1973 void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) {
1806 ASSERT(kArchVariant == kMips32r2); 1974 ASSERT(kArchVariant == kMips64r2);
1807 GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D); 1975 GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D);
1808 } 1976 }
1809 1977
1810 1978
1811 void Assembler::round_l_s(FPURegister fd, FPURegister fs) { 1979 void Assembler::round_l_s(FPURegister fd, FPURegister fs) {
1812 GenInstrRegister(COP1, S, f0, fs, fd, ROUND_L_S); 1980 GenInstrRegister(COP1, S, f0, fs, fd, ROUND_L_S);
1813 } 1981 }
1814 1982
1815 1983
1816 void Assembler::round_l_d(FPURegister fd, FPURegister fs) { 1984 void Assembler::round_l_d(FPURegister fd, FPURegister fs) {
(...skipping 20 matching lines...)
1837 GenInstrRegister(COP1, D, f0, fs, fd, CEIL_L_D); 2005 GenInstrRegister(COP1, D, f0, fs, fd, CEIL_L_D);
1838 } 2006 }
1839 2007
1840 2008
1841 void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) { 2009 void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
1842 GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W); 2010 GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W);
1843 } 2011 }
1844 2012
1845 2013
1846 void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) { 2014 void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
1847 ASSERT(kArchVariant == kMips32r2); 2015 ASSERT(kArchVariant == kMips64r2);
1848 GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L); 2016 GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
1849 } 2017 }
1850 2018
1851 2019
1852 void Assembler::cvt_s_d(FPURegister fd, FPURegister fs) { 2020 void Assembler::cvt_s_d(FPURegister fd, FPURegister fs) {
1853 GenInstrRegister(COP1, D, f0, fs, fd, CVT_S_D); 2021 GenInstrRegister(COP1, D, f0, fs, fd, CVT_S_D);
1854 } 2022 }
1855 2023
1856 2024
1857 void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) { 2025 void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) {
1858 GenInstrRegister(COP1, W, f0, fs, fd, CVT_D_W); 2026 GenInstrRegister(COP1, W, f0, fs, fd, CVT_D_W);
1859 } 2027 }
1860 2028
1861 2029
1862 void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) { 2030 void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
1863 ASSERT(kArchVariant == kMips32r2); 2031 ASSERT(kArchVariant == kMips64r2);
1864 GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L); 2032 GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
1865 } 2033 }
1866 2034
1867 2035
1868 void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) { 2036 void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) {
1869 GenInstrRegister(COP1, S, f0, fs, fd, CVT_D_S); 2037 GenInstrRegister(COP1, S, f0, fs, fd, CVT_D_S);
1870 } 2038 }
1871 2039
1872 2040
1873 // Conditions. 2041 // Conditions.
(...skipping 52 matching lines...)
1926 } 2094 }
1927 } 2095 }
1928 2096
1929 2097
1930 int Assembler::RelocateInternalReference(byte* pc, intptr_t pc_delta) { 2098 int Assembler::RelocateInternalReference(byte* pc, intptr_t pc_delta) {
1931 Instr instr = instr_at(pc); 2099 Instr instr = instr_at(pc);
1932 ASSERT(IsJ(instr) || IsLui(instr)); 2100 ASSERT(IsJ(instr) || IsLui(instr));
1933 if (IsLui(instr)) { 2101 if (IsLui(instr)) {
1934 Instr instr_lui = instr_at(pc + 0 * Assembler::kInstrSize); 2102 Instr instr_lui = instr_at(pc + 0 * Assembler::kInstrSize);
1935 Instr instr_ori = instr_at(pc + 1 * Assembler::kInstrSize); 2103 Instr instr_ori = instr_at(pc + 1 * Assembler::kInstrSize);
2104 Instr instr_ori2 = instr_at(pc + 3 * Assembler::kInstrSize);
1936 ASSERT(IsOri(instr_ori)); 2105 ASSERT(IsOri(instr_ori));
1937 int32_t imm = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift; 2106 ASSERT(IsOri(instr_ori2));
1938 imm |= (instr_ori & static_cast<int32_t>(kImm16Mask)); 2107 // TODO(plind): symbolic names for the shifts.
2108 int64_t imm = (instr_lui & static_cast<int64_t>(kImm16Mask)) << 48;
2109 imm |= (instr_ori & static_cast<int64_t>(kImm16Mask)) << 32;
2110 imm |= (instr_ori2 & static_cast<int64_t>(kImm16Mask)) << 16;
2111 // Sign extend address.
2112 imm >>= 16;
2113
1939 if (imm == kEndOfJumpChain) { 2114 if (imm == kEndOfJumpChain) {
1940 return 0; // Number of instructions patched. 2115 return 0; // Number of instructions patched.
1941 } 2116 }
1942 imm += pc_delta; 2117 imm += pc_delta;
1943 ASSERT((imm & 3) == 0); 2118 ASSERT((imm & 3) == 0);
1944 2119
1945 instr_lui &= ~kImm16Mask; 2120 instr_lui &= ~kImm16Mask;
1946 instr_ori &= ~kImm16Mask; 2121 instr_ori &= ~kImm16Mask;
2122 instr_ori2 &= ~kImm16Mask;
1947 2123
1948 instr_at_put(pc + 0 * Assembler::kInstrSize, 2124 instr_at_put(pc + 0 * Assembler::kInstrSize,
1949 instr_lui | ((imm >> kLuiShift) & kImm16Mask)); 2125 instr_lui | ((imm >> 32) & kImm16Mask));
1950 instr_at_put(pc + 1 * Assembler::kInstrSize, 2126 instr_at_put(pc + 1 * Assembler::kInstrSize,
1951 instr_ori | (imm & kImm16Mask)); 2127 instr_ori | (imm >> 16 & kImm16Mask));
1952 return 2; // Number of instructions patched. 2128 instr_at_put(pc + 3 * Assembler::kInstrSize,
2129 instr_ori2 | (imm & kImm16Mask));
2130 return 4; // Number of instructions patched.
1953 } else { 2131 } else {
1954 uint32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2; 2132 uint32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
1955 if (static_cast<int32_t>(imm28) == kEndOfJumpChain) { 2133 if (static_cast<int32_t>(imm28) == kEndOfJumpChain) {
1956 return 0; // Number of instructions patched. 2134 return 0; // Number of instructions patched.
1957 } 2135 }
2136
1958 imm28 += pc_delta; 2137 imm28 += pc_delta;
1959 imm28 &= kImm28Mask; 2138 imm28 &= kImm28Mask;
1960 ASSERT((imm28 & 3) == 0); 2139 ASSERT((imm28 & 3) == 0);
1961 2140
1962 instr &= ~kImm26Mask; 2141 instr &= ~kImm26Mask;
1963 uint32_t imm26 = imm28 >> 2; 2142 uint32_t imm26 = imm28 >> 2;
1964 ASSERT(is_uint26(imm26)); 2143 ASSERT(is_uint26(imm26));
1965 2144
1966 instr_at_put(pc, instr | (imm26 & kImm26Mask)); 2145 instr_at_put(pc, instr | (imm26 & kImm26Mask));
1967 return 1; // Number of instructions patched. 2146 return 1; // Number of instructions patched.
(...skipping 15 matching lines...)
1983 } 2162 }
1984 CHECK_GT(desc.buffer_size, 0); // No overflow. 2163 CHECK_GT(desc.buffer_size, 0); // No overflow.
1985 2164
1986 // Set up new buffer. 2165 // Set up new buffer.
1987 desc.buffer = NewArray<byte>(desc.buffer_size); 2166 desc.buffer = NewArray<byte>(desc.buffer_size);
1988 2167
1989 desc.instr_size = pc_offset(); 2168 desc.instr_size = pc_offset();
1990 desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos(); 2169 desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
1991 2170
1992 // Copy the data. 2171 // Copy the data.
1993 int pc_delta = desc.buffer - buffer_; 2172 intptr_t pc_delta = desc.buffer - buffer_;
1994 int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_); 2173 intptr_t rc_delta = (desc.buffer + desc.buffer_size) -
2174 (buffer_ + buffer_size_);
1995 MemMove(desc.buffer, buffer_, desc.instr_size); 2175 MemMove(desc.buffer, buffer_, desc.instr_size);
1996 MemMove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(), 2176 MemMove(reloc_info_writer.pos() + rc_delta,
1997 desc.reloc_size); 2177 reloc_info_writer.pos(), desc.reloc_size);
1998 2178
1999 // Switch buffers. 2179 // Switch buffers.
2000 DeleteArray(buffer_); 2180 DeleteArray(buffer_);
2001 buffer_ = desc.buffer; 2181 buffer_ = desc.buffer;
2002 buffer_size_ = desc.buffer_size; 2182 buffer_size_ = desc.buffer_size;
2003 pc_ += pc_delta; 2183 pc_ += pc_delta;
2004 reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta, 2184 reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
2005 reloc_info_writer.last_pc() + pc_delta); 2185 reloc_info_writer.last_pc() + pc_delta);
2006 2186
2007 // Relocate runtime entries. 2187 // Relocate runtime entries.
(...skipping 18 matching lines...)
2026 2206
2027 void Assembler::dd(uint32_t data) { 2207 void Assembler::dd(uint32_t data) {
2028 CheckBuffer(); 2208 CheckBuffer();
2029 *reinterpret_cast<uint32_t*>(pc_) = data; 2209 *reinterpret_cast<uint32_t*>(pc_) = data;
2030 pc_ += sizeof(uint32_t); 2210 pc_ += sizeof(uint32_t);
2031 } 2211 }
2032 2212
2033 2213
2034 void Assembler::emit_code_stub_address(Code* stub) { 2214 void Assembler::emit_code_stub_address(Code* stub) {
2035 CheckBuffer(); 2215 CheckBuffer();
2036 *reinterpret_cast<uint32_t*>(pc_) = 2216 *reinterpret_cast<uint64_t*>(pc_) =
2037 reinterpret_cast<uint32_t>(stub->instruction_start()); 2217 reinterpret_cast<uint64_t>(stub->instruction_start());
2038 pc_ += sizeof(uint32_t); 2218 pc_ += sizeof(uint64_t);
2039 } 2219 }
2040 2220
2041 2221
2042 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { 2222 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
2043 // We do not try to reuse pool constants. 2223 // We do not try to reuse pool constants.
2044 RelocInfo rinfo(pc_, rmode, data, NULL); 2224 RelocInfo rinfo(pc_, rmode, data, NULL);
2045 if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) { 2225 if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) {
2046 // Adjust code for new modes. 2226 // Adjust code for new modes.
2047 ASSERT(RelocInfo::IsDebugBreakSlot(rmode) 2227 ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
2048 || RelocInfo::IsJSReturn(rmode) 2228 || RelocInfo::IsJSReturn(rmode)
(...skipping 49 matching lines...)
2098 ASSERT(unbound_labels_count_ >= 0); 2278 ASSERT(unbound_labels_count_ >= 0);
2099 if (unbound_labels_count_ > 0) { 2279 if (unbound_labels_count_ > 0) {
 2100 // First we emit the jump (2 instructions), then the trampoline pool. 2280 // First we emit the jump (2 instructions), then the trampoline pool.
2101 { BlockTrampolinePoolScope block_trampoline_pool(this); 2281 { BlockTrampolinePoolScope block_trampoline_pool(this);
2102 Label after_pool; 2282 Label after_pool;
2103 b(&after_pool); 2283 b(&after_pool);
2104 nop(); 2284 nop();
2105 2285
2106 int pool_start = pc_offset(); 2286 int pool_start = pc_offset();
2107 for (int i = 0; i < unbound_labels_count_; i++) { 2287 for (int i = 0; i < unbound_labels_count_; i++) {
2108 uint32_t imm32; 2288 uint64_t imm64;
2109 imm32 = jump_address(&after_pool); 2289 imm64 = jump_address(&after_pool);
2110 { BlockGrowBufferScope block_buf_growth(this); 2290 { BlockGrowBufferScope block_buf_growth(this);
2111 // Buffer growth (and relocation) must be blocked for internal 2291 // Buffer growth (and relocation) must be blocked for internal
2112 // references until associated instructions are emitted and available 2292 // references until associated instructions are emitted and available
2113 // to be patched. 2293 // to be patched.
2114 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE); 2294 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
 2115 lui(at, (imm32 & kHiMask) >> kLuiShift); 2295 // TODO(plind): Verify this; presumably the macro-assembler cannot be
 2116 ori(at, at, (imm32 & kImm16Mask)); 2296 // used here.
2297 lui(at, (imm64 >> 32) & kImm16Mask);
2298 ori(at, at, (imm64 >> 16) & kImm16Mask);
2299 dsll(at, at, 16);
2300 ori(at, at, imm64 & kImm16Mask);
2117 } 2301 }
2118 jr(at); 2302 jr(at);
2119 nop(); 2303 nop();
2120 } 2304 }
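// Each trampoline slot thus occupies 6 instructions: lui/ori/dsll/ori to
// load the label address, then jr and its delay-slot nop.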
2121 bind(&after_pool); 2305 bind(&after_pool);
2122 trampoline_ = Trampoline(pool_start, unbound_labels_count_); 2306 trampoline_ = Trampoline(pool_start, unbound_labels_count_);
2123 2307
2124 trampoline_emitted_ = true; 2308 trampoline_emitted_ = true;
 2125 // As we are only going to emit the trampoline once, we need to prevent 2309 // As we are only going to emit the trampoline once, we need to prevent
 2126 // any further emission. 2310 // any further emission.
2127 next_buffer_check_ = kMaxInt; 2311 next_buffer_check_ = kMaxInt;
2128 } 2312 }
2129 } else { 2313 } else {
 2130 // The number of branches to unbound labels at this point is zero, so we 2314 // The number of branches to unbound labels at this point is zero, so we
 2131 // can move the next buffer check to the maximum. 2315 // can move the next buffer check to the maximum.
2132 next_buffer_check_ = pc_offset() + 2316 next_buffer_check_ = pc_offset() +
2133 kMaxBranchOffset - kTrampolineSlotsSize * 16; 2317 kMaxBranchOffset - kTrampolineSlotsSize * 16;
2134 } 2318 }
2135 return; 2319 return;
2136 } 2320 }
2137 2321
2138 2322
2139 Address Assembler::target_address_at(Address pc) { 2323 Address Assembler::target_address_at(Address pc) {
2140 Instr instr1 = instr_at(pc); 2324 Instr instr0 = instr_at(pc);
2141 Instr instr2 = instr_at(pc + kInstrSize); 2325 Instr instr1 = instr_at(pc + 1 * kInstrSize);
2142 // Interpret 2 instructions generated by li: lui/ori 2326 Instr instr3 = instr_at(pc + 3 * kInstrSize);
2143 if ((GetOpcodeField(instr1) == LUI) && (GetOpcodeField(instr2) == ORI)) { 2327
 2144 // Assemble the 32 bit value. 2328 // Interpret the 4 instructions generated by li for an address; see the
 2145 return reinterpret_cast<Address>( 2329 // listing in Assembler::set_target_address_at() just below.
2146 (GetImmediate16(instr1) << 16) | GetImmediate16(instr2)); 2330 if ((GetOpcodeField(instr0) == LUI) && (GetOpcodeField(instr1) == ORI) &&
2331 (GetOpcodeField(instr3) == ORI)) {
 2332 // Assemble the 48-bit value.
2333 int64_t addr = static_cast<int64_t>(
2334 ((uint64_t)(GetImmediate16(instr0)) << 32) |
2335 ((uint64_t)(GetImmediate16(instr1)) << 16) |
2336 ((uint64_t)(GetImmediate16(instr3))));
2337
2338 // Sign extend to get canonical address.
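// e.g. fields 0x1234/0x5678/0x9abc give 0x0000_1234_5678_9abc, unchanged
// by the shift round-trip since bit 47 is clear; a value with bit 47 set
// would instead have bits [63:48] filled with ones.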
2339 addr = (addr << 16) >> 16;
2340 return reinterpret_cast<Address>(addr);
2147 } 2341 }
2148
 2149 // We should never get here; force a bad address if we do. 2342 // We should never get here; force a bad address if we do.
2150 UNREACHABLE(); 2343 UNREACHABLE();
2151 return (Address)0x0; 2344 return (Address)0x0;
2152 } 2345 }
2153 2346
2154 2347
2155 // MIPS and ia32 use opposite encoding for qNaN and sNaN, such that ia32 2348 // MIPS and ia32 use opposite encoding for qNaN and sNaN, such that ia32
2156 // qNaN is a MIPS sNaN, and ia32 sNaN is MIPS qNaN. If running from a heap 2349 // qNaN is a MIPS sNaN, and ia32 sNaN is MIPS qNaN. If running from a heap
2157 // snapshot generated on ia32, the resulting MIPS sNaN must be quieted. 2350 // snapshot generated on ia32, the resulting MIPS sNaN must be quieted.
2158 // OS::nan_value() returns a qNaN. 2351 // OS::nan_value() returns a qNaN.
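// e.g. a typical ia32 qNaN such as 0x7FF8000000000000 has the top fraction
// bit set, which legacy (pre-NAN2008) MIPS FPUs treat as a signaling NaN.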
2159 void Assembler::QuietNaN(HeapObject* object) { 2352 void Assembler::QuietNaN(HeapObject* object) {
2160 HeapNumber::cast(object)->set_value(base::OS::nan_value()); 2353 HeapNumber::cast(object)->set_value(base::OS::nan_value());
2161 } 2354 }
2162 2355
2163 2356
2164 // On Mips, a target address is stored in a lui/ori instruction pair, each 2357 // On Mips64, a target address is stored in a 4-instruction sequence:
2165 // of which load 16 bits of the 32-bit address to a register. 2358 // 0: lui(rd, (j.imm64_ >> 32) & kImm16Mask);
2166 // Patching the address must replace both instr, and flush the i-cache. 2359 // 1: ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
2360 // 2: dsll(rd, rd, 16);
 2361 // 3: ori(rd, rd, j.imm64_ & kImm16Mask);
2362 //
2363 // Patching the address must replace all the lui & ori instructions,
2364 // and flush the i-cache.
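// e.g. for target 0x0000_1234_5678_9abc the sequence materializes:
//   lui  rd, 0x1234       -> rd = 0x0000_0000_1234_0000
//   ori  rd, rd, 0x5678   -> rd = 0x0000_0000_1234_5678
//   dsll rd, rd, 16       -> rd = 0x0000_1234_5678_0000
//   ori  rd, rd, 0x9abc   -> rd = 0x0000_1234_5678_9abc
// (lui sign-extends its 32-bit result, so bit 47 of the address propagates
// into bits [63:48] after the dsll, yielding a canonical address.)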
2167 // 2365 //
2168 // There is an optimization below, which emits a nop when the address 2366 // There is an optimization below, which emits a nop when the address
 2169 // fits in just 16 bits. This is unlikely to help; it should be benchmarked 2367 // fits in just 16 bits. This is unlikely to help; it should be benchmarked
 2170 // and possibly removed. 2368 // and possibly removed.
2171 void Assembler::set_target_address_at(Address pc, 2369 void Assembler::set_target_address_at(Address pc,
2172 Address target, 2370 Address target,
2173 ICacheFlushMode icache_flush_mode) { 2371 ICacheFlushMode icache_flush_mode) {
 2174 Instr instr2 = instr_at(pc + kInstrSize); 2372 // There is an optimization where only 4 instructions are used to load the
 2175 uint32_t rt_code = GetRtField(instr2); 2373 // address, because only the low 48 bits of the address are effectively
 2374 // used on MIPS64. It relies on the fact that the upper [63:48] bits are
 2375 // not used for virtual address translation, and that they must be set
 2376 // according to the value of bit 47 in order to get a canonical address.
2377 Instr instr1 = instr_at(pc + kInstrSize);
2378 uint32_t rt_code = GetRt(instr1);
2176 uint32_t* p = reinterpret_cast<uint32_t*>(pc); 2379 uint32_t* p = reinterpret_cast<uint32_t*>(pc);
2177 uint32_t itarget = reinterpret_cast<uint32_t>(target); 2380 uint64_t itarget = reinterpret_cast<uint64_t>(target);
2178 2381
2179 #ifdef DEBUG 2382 #ifdef DEBUG
2180 // Check we have the result from a li macro-instruction, using instr pair. 2383 // Check we have the result from a li macro-instruction.
2181 Instr instr1 = instr_at(pc); 2384 Instr instr0 = instr_at(pc);
2182 CHECK((GetOpcodeField(instr1) == LUI && GetOpcodeField(instr2) == ORI)); 2385 Instr instr3 = instr_at(pc + kInstrSize * 3);
2386 CHECK((GetOpcodeField(instr0) == LUI && GetOpcodeField(instr1) == ORI &&
2387 GetOpcodeField(instr3) == ORI));
2183 #endif 2388 #endif
2184 2389
 2185 // Must use 2 instructions to insure patchable code => just use lui and ori. 2390 // Must use 4 instructions to ensure patchable code.
 2186 // lui rt, upper-16. 2391 // lui rt, upper-16 (bits 47:32).
 2392 // ori rt, rt, middle-16 (bits 31:16).
 2393 // dsll rt, rt, 16.
 2187 // ori rt rt, lower-16. 2394 // ori rt, rt, lower-16 (bits 15:0).
2188 *p = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift); 2395 *p = LUI | (rt_code << kRtShift) | ((itarget >> 32) & kImm16Mask);
2189 *(p+1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask); 2396 *(p + 1) = ORI | (rt_code << kRtShift) | (rt_code << kRsShift)
2190 2397 | ((itarget >> 16) & kImm16Mask);
2191 // The following code is an optimization for the common case of Call() 2398 *(p + 3) = ORI | (rt_code << kRsShift) | (rt_code << kRtShift)
2192 // or Jump() which is load to register, and jump through register: 2399 | (itarget & kImm16Mask);
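// Note that *(p + 2), the dsll, carries no address bits and is left as-is.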
2193 // li(t9, address); jalr(t9) (or jr(t9)).
2194 // If the destination address is in the same 256 MB page as the call, it
2195 // is faster to do a direct jal, or j, rather than jump thru register, since
2196 // that lets the cpu pipeline prefetch the target address. However each
2197 // time the address above is patched, we have to patch the direct jal/j
2198 // instruction, as well as possibly revert to jalr/jr if we now cross a
2199 // 256 MB page. Note that with the jal/j instructions, we do not need to
2200 // load the register, but that code is left, since it makes it easy to
2201 // revert this process. A further optimization could try replacing the
2202 // li sequence with nops.
2203 // This optimization can only be applied if the rt-code from instr2 is the
2204 // register used for the jalr/jr. Finally, we have to skip 'jr ra', which is
2205 // mips return. Occasionally this lands after an li().
2206
2207 Instr instr3 = instr_at(pc + 2 * kInstrSize);
2208 uint32_t ipc = reinterpret_cast<uint32_t>(pc + 3 * kInstrSize);
2209 bool in_range = ((ipc ^ itarget) >> (kImm26Bits + kImmFieldShift)) == 0;
2210 uint32_t target_field =
2211 static_cast<uint32_t>(itarget & kJumpAddrMask) >> kImmFieldShift;
2212 bool patched_jump = false;
2213
2214 #ifndef ALLOW_JAL_IN_BOUNDARY_REGION
2215 // This is a workaround to the 24k core E156 bug (affect some 34k cores also).
2216 // Since the excluded space is only 64KB out of 256MB (0.02 %), we will just
2217 // apply this workaround for all cores so we don't have to identify the core.
2218 if (in_range) {
2219 // The 24k core E156 bug has some very specific requirements, we only check
2220 // the most simple one: if the address of the delay slot instruction is in
2221 // the first or last 32 KB of the 256 MB segment.
2222 uint32_t segment_mask = ((256 * MB) - 1) ^ ((32 * KB) - 1);
2223 uint32_t ipc_segment_addr = ipc & segment_mask;
2224 if (ipc_segment_addr == 0 || ipc_segment_addr == segment_mask)
2225 in_range = false;
2226 }
2227 #endif
2228
2229 if (IsJalr(instr3)) {
2230 // Try to convert JALR to JAL.
2231 if (in_range && GetRt(instr2) == GetRs(instr3)) {
2232 *(p+2) = JAL | target_field;
2233 patched_jump = true;
2234 }
2235 } else if (IsJr(instr3)) {
2236 // Try to convert JR to J, skip returns (jr ra).
2237 bool is_ret = static_cast<int>(GetRs(instr3)) == ra.code();
2238 if (in_range && !is_ret && GetRt(instr2) == GetRs(instr3)) {
2239 *(p+2) = J | target_field;
2240 patched_jump = true;
2241 }
2242 } else if (IsJal(instr3)) {
2243 if (in_range) {
2244 // We are patching an already converted JAL.
2245 *(p+2) = JAL | target_field;
2246 } else {
2247 // Patch JAL, but out of range, revert to JALR.
2248 // JALR rs reg is the rt reg specified in the ORI instruction.
2249 uint32_t rs_field = GetRt(instr2) << kRsShift;
2250 uint32_t rd_field = ra.code() << kRdShift; // Return-address (ra) reg.
2251 *(p+2) = SPECIAL | rs_field | rd_field | JALR;
2252 }
2253 patched_jump = true;
2254 } else if (IsJ(instr3)) {
2255 if (in_range) {
2256 // We are patching an already converted J (jump).
2257 *(p+2) = J | target_field;
2258 } else {
2259 // Trying patch J, but out of range, just go back to JR.
2260 // JR 'rs' reg is the 'rt' reg specified in the ORI instruction (instr2).
2261 uint32_t rs_field = GetRt(instr2) << kRsShift;
2262 *(p+2) = SPECIAL | rs_field | JR;
2263 }
2264 patched_jump = true;
2265 }
2266 2400
2267 if (icache_flush_mode != SKIP_ICACHE_FLUSH) { 2401 if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
2268 CpuFeatures::FlushICache(pc, (patched_jump ? 3 : 2) * sizeof(int32_t)); 2402 CpuFeatures::FlushICache(pc, 4 * Assembler::kInstrSize);
2269 } 2403 }
2270 } 2404 }
2271 2405
2272 2406
2273 void Assembler::JumpLabelToJumpRegister(Address pc) { 2407 void Assembler::JumpLabelToJumpRegister(Address pc) {
 2274 // Address pc points to lui/ori instructions. 2408 // Address pc points to the lui/ori/dsll/ori load sequence.
 2275 // Jump to label may follow at pc + 2 * kInstrSize. 2409 // Jump to label may follow at pc + 6 * kInstrSize.
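// e.g. a 'jal' found in slot 6 is rewritten as 'jalr ra, rs', and a 'j' as
// 'jr rs', where rs is the register loaded by the preceding li sequence
// (taken from the rt field of the ori in slot 1).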
2276 uint32_t* p = reinterpret_cast<uint32_t*>(pc); 2410 uint32_t* p = reinterpret_cast<uint32_t*>(pc);
2277 #ifdef DEBUG 2411 #ifdef DEBUG
2278 Instr instr1 = instr_at(pc); 2412 Instr instr1 = instr_at(pc);
2279 #endif 2413 #endif
2280 Instr instr2 = instr_at(pc + 1 * kInstrSize); 2414 Instr instr2 = instr_at(pc + 1 * kInstrSize);
2281 Instr instr3 = instr_at(pc + 2 * kInstrSize); 2415 Instr instr3 = instr_at(pc + 6 * kInstrSize);
2282 bool patched = false; 2416 bool patched = false;
2283 2417
2284 if (IsJal(instr3)) { 2418 if (IsJal(instr3)) {
2285 ASSERT(GetOpcodeField(instr1) == LUI); 2419 ASSERT(GetOpcodeField(instr1) == LUI);
2286 ASSERT(GetOpcodeField(instr2) == ORI); 2420 ASSERT(GetOpcodeField(instr2) == ORI);
2287 2421
2288 uint32_t rs_field = GetRt(instr2) << kRsShift; 2422 uint32_t rs_field = GetRt(instr2) << kRsShift;
2289 uint32_t rd_field = ra.code() << kRdShift; // Return-address (ra) reg. 2423 uint32_t rd_field = ra.code() << kRdShift; // Return-address (ra) reg.
2290 *(p+2) = SPECIAL | rs_field | rd_field | JALR; 2424 *(p+6) = SPECIAL | rs_field | rd_field | JALR;
2291 patched = true; 2425 patched = true;
2292 } else if (IsJ(instr3)) { 2426 } else if (IsJ(instr3)) {
2293 ASSERT(GetOpcodeField(instr1) == LUI); 2427 ASSERT(GetOpcodeField(instr1) == LUI);
2294 ASSERT(GetOpcodeField(instr2) == ORI); 2428 ASSERT(GetOpcodeField(instr2) == ORI);
2295 2429
2296 uint32_t rs_field = GetRt(instr2) << kRsShift; 2430 uint32_t rs_field = GetRt(instr2) << kRsShift;
2297 *(p+2) = SPECIAL | rs_field | JR; 2431 *(p+6) = SPECIAL | rs_field | JR;
2298 patched = true; 2432 patched = true;
2299 } 2433 }
2300 2434
2301 if (patched) { 2435 if (patched) {
 2302 CpuFeatures::FlushICache(pc+2, sizeof(Address)); 2436 CpuFeatures::FlushICache(pc + 6 * kInstrSize, sizeof(int32_t));
2303 } 2437 }
2304 } 2438 }
2305 2439
2306 2440
2307 Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) { 2441 Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
2308 // No out-of-line constant pool support. 2442 // No out-of-line constant pool support.
2309 ASSERT(!FLAG_enable_ool_constant_pool); 2443 ASSERT(!FLAG_enable_ool_constant_pool);
2310 return isolate->factory()->empty_constant_pool_array(); 2444 return isolate->factory()->empty_constant_pool_array();
2311 } 2445 }
2312 2446
2313 2447
2314 void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) { 2448 void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
2315 // No out-of-line constant pool support. 2449 // No out-of-line constant pool support.
2316 ASSERT(!FLAG_enable_ool_constant_pool); 2450 ASSERT(!FLAG_enable_ool_constant_pool);
2317 return; 2451 return;
2318 } 2452 }
2319 2453
2320 2454
2321 } } // namespace v8::internal 2455 } } // namespace v8::internal
2322 2456
2323 #endif // V8_TARGET_ARCH_MIPS 2457 #endif // V8_TARGET_ARCH_MIPS64