// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#ifndef RUNTIME_VM_ASSEMBLER_MIPS_H_
#define RUNTIME_VM_ASSEMBLER_MIPS_H_

#ifndef RUNTIME_VM_ASSEMBLER_H_
#error Do not include assembler_mips.h directly; use assembler.h instead.
#endif

#include "platform/assert.h"
#include "platform/utils.h"
#include "vm/constants_mips.h"
#include "vm/hash_map.h"
#include "vm/object.h"
#include "vm/simulator.h"

// References to documentation in this file refer to:
// "MIPS® Architecture For Programmers Volume I-A:
//   Introduction to the MIPS32® Architecture" in short "VolI-A"
// and
// "MIPS® Architecture For Programmers Volume II-A:
//   The MIPS32® Instruction Set" in short "VolII-A"
namespace dart {

// Forward declarations.
class RuntimeEntry;
class StubEntry;

class Immediate : public ValueObject {
 public:
  explicit Immediate(int32_t value) : value_(value) {}

  Immediate(const Immediate& other) : ValueObject(), value_(other.value_) {}
  Immediate& operator=(const Immediate& other) {
    value_ = other.value_;
    return *this;
  }

 private:
  int32_t value_;

  int32_t value() const { return value_; }

  friend class Assembler;
};


class Address : public ValueObject {
 public:
  explicit Address(Register base, int32_t offset = 0)
      : ValueObject(), base_(base), offset_(offset) {}
  // This addressing mode does not exist on MIPS; the constructor is declared
  // but never defined, so accidental uses fail at link time.
  Address(Register base, Register offset);

  Address(const Address& other)
      : ValueObject(), base_(other.base_), offset_(other.offset_) {}
  Address& operator=(const Address& other) {
    base_ = other.base_;
    offset_ = other.offset_;
    return *this;
  }

  uint32_t encoding() const {
    ASSERT(Utils::IsInt(kImmBits, offset_));
    uint16_t imm_value = static_cast<uint16_t>(offset_);
    return (base_ << kRsShift) | imm_value;
  }

  static bool CanHoldOffset(int32_t offset) {
    return Utils::IsInt(kImmBits, offset);
  }
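
  // Note: with kImmBits == 16, only offsets in [-32768, 32767] can be encoded
  // directly; larger offsets need the address materialized into a register
  // first (see LoadFromOffset/StoreToOffset below).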

  Register base() const { return base_; }
  int32_t offset() const { return offset_; }

 private:
  Register base_;
  int32_t offset_;
};


class FieldAddress : public Address {
 public:
  FieldAddress(Register base, int32_t disp)
      : Address(base, disp - kHeapObjectTag) {}

  FieldAddress(const FieldAddress& other) : Address(other) {}

  FieldAddress& operator=(const FieldAddress& other) {
    Address::operator=(other);
    return *this;
  }
};
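
// Illustrative example (not from the original source): with kHeapObjectTag
// == 1, FieldAddress(T0, 8) encodes the address T0 + 7, compensating for the
// tag bit that Dart sets in heap object pointers.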


class Label : public ValueObject {
 public:
  Label() : position_(0) {}

  ~Label() {
    // Assert if label is being destroyed with unresolved branches pending.
    ASSERT(!IsLinked());
  }

  // Returns the position for bound and linked labels. Cannot be used
  // for unused labels.
  intptr_t Position() const {
    ASSERT(!IsUnused());
    return IsBound() ? -position_ - kWordSize : position_ - kWordSize;
  }

  bool IsBound() const { return position_ < 0; }
  bool IsUnused() const { return position_ == 0; }
  bool IsLinked() const { return position_ > 0; }
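
  // Encoding sketch (illustrative): position_ == 0 means unused, a linked
  // label stores position + kWordSize (> 0), and a bound label stores
  // -position - kWordSize (< 0). E.g. with kWordSize == 4, BindTo(8) stores
  // -12, and Position() recovers -(-12) - 4 == 8.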

 private:
  intptr_t position_;

  void Reinitialize() { position_ = 0; }

  void BindTo(intptr_t position) {
    ASSERT(!IsBound());
    position_ = -position - kWordSize;
    ASSERT(IsBound());
  }

  void LinkTo(intptr_t position) {
    ASSERT(!IsBound());
    position_ = position + kWordSize;
    ASSERT(IsLinked());
  }

  friend class Assembler;
  DISALLOW_COPY_AND_ASSIGN(Label);
};


// There is no dedicated status register on MIPS, but Condition values are used
// and passed around by the intermediate language, so we need a Condition type.
// We delay code generation of a comparison that would result in a traditional
// condition code in the status register by keeping both register operands and
// the relational operator between them as the Condition.
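//
// Illustrative example (not from the original source): Condition(A0, IMM, EQ,
// 42) records "A0 == 42" without emitting any code; BranchOnCondition() later
// materializes the immediate with addiu(AT, ZR, Immediate(42)) and emits
// beq(A0, AT, label).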
class Condition : public ValueObject {
 public:
  enum Bits {
    kLeftPos = 0,
    kLeftSize = 6,
    kRightPos = kLeftPos + kLeftSize,
    kRightSize = 6,
    kRelOpPos = kRightPos + kRightSize,
    kRelOpSize = 4,
    kImmPos = kRelOpPos + kRelOpSize,
    kImmSize = 16,
  };

  class LeftBits : public BitField<uword, Register, kLeftPos, kLeftSize> {};
  class RightBits : public BitField<uword, Register, kRightPos, kRightSize> {};
  class RelOpBits
      : public BitField<uword, RelationOperator, kRelOpPos, kRelOpSize> {};
  class ImmBits : public BitField<uword, uint16_t, kImmPos, kImmSize> {};

  Register left() const { return LeftBits::decode(bits_); }
  Register right() const { return RightBits::decode(bits_); }
  RelationOperator rel_op() const { return RelOpBits::decode(bits_); }
  int16_t imm() const { return static_cast<int16_t>(ImmBits::decode(bits_)); }

  static bool IsValidImm(int32_t value) {
    // We want both value and value + 1 to fit in an int16_t.
    return (-0x08000 <= value) && (value < 0x7fff);
  }

  void set_rel_op(RelationOperator value) {
    ASSERT(IsValidRelOp(value));
    bits_ = RelOpBits::update(value, bits_);
  }

  // Uninitialized condition.
  Condition() : ValueObject(), bits_(0) {}

  // Copy constructor.
  Condition(const Condition& other) : ValueObject(), bits_(other.bits_) {}

  // Copy assignment operator.
  Condition& operator=(const Condition& other) {
    bits_ = other.bits_;
    return *this;
  }

  Condition(Register left,
            Register right,
            RelationOperator rel_op,
            int16_t imm = 0) {
    // At most one constant, ZR or immediate.
    ASSERT(!(((left == ZR) || (left == IMM)) &&
             ((right == ZR) || (right == IMM))));
    // Non-zero immediate value is only allowed for IMM.
    ASSERT((imm != 0) == ((left == IMM) || (right == IMM)));
    set_left(left);
    set_right(right);
    set_rel_op(rel_op);
    set_imm(imm);
  }

 private:
  static bool IsValidRelOp(RelationOperator value) {
    return (AL <= value) && (value <= ULE);
  }

  static bool IsValidRegister(Register value) {
    return (ZR <= value) && (value <= IMM) && (value != AT);
  }

  void set_left(Register value) {
    ASSERT(IsValidRegister(value));
    bits_ = LeftBits::update(value, bits_);
  }

  void set_right(Register value) {
    ASSERT(IsValidRegister(value));
    bits_ = RightBits::update(value, bits_);
  }

  void set_imm(int16_t value) {
    ASSERT(IsValidImm(value));
    bits_ = ImmBits::update(static_cast<uint16_t>(value), bits_);
  }

  uword bits_;
};


class Assembler : public ValueObject {
 public:
  explicit Assembler(bool use_far_branches = false)
      : buffer_(),
        prologue_offset_(-1),
        has_single_entry_point_(true),
        use_far_branches_(use_far_branches),
        delay_slot_available_(false),
        in_delay_slot_(false),
        comments_(),
        constant_pool_allowed_(true) {}
  ~Assembler() {}

  void PopRegister(Register r) { Pop(r); }

  void Bind(Label* label);
  void Jump(Label* label) { b(label); }

  // Misc. functionality
  intptr_t CodeSize() const { return buffer_.Size(); }
  intptr_t prologue_offset() const { return prologue_offset_; }
  bool has_single_entry_point() const { return has_single_entry_point_; }

  // Count the fixups that produce a pointer offset, without processing
  // the fixups.
  intptr_t CountPointerOffsets() const { return buffer_.CountPointerOffsets(); }

  const ZoneGrowableArray<intptr_t>& GetPointerOffsets() const {
    return buffer_.pointer_offsets();
  }

  ObjectPoolWrapper& object_pool_wrapper() { return object_pool_wrapper_; }

  RawObjectPool* MakeObjectPool() {
    return object_pool_wrapper_.MakeObjectPool();
  }

  void FinalizeInstructions(const MemoryRegion& region) {
    buffer_.FinalizeInstructions(region);
  }

  bool use_far_branches() const {
    return FLAG_use_far_branches || use_far_branches_;
  }

  void set_use_far_branches(bool b) { use_far_branches_ = b; }

  void EnterFrame();
  void LeaveFrameAndReturn();

  // Set up a stub frame so that the stack traversal code can easily identify
  // a stub frame.
  void EnterStubFrame(intptr_t frame_size = 0);
  void LeaveStubFrame();
  // A separate macro for when a Ret immediately follows, so that we can use
  // the branch delay slot.
  void LeaveStubFrameAndReturn(Register ra = RA);

  void MonomorphicCheckedEntry();

  void UpdateAllocationStats(intptr_t cid,
                             Register temp_reg,
                             Heap::Space space);

  void UpdateAllocationStatsWithSize(intptr_t cid,
                                     Register size_reg,
                                     Register temp_reg,
                                     Heap::Space space);


  void MaybeTraceAllocation(intptr_t cid, Register temp_reg, Label* trace);

  // Inlined allocation of an instance of class 'cls'; the code has no runtime
  // calls. Jump to 'failure' if the instance cannot be allocated here.
  // Allocated instance is returned in 'instance_reg'.
  // Only the tags field of the object is initialized.
  void TryAllocate(const Class& cls,
                   Label* failure,
                   Register instance_reg,
                   Register temp_reg);

  void TryAllocateArray(intptr_t cid,
                        intptr_t instance_size,
                        Label* failure,
                        Register instance,
                        Register end_address,
                        Register temp1,
                        Register temp2);

  // Debugging and bringup support.
  void Stop(const char* message);
  void Unimplemented(const char* message);
  void Untested(const char* message);
  void Unreachable(const char* message);

  static void InitializeMemoryWithBreakpoints(uword data, intptr_t length);

  void Comment(const char* format, ...) PRINTF_ATTRIBUTE(2, 3);
  static bool EmittingComments();

  const Code::Comments& GetCodeComments() const;

  static const char* RegisterName(Register reg);

  static const char* FpuRegisterName(FpuRegister reg);

  void SetPrologueOffset() {
    if (prologue_offset_ == -1) {
      prologue_offset_ = CodeSize();
    }
  }

  // A utility to be able to assemble an instruction into the delay slot.
  Assembler* delay_slot() {
    ASSERT(delay_slot_available_);
    ASSERT(buffer_.Load<int32_t>(buffer_.GetPosition() - sizeof(int32_t)) ==
           Instr::kNopInstruction);
    buffer_.Remit<int32_t>();
    delay_slot_available_ = false;
    in_delay_slot_ = true;
    return this;
  }
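
  // Usage sketch (illustrative): every branch below emits a trailing nop via
  // EmitBranchDelayNop(); delay_slot() removes that nop so the next emitted
  // instruction lands in the branch delay slot, e.g.
  //   jr(RA);
  //   delay_slot()->mov(V0, T0);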

  // CPU instructions in alphabetical order.
  void addd(DRegister dd, DRegister ds, DRegister dt) {
    // DRegisters start at the even FRegisters.
    FRegister fd = static_cast<FRegister>(dd * 2);
    FRegister fs = static_cast<FRegister>(ds * 2);
    FRegister ft = static_cast<FRegister>(dt * 2);
    EmitFpuRType(COP1, FMT_D, ft, fs, fd, COP1_ADD);
  }

  void addiu(Register rt, Register rs, const Immediate& imm) {
    ASSERT(Utils::IsInt(kImmBits, imm.value()));
    const uint16_t imm_value = static_cast<uint16_t>(imm.value());
    EmitIType(ADDIU, rs, rt, imm_value);
  }

  void addu(Register rd, Register rs, Register rt) {
    EmitRType(SPECIAL, rs, rt, rd, 0, ADDU);
  }

  void and_(Register rd, Register rs, Register rt) {
    EmitRType(SPECIAL, rs, rt, rd, 0, AND);
  }

  void andi(Register rt, Register rs, const Immediate& imm) {
    ASSERT(Utils::IsUint(kImmBits, imm.value()));
    const uint16_t imm_value = static_cast<uint16_t>(imm.value());
    EmitIType(ANDI, rs, rt, imm_value);
  }

  // Unconditional branch.
  void b(Label* l) { beq(R0, R0, l); }

  void bal(Label* l) {
    ASSERT(!in_delay_slot_);
    EmitRegImmBranch(BGEZAL, R0, l);
    EmitBranchDelayNop();
  }

  // Branch on floating point false.
  void bc1f(Label* l) {
    EmitFpuBranch(false, l);
    EmitBranchDelayNop();
  }

  // Branch on floating point true.
  void bc1t(Label* l) {
    EmitFpuBranch(true, l);
    EmitBranchDelayNop();
  }

  // Branch if equal.
  void beq(Register rs, Register rt, Label* l) {
    ASSERT(!in_delay_slot_);
    EmitBranch(BEQ, rs, rt, l);
    EmitBranchDelayNop();
  }

  // Branch if equal, likely taken.
  // Delay slot executed only when branch taken.
  void beql(Register rs, Register rt, Label* l) {
    ASSERT(!in_delay_slot_);
    EmitBranch(BEQL, rs, rt, l);
    EmitBranchDelayNop();
  }

  // Branch if rs >= 0.
  void bgez(Register rs, Label* l) {
    ASSERT(!in_delay_slot_);
    EmitRegImmBranch(BGEZ, rs, l);
    EmitBranchDelayNop();
  }

  // Branch if rs >= 0, likely taken.
  // Delay slot executed only when branch taken.
  void bgezl(Register rs, Label* l) {
    ASSERT(!in_delay_slot_);
    EmitRegImmBranch(BGEZL, rs, l);
    EmitBranchDelayNop();
  }

  // Branch if rs > 0.
  void bgtz(Register rs, Label* l) {
    ASSERT(!in_delay_slot_);
    EmitBranch(BGTZ, rs, R0, l);
    EmitBranchDelayNop();
  }

  // Branch if rs > 0, likely taken.
  // Delay slot executed only when branch taken.
  void bgtzl(Register rs, Label* l) {
    ASSERT(!in_delay_slot_);
    EmitBranch(BGTZL, rs, R0, l);
    EmitBranchDelayNop();
  }

  // Branch if rs <= 0.
  void blez(Register rs, Label* l) {
    ASSERT(!in_delay_slot_);
    EmitBranch(BLEZ, rs, R0, l);
    EmitBranchDelayNop();
  }

  // Branch if rs <= 0, likely taken.
  // Delay slot executed only when branch taken.
  void blezl(Register rs, Label* l) {
    ASSERT(!in_delay_slot_);
    EmitBranch(BLEZL, rs, R0, l);
    EmitBranchDelayNop();
  }

  // Branch if rs < 0.
  void bltz(Register rs, Label* l) {
    ASSERT(!in_delay_slot_);
    EmitRegImmBranch(BLTZ, rs, l);
    EmitBranchDelayNop();
  }

  // Branch if rs < 0, likely taken.
  // Delay slot executed only when branch taken.
  void bltzl(Register rs, Label* l) {
    ASSERT(!in_delay_slot_);
    EmitRegImmBranch(BLTZL, rs, l);
    EmitBranchDelayNop();
  }

  // Branch if not equal.
  void bne(Register rs, Register rt, Label* l) {
    ASSERT(!in_delay_slot_);  // Jump within a delay slot is not supported.
    EmitBranch(BNE, rs, rt, l);
    EmitBranchDelayNop();
  }

  // Branch if not equal, likely taken.
  // Delay slot executed only when branch taken.
  void bnel(Register rs, Register rt, Label* l) {
    ASSERT(!in_delay_slot_);  // Jump within a delay slot is not supported.
    EmitBranch(BNEL, rs, rt, l);
    EmitBranchDelayNop();
  }

  static int32_t BreakEncoding(int32_t code) {
    ASSERT(Utils::IsUint(20, code));
    return SPECIAL << kOpcodeShift | code << kBreakCodeShift |
           BREAK << kFunctionShift;
  }
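
  // Layout (VolII-A, BREAK): the SPECIAL opcode, a 20-bit code field, and the
  // BREAK function code. BreakEncoding(0) doubles as the filler instruction
  // returned by GetBreakInstructionFiller() below.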


  void break_(int32_t code) { Emit(BreakEncoding(code)); }

  static uword GetBreakInstructionFiller() { return BreakEncoding(0); }

  // FPU compare, always false.
  void cfd(DRegister ds, DRegister dt) {
    FRegister fs = static_cast<FRegister>(ds * 2);
    FRegister ft = static_cast<FRegister>(dt * 2);
    EmitFpuRType(COP1, FMT_D, ft, fs, F0, COP1_C_F);
  }

  // FPU compare, true if unordered, i.e. one is NaN.
  void cund(DRegister ds, DRegister dt) {
    FRegister fs = static_cast<FRegister>(ds * 2);
    FRegister ft = static_cast<FRegister>(dt * 2);
    EmitFpuRType(COP1, FMT_D, ft, fs, F0, COP1_C_UN);
  }

  // FPU compare, true if equal.
  void ceqd(DRegister ds, DRegister dt) {
    FRegister fs = static_cast<FRegister>(ds * 2);
    FRegister ft = static_cast<FRegister>(dt * 2);
    EmitFpuRType(COP1, FMT_D, ft, fs, F0, COP1_C_EQ);
  }

  // FPU compare, true if unordered or equal.
  void cueqd(DRegister ds, DRegister dt) {
    FRegister fs = static_cast<FRegister>(ds * 2);
    FRegister ft = static_cast<FRegister>(dt * 2);
    EmitFpuRType(COP1, FMT_D, ft, fs, F0, COP1_C_UEQ);
  }

  // FPU compare, true if less than.
  void coltd(DRegister ds, DRegister dt) {
    FRegister fs = static_cast<FRegister>(ds * 2);
    FRegister ft = static_cast<FRegister>(dt * 2);
    EmitFpuRType(COP1, FMT_D, ft, fs, F0, COP1_C_OLT);
  }

  // FPU compare, true if unordered or less than.
  void cultd(DRegister ds, DRegister dt) {
    FRegister fs = static_cast<FRegister>(ds * 2);
    FRegister ft = static_cast<FRegister>(dt * 2);
    EmitFpuRType(COP1, FMT_D, ft, fs, F0, COP1_C_ULT);
  }

  // FPU compare, true if less or equal.
  void coled(DRegister ds, DRegister dt) {
    FRegister fs = static_cast<FRegister>(ds * 2);
    FRegister ft = static_cast<FRegister>(dt * 2);
    EmitFpuRType(COP1, FMT_D, ft, fs, F0, COP1_C_OLE);
  }

  // FPU compare, true if unordered or less or equal.
  void culed(DRegister ds, DRegister dt) {
    FRegister fs = static_cast<FRegister>(ds * 2);
    FRegister ft = static_cast<FRegister>(dt * 2);
    EmitFpuRType(COP1, FMT_D, ft, fs, F0, COP1_C_ULE);
  }

  void clo(Register rd, Register rs) {
    EmitRType(SPECIAL2, rs, rd, rd, 0, CLO);
  }

  void clz(Register rd, Register rs) {
    EmitRType(SPECIAL2, rs, rd, rd, 0, CLZ);
  }

  // Convert a double in ds to a 32-bit signed int in fd rounding towards 0.
  void truncwd(FRegister fd, DRegister ds) {
    FRegister fs = static_cast<FRegister>(ds * 2);
    EmitFpuRType(COP1, FMT_D, F0, fs, fd, COP1_TRUNC_W);
  }

  // Convert a 32-bit float in fs to a 64-bit double in dd.
  void cvtds(DRegister dd, FRegister fs) {
    FRegister fd = static_cast<FRegister>(dd * 2);
    EmitFpuRType(COP1, FMT_S, F0, fs, fd, COP1_CVT_D);
  }

  // Convert a 32-bit signed int in fs to a 64-bit double in dd.
  void cvtdw(DRegister dd, FRegister fs) {
    FRegister fd = static_cast<FRegister>(dd * 2);
    EmitFpuRType(COP1, FMT_W, F0, fs, fd, COP1_CVT_D);
  }

  // Convert a 64-bit double in ds to a 32-bit float in fd.
  void cvtsd(FRegister fd, DRegister ds) {
    FRegister fs = static_cast<FRegister>(ds * 2);
    EmitFpuRType(COP1, FMT_D, F0, fs, fd, COP1_CVT_S);
  }

  void div(Register rs, Register rt) { EmitRType(SPECIAL, rs, rt, R0, 0, DIV); }

  void divd(DRegister dd, DRegister ds, DRegister dt) {
    FRegister fd = static_cast<FRegister>(dd * 2);
    FRegister fs = static_cast<FRegister>(ds * 2);
    FRegister ft = static_cast<FRegister>(dt * 2);
    EmitFpuRType(COP1, FMT_D, ft, fs, fd, COP1_DIV);
  }

  void divu(Register rs, Register rt) {
    EmitRType(SPECIAL, rs, rt, R0, 0, DIVU);
  }

  void jalr(Register rs, Register rd = RA) {
    ASSERT(rs != rd);
    ASSERT(!in_delay_slot_);  // Jump within a delay slot is not supported.
    EmitRType(SPECIAL, rs, R0, rd, 0, JALR);
    EmitBranchDelayNop();
  }

  void jr(Register rs) {
    ASSERT(!in_delay_slot_);  // Jump within a delay slot is not supported.
    EmitRType(SPECIAL, rs, R0, R0, 0, JR);
    EmitBranchDelayNop();
  }

  void lb(Register rt, const Address& addr) { EmitLoadStore(LB, rt, addr); }

  void lbu(Register rt, const Address& addr) { EmitLoadStore(LBU, rt, addr); }

  void ldc1(DRegister dt, const Address& addr) {
    FRegister ft = static_cast<FRegister>(dt * 2);
    EmitFpuLoadStore(LDC1, ft, addr);
  }

  void lh(Register rt, const Address& addr) { EmitLoadStore(LH, rt, addr); }

  void lhu(Register rt, const Address& addr) { EmitLoadStore(LHU, rt, addr); }

  void ll(Register rt, const Address& addr) { EmitLoadStore(LL, rt, addr); }

  void lui(Register rt, const Immediate& imm) {
    ASSERT(Utils::IsUint(kImmBits, imm.value()));
    const uint16_t imm_value = static_cast<uint16_t>(imm.value());
    EmitIType(LUI, R0, rt, imm_value);
  }

  void lw(Register rt, const Address& addr) { EmitLoadStore(LW, rt, addr); }

  void lwc1(FRegister ft, const Address& addr) {
    EmitFpuLoadStore(LWC1, ft, addr);
  }

  void madd(Register rs, Register rt) {
    EmitRType(SPECIAL2, rs, rt, R0, 0, MADD);
  }

  void maddu(Register rs, Register rt) {
    EmitRType(SPECIAL2, rs, rt, R0, 0, MADDU);
  }

  void mfc1(Register rt, FRegister fs) {
    Emit(COP1 << kOpcodeShift | COP1_MF << kCop1SubShift | rt << kRtShift |
         fs << kFsShift);
  }

  void mfhi(Register rd) { EmitRType(SPECIAL, R0, R0, rd, 0, MFHI); }

  void mflo(Register rd) { EmitRType(SPECIAL, R0, R0, rd, 0, MFLO); }

  void mov(Register rd, Register rs) { or_(rd, rs, ZR); }

  void movd(DRegister dd, DRegister ds) {
    FRegister fd = static_cast<FRegister>(dd * 2);
    FRegister fs = static_cast<FRegister>(ds * 2);
    EmitFpuRType(COP1, FMT_D, F0, fs, fd, COP1_MOV);
  }

  // Move if floating point false.
  void movf(Register rd, Register rs) {
    EmitRType(SPECIAL, rs, R0, rd, 0, MOVCI);
  }

  void movn(Register rd, Register rs, Register rt) {
    EmitRType(SPECIAL, rs, rt, rd, 0, MOVN);
  }

  // Move if floating point true.
  void movt(Register rd, Register rs) {
    EmitRType(SPECIAL, rs, R1, rd, 0, MOVCI);
  }

  // rd <- (rt == 0) ? rs : rd;
  void movz(Register rd, Register rs, Register rt) {
    EmitRType(SPECIAL, rs, rt, rd, 0, MOVZ);
  }

  void movs(FRegister fd, FRegister fs) {
    EmitFpuRType(COP1, FMT_S, F0, fs, fd, COP1_MOV);
  }

  void mtc1(Register rt, FRegister fs) {
    Emit(COP1 << kOpcodeShift | COP1_MT << kCop1SubShift | rt << kRtShift |
         fs << kFsShift);
  }

  void mthi(Register rs) { EmitRType(SPECIAL, rs, R0, R0, 0, MTHI); }

  void mtlo(Register rs) { EmitRType(SPECIAL, rs, R0, R0, 0, MTLO); }

  void muld(DRegister dd, DRegister ds, DRegister dt) {
    FRegister fd = static_cast<FRegister>(dd * 2);
    FRegister fs = static_cast<FRegister>(ds * 2);
    FRegister ft = static_cast<FRegister>(dt * 2);
    EmitFpuRType(COP1, FMT_D, ft, fs, fd, COP1_MUL);
  }

  void mult(Register rs, Register rt) {
    EmitRType(SPECIAL, rs, rt, R0, 0, MULT);
  }

  void multu(Register rs, Register rt) {
    EmitRType(SPECIAL, rs, rt, R0, 0, MULTU);
  }

  void negd(DRegister dd, DRegister ds) {
    FRegister fd = static_cast<FRegister>(dd * 2);
    FRegister fs = static_cast<FRegister>(ds * 2);
    EmitFpuRType(COP1, FMT_D, F0, fs, fd, COP1_NEG);
  }

  void nop() { Emit(Instr::kNopInstruction); }

  void nor(Register rd, Register rs, Register rt) {
    EmitRType(SPECIAL, rs, rt, rd, 0, NOR);
  }

  void or_(Register rd, Register rs, Register rt) {
    EmitRType(SPECIAL, rs, rt, rd, 0, OR);
  }

  void ori(Register rt, Register rs, const Immediate& imm) {
    ASSERT(Utils::IsUint(kImmBits, imm.value()));
    const uint16_t imm_value = static_cast<uint16_t>(imm.value());
    EmitIType(ORI, rs, rt, imm_value);
  }

  void sb(Register rt, const Address& addr) { EmitLoadStore(SB, rt, addr); }

  // rt = 1 on success, 0 on failure.
  void sc(Register rt, const Address& addr) { EmitLoadStore(SC, rt, addr); }

  void sdc1(DRegister dt, const Address& addr) {
    FRegister ft = static_cast<FRegister>(dt * 2);
    EmitFpuLoadStore(SDC1, ft, addr);
  }

  void sh(Register rt, const Address& addr) { EmitLoadStore(SH, rt, addr); }

  void sll(Register rd, Register rt, int sa) {
    EmitRType(SPECIAL, R0, rt, rd, sa, SLL);
  }

  void sllv(Register rd, Register rt, Register rs) {
    EmitRType(SPECIAL, rs, rt, rd, 0, SLLV);
  }

  void slt(Register rd, Register rs, Register rt) {
    EmitRType(SPECIAL, rs, rt, rd, 0, SLT);
  }

  void slti(Register rt, Register rs, const Immediate& imm) {
    ASSERT(Utils::IsInt(kImmBits, imm.value()));
    const uint16_t imm_value = static_cast<uint16_t>(imm.value());
    EmitIType(SLTI, rs, rt, imm_value);
  }

  // Although the imm argument is an int32_t, it is interpreted as a uint32_t.
  // For example, -1 stands for 0xffffffffUL: it is encoded as 0xffff in the
  // instruction imm field and is then sign extended back to 0xffffffffUL.
  void sltiu(Register rt, Register rs, const Immediate& imm) {
    ASSERT(Utils::IsInt(kImmBits, imm.value()));
    const uint16_t imm_value = static_cast<uint16_t>(imm.value());
    EmitIType(SLTIU, rs, rt, imm_value);
  }
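
  // E.g. sltiu(rt, rs, Immediate(-1)) computes rt = (rs < 0xffffffff)
  // unsigned, i.e. rt is 1 for every rs except 0xffffffff itself.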

  void sltu(Register rd, Register rs, Register rt) {
    EmitRType(SPECIAL, rs, rt, rd, 0, SLTU);
  }

  void sqrtd(DRegister dd, DRegister ds) {
    FRegister fd = static_cast<FRegister>(dd * 2);
    FRegister fs = static_cast<FRegister>(ds * 2);
    EmitFpuRType(COP1, FMT_D, F0, fs, fd, COP1_SQRT);
  }

  void sra(Register rd, Register rt, int sa) {
    EmitRType(SPECIAL, R0, rt, rd, sa, SRA);
  }

  void srav(Register rd, Register rt, Register rs) {
    EmitRType(SPECIAL, rs, rt, rd, 0, SRAV);
  }

  void srl(Register rd, Register rt, int sa) {
    EmitRType(SPECIAL, R0, rt, rd, sa, SRL);
  }

  void srlv(Register rd, Register rt, Register rs) {
    EmitRType(SPECIAL, rs, rt, rd, 0, SRLV);
  }

  void subd(DRegister dd, DRegister ds, DRegister dt) {
    FRegister fd = static_cast<FRegister>(dd * 2);
    FRegister fs = static_cast<FRegister>(ds * 2);
    FRegister ft = static_cast<FRegister>(dt * 2);
    EmitFpuRType(COP1, FMT_D, ft, fs, fd, COP1_SUB);
  }

  void subu(Register rd, Register rs, Register rt) {
    EmitRType(SPECIAL, rs, rt, rd, 0, SUBU);
  }

  void sw(Register rt, const Address& addr) { EmitLoadStore(SW, rt, addr); }

  void swc1(FRegister ft, const Address& addr) {
    EmitFpuLoadStore(SWC1, ft, addr);
  }

  void xori(Register rt, Register rs, const Immediate& imm) {
    ASSERT(Utils::IsUint(kImmBits, imm.value()));
    const uint16_t imm_value = static_cast<uint16_t>(imm.value());
    EmitIType(XORI, rs, rt, imm_value);
  }

  void xor_(Register rd, Register rs, Register rt) {
    EmitRType(SPECIAL, rs, rt, rd, 0, XOR);
  }

  // Macros in alphabetical order.

  // Addition of rs and rt with the result placed in rd.
  // After, ro < 0 if there was signed overflow, ro >= 0 otherwise.
  // rd and ro must not be TMP.
  // ro must be different from all the other registers.
  // If rd, rs, and rt are the same register, then a scratch register different
  // from the other registers is needed.
  void AdduDetectOverflow(Register rd,
                          Register rs,
                          Register rt,
                          Register ro,
                          Register scratch = kNoRegister);

  // ro must be different from rd and rs.
  // rd and ro must not be TMP.
  // If rd and rs are the same, a scratch register different from the other
  // registers is needed.
  void AddImmediateDetectOverflow(Register rd,
                                  Register rs,
                                  int32_t imm,
                                  Register ro,
                                  Register scratch = kNoRegister) {
    ASSERT(!in_delay_slot_);
    LoadImmediate(rd, imm);
    AdduDetectOverflow(rd, rs, rd, ro, scratch);
  }

  // Subtraction of rt from rs (rs - rt) with the result placed in rd.
  // After, ro < 0 if there was signed overflow, ro >= 0 otherwise.
  // None of rd, rs, rt, or ro may be TMP.
  // ro must be different from the other registers.
  void SubuDetectOverflow(Register rd, Register rs, Register rt, Register ro);

  // ro must be different from rd and rs.
  // None of rd, rs, rt, or ro may be TMP.
  void SubImmediateDetectOverflow(Register rd,
                                  Register rs,
                                  int32_t imm,
                                  Register ro) {
    ASSERT(!in_delay_slot_);
    LoadImmediate(rd, imm);
    SubuDetectOverflow(rd, rs, rd, ro);
  }

  void Branch(const StubEntry& stub_entry, Register pp = PP);

  void BranchLink(const StubEntry& stub_entry,
                  Patchability patchable = kNotPatchable);

  void BranchLinkPatchable(const StubEntry& stub_entry);
  void BranchLinkToRuntime();

  // Emit a call that shares its object pool entries with other calls
  // that have the same equivalence marker.
  void BranchLinkWithEquivalence(const StubEntry& stub_entry,
                                 const Object& equivalence);

  void Drop(intptr_t stack_elements) {
    ASSERT(stack_elements >= 0);
    if (stack_elements > 0) {
      addiu(SP, SP, Immediate(stack_elements * kWordSize));
    }
  }

  void LoadPoolPointer(Register reg = PP) {
    ASSERT(!in_delay_slot_);
    CheckCodePointer();
    lw(reg, FieldAddress(CODE_REG, Code::object_pool_offset()));
    set_constant_pool_allowed(reg == PP);
  }

  void CheckCodePointer();

  void RestoreCodePointer();

  void LoadImmediate(Register rd, int32_t value) {
    ASSERT(!in_delay_slot_);
    if (Utils::IsInt(kImmBits, value)) {
      addiu(rd, ZR, Immediate(value));
    } else {
      const uint16_t low = Utils::Low16Bits(value);
      const uint16_t high = Utils::High16Bits(value);
      lui(rd, Immediate(high));
      if (low != 0) {
        ori(rd, rd, Immediate(low));
      }
    }
  }
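
  // E.g. LoadImmediate(T0, 42) emits a single addiu(T0, ZR, 42), while
  // LoadImmediate(T0, 0x12345678) emits lui(T0, 0x1234) followed by
  // ori(T0, T0, 0x5678).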

  void LoadImmediate(DRegister rd, double value) {
    ASSERT(!in_delay_slot_);
    FRegister frd = static_cast<FRegister>(rd * 2);
    const int64_t ival = bit_cast<int64_t, double>(value);
    const int32_t low = Utils::Low32Bits(ival);
    const int32_t high = Utils::High32Bits(ival);
    if (low != 0) {
      LoadImmediate(TMP, low);
      mtc1(TMP, frd);
    } else {
      mtc1(ZR, frd);
    }

    if (high != 0) {
      LoadImmediate(TMP, high);
      mtc1(TMP, static_cast<FRegister>(frd + 1));
    } else {
      mtc1(ZR, static_cast<FRegister>(frd + 1));
    }
  }

  void LoadImmediate(FRegister rd, float value) {
    ASSERT(!in_delay_slot_);
    const int32_t ival = bit_cast<int32_t, float>(value);
    if (ival == 0) {
      mtc1(ZR, rd);
    } else {
      LoadImmediate(TMP, ival);
      mtc1(TMP, rd);
    }
  }

  void AddImmediate(Register rd, Register rs, int32_t value) {
    ASSERT(!in_delay_slot_);
    if ((value == 0) && (rd == rs)) return;
    // If value is 0, we still want to move rs to rd if they aren't the same.
    if (Utils::IsInt(kImmBits, value)) {
      addiu(rd, rs, Immediate(value));
    } else {
      LoadImmediate(TMP, value);
      addu(rd, rs, TMP);
    }
  }

  void AddImmediate(Register rd, int32_t value) {
    ASSERT(!in_delay_slot_);
    AddImmediate(rd, rd, value);
  }

  void AndImmediate(Register rd, Register rs, int32_t imm) {
    ASSERT(!in_delay_slot_);
    if (imm == 0) {
      mov(rd, ZR);
      return;
    }

    if (Utils::IsUint(kImmBits, imm)) {
      andi(rd, rs, Immediate(imm));
    } else {
      LoadImmediate(TMP, imm);
      and_(rd, rs, TMP);
    }
  }

  void OrImmediate(Register rd, Register rs, int32_t imm) {
    ASSERT(!in_delay_slot_);
    if (imm == 0) {
      mov(rd, rs);
      return;
    }

    if (Utils::IsUint(kImmBits, imm)) {
      ori(rd, rs, Immediate(imm));
    } else {
      LoadImmediate(TMP, imm);
      or_(rd, rs, TMP);
    }
  }

  void XorImmediate(Register rd, Register rs, int32_t imm) {
    ASSERT(!in_delay_slot_);
    if (imm == 0) {
      mov(rd, rs);
      return;
    }

    if (Utils::IsUint(kImmBits, imm)) {
      xori(rd, rs, Immediate(imm));
    } else {
      LoadImmediate(TMP, imm);
      xor_(rd, rs, TMP);
    }
  }

  Register LoadConditionOperand(Register rd,
                                const Object& operand,
                                int16_t* imm) {
    if (operand.IsSmi()) {
      const int32_t val = reinterpret_cast<int32_t>(operand.raw());
      if (val == 0) {
        return ZR;
      } else if (Condition::IsValidImm(val)) {
        ASSERT(*imm == 0);
        *imm = val;
        return IMM;
      }
    }
    LoadObject(rd, operand);
    return rd;
  }

  // Branch to label if condition is true.
  void BranchOnCondition(Condition cond, Label* l) {
    ASSERT(!in_delay_slot_);
    Register left = cond.left();
    Register right = cond.right();
    RelationOperator rel_op = cond.rel_op();
    switch (rel_op) {
      case NV:
        return;
      case AL:
        b(l);
        return;
      case EQ:  // fall through.
      case NE: {
        if (left == IMM) {
          addiu(AT, ZR, Immediate(cond.imm()));
          left = AT;
        } else if (right == IMM) {
          addiu(AT, ZR, Immediate(cond.imm()));
          right = AT;
        }
        if (rel_op == EQ) {
          beq(left, right, l);
        } else {
          bne(left, right, l);
        }
        break;
      }
      case GT: {
        if (left == ZR) {
          bltz(right, l);
        } else if (right == ZR) {
          bgtz(left, l);
        } else if (left == IMM) {
          slti(AT, right, Immediate(cond.imm()));
          bne(AT, ZR, l);
        } else if (right == IMM) {
          slti(AT, left, Immediate(cond.imm() + 1));
          beq(AT, ZR, l);
        } else {
          slt(AT, right, left);
          bne(AT, ZR, l);
        }
        break;
      }
      case GE: {
        if (left == ZR) {
          blez(right, l);
        } else if (right == ZR) {
          bgez(left, l);
        } else if (left == IMM) {
          slti(AT, right, Immediate(cond.imm() + 1));
          bne(AT, ZR, l);
        } else if (right == IMM) {
          slti(AT, left, Immediate(cond.imm()));
          beq(AT, ZR, l);
        } else {
          slt(AT, left, right);
          beq(AT, ZR, l);
        }
        break;
      }
      case LT: {
        if (left == ZR) {
          bgtz(right, l);
        } else if (right == ZR) {
          bltz(left, l);
        } else if (left == IMM) {
          slti(AT, right, Immediate(cond.imm() + 1));
          beq(AT, ZR, l);
        } else if (right == IMM) {
          slti(AT, left, Immediate(cond.imm()));
          bne(AT, ZR, l);
        } else {
          slt(AT, left, right);
          bne(AT, ZR, l);
        }
        break;
      }
      case LE: {
        if (left == ZR) {
          bgez(right, l);
        } else if (right == ZR) {
          blez(left, l);
        } else if (left == IMM) {
          slti(AT, right, Immediate(cond.imm()));
          beq(AT, ZR, l);
        } else if (right == IMM) {
          slti(AT, left, Immediate(cond.imm() + 1));
          bne(AT, ZR, l);
        } else {
          slt(AT, right, left);
          beq(AT, ZR, l);
        }
        break;
      }
      case UGT: {
        ASSERT((left != IMM) && (right != IMM));  // No unsigned constants used.
        if (left == ZR) {
          // NV: Never branch. Fall through.
        } else if (right == ZR) {
          bne(left, ZR, l);
        } else {
          sltu(AT, right, left);
          bne(AT, ZR, l);
        }
        break;
      }
      case UGE: {
        ASSERT((left != IMM) && (right != IMM));  // No unsigned constants used.
        if (left == ZR) {
          beq(right, ZR, l);
        } else if (right == ZR) {
          // AL: Always branch to l.
          beq(ZR, ZR, l);
        } else {
          sltu(AT, left, right);
          beq(AT, ZR, l);
        }
        break;
      }
      case ULT: {
        ASSERT((left != IMM) && (right != IMM));  // No unsigned constants used.
        if (left == ZR) {
          bne(right, ZR, l);
        } else if (right == ZR) {
          // NV: Never branch. Fall through.
        } else {
          sltu(AT, left, right);
          bne(AT, ZR, l);
        }
        break;
      }
      case ULE: {
        ASSERT((left != IMM) && (right != IMM));  // No unsigned constants used.
        if (left == ZR) {
          // AL: Always branch to l.
          beq(ZR, ZR, l);
        } else if (right == ZR) {
          beq(left, ZR, l);
        } else {
          sltu(AT, right, left);
          beq(AT, ZR, l);
        }
        break;
      }
      default:
        UNREACHABLE();
    }
  }

  void BranchEqual(Register rd, Register rn, Label* l) { beq(rd, rn, l); }

  void BranchEqual(Register rd, const Immediate& imm, Label* l) {
    ASSERT(!in_delay_slot_);
    if (imm.value() == 0) {
      beq(rd, ZR, l);
    } else {
      ASSERT(rd != CMPRES2);
      LoadImmediate(CMPRES2, imm.value());
      beq(rd, CMPRES2, l);
    }
  }

  void BranchEqual(Register rd, const Object& object, Label* l) {
    ASSERT(!in_delay_slot_);
    ASSERT(rd != CMPRES2);
    LoadObject(CMPRES2, object);
    beq(rd, CMPRES2, l);
  }

  void BranchNotEqual(Register rd, Register rn, Label* l) { bne(rd, rn, l); }

  void BranchNotEqual(Register rd, const Immediate& imm, Label* l) {
    ASSERT(!in_delay_slot_);
    if (imm.value() == 0) {
      bne(rd, ZR, l);
    } else {
      ASSERT(rd != CMPRES2);
      LoadImmediate(CMPRES2, imm.value());
      bne(rd, CMPRES2, l);
    }
  }

  void BranchNotEqual(Register rd, const Object& object, Label* l) {
    ASSERT(!in_delay_slot_);
    ASSERT(rd != CMPRES2);
    LoadObject(CMPRES2, object);
    bne(rd, CMPRES2, l);
  }

  void BranchSignedGreater(Register rd, Register rs, Label* l) {
    ASSERT(!in_delay_slot_);
    slt(CMPRES2, rs, rd);  // CMPRES2 = rd > rs ? 1 : 0.
    bne(CMPRES2, ZR, l);
  }

  void BranchSignedGreater(Register rd, const Immediate& imm, Label* l) {
    ASSERT(!in_delay_slot_);
    if (imm.value() == 0) {
      bgtz(rd, l);
    } else {
      if (Utils::IsInt(kImmBits, imm.value() + 1)) {
        slti(CMPRES2, rd, Immediate(imm.value() + 1));
        beq(CMPRES2, ZR, l);
      } else {
        ASSERT(rd != CMPRES2);
        LoadImmediate(CMPRES2, imm.value());
        BranchSignedGreater(rd, CMPRES2, l);
      }
    }
  }

  void BranchUnsignedGreater(Register rd, Register rs, Label* l) {
    ASSERT(!in_delay_slot_);
    sltu(CMPRES2, rs, rd);
    bne(CMPRES2, ZR, l);
  }

  void BranchUnsignedGreater(Register rd, const Immediate& imm, Label* l) {
    ASSERT(!in_delay_slot_);
    if (imm.value() == 0) {
      BranchNotEqual(rd, Immediate(0), l);
    } else {
      if ((imm.value() != -1) && Utils::IsInt(kImmBits, imm.value() + 1)) {
        sltiu(CMPRES2, rd, Immediate(imm.value() + 1));
        beq(CMPRES2, ZR, l);
      } else {
        ASSERT(rd != CMPRES2);
        LoadImmediate(CMPRES2, imm.value());
        BranchUnsignedGreater(rd, CMPRES2, l);
      }
    }
  }

  void BranchSignedGreaterEqual(Register rd, Register rs, Label* l) {
    ASSERT(!in_delay_slot_);
    slt(CMPRES2, rd, rs);  // CMPRES2 = rd < rs ? 1 : 0.
    beq(CMPRES2, ZR, l);   // If CMPRES2 = 0, then rd >= rs.
  }

  void BranchSignedGreaterEqual(Register rd, const Immediate& imm, Label* l) {
    ASSERT(!in_delay_slot_);
    if (imm.value() == 0) {
      bgez(rd, l);
    } else {
      if (Utils::IsInt(kImmBits, imm.value())) {
        slti(CMPRES2, rd, imm);
        beq(CMPRES2, ZR, l);
      } else {
        ASSERT(rd != CMPRES2);
        LoadImmediate(CMPRES2, imm.value());
        BranchSignedGreaterEqual(rd, CMPRES2, l);
      }
    }
  }

  void BranchUnsignedGreaterEqual(Register rd, Register rs, Label* l) {
    ASSERT(!in_delay_slot_);
    sltu(CMPRES2, rd, rs);  // CMPRES2 = rd < rs ? 1 : 0.
    beq(CMPRES2, ZR, l);
  }

  void BranchUnsignedGreaterEqual(Register rd, const Immediate& imm, Label* l) {
    ASSERT(!in_delay_slot_);
    if (imm.value() == 0) {
      b(l);
    } else {
      if (Utils::IsInt(kImmBits, imm.value())) {
        sltiu(CMPRES2, rd, imm);
        beq(CMPRES2, ZR, l);
      } else {
        ASSERT(rd != CMPRES2);
        LoadImmediate(CMPRES2, imm.value());
        BranchUnsignedGreaterEqual(rd, CMPRES2, l);
      }
    }
  }

  void BranchSignedLess(Register rd, Register rs, Label* l) {
    ASSERT(!in_delay_slot_);
    BranchSignedGreater(rs, rd, l);
  }

  void BranchSignedLess(Register rd, const Immediate& imm, Label* l) {
    ASSERT(!in_delay_slot_);
    if (imm.value() == 0) {
      bltz(rd, l);
    } else {
      if (Utils::IsInt(kImmBits, imm.value())) {
        slti(CMPRES2, rd, imm);
        bne(CMPRES2, ZR, l);
      } else {
        ASSERT(rd != CMPRES2);
        LoadImmediate(CMPRES2, imm.value());
        BranchSignedGreater(CMPRES2, rd, l);
      }
    }
  }

  void BranchUnsignedLess(Register rd, Register rs, Label* l) {
    ASSERT(!in_delay_slot_);
    BranchUnsignedGreater(rs, rd, l);
  }

  void BranchUnsignedLess(Register rd, const Immediate& imm, Label* l) {
    ASSERT(!in_delay_slot_);
    if (imm.value() == 0) {
      // Never branch. Fall through.
    } else {
      if (Utils::IsInt(kImmBits, imm.value())) {
        sltiu(CMPRES2, rd, imm);
        bne(CMPRES2, ZR, l);
      } else {
        ASSERT(rd != CMPRES2);
        LoadImmediate(CMPRES2, imm.value());
        BranchUnsignedGreater(CMPRES2, rd, l);
      }
    }
  }

  void BranchSignedLessEqual(Register rd, Register rs, Label* l) {
    ASSERT(!in_delay_slot_);
    BranchSignedGreaterEqual(rs, rd, l);
  }

  void BranchSignedLessEqual(Register rd, const Immediate& imm, Label* l) {
    ASSERT(!in_delay_slot_);
    if (imm.value() == 0) {
      blez(rd, l);
    } else {
      if (Utils::IsInt(kImmBits, imm.value() + 1)) {
        slti(CMPRES2, rd, Immediate(imm.value() + 1));
        bne(CMPRES2, ZR, l);
      } else {
        ASSERT(rd != CMPRES2);
        LoadImmediate(CMPRES2, imm.value());
        BranchSignedGreaterEqual(CMPRES2, rd, l);
      }
    }
  }

  void BranchUnsignedLessEqual(Register rd, Register rs, Label* l) {
    ASSERT(!in_delay_slot_);
    BranchUnsignedGreaterEqual(rs, rd, l);
  }

  void BranchUnsignedLessEqual(Register rd, const Immediate& imm, Label* l) {
    ASSERT(!in_delay_slot_);
    if (imm.value() == 0) {
      beq(rd, ZR, l);
    } else {
      if ((imm.value() != -1) && Utils::IsInt(kImmBits, imm.value() + 1)) {
        sltiu(CMPRES2, rd, Immediate(imm.value() + 1));
        bne(CMPRES2, ZR, l);
      } else {
        ASSERT(rd != CMPRES2);
        LoadImmediate(CMPRES2, imm.value());
        BranchUnsignedGreaterEqual(CMPRES2, rd, l);
      }
    }
  }

  void Push(Register rt) {
    ASSERT(!in_delay_slot_);
    addiu(SP, SP, Immediate(-kWordSize));
    sw(rt, Address(SP));
  }

  void Pop(Register rt) {
    ASSERT(!in_delay_slot_);
    lw(rt, Address(SP));
    addiu(SP, SP, Immediate(kWordSize));
  }

  void Ret() { jr(RA); }

  void SmiTag(Register reg) { sll(reg, reg, kSmiTagSize); }

  void SmiTag(Register dst, Register src) { sll(dst, src, kSmiTagSize); }

  void SmiUntag(Register reg) { sra(reg, reg, kSmiTagSize); }

  void SmiUntag(Register dst, Register src) { sra(dst, src, kSmiTagSize); }
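
  // E.g. with kSmiTagSize == 1, SmiTag turns the integer 3 into the Smi
  // representation 6 (3 << 1), and SmiUntag shifts it back arithmetically,
  // preserving the sign of negative Smis.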

  void BranchIfNotSmi(Register reg, Label* label) {
    andi(CMPRES1, reg, Immediate(kSmiTagMask));
    bne(CMPRES1, ZR, label);
  }

  void BranchIfSmi(Register reg, Label* label) {
    andi(CMPRES1, reg, Immediate(kSmiTagMask));
    beq(CMPRES1, ZR, label);
  }

  void LoadFromOffset(Register reg, Register base, int32_t offset) {
    ASSERT(!in_delay_slot_);
    if (Utils::IsInt(kImmBits, offset)) {
      lw(reg, Address(base, offset));
    } else {
      LoadImmediate(TMP, offset);
      addu(TMP, base, TMP);
      lw(reg, Address(TMP, 0));
    }
  }

  void LoadFieldFromOffset(Register reg, Register base, int32_t offset) {
    LoadFromOffset(reg, base, offset - kHeapObjectTag);
  }

  void StoreToOffset(Register reg, Register base, int32_t offset) {
    ASSERT(!in_delay_slot_);
    if (Utils::IsInt(kImmBits, offset)) {
      sw(reg, Address(base, offset));
    } else {
      LoadImmediate(TMP, offset);
      addu(TMP, base, TMP);
      sw(reg, Address(TMP, 0));
    }
  }

  void StoreFieldToOffset(Register reg, Register base, int32_t offset) {
    StoreToOffset(reg, base, offset - kHeapObjectTag);
  }


  void StoreDToOffset(DRegister reg, Register base, int32_t offset) {
    ASSERT(!in_delay_slot_);
    FRegister lo = static_cast<FRegister>(reg * 2);
    FRegister hi = static_cast<FRegister>(reg * 2 + 1);
    swc1(lo, Address(base, offset));
    swc1(hi, Address(base, offset + kWordSize));
  }

  void LoadDFromOffset(DRegister reg, Register base, int32_t offset) {
    ASSERT(!in_delay_slot_);
    FRegister lo = static_cast<FRegister>(reg * 2);
    FRegister hi = static_cast<FRegister>(reg * 2 + 1);
    lwc1(lo, Address(base, offset));
    lwc1(hi, Address(base, offset + kWordSize));
  }
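
  // Each DRegister aliases an even/odd FRegister pair, so the 64-bit double
  // moves as two 32-bit words at offset and offset + kWordSize (assuming the
  // little-endian word order this port targets).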

  // dest gets the address of the following instruction. If temp is given,
  // RA is preserved using it as a temporary.
  void GetNextPC(Register dest, Register temp = kNoRegister);

  void ReserveAlignedFrameSpace(intptr_t frame_space);

  // Create a frame for calling into runtime that preserves all volatile
  // registers. Frame's SP is guaranteed to be correctly aligned and
  // frame_space bytes are reserved under it.
  void EnterCallRuntimeFrame(intptr_t frame_space);
  void LeaveCallRuntimeFrame();

  void LoadObject(Register rd, const Object& object);
  void LoadUniqueObject(Register rd, const Object& object);
  void LoadFunctionFromCalleePool(Register dst,
                                  const Function& function,
                                  Register new_pp);
  void LoadNativeEntry(Register rd,
                       const ExternalLabel* label,
                       Patchability patchable);
  void PushObject(const Object& object);

  void LoadIsolate(Register result);

  void LoadClassId(Register result, Register object);
  void LoadClassById(Register result, Register class_id);
  void LoadClass(Register result, Register object);
  void LoadClassIdMayBeSmi(Register result, Register object);
  void LoadTaggedClassIdMayBeSmi(Register result, Register object);

  void StoreIntoObject(Register object,      // Object we are storing into.
                       const Address& dest,  // Where we are storing into.
                       Register value,       // Value we are storing.
                       bool can_value_be_smi = true);
  void StoreIntoObjectOffset(Register object,
                             int32_t offset,
                             Register value,
                             bool can_value_be_smi = true);

  void StoreIntoObjectNoBarrier(Register object,
                                const Address& dest,
                                Register value);
  void StoreIntoObjectNoBarrierOffset(Register object,
                                      int32_t offset,
                                      Register value);
  void StoreIntoObjectNoBarrier(Register object,
                                const Address& dest,
                                const Object& value);
  void StoreIntoObjectNoBarrierOffset(Register object,
                                      int32_t offset,
                                      const Object& value);

  void CallRuntime(const RuntimeEntry& entry, intptr_t argument_count);

  // Set up a Dart frame on entry with a frame pointer and PC information to
  // enable easy access to the RawInstruction object of code corresponding
  // to this frame.
  void EnterDartFrame(intptr_t frame_size);
  void LeaveDartFrame(RestorePP restore_pp = kRestoreCallerPP);
  void LeaveDartFrameAndReturn(Register ra = RA);

  // Set up a Dart frame for a function compiled for on-stack replacement.
  // The frame layout is a normal Dart frame, but the frame is partially set
  // up on entry (it is the frame of the unoptimized code).
  void EnterOsrFrame(intptr_t extra_size);

  Address ElementAddressForIntIndex(bool is_external,
                                    intptr_t cid,
                                    intptr_t index_scale,
                                    Register array,
                                    intptr_t index) const;
  void LoadElementAddressForIntIndex(Register address,
                                     bool is_external,
                                     intptr_t cid,
                                     intptr_t index_scale,
                                     Register array,
                                     intptr_t index);
  Address ElementAddressForRegIndex(bool is_load,
                                    bool is_external,
                                    intptr_t cid,
                                    intptr_t index_scale,
                                    Register array,
                                    Register index);
  void LoadElementAddressForRegIndex(Register address,
                                     bool is_load,
                                     bool is_external,
                                     intptr_t cid,
                                     intptr_t index_scale,
                                     Register array,
                                     Register index);

  void LoadHalfWordUnaligned(Register dst, Register addr, Register tmp);
  void LoadHalfWordUnsignedUnaligned(Register dst, Register addr, Register tmp);
  void StoreHalfWordUnaligned(Register src, Register addr, Register tmp);
  void LoadWordUnaligned(Register dst, Register addr, Register tmp);
  void StoreWordUnaligned(Register src, Register addr, Register tmp);

  static Address VMTagAddress() {
    return Address(THR, Thread::vm_tag_offset());
  }

  // On some other platforms, we draw a distinction between safe and unsafe
  // smis.
  static bool IsSafe(const Object& object) { return true; }
  static bool IsSafeSmi(const Object& object) { return object.IsSmi(); }

  bool constant_pool_allowed() const { return constant_pool_allowed_; }
  void set_constant_pool_allowed(bool b) { constant_pool_allowed_ = b; }

 private:
  AssemblerBuffer buffer_;
  ObjectPoolWrapper object_pool_wrapper_;

  intptr_t prologue_offset_;
  bool has_single_entry_point_;
  bool use_far_branches_;
  bool delay_slot_available_;
  bool in_delay_slot_;

  class CodeComment : public ZoneAllocated {
   public:
    CodeComment(intptr_t pc_offset, const String& comment)
        : pc_offset_(pc_offset), comment_(comment) {}

    intptr_t pc_offset() const { return pc_offset_; }
    const String& comment() const { return comment_; }

   private:
    intptr_t pc_offset_;
    const String& comment_;

    DISALLOW_COPY_AND_ASSIGN(CodeComment);
  };

  GrowableArray<CodeComment*> comments_;

  bool constant_pool_allowed_;

  void BranchLink(const ExternalLabel* label);
  void BranchLink(const Code& code, Patchability patchable);

  bool CanLoadFromObjectPool(const Object& object) const;

  void LoadWordFromPoolOffset(Register rd, int32_t offset, Register pp = PP);
  void LoadObjectHelper(Register rd, const Object& object, bool is_unique);

  void Emit(int32_t value) {
    // Emitting an instruction clears the delay slot state.
    in_delay_slot_ = false;
    delay_slot_available_ = false;
    AssemblerBuffer::EnsureCapacity ensured(&buffer_);
    buffer_.Emit<int32_t>(value);
  }

  // Encode CPU instructions according to the types specified in
  // Figures 4-1, 4-2 and 4-3 in VolI-A.
  void EmitIType(Opcode opcode, Register rs, Register rt, uint16_t imm) {
    Emit(opcode << kOpcodeShift | rs << kRsShift | rt << kRtShift | imm);
  }

  void EmitLoadStore(Opcode opcode, Register rt, const Address& addr) {
    Emit(opcode << kOpcodeShift | rt << kRtShift | addr.encoding());
  }

  void EmitFpuLoadStore(Opcode opcode, FRegister ft, const Address& addr) {
    Emit(opcode << kOpcodeShift | ft << kFtShift | addr.encoding());
  }

  void EmitRegImmType(Opcode opcode, Register rs, RtRegImm code, uint16_t imm) {
    Emit(opcode << kOpcodeShift | rs << kRsShift | code << kRtShift | imm);
  }

  void EmitJType(Opcode opcode, uint32_t destination) { UNIMPLEMENTED(); }

  void EmitRType(Opcode opcode,
                 Register rs,
                 Register rt,
                 Register rd,
                 int sa,
                 SpecialFunction func) {
    ASSERT(Utils::IsUint(5, sa));
    Emit(opcode << kOpcodeShift | rs << kRsShift | rt << kRtShift |
         rd << kRdShift | sa << kSaShift | func << kFunctionShift);
  }

  void EmitFpuRType(Opcode opcode,
                    Format fmt,
                    FRegister ft,
                    FRegister fs,
                    FRegister fd,
                    Cop1Function func) {
    Emit(opcode << kOpcodeShift | fmt << kFmtShift | ft << kFtShift |
         fs << kFsShift | fd << kFdShift | func << kCop1FnShift);
  }

  int32_t EncodeBranchOffset(int32_t offset, int32_t instr);

  void EmitFarJump(int32_t offset, bool link);
  void EmitFarBranch(Opcode b, Register rs, Register rt, int32_t offset);
  void EmitFarRegImmBranch(RtRegImm b, Register rs, int32_t offset);
  void EmitFarFpuBranch(bool kind, int32_t offset);
  void EmitBranch(Opcode b, Register rs, Register rt, Label* label);
  void EmitRegImmBranch(RtRegImm b, Register rs, Label* label);
  void EmitFpuBranch(bool kind, Label* label);

  void EmitBranchDelayNop() {
    Emit(Instr::kNopInstruction);  // Branch delay NOP.
    delay_slot_available_ = true;
  }

  void StoreIntoObjectFilter(Register object, Register value, Label* no_update);

  // Shorter filtering sequence that assumes that value is not a smi.
  void StoreIntoObjectFilterNoSmi(Register object,
                                  Register value,
                                  Label* no_update);

  DISALLOW_ALLOCATION();
  DISALLOW_COPY_AND_ASSIGN(Assembler);
};

}  // namespace dart

#endif  // RUNTIME_VM_ASSEMBLER_MIPS_H_