OLD | NEW |
| (Empty) |
1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file | |
2 // for details. All rights reserved. Use of this source code is governed by a | |
3 // BSD-style license that can be found in the LICENSE file. | |
4 | |
5 #ifndef RUNTIME_VM_ASSEMBLER_MIPS_H_ | |
6 #define RUNTIME_VM_ASSEMBLER_MIPS_H_ | |
7 | |
8 #ifndef RUNTIME_VM_ASSEMBLER_H_ | |
9 #error Do not include assembler_mips.h directly; use assembler.h instead. | |
10 #endif | |
11 | |
12 #include "platform/assert.h" | |
13 #include "platform/utils.h" | |
14 #include "vm/constants_mips.h" | |
15 #include "vm/hash_map.h" | |
16 #include "vm/object.h" | |
17 #include "vm/simulator.h" | |
18 | |
19 // References to documentation in this file refer to: | |
20 // "MIPS® Architecture For Programmers Volume I-A: | |
21 // Introduction to the MIPS32® Architecture" in short "VolI-A" | |
22 // and | |
23 // "MIPS® Architecture For Programmers Volume II-A: | |
24 // The MIPS32® Instruction Set" in short "VolII-A" | |
25 namespace dart { | |
26 | |
27 // Forward declarations. | |
28 class RuntimeEntry; | |
29 class StubEntry; | |
30 | |
// Wrapper for a 32-bit immediate instruction operand. Exists so that
// Assembler entry points taking an immediate are not accidentally called
// with a plain integer where a register was intended.
class Immediate : public ValueObject {
 public:
  explicit Immediate(int32_t value) : value_(value) {}

  Immediate(const Immediate& other) : ValueObject(), value_(other.value_) {}
  Immediate& operator=(const Immediate& other) {
    value_ = other.value_;
    return *this;
  }

 private:
  int32_t value_;

  // Private on purpose: only the Assembler (a friend) may unwrap the value.
  int32_t value() const { return value_; }

  friend class Assembler;
};
48 | |
49 | |
// A base-register-plus-offset memory operand. MIPS load/store instructions
// encode only a 16-bit signed displacement (kImmBits), so the offset must
// satisfy CanHoldOffset(); larger offsets require materializing the address
// in a register first.
class Address : public ValueObject {
 public:
  explicit Address(Register base, int32_t offset = 0)
      : ValueObject(), base_(base), offset_(offset) {}

  // This addressing mode does not exist.
  // Declared but intentionally never defined: any use fails at link time.
  Address(Register base, Register offset);

  Address(const Address& other)
      : ValueObject(), base_(other.base_), offset_(other.offset_) {}
  Address& operator=(const Address& other) {
    base_ = other.base_;
    offset_ = other.offset_;
    return *this;
  }

  // Returns the operand bits as they appear in an I-type instruction:
  // base register in the rs field, offset in the low 16 bits. Truncating a
  // negative offset to uint16_t yields its two's-complement encoding, which
  // is what the hardware sign-extends back.
  uint32_t encoding() const {
    ASSERT(Utils::IsInt(kImmBits, offset_));
    uint16_t imm_value = static_cast<uint16_t>(offset_);
    return (base_ << kRsShift) | imm_value;
  }

  // True if offset fits in the instruction's 16-bit signed immediate field.
  static bool CanHoldOffset(int32_t offset) {
    return Utils::IsInt(kImmBits, offset);
  }

  Register base() const { return base_; }
  int32_t offset() const { return offset_; }

 private:
  Register base_;
  int32_t offset_;
};
83 | |
84 | |
// Address of a field inside a heap object. Heap object pointers carry a tag
// bias (kHeapObjectTag), so the tag is subtracted from the field's
// displacement to form the correct untagged effective address.
class FieldAddress : public Address {
 public:
  FieldAddress(Register base, int32_t disp)
      : Address(base, disp - kHeapObjectTag) {}

  FieldAddress(const FieldAddress& other) : Address(other) {}

  FieldAddress& operator=(const FieldAddress& other) {
    Address::operator=(other);
    return *this;
  }
};
97 | |
98 | |
// An assembler label. All state is packed into position_, which encodes
// three cases:
//   position_ == 0 : unused (never bound, never branched to)
//   position_ >  0 : linked — head of the chain of forward branches that
//                    reference this label, stored as position + kWordSize
//   position_ <  0 : bound — final position, stored as -position - kWordSize
// The +/- kWordSize bias keeps a genuine position of 0 distinguishable from
// the "unused" encoding.
class Label : public ValueObject {
 public:
  Label() : position_(0) {}

  ~Label() {
    // Assert if label is being destroyed with unresolved branches pending.
    ASSERT(!IsLinked());
  }

  // Returns the position for bound and linked labels. Cannot be used
  // for unused labels.
  intptr_t Position() const {
    ASSERT(!IsUnused());
    return IsBound() ? -position_ - kWordSize : position_ - kWordSize;
  }

  bool IsBound() const { return position_ < 0; }
  bool IsUnused() const { return position_ == 0; }
  bool IsLinked() const { return position_ > 0; }

 private:
  intptr_t position_;

  void Reinitialize() { position_ = 0; }

  // Marks the label as bound at |position| (stored negated and biased).
  // A label may be bound only once.
  void BindTo(intptr_t position) {
    ASSERT(!IsBound());
    position_ = -position - kWordSize;
    ASSERT(IsBound());
  }

  // Records |position| as the newest branch site referencing this
  // still-unbound label (stored biased positive).
  void LinkTo(intptr_t position) {
    ASSERT(!IsBound());
    position_ = position + kWordSize;
    ASSERT(IsLinked());
  }

  friend class Assembler;
  DISALLOW_COPY_AND_ASSIGN(Label);
};
139 | |
140 | |
141 // There is no dedicated status register on MIPS, but Condition values are used | |
142 // and passed around by the intermediate language, so we need a Condition type. | |
143 // We delay code generation of a comparison that would result in a traditional | |
144 // condition code in the status register by keeping both register operands and | |
145 // the relational operator between them as the Condition. | |
class Condition : public ValueObject {
 public:
  // Layout of bits_: two register operands (6 bits each — wide enough for
  // the ZR and IMM pseudo-registers in addition to the real registers),
  // the relational operator, and a 16-bit immediate that is meaningful only
  // when one operand is the IMM pseudo-register.
  enum Bits {
    kLeftPos = 0,
    kLeftSize = 6,
    kRightPos = kLeftPos + kLeftSize,
    kRightSize = 6,
    kRelOpPos = kRightPos + kRightSize,
    kRelOpSize = 4,
    kImmPos = kRelOpPos + kRelOpSize,
    kImmSize = 16,
  };

  class LeftBits : public BitField<uword, Register, kLeftPos, kLeftSize> {};
  class RightBits : public BitField<uword, Register, kRightPos, kRightSize> {};
  class RelOpBits
      : public BitField<uword, RelationOperator, kRelOpPos, kRelOpSize> {};
  class ImmBits : public BitField<uword, uint16_t, kImmPos, kImmSize> {};

  Register left() const {
    ASSERT(IsValid());
    return LeftBits::decode(bits_);
  }

  Register right() const {
    ASSERT(IsValid());
    return RightBits::decode(bits_);
  }
  // No IsValid() assert here: IsValid() is itself implemented in terms of
  // rel_op(), so asserting would recurse.
  RelationOperator rel_op() const { return RelOpBits::decode(bits_); }
  int16_t imm() const {
    ASSERT(IsValid());
    return static_cast<int16_t>(ImmBits::decode(bits_));
  }

  static bool IsValidImm(int32_t value) {
    // We want both value and value + 1 to fit in an int16_t.
    // Hence the strict upper bound: value < 0x7fff guarantees
    // value + 1 <= 0x7fff.
    return (-0x08000 <= value) && (value < 0x7fff);
  }

  void set_rel_op(RelationOperator value) {
    ASSERT(IsValidRelOp(value));
    bits_ = RelOpBits::update(value, bits_);
  }

  // A condition is valid iff its operator field holds a real relation.
  bool IsValid() const { return rel_op() != INVALID_RELATION; }

  // Uninitialized condition.
  Condition() : ValueObject(), bits_(RelOpBits::update(INVALID_RELATION, 0)) {}

  // Copy constructor.
  Condition(const Condition& other) : ValueObject(), bits_(other.bits_) {}

  // Copy assignment operator.
  Condition& operator=(const Condition& other) {
    bits_ = other.bits_;
    return *this;
  }

  Condition(Register left,
            Register right,
            RelationOperator rel_op,
            int16_t imm = 0) {
    // At most one constant, ZR or immediate.
    ASSERT(!(((left == ZR) || (left == IMM)) &&
             ((right == ZR) || (right == IMM))));
    // Non-zero immediate value is only allowed for IMM.
    ASSERT((imm != 0) == ((left == IMM) || (right == IMM)));
    set_left(left);
    set_right(right);
    if (rel_op == INVALID_RELATION) {
      SetToInvalidState();
    } else {
      set_rel_op(rel_op);
    }
    set_imm(imm);
  }

 private:
  // Bypasses set_rel_op(), whose assert rejects INVALID_RELATION.
  void SetToInvalidState() {
    bits_ = RelOpBits::update(INVALID_RELATION, bits_);
  }

  static bool IsValidRelOp(RelationOperator value) {
    return (AL <= value) && (value <= ULE);
  }

  // AT is rejected: by MIPS convention it is the assembler's own temporary,
  // so user conditions must not name it.
  static bool IsValidRegister(Register value) {
    return (ZR <= value) && (value <= IMM) && (value != AT);
  }

  void set_left(Register value) {
    ASSERT(IsValidRegister(value));
    bits_ = LeftBits::update(value, bits_);
  }

  void set_right(Register value) {
    ASSERT(IsValidRegister(value));
    bits_ = RightBits::update(value, bits_);
  }

  void set_imm(int16_t value) {
    ASSERT(IsValidImm(value));
    bits_ = ImmBits::update(static_cast<uint16_t>(value), bits_);
  }

  uword bits_;
};
253 | |
254 | |
255 class Assembler : public ValueObject { | |
256 public: | |
257 explicit Assembler(bool use_far_branches = false) | |
258 : buffer_(), | |
259 prologue_offset_(-1), | |
260 has_single_entry_point_(true), | |
261 use_far_branches_(use_far_branches), | |
262 delay_slot_available_(false), | |
263 in_delay_slot_(false), | |
264 comments_(), | |
265 constant_pool_allowed_(true) {} | |
266 ~Assembler() {} | |
267 | |
268 void PopRegister(Register r) { Pop(r); } | |
269 | |
270 void Bind(Label* label); | |
271 void Jump(Label* label) { b(label); } | |
272 | |
273 // Misc. functionality | |
274 intptr_t CodeSize() const { return buffer_.Size(); } | |
275 intptr_t prologue_offset() const { return prologue_offset_; } | |
276 bool has_single_entry_point() const { return has_single_entry_point_; } | |
277 | |
278 // Count the fixups that produce a pointer offset, without processing | |
279 // the fixups. | |
280 intptr_t CountPointerOffsets() const { return buffer_.CountPointerOffsets(); } | |
281 | |
282 const ZoneGrowableArray<intptr_t>& GetPointerOffsets() const { | |
283 return buffer_.pointer_offsets(); | |
284 } | |
285 | |
286 ObjectPoolWrapper& object_pool_wrapper() { return object_pool_wrapper_; } | |
287 | |
288 RawObjectPool* MakeObjectPool() { | |
289 return object_pool_wrapper_.MakeObjectPool(); | |
290 } | |
291 | |
292 void FinalizeInstructions(const MemoryRegion& region) { | |
293 buffer_.FinalizeInstructions(region); | |
294 } | |
295 | |
296 bool use_far_branches() const { | |
297 return FLAG_use_far_branches || use_far_branches_; | |
298 } | |
299 | |
300 void set_use_far_branches(bool b) { use_far_branches_ = b; } | |
301 | |
302 void EnterFrame(); | |
303 void LeaveFrameAndReturn(); | |
304 | |
305 // Set up a stub frame so that the stack traversal code can easily identify | |
306 // a stub frame. | |
307 void EnterStubFrame(intptr_t frame_size = 0); | |
308 void LeaveStubFrame(); | |
309 // A separate macro for when a Ret immediately follows, so that we can use | |
310 // the branch delay slot. | |
311 void LeaveStubFrameAndReturn(Register ra = RA); | |
312 | |
313 void MonomorphicCheckedEntry(); | |
314 | |
315 void UpdateAllocationStats(intptr_t cid, | |
316 Register temp_reg, | |
317 Heap::Space space); | |
318 | |
319 void UpdateAllocationStatsWithSize(intptr_t cid, | |
320 Register size_reg, | |
321 Register temp_reg, | |
322 Heap::Space space); | |
323 | |
324 | |
325 void MaybeTraceAllocation(intptr_t cid, Register temp_reg, Label* trace); | |
326 | |
327 // Inlined allocation of an instance of class 'cls', code has no runtime | |
328 // calls. Jump to 'failure' if the instance cannot be allocated here. | |
329 // Allocated instance is returned in 'instance_reg'. | |
330 // Only the tags field of the object is initialized. | |
331 void TryAllocate(const Class& cls, | |
332 Label* failure, | |
333 Register instance_reg, | |
334 Register temp_reg); | |
335 | |
336 void TryAllocateArray(intptr_t cid, | |
337 intptr_t instance_size, | |
338 Label* failure, | |
339 Register instance, | |
340 Register end_address, | |
341 Register temp1, | |
342 Register temp2); | |
343 | |
344 // Debugging and bringup support. | |
345 void Stop(const char* message); | |
346 void Unimplemented(const char* message); | |
347 void Untested(const char* message); | |
348 void Unreachable(const char* message); | |
349 | |
350 static void InitializeMemoryWithBreakpoints(uword data, intptr_t length); | |
351 | |
352 void Comment(const char* format, ...) PRINTF_ATTRIBUTE(2, 3); | |
353 static bool EmittingComments(); | |
354 | |
355 const Code::Comments& GetCodeComments() const; | |
356 | |
357 static const char* RegisterName(Register reg); | |
358 | |
359 static const char* FpuRegisterName(FpuRegister reg); | |
360 | |
361 void SetPrologueOffset() { | |
362 if (prologue_offset_ == -1) { | |
363 prologue_offset_ = CodeSize(); | |
364 } | |
365 } | |
366 | |
367 // A utility to be able to assemble an instruction into the delay slot. | |
368 Assembler* delay_slot() { | |
369 ASSERT(delay_slot_available_); | |
370 ASSERT(buffer_.Load<int32_t>(buffer_.GetPosition() - sizeof(int32_t)) == | |
371 Instr::kNopInstruction); | |
372 buffer_.Remit<int32_t>(); | |
373 delay_slot_available_ = false; | |
374 in_delay_slot_ = true; | |
375 return this; | |
376 } | |
377 | |
378 // CPU instructions in alphabetical order. | |
379 void addd(DRegister dd, DRegister ds, DRegister dt) { | |
380 // DRegisters start at the even FRegisters. | |
381 FRegister fd = static_cast<FRegister>(dd * 2); | |
382 FRegister fs = static_cast<FRegister>(ds * 2); | |
383 FRegister ft = static_cast<FRegister>(dt * 2); | |
384 EmitFpuRType(COP1, FMT_D, ft, fs, fd, COP1_ADD); | |
385 } | |
386 | |
387 void addiu(Register rt, Register rs, const Immediate& imm) { | |
388 ASSERT(Utils::IsInt(kImmBits, imm.value())); | |
389 const uint16_t imm_value = static_cast<uint16_t>(imm.value()); | |
390 EmitIType(ADDIU, rs, rt, imm_value); | |
391 } | |
392 | |
393 void addu(Register rd, Register rs, Register rt) { | |
394 EmitRType(SPECIAL, rs, rt, rd, 0, ADDU); | |
395 } | |
396 | |
397 void and_(Register rd, Register rs, Register rt) { | |
398 EmitRType(SPECIAL, rs, rt, rd, 0, AND); | |
399 } | |
400 | |
401 void andi(Register rt, Register rs, const Immediate& imm) { | |
402 ASSERT(Utils::IsUint(kImmBits, imm.value())); | |
403 const uint16_t imm_value = static_cast<uint16_t>(imm.value()); | |
404 EmitIType(ANDI, rs, rt, imm_value); | |
405 } | |
406 | |
407 // Unconditional branch. | |
408 void b(Label* l) { beq(R0, R0, l); } | |
409 | |
410 void bal(Label* l) { | |
411 ASSERT(!in_delay_slot_); | |
412 EmitRegImmBranch(BGEZAL, R0, l); | |
413 EmitBranchDelayNop(); | |
414 } | |
415 | |
416 // Branch on floating point false. | |
417 void bc1f(Label* l) { | |
418 EmitFpuBranch(false, l); | |
419 EmitBranchDelayNop(); | |
420 } | |
421 | |
422 // Branch on floating point true. | |
423 void bc1t(Label* l) { | |
424 EmitFpuBranch(true, l); | |
425 EmitBranchDelayNop(); | |
426 } | |
427 | |
428 // Branch if equal. | |
429 void beq(Register rs, Register rt, Label* l) { | |
430 ASSERT(!in_delay_slot_); | |
431 EmitBranch(BEQ, rs, rt, l); | |
432 EmitBranchDelayNop(); | |
433 } | |
434 | |
435 // Branch if equal, likely taken. | |
436 // Delay slot executed only when branch taken. | |
437 void beql(Register rs, Register rt, Label* l) { | |
438 ASSERT(!in_delay_slot_); | |
439 EmitBranch(BEQL, rs, rt, l); | |
440 EmitBranchDelayNop(); | |
441 } | |
442 | |
443 // Branch if rs >= 0. | |
444 void bgez(Register rs, Label* l) { | |
445 ASSERT(!in_delay_slot_); | |
446 EmitRegImmBranch(BGEZ, rs, l); | |
447 EmitBranchDelayNop(); | |
448 } | |
449 | |
450 // Branch if rs >= 0, likely taken. | |
451 // Delay slot executed only when branch taken. | |
452 void bgezl(Register rs, Label* l) { | |
453 ASSERT(!in_delay_slot_); | |
454 EmitRegImmBranch(BGEZL, rs, l); | |
455 EmitBranchDelayNop(); | |
456 } | |
457 | |
458 // Branch if rs > 0. | |
459 void bgtz(Register rs, Label* l) { | |
460 ASSERT(!in_delay_slot_); | |
461 EmitBranch(BGTZ, rs, R0, l); | |
462 EmitBranchDelayNop(); | |
463 } | |
464 | |
465 // Branch if rs > 0, likely taken. | |
466 // Delay slot executed only when branch taken. | |
467 void bgtzl(Register rs, Label* l) { | |
468 ASSERT(!in_delay_slot_); | |
469 EmitBranch(BGTZL, rs, R0, l); | |
470 EmitBranchDelayNop(); | |
471 } | |
472 | |
473 // Branch if rs <= 0. | |
474 void blez(Register rs, Label* l) { | |
475 ASSERT(!in_delay_slot_); | |
476 EmitBranch(BLEZ, rs, R0, l); | |
477 EmitBranchDelayNop(); | |
478 } | |
479 | |
480 // Branch if rs <= 0, likely taken. | |
481 // Delay slot executed only when branch taken. | |
482 void blezl(Register rs, Label* l) { | |
483 ASSERT(!in_delay_slot_); | |
484 EmitBranch(BLEZL, rs, R0, l); | |
485 EmitBranchDelayNop(); | |
486 } | |
487 | |
488 // Branch if rs < 0. | |
489 void bltz(Register rs, Label* l) { | |
490 ASSERT(!in_delay_slot_); | |
491 EmitRegImmBranch(BLTZ, rs, l); | |
492 EmitBranchDelayNop(); | |
493 } | |
494 | |
495 // Branch if rs < 0, likely taken. | |
496 // Delay slot executed only when branch taken. | |
497 void bltzl(Register rs, Label* l) { | |
498 ASSERT(!in_delay_slot_); | |
499 EmitRegImmBranch(BLTZL, rs, l); | |
500 EmitBranchDelayNop(); | |
501 } | |
502 | |
503 // Branch if not equal. | |
504 void bne(Register rs, Register rt, Label* l) { | |
505 ASSERT(!in_delay_slot_); // Jump within a delay slot is not supported. | |
506 EmitBranch(BNE, rs, rt, l); | |
507 EmitBranchDelayNop(); | |
508 } | |
509 | |
510 // Branch if not equal, likely taken. | |
511 // Delay slot executed only when branch taken. | |
512 void bnel(Register rs, Register rt, Label* l) { | |
513 ASSERT(!in_delay_slot_); // Jump within a delay slot is not supported. | |
514 EmitBranch(BNEL, rs, rt, l); | |
515 EmitBranchDelayNop(); | |
516 } | |
517 | |
518 static int32_t BreakEncoding(int32_t code) { | |
519 ASSERT(Utils::IsUint(20, code)); | |
520 return SPECIAL << kOpcodeShift | code << kBreakCodeShift | | |
521 BREAK << kFunctionShift; | |
522 } | |
523 | |
524 | |
525 void break_(int32_t code) { Emit(BreakEncoding(code)); } | |
526 | |
527 static uword GetBreakInstructionFiller() { return BreakEncoding(0); } | |
528 | |
529 // FPU compare, always false. | |
530 void cfd(DRegister ds, DRegister dt) { | |
531 FRegister fs = static_cast<FRegister>(ds * 2); | |
532 FRegister ft = static_cast<FRegister>(dt * 2); | |
533 EmitFpuRType(COP1, FMT_D, ft, fs, F0, COP1_C_F); | |
534 } | |
535 | |
536 // FPU compare, true if unordered, i.e. one is NaN. | |
537 void cund(DRegister ds, DRegister dt) { | |
538 FRegister fs = static_cast<FRegister>(ds * 2); | |
539 FRegister ft = static_cast<FRegister>(dt * 2); | |
540 EmitFpuRType(COP1, FMT_D, ft, fs, F0, COP1_C_UN); | |
541 } | |
542 | |
543 // FPU compare, true if equal. | |
544 void ceqd(DRegister ds, DRegister dt) { | |
545 FRegister fs = static_cast<FRegister>(ds * 2); | |
546 FRegister ft = static_cast<FRegister>(dt * 2); | |
547 EmitFpuRType(COP1, FMT_D, ft, fs, F0, COP1_C_EQ); | |
548 } | |
549 | |
550 // FPU compare, true if unordered or equal. | |
551 void cueqd(DRegister ds, DRegister dt) { | |
552 FRegister fs = static_cast<FRegister>(ds * 2); | |
553 FRegister ft = static_cast<FRegister>(dt * 2); | |
554 EmitFpuRType(COP1, FMT_D, ft, fs, F0, COP1_C_UEQ); | |
555 } | |
556 | |
557 // FPU compare, true if less than. | |
558 void coltd(DRegister ds, DRegister dt) { | |
559 FRegister fs = static_cast<FRegister>(ds * 2); | |
560 FRegister ft = static_cast<FRegister>(dt * 2); | |
561 EmitFpuRType(COP1, FMT_D, ft, fs, F0, COP1_C_OLT); | |
562 } | |
563 | |
564 // FPU compare, true if unordered or less than. | |
565 void cultd(DRegister ds, DRegister dt) { | |
566 FRegister fs = static_cast<FRegister>(ds * 2); | |
567 FRegister ft = static_cast<FRegister>(dt * 2); | |
568 EmitFpuRType(COP1, FMT_D, ft, fs, F0, COP1_C_ULT); | |
569 } | |
570 | |
571 // FPU compare, true if less or equal. | |
572 void coled(DRegister ds, DRegister dt) { | |
573 FRegister fs = static_cast<FRegister>(ds * 2); | |
574 FRegister ft = static_cast<FRegister>(dt * 2); | |
575 EmitFpuRType(COP1, FMT_D, ft, fs, F0, COP1_C_OLE); | |
576 } | |
577 | |
578 // FPU compare, true if unordered or less or equal. | |
579 void culed(DRegister ds, DRegister dt) { | |
580 FRegister fs = static_cast<FRegister>(ds * 2); | |
581 FRegister ft = static_cast<FRegister>(dt * 2); | |
582 EmitFpuRType(COP1, FMT_D, ft, fs, F0, COP1_C_ULE); | |
583 } | |
584 | |
585 void clo(Register rd, Register rs) { | |
586 EmitRType(SPECIAL2, rs, rd, rd, 0, CLO); | |
587 } | |
588 | |
589 void clz(Register rd, Register rs) { | |
590 EmitRType(SPECIAL2, rs, rd, rd, 0, CLZ); | |
591 } | |
592 | |
593 // Convert a double in ds to a 32-bit signed int in fd rounding towards 0. | |
594 void truncwd(FRegister fd, DRegister ds) { | |
595 FRegister fs = static_cast<FRegister>(ds * 2); | |
596 EmitFpuRType(COP1, FMT_D, F0, fs, fd, COP1_TRUNC_W); | |
597 } | |
598 | |
599 // Convert a 32-bit float in fs to a 64-bit double in dd. | |
600 void cvtds(DRegister dd, FRegister fs) { | |
601 FRegister fd = static_cast<FRegister>(dd * 2); | |
602 EmitFpuRType(COP1, FMT_S, F0, fs, fd, COP1_CVT_D); | |
603 } | |
604 | |
605 // Converts a 32-bit signed int in fs to a double in fd. | |
606 void cvtdw(DRegister dd, FRegister fs) { | |
607 FRegister fd = static_cast<FRegister>(dd * 2); | |
608 EmitFpuRType(COP1, FMT_W, F0, fs, fd, COP1_CVT_D); | |
609 } | |
610 | |
611 // Convert a 64-bit double in ds to a 32-bit float in fd. | |
612 void cvtsd(FRegister fd, DRegister ds) { | |
613 FRegister fs = static_cast<FRegister>(ds * 2); | |
614 EmitFpuRType(COP1, FMT_D, F0, fs, fd, COP1_CVT_S); | |
615 } | |
616 | |
617 void div(Register rs, Register rt) { EmitRType(SPECIAL, rs, rt, R0, 0, DIV); } | |
618 | |
619 void divd(DRegister dd, DRegister ds, DRegister dt) { | |
620 FRegister fd = static_cast<FRegister>(dd * 2); | |
621 FRegister fs = static_cast<FRegister>(ds * 2); | |
622 FRegister ft = static_cast<FRegister>(dt * 2); | |
623 EmitFpuRType(COP1, FMT_D, ft, fs, fd, COP1_DIV); | |
624 } | |
625 | |
626 void divu(Register rs, Register rt) { | |
627 EmitRType(SPECIAL, rs, rt, R0, 0, DIVU); | |
628 } | |
629 | |
630 void jalr(Register rs, Register rd = RA) { | |
631 ASSERT(rs != rd); | |
632 ASSERT(!in_delay_slot_); // Jump within a delay slot is not supported. | |
633 EmitRType(SPECIAL, rs, R0, rd, 0, JALR); | |
634 EmitBranchDelayNop(); | |
635 } | |
636 | |
637 void jr(Register rs) { | |
638 ASSERT(!in_delay_slot_); // Jump within a delay slot is not supported. | |
639 EmitRType(SPECIAL, rs, R0, R0, 0, JR); | |
640 EmitBranchDelayNop(); | |
641 } | |
642 | |
643 void lb(Register rt, const Address& addr) { EmitLoadStore(LB, rt, addr); } | |
644 | |
645 void lbu(Register rt, const Address& addr) { EmitLoadStore(LBU, rt, addr); } | |
646 | |
647 void ldc1(DRegister dt, const Address& addr) { | |
648 FRegister ft = static_cast<FRegister>(dt * 2); | |
649 EmitFpuLoadStore(LDC1, ft, addr); | |
650 } | |
651 | |
652 void lh(Register rt, const Address& addr) { EmitLoadStore(LH, rt, addr); } | |
653 | |
654 void lhu(Register rt, const Address& addr) { EmitLoadStore(LHU, rt, addr); } | |
655 | |
656 void ll(Register rt, const Address& addr) { EmitLoadStore(LL, rt, addr); } | |
657 | |
658 void lui(Register rt, const Immediate& imm) { | |
659 ASSERT(Utils::IsUint(kImmBits, imm.value())); | |
660 const uint16_t imm_value = static_cast<uint16_t>(imm.value()); | |
661 EmitIType(LUI, R0, rt, imm_value); | |
662 } | |
663 | |
664 void lw(Register rt, const Address& addr) { EmitLoadStore(LW, rt, addr); } | |
665 | |
666 void lwc1(FRegister ft, const Address& addr) { | |
667 EmitFpuLoadStore(LWC1, ft, addr); | |
668 } | |
669 | |
670 void madd(Register rs, Register rt) { | |
671 EmitRType(SPECIAL2, rs, rt, R0, 0, MADD); | |
672 } | |
673 | |
674 void maddu(Register rs, Register rt) { | |
675 EmitRType(SPECIAL2, rs, rt, R0, 0, MADDU); | |
676 } | |
677 | |
678 void mfc1(Register rt, FRegister fs) { | |
679 Emit(COP1 << kOpcodeShift | COP1_MF << kCop1SubShift | rt << kRtShift | | |
680 fs << kFsShift); | |
681 } | |
682 | |
683 void mfhi(Register rd) { EmitRType(SPECIAL, R0, R0, rd, 0, MFHI); } | |
684 | |
685 void mflo(Register rd) { EmitRType(SPECIAL, R0, R0, rd, 0, MFLO); } | |
686 | |
687 void mov(Register rd, Register rs) { or_(rd, rs, ZR); } | |
688 | |
689 void movd(DRegister dd, DRegister ds) { | |
690 FRegister fd = static_cast<FRegister>(dd * 2); | |
691 FRegister fs = static_cast<FRegister>(ds * 2); | |
692 EmitFpuRType(COP1, FMT_D, F0, fs, fd, COP1_MOV); | |
693 } | |
694 | |
695 // Move if floating point false. | |
696 void movf(Register rd, Register rs) { | |
697 EmitRType(SPECIAL, rs, R0, rd, 0, MOVCI); | |
698 } | |
699 | |
700 void movn(Register rd, Register rs, Register rt) { | |
701 EmitRType(SPECIAL, rs, rt, rd, 0, MOVN); | |
702 } | |
703 | |
704 // Move if floating point true. | |
705 void movt(Register rd, Register rs) { | |
706 EmitRType(SPECIAL, rs, R1, rd, 0, MOVCI); | |
707 } | |
708 | |
709 // rd <- (rt == 0) ? rs : rd; | |
710 void movz(Register rd, Register rs, Register rt) { | |
711 EmitRType(SPECIAL, rs, rt, rd, 0, MOVZ); | |
712 } | |
713 | |
714 void movs(FRegister fd, FRegister fs) { | |
715 EmitFpuRType(COP1, FMT_S, F0, fs, fd, COP1_MOV); | |
716 } | |
717 | |
718 void mtc1(Register rt, FRegister fs) { | |
719 Emit(COP1 << kOpcodeShift | COP1_MT << kCop1SubShift | rt << kRtShift | | |
720 fs << kFsShift); | |
721 } | |
722 | |
723 void mthi(Register rs) { EmitRType(SPECIAL, rs, R0, R0, 0, MTHI); } | |
724 | |
725 void mtlo(Register rs) { EmitRType(SPECIAL, rs, R0, R0, 0, MTLO); } | |
726 | |
727 void muld(DRegister dd, DRegister ds, DRegister dt) { | |
728 FRegister fd = static_cast<FRegister>(dd * 2); | |
729 FRegister fs = static_cast<FRegister>(ds * 2); | |
730 FRegister ft = static_cast<FRegister>(dt * 2); | |
731 EmitFpuRType(COP1, FMT_D, ft, fs, fd, COP1_MUL); | |
732 } | |
733 | |
734 void mult(Register rs, Register rt) { | |
735 EmitRType(SPECIAL, rs, rt, R0, 0, MULT); | |
736 } | |
737 | |
738 void multu(Register rs, Register rt) { | |
739 EmitRType(SPECIAL, rs, rt, R0, 0, MULTU); | |
740 } | |
741 | |
742 void negd(DRegister dd, DRegister ds) { | |
743 FRegister fd = static_cast<FRegister>(dd * 2); | |
744 FRegister fs = static_cast<FRegister>(ds * 2); | |
745 EmitFpuRType(COP1, FMT_D, F0, fs, fd, COP1_NEG); | |
746 } | |
747 | |
748 void nop() { Emit(Instr::kNopInstruction); } | |
749 | |
750 void nor(Register rd, Register rs, Register rt) { | |
751 EmitRType(SPECIAL, rs, rt, rd, 0, NOR); | |
752 } | |
753 | |
754 void or_(Register rd, Register rs, Register rt) { | |
755 EmitRType(SPECIAL, rs, rt, rd, 0, OR); | |
756 } | |
757 | |
758 void ori(Register rt, Register rs, const Immediate& imm) { | |
759 ASSERT(Utils::IsUint(kImmBits, imm.value())); | |
760 const uint16_t imm_value = static_cast<uint16_t>(imm.value()); | |
761 EmitIType(ORI, rs, rt, imm_value); | |
762 } | |
763 | |
764 void sb(Register rt, const Address& addr) { EmitLoadStore(SB, rt, addr); } | |
765 | |
766 // rt = 1 on success, 0 on failure. | |
767 void sc(Register rt, const Address& addr) { EmitLoadStore(SC, rt, addr); } | |
768 | |
769 void sdc1(DRegister dt, const Address& addr) { | |
770 FRegister ft = static_cast<FRegister>(dt * 2); | |
771 EmitFpuLoadStore(SDC1, ft, addr); | |
772 } | |
773 | |
774 void sh(Register rt, const Address& addr) { EmitLoadStore(SH, rt, addr); } | |
775 | |
776 void sll(Register rd, Register rt, int sa) { | |
777 EmitRType(SPECIAL, R0, rt, rd, sa, SLL); | |
778 } | |
779 | |
780 void sllv(Register rd, Register rt, Register rs) { | |
781 EmitRType(SPECIAL, rs, rt, rd, 0, SLLV); | |
782 } | |
783 | |
784 void slt(Register rd, Register rs, Register rt) { | |
785 EmitRType(SPECIAL, rs, rt, rd, 0, SLT); | |
786 } | |
787 | |
788 void slti(Register rt, Register rs, const Immediate& imm) { | |
789 ASSERT(Utils::IsInt(kImmBits, imm.value())); | |
790 const uint16_t imm_value = static_cast<uint16_t>(imm.value()); | |
791 EmitIType(SLTI, rs, rt, imm_value); | |
792 } | |
793 | |
794 // Although imm argument is int32_t, it is interpreted as an uint32_t. | |
795 // For example, -1 stands for 0xffffffffUL: it is encoded as 0xffff in the | |
796 // instruction imm field and is then sign extended back to 0xffffffffUL. | |
797 void sltiu(Register rt, Register rs, const Immediate& imm) { | |
798 ASSERT(Utils::IsInt(kImmBits, imm.value())); | |
799 const uint16_t imm_value = static_cast<uint16_t>(imm.value()); | |
800 EmitIType(SLTIU, rs, rt, imm_value); | |
801 } | |
802 | |
803 void sltu(Register rd, Register rs, Register rt) { | |
804 EmitRType(SPECIAL, rs, rt, rd, 0, SLTU); | |
805 } | |
806 | |
807 void sqrtd(DRegister dd, DRegister ds) { | |
808 FRegister fd = static_cast<FRegister>(dd * 2); | |
809 FRegister fs = static_cast<FRegister>(ds * 2); | |
810 EmitFpuRType(COP1, FMT_D, F0, fs, fd, COP1_SQRT); | |
811 } | |
812 | |
813 void sra(Register rd, Register rt, int sa) { | |
814 EmitRType(SPECIAL, R0, rt, rd, sa, SRA); | |
815 } | |
816 | |
817 void srav(Register rd, Register rt, Register rs) { | |
818 EmitRType(SPECIAL, rs, rt, rd, 0, SRAV); | |
819 } | |
820 | |
821 void srl(Register rd, Register rt, int sa) { | |
822 EmitRType(SPECIAL, R0, rt, rd, sa, SRL); | |
823 } | |
824 | |
825 void srlv(Register rd, Register rt, Register rs) { | |
826 EmitRType(SPECIAL, rs, rt, rd, 0, SRLV); | |
827 } | |
828 | |
829 void subd(DRegister dd, DRegister ds, DRegister dt) { | |
830 FRegister fd = static_cast<FRegister>(dd * 2); | |
831 FRegister fs = static_cast<FRegister>(ds * 2); | |
832 FRegister ft = static_cast<FRegister>(dt * 2); | |
833 EmitFpuRType(COP1, FMT_D, ft, fs, fd, COP1_SUB); | |
834 } | |
835 | |
836 void subu(Register rd, Register rs, Register rt) { | |
837 EmitRType(SPECIAL, rs, rt, rd, 0, SUBU); | |
838 } | |
839 | |
840 void sw(Register rt, const Address& addr) { EmitLoadStore(SW, rt, addr); } | |
841 | |
842 void swc1(FRegister ft, const Address& addr) { | |
843 EmitFpuLoadStore(SWC1, ft, addr); | |
844 } | |
845 | |
846 void xori(Register rt, Register rs, const Immediate& imm) { | |
847 ASSERT(Utils::IsUint(kImmBits, imm.value())); | |
848 const uint16_t imm_value = static_cast<uint16_t>(imm.value()); | |
849 EmitIType(XORI, rs, rt, imm_value); | |
850 } | |
851 | |
852 void xor_(Register rd, Register rs, Register rt) { | |
853 EmitRType(SPECIAL, rs, rt, rd, 0, XOR); | |
854 } | |
855 | |
856 // Macros in alphabetical order. | |
857 | |
858 // Addition of rs and rt with the result placed in rd. | |
859 // After, ro < 0 if there was signed overflow, ro >= 0 otherwise. | |
860 // rd and ro must not be TMP. | |
861 // ro must be different from all the other registers. | |
862 // If rd, rs, and rt are the same register, then a scratch register different | |
863 // from the other registers is needed. | |
864 void AdduDetectOverflow(Register rd, | |
865 Register rs, | |
866 Register rt, | |
867 Register ro, | |
868 Register scratch = kNoRegister); | |
869 | |
870 // ro must be different from rd and rs. | |
871 // rd and ro must not be TMP. | |
872 // If rd and rs are the same, a scratch register different from the other | |
873 // registers is needed. | |
874 void AddImmediateDetectOverflow(Register rd, | |
875 Register rs, | |
876 int32_t imm, | |
877 Register ro, | |
878 Register scratch = kNoRegister) { | |
879 ASSERT(!in_delay_slot_); | |
880 LoadImmediate(rd, imm); | |
881 AdduDetectOverflow(rd, rs, rd, ro, scratch); | |
882 } | |
883 | |
884 // Subtraction of rt from rs (rs - rt) with the result placed in rd. | |
885 // After, ro < 0 if there was signed overflow, ro >= 0 otherwise. | |
886 // None of rd, rs, rt, or ro may be TMP. | |
887 // ro must be different from the other registers. | |
888 void SubuDetectOverflow(Register rd, Register rs, Register rt, Register ro); | |
889 | |
890 // ro must be different from rd and rs. | |
891 // None of rd, rs, rt, or ro may be TMP. | |
892 void SubImmediateDetectOverflow(Register rd, | |
893 Register rs, | |
894 int32_t imm, | |
895 Register ro) { | |
896 ASSERT(!in_delay_slot_); | |
897 LoadImmediate(rd, imm); | |
898 SubuDetectOverflow(rd, rs, rd, ro); | |
899 } | |
900 | |
901 void Branch(const StubEntry& stub_entry, Register pp = PP); | |
902 | |
903 void BranchLink(const StubEntry& stub_entry, | |
904 Patchability patchable = kNotPatchable); | |
905 | |
906 void BranchLinkPatchable(const StubEntry& stub_entry); | |
907 void BranchLinkToRuntime(); | |
908 | |
909 // Emit a call that shares its object pool entries with other calls | |
910 // that have the same equivalence marker. | |
911 void BranchLinkWithEquivalence(const StubEntry& stub_entry, | |
912 const Object& equivalence); | |
913 | |
914 void Drop(intptr_t stack_elements) { | |
915 ASSERT(stack_elements >= 0); | |
916 if (stack_elements > 0) { | |
917 addiu(SP, SP, Immediate(stack_elements * kWordSize)); | |
918 } | |
919 } | |
920 | |
  // Loads the object pool of the current code object (CODE_REG) into |reg|.
  // Constant pool use is only marked allowed when the pool lands in PP.
  void LoadPoolPointer(Register reg = PP) {
    ASSERT(!in_delay_slot_);
    CheckCodePointer();
    lw(reg, FieldAddress(CODE_REG, Code::object_pool_offset()));
    set_constant_pool_allowed(reg == PP);
  }
927 | |
928 void CheckCodePointer(); | |
929 | |
930 void RestoreCodePointer(); | |
931 | |
  // Loads the 32-bit constant |value| into |rd|.
  // Emits one addiu when the value fits a sign-extended 16-bit immediate,
  // otherwise a lui/ori pair (the ori is skipped when the low half is zero).
  void LoadImmediate(Register rd, int32_t value) {
    ASSERT(!in_delay_slot_);
    if (Utils::IsInt(kImmBits, value)) {
      addiu(rd, ZR, Immediate(value));
    } else {
      const uint16_t low = Utils::Low16Bits(value);
      const uint16_t high = Utils::High16Bits(value);
      lui(rd, Immediate(high));
      if (low != 0) {
        ori(rd, rd, Immediate(low));
      }
    }
  }
945 | |
946 void LoadImmediate(DRegister rd, double value) { | |
947 ASSERT(!in_delay_slot_); | |
948 FRegister frd = static_cast<FRegister>(rd * 2); | |
949 const int64_t ival = bit_cast<uint64_t, double>(value); | |
950 const int32_t low = Utils::Low32Bits(ival); | |
951 const int32_t high = Utils::High32Bits(ival); | |
952 if (low != 0) { | |
953 LoadImmediate(TMP, low); | |
954 mtc1(TMP, frd); | |
955 } else { | |
956 mtc1(ZR, frd); | |
957 } | |
958 | |
959 if (high != 0) { | |
960 LoadImmediate(TMP, high); | |
961 mtc1(TMP, static_cast<FRegister>(frd + 1)); | |
962 } else { | |
963 mtc1(ZR, static_cast<FRegister>(frd + 1)); | |
964 } | |
965 } | |
966 | |
967 void LoadImmediate(FRegister rd, float value) { | |
968 ASSERT(!in_delay_slot_); | |
969 const int32_t ival = bit_cast<int32_t, float>(value); | |
970 if (ival == 0) { | |
971 mtc1(ZR, rd); | |
972 } else { | |
973 LoadImmediate(TMP, ival); | |
974 mtc1(TMP, rd); | |
975 } | |
976 } | |
977 | |
  // rd <- rs + value. Emits a single addiu when the value fits a 16-bit
  // signed immediate; otherwise loads the value through TMP (clobbering it).
  // Emits nothing when value == 0 and rd == rs.
  void AddImmediate(Register rd, Register rs, int32_t value) {
    ASSERT(!in_delay_slot_);
    if ((value == 0) && (rd == rs)) return;
    // If value is 0, we still want to move rs to rd if they aren't the same.
    if (Utils::IsInt(kImmBits, value)) {
      addiu(rd, rs, Immediate(value));
    } else {
      LoadImmediate(TMP, value);
      addu(rd, rs, TMP);
    }
  }
989 | |
  // rd <- rd + value; see the three-operand overload for encoding details.
  void AddImmediate(Register rd, int32_t value) {
    ASSERT(!in_delay_slot_);
    AddImmediate(rd, rd, value);
  }
994 | |
  // rd <- rs & imm. A zero mask clears rd outright. andi zero-extends its
  // 16-bit immediate, hence the unsigned fit check; wider masks go through
  // TMP (clobbering it).
  void AndImmediate(Register rd, Register rs, int32_t imm) {
    ASSERT(!in_delay_slot_);
    if (imm == 0) {
      mov(rd, ZR);
      return;
    }

    if (Utils::IsUint(kImmBits, imm)) {
      andi(rd, rs, Immediate(imm));
    } else {
      LoadImmediate(TMP, imm);
      and_(rd, rs, TMP);
    }
  }
1009 | |
  // rd <- rs | imm. A zero immediate degenerates to a move. ori zero-extends
  // its 16-bit immediate, hence the unsigned fit check; wider values go
  // through TMP (clobbering it).
  void OrImmediate(Register rd, Register rs, int32_t imm) {
    ASSERT(!in_delay_slot_);
    if (imm == 0) {
      mov(rd, rs);
      return;
    }

    if (Utils::IsUint(kImmBits, imm)) {
      ori(rd, rs, Immediate(imm));
    } else {
      LoadImmediate(TMP, imm);
      or_(rd, rs, TMP);
    }
  }
1024 | |
  // rd <- rs ^ imm. A zero immediate degenerates to a move. xori zero-extends
  // its 16-bit immediate, hence the unsigned fit check; wider values go
  // through TMP (clobbering it).
  void XorImmediate(Register rd, Register rs, int32_t imm) {
    ASSERT(!in_delay_slot_);
    if (imm == 0) {
      mov(rd, rs);
      return;
    }

    if (Utils::IsUint(kImmBits, imm)) {
      xori(rd, rs, Immediate(imm));
    } else {
      LoadImmediate(TMP, imm);
      xor_(rd, rs, TMP);
    }
  }
1039 | |
  // Returns the register that should represent |operand| in a Condition:
  // ZR when operand is the Smi zero; the pseudo-register IMM (storing the raw
  // Smi bits into *imm, which must arrive zeroed) when the value fits a
  // condition immediate; otherwise loads the object into |rd| and returns rd.
  Register LoadConditionOperand(Register rd,
                                const Object& operand,
                                int16_t* imm) {
    if (operand.IsSmi()) {
      const int32_t val = reinterpret_cast<int32_t>(operand.raw());
      if (val == 0) {
        return ZR;
      } else if (Condition::IsValidImm(val)) {
        ASSERT(*imm == 0);
        *imm = val;
        return IMM;
      }
    }
    LoadObject(rd, operand);
    return rd;
  }
1056 | |
  // Branch to label l if condition cond is true.
  // NV emits nothing and AL emits an unconditional branch. An IMM operand is
  // materialized into AT, and AT also receives slt/sltu results, so AT is
  // clobbered on most paths. The signed immediate cases use the identity
  // x > imm  <=>  !(x < imm + 1) to map all four orderings onto slti.
  void BranchOnCondition(Condition cond, Label* l) {
    ASSERT(!in_delay_slot_);
    Register left = cond.left();
    Register right = cond.right();
    RelationOperator rel_op = cond.rel_op();
    switch (rel_op) {
      case NV:
        return;
      case AL:
        b(l);
        return;
      case EQ:  // fall through.
      case NE: {
        // Materialize an immediate operand into AT for the register-register
        // beq/bne forms.
        if (left == IMM) {
          addiu(AT, ZR, Immediate(cond.imm()));
          left = AT;
        } else if (right == IMM) {
          addiu(AT, ZR, Immediate(cond.imm()));
          right = AT;
        }
        if (rel_op == EQ) {
          beq(left, right, l);
        } else {
          bne(left, right, l);
        }
        break;
      }
      case GT: {
        if (left == ZR) {
          bltz(right, l);
        } else if (right == ZR) {
          bgtz(left, l);
        } else if (left == IMM) {
          // imm > right  <=>  right < imm.
          slti(AT, right, Immediate(cond.imm()));
          bne(AT, ZR, l);
        } else if (right == IMM) {
          // left > imm  <=>  !(left < imm + 1).
          slti(AT, left, Immediate(cond.imm() + 1));
          beq(AT, ZR, l);
        } else {
          slt(AT, right, left);
          bne(AT, ZR, l);
        }
        break;
      }
      case GE: {
        if (left == ZR) {
          blez(right, l);
        } else if (right == ZR) {
          bgez(left, l);
        } else if (left == IMM) {
          // imm >= right  <=>  right < imm + 1.
          slti(AT, right, Immediate(cond.imm() + 1));
          bne(AT, ZR, l);
        } else if (right == IMM) {
          // left >= imm  <=>  !(left < imm).
          slti(AT, left, Immediate(cond.imm()));
          beq(AT, ZR, l);
        } else {
          slt(AT, left, right);
          beq(AT, ZR, l);
        }
        break;
      }
      case LT: {
        if (left == ZR) {
          bgtz(right, l);
        } else if (right == ZR) {
          bltz(left, l);
        } else if (left == IMM) {
          // imm < right  <=>  !(right < imm + 1).
          slti(AT, right, Immediate(cond.imm() + 1));
          beq(AT, ZR, l);
        } else if (right == IMM) {
          slti(AT, left, Immediate(cond.imm()));
          bne(AT, ZR, l);
        } else {
          slt(AT, left, right);
          bne(AT, ZR, l);
        }
        break;
      }
      case LE: {
        if (left == ZR) {
          bgez(right, l);
        } else if (right == ZR) {
          blez(left, l);
        } else if (left == IMM) {
          // imm <= right  <=>  !(right < imm).
          slti(AT, right, Immediate(cond.imm()));
          beq(AT, ZR, l);
        } else if (right == IMM) {
          // left <= imm  <=>  left < imm + 1.
          slti(AT, left, Immediate(cond.imm() + 1));
          bne(AT, ZR, l);
        } else {
          slt(AT, right, left);
          beq(AT, ZR, l);
        }
        break;
      }
      case UGT: {
        ASSERT((left != IMM) && (right != IMM));  // No unsigned constants used.
        if (left == ZR) {
          // NV: Never branch. Fall through.
        } else if (right == ZR) {
          bne(left, ZR, l);
        } else {
          sltu(AT, right, left);
          bne(AT, ZR, l);
        }
        break;
      }
      case UGE: {
        ASSERT((left != IMM) && (right != IMM));  // No unsigned constants used.
        if (left == ZR) {
          beq(right, ZR, l);
        } else if (right == ZR) {
          // AL: Always branch to l.
          beq(ZR, ZR, l);
        } else {
          sltu(AT, left, right);
          beq(AT, ZR, l);
        }
        break;
      }
      case ULT: {
        ASSERT((left != IMM) && (right != IMM));  // No unsigned constants used.
        if (left == ZR) {
          bne(right, ZR, l);
        } else if (right == ZR) {
          // NV: Never branch. Fall through.
        } else {
          sltu(AT, left, right);
          bne(AT, ZR, l);
        }
        break;
      }
      case ULE: {
        ASSERT((left != IMM) && (right != IMM));  // No unsigned constants used.
        if (left == ZR) {
          // AL: Always branch to l.
          beq(ZR, ZR, l);
        } else if (right == ZR) {
          beq(left, ZR, l);
        } else {
          sltu(AT, right, left);
          beq(AT, ZR, l);
        }
        break;
      }
      default:
        UNREACHABLE();
    }
  }
1207 | |
  // Branches to l if rd == rn.
  void BranchEqual(Register rd, Register rn, Label* l) { beq(rd, rn, l); }

  // Branches to l if rd equals the immediate. Non-zero immediates are
  // materialized in CMPRES2, so rd must not be CMPRES2 in that case.
  void BranchEqual(Register rd, const Immediate& imm, Label* l) {
    ASSERT(!in_delay_slot_);
    if (imm.value() == 0) {
      beq(rd, ZR, l);
    } else {
      ASSERT(rd != CMPRES2);
      LoadImmediate(CMPRES2, imm.value());
      beq(rd, CMPRES2, l);
    }
  }

  // Branches to l if rd holds |object|. Loads the object into CMPRES2, so rd
  // must not be CMPRES2.
  void BranchEqual(Register rd, const Object& object, Label* l) {
    ASSERT(!in_delay_slot_);
    ASSERT(rd != CMPRES2);
    LoadObject(CMPRES2, object);
    beq(rd, CMPRES2, l);
  }
1227 | |
  // Branches to l if rd != rn.
  void BranchNotEqual(Register rd, Register rn, Label* l) { bne(rd, rn, l); }

  // Branches to l if rd differs from the immediate. Non-zero immediates are
  // materialized in CMPRES2, so rd must not be CMPRES2 in that case.
  void BranchNotEqual(Register rd, const Immediate& imm, Label* l) {
    ASSERT(!in_delay_slot_);
    if (imm.value() == 0) {
      bne(rd, ZR, l);
    } else {
      ASSERT(rd != CMPRES2);
      LoadImmediate(CMPRES2, imm.value());
      bne(rd, CMPRES2, l);
    }
  }

  // Branches to l if rd does not hold |object|. Loads the object into
  // CMPRES2, so rd must not be CMPRES2.
  void BranchNotEqual(Register rd, const Object& object, Label* l) {
    ASSERT(!in_delay_slot_);
    ASSERT(rd != CMPRES2);
    LoadObject(CMPRES2, object);
    bne(rd, CMPRES2, l);
  }
1247 | |
  // Branches to l if rd > rs (signed). Clobbers CMPRES2.
  void BranchSignedGreater(Register rd, Register rs, Label* l) {
    ASSERT(!in_delay_slot_);
    slt(CMPRES2, rs, rd);  // CMPRES2 = rd > rs ? 1 : 0.
    bne(CMPRES2, ZR, l);
  }

  // Branches to l if rd > imm (signed). Uses rd > imm  <=>  !(rd < imm + 1)
  // when imm + 1 fits the slti field; otherwise materializes imm in CMPRES2
  // (rd must not be CMPRES2 on that path).
  void BranchSignedGreater(Register rd, const Immediate& imm, Label* l) {
    ASSERT(!in_delay_slot_);
    if (imm.value() == 0) {
      bgtz(rd, l);
    } else {
      if (Utils::IsInt(kImmBits, imm.value() + 1)) {
        slti(CMPRES2, rd, Immediate(imm.value() + 1));
        beq(CMPRES2, ZR, l);
      } else {
        ASSERT(rd != CMPRES2);
        LoadImmediate(CMPRES2, imm.value());
        BranchSignedGreater(rd, CMPRES2, l);
      }
    }
  }
1269 | |
  // Branches to l if rd > rs (unsigned). Clobbers CMPRES2.
  void BranchUnsignedGreater(Register rd, Register rs, Label* l) {
    ASSERT(!in_delay_slot_);
    sltu(CMPRES2, rs, rd);
    bne(CMPRES2, ZR, l);
  }

  // Branches to l if rd > imm (unsigned), via rd > imm  <=>  !(rd < imm + 1).
  // imm == -1 (all ones, unsigned max) cannot use the + 1 form and falls back
  // to a register compare; rd must not be CMPRES2 on the fallback path.
  void BranchUnsignedGreater(Register rd, const Immediate& imm, Label* l) {
    ASSERT(!in_delay_slot_);
    if (imm.value() == 0) {
      BranchNotEqual(rd, Immediate(0), l);
    } else {
      if ((imm.value() != -1) && Utils::IsInt(kImmBits, imm.value() + 1)) {
        sltiu(CMPRES2, rd, Immediate(imm.value() + 1));
        beq(CMPRES2, ZR, l);
      } else {
        ASSERT(rd != CMPRES2);
        LoadImmediate(CMPRES2, imm.value());
        BranchUnsignedGreater(rd, CMPRES2, l);
      }
    }
  }
1291 | |
  // Branches to l if rd >= rs (signed). Clobbers CMPRES2.
  void BranchSignedGreaterEqual(Register rd, Register rs, Label* l) {
    ASSERT(!in_delay_slot_);
    slt(CMPRES2, rd, rs);  // CMPRES2 = rd < rs ? 1 : 0.
    beq(CMPRES2, ZR, l);  // If CMPRES2 = 0, then rd >= rs.
  }

  // Branches to l if rd >= imm (signed): rd >= imm  <=>  !(rd < imm).
  // rd must not be CMPRES2 when imm does not fit the 16-bit slti field.
  void BranchSignedGreaterEqual(Register rd, const Immediate& imm, Label* l) {
    ASSERT(!in_delay_slot_);
    if (imm.value() == 0) {
      bgez(rd, l);
    } else {
      if (Utils::IsInt(kImmBits, imm.value())) {
        slti(CMPRES2, rd, imm);
        beq(CMPRES2, ZR, l);
      } else {
        ASSERT(rd != CMPRES2);
        LoadImmediate(CMPRES2, imm.value());
        BranchSignedGreaterEqual(rd, CMPRES2, l);
      }
    }
  }
1313 | |
  // Branches to l if rd >= rs (unsigned). Clobbers CMPRES2.
  void BranchUnsignedGreaterEqual(Register rd, Register rs, Label* l) {
    ASSERT(!in_delay_slot_);
    sltu(CMPRES2, rd, rs);  // CMPRES2 = rd < rs ? 1 : 0.
    beq(CMPRES2, ZR, l);
  }

  // Branches to l if rd >= imm (unsigned). Every value is >= 0, so a zero
  // immediate branches unconditionally. rd must not be CMPRES2 when imm does
  // not fit the 16-bit field.
  void BranchUnsignedGreaterEqual(Register rd, const Immediate& imm, Label* l) {
    ASSERT(!in_delay_slot_);
    if (imm.value() == 0) {
      b(l);
    } else {
      if (Utils::IsInt(kImmBits, imm.value())) {
        sltiu(CMPRES2, rd, imm);
        beq(CMPRES2, ZR, l);
      } else {
        ASSERT(rd != CMPRES2);
        LoadImmediate(CMPRES2, imm.value());
        BranchUnsignedGreaterEqual(rd, CMPRES2, l);
      }
    }
  }
1335 | |
  // Branches to l if rd < rs (signed); delegates to BranchSignedGreater with
  // the operands swapped. Clobbers CMPRES2.
  void BranchSignedLess(Register rd, Register rs, Label* l) {
    ASSERT(!in_delay_slot_);
    BranchSignedGreater(rs, rd, l);
  }

  // Branches to l if rd < imm (signed). rd must not be CMPRES2 when imm does
  // not fit the 16-bit slti field.
  void BranchSignedLess(Register rd, const Immediate& imm, Label* l) {
    ASSERT(!in_delay_slot_);
    if (imm.value() == 0) {
      bltz(rd, l);
    } else {
      if (Utils::IsInt(kImmBits, imm.value())) {
        slti(CMPRES2, rd, imm);
        bne(CMPRES2, ZR, l);
      } else {
        ASSERT(rd != CMPRES2);
        LoadImmediate(CMPRES2, imm.value());
        BranchSignedGreater(CMPRES2, rd, l);
      }
    }
  }
1356 | |
  // Branches to l if rd < rs (unsigned); delegates to BranchUnsignedGreater
  // with the operands swapped. Clobbers CMPRES2.
  void BranchUnsignedLess(Register rd, Register rs, Label* l) {
    ASSERT(!in_delay_slot_);
    BranchUnsignedGreater(rs, rd, l);
  }

  // Branches to l if rd < imm (unsigned). Nothing is unsigned-less than
  // zero, so a zero immediate emits no code. rd must not be CMPRES2 when imm
  // does not fit the 16-bit field.
  void BranchUnsignedLess(Register rd, const Immediate& imm, Label* l) {
    ASSERT(!in_delay_slot_);
    if (imm.value() == 0) {
      // Never branch. Fall through.
    } else {
      if (Utils::IsInt(kImmBits, imm.value())) {
        sltiu(CMPRES2, rd, imm);
        bne(CMPRES2, ZR, l);
      } else {
        ASSERT(rd != CMPRES2);
        LoadImmediate(CMPRES2, imm.value());
        BranchUnsignedGreater(CMPRES2, rd, l);
      }
    }
  }
1377 | |
  // Branches to l if rd <= rs (signed); delegates to BranchSignedGreaterEqual
  // with the operands swapped. Clobbers CMPRES2.
  void BranchSignedLessEqual(Register rd, Register rs, Label* l) {
    ASSERT(!in_delay_slot_);
    BranchSignedGreaterEqual(rs, rd, l);
  }

  // Branches to l if rd <= imm (signed), via rd <= imm  <=>  rd < imm + 1
  // when imm + 1 fits the slti field; otherwise falls back to a register
  // compare (rd must not be CMPRES2 on that path).
  void BranchSignedLessEqual(Register rd, const Immediate& imm, Label* l) {
    ASSERT(!in_delay_slot_);
    if (imm.value() == 0) {
      blez(rd, l);
    } else {
      if (Utils::IsInt(kImmBits, imm.value() + 1)) {
        slti(CMPRES2, rd, Immediate(imm.value() + 1));
        bne(CMPRES2, ZR, l);
      } else {
        ASSERT(rd != CMPRES2);
        LoadImmediate(CMPRES2, imm.value());
        BranchSignedGreaterEqual(CMPRES2, rd, l);
      }
    }
  }
1398 | |
  // Branches to l if rd <= rs (unsigned); delegates to
  // BranchUnsignedGreaterEqual with the operands swapped. Clobbers CMPRES2.
  void BranchUnsignedLessEqual(Register rd, Register rs, Label* l) {
    ASSERT(!in_delay_slot_);
    BranchUnsignedGreaterEqual(rs, rd, l);
  }

  // Branches to l if rd <= imm (unsigned), via rd <= imm  <=>  rd < imm + 1.
  // imm == -1 (unsigned max) cannot use the + 1 form and falls back to a
  // register compare; rd must not be CMPRES2 on that path.
  void BranchUnsignedLessEqual(Register rd, const Immediate& imm, Label* l) {
    ASSERT(!in_delay_slot_);
    if (imm.value() == 0) {
      beq(rd, ZR, l);
    } else {
      if ((imm.value() != -1) && Utils::IsInt(kImmBits, imm.value() + 1)) {
        sltiu(CMPRES2, rd, Immediate(imm.value() + 1));
        bne(CMPRES2, ZR, l);
      } else {
        ASSERT(rd != CMPRES2);
        LoadImmediate(CMPRES2, imm.value());
        BranchUnsignedGreaterEqual(CMPRES2, rd, l);
      }
    }
  }
1419 | |
  // Pushes rt onto the stack (pre-decrements SP by one word).
  void Push(Register rt) {
    ASSERT(!in_delay_slot_);
    addiu(SP, SP, Immediate(-kWordSize));
    sw(rt, Address(SP));
  }

  // Pops the top stack word into rt (post-increments SP by one word).
  void Pop(Register rt) {
    ASSERT(!in_delay_slot_);
    lw(rt, Address(SP));
    addiu(SP, SP, Immediate(kWordSize));
  }
1431 | |
  // Returns by jumping through the return-address register.
  void Ret() { jr(RA); }

  // Tags the value as a Smi in place (left shift by the Smi tag size).
  void SmiTag(Register reg) { sll(reg, reg, kSmiTagSize); }

  void SmiTag(Register dst, Register src) { sll(dst, src, kSmiTagSize); }

  // Removes the Smi tag with an arithmetic right shift (preserves the sign).
  void SmiUntag(Register reg) { sra(reg, reg, kSmiTagSize); }

  void SmiUntag(Register dst, Register src) { sra(dst, src, kSmiTagSize); }
1441 | |
  // Branches to label if reg's Smi tag bits are non-zero (i.e. reg is not a
  // Smi). Clobbers CMPRES1.
  void BranchIfNotSmi(Register reg, Label* label) {
    andi(CMPRES1, reg, Immediate(kSmiTagMask));
    bne(CMPRES1, ZR, label);
  }

  // Branches to label if reg is a Smi. Clobbers CMPRES1.
  void BranchIfSmi(Register reg, Label* label) {
    andi(CMPRES1, reg, Immediate(kSmiTagMask));
    beq(CMPRES1, ZR, label);
  }
1451 | |
  // Loads a word from base + offset into reg. Offsets that do not fit the
  // 16-bit immediate field are added into TMP first (clobbering TMP).
  void LoadFromOffset(Register reg, Register base, int32_t offset) {
    ASSERT(!in_delay_slot_);
    if (Utils::IsInt(kImmBits, offset)) {
      lw(reg, Address(base, offset));
    } else {
      LoadImmediate(TMP, offset);
      addu(TMP, base, TMP);
      lw(reg, Address(TMP, 0));
    }
  }

  // As LoadFromOffset, but offset is relative to a tagged object pointer.
  void LoadFieldFromOffset(Register reg, Register base, int32_t offset) {
    LoadFromOffset(reg, base, offset - kHeapObjectTag);
  }

  // Stores reg to base + offset; same large-offset handling as
  // LoadFromOffset (clobbers TMP for out-of-range offsets).
  void StoreToOffset(Register reg, Register base, int32_t offset) {
    ASSERT(!in_delay_slot_);
    if (Utils::IsInt(kImmBits, offset)) {
      sw(reg, Address(base, offset));
    } else {
      LoadImmediate(TMP, offset);
      addu(TMP, base, TMP);
      sw(reg, Address(TMP, 0));
    }
  }

  // As StoreToOffset, but offset is relative to a tagged object pointer.
  void StoreFieldToOffset(Register reg, Register base, int32_t offset) {
    StoreToOffset(reg, base, offset - kHeapObjectTag);
  }
1481 | |
1482 | |
  // Stores double register reg as two single-precision halves at
  // base + offset (low word first).
  // NOTE(review): unlike StoreToOffset there is no large-offset fallback —
  // offset and offset + kWordSize appear to be assumed to fit the 16-bit
  // immediate field; confirm callers guarantee this.
  void StoreDToOffset(DRegister reg, Register base, int32_t offset) {
    ASSERT(!in_delay_slot_);
    FRegister lo = static_cast<FRegister>(reg * 2);
    FRegister hi = static_cast<FRegister>(reg * 2 + 1);
    swc1(lo, Address(base, offset));
    swc1(hi, Address(base, offset + kWordSize));
  }

  // Loads double register reg from two single-precision halves at
  // base + offset (low word first). Same offset-range note as StoreDToOffset.
  void LoadDFromOffset(DRegister reg, Register base, int32_t offset) {
    ASSERT(!in_delay_slot_);
    FRegister lo = static_cast<FRegister>(reg * 2);
    FRegister hi = static_cast<FRegister>(reg * 2 + 1);
    lwc1(lo, Address(base, offset));
    lwc1(hi, Address(base, offset + kWordSize));
  }
1498 | |
1499 // dest gets the address of the following instruction. If temp is given, | |
1500 // RA is preserved using it as a temporary. | |
1501 void GetNextPC(Register dest, Register temp = kNoRegister); | |
1502 | |
1503 void ReserveAlignedFrameSpace(intptr_t frame_space); | |
1504 | |
1505 // Create a frame for calling into runtime that preserves all volatile | |
1506 // registers. Frame's SP is guaranteed to be correctly aligned and | |
1507 // frame_space bytes are reserved under it. | |
1508 void EnterCallRuntimeFrame(intptr_t frame_space); | |
1509 void LeaveCallRuntimeFrame(); | |
1510 | |
1511 void LoadObject(Register rd, const Object& object); | |
1512 void LoadUniqueObject(Register rd, const Object& object); | |
1513 void LoadFunctionFromCalleePool(Register dst, | |
1514 const Function& function, | |
1515 Register new_pp); | |
1516 void LoadNativeEntry(Register rd, | |
1517 const ExternalLabel* label, | |
1518 Patchability patchable); | |
1519 void PushObject(const Object& object); | |
1520 | |
1521 void LoadIsolate(Register result); | |
1522 | |
1523 void LoadClassId(Register result, Register object); | |
1524 void LoadClassById(Register result, Register class_id); | |
1525 void LoadClass(Register result, Register object); | |
1526 void LoadClassIdMayBeSmi(Register result, Register object); | |
1527 void LoadTaggedClassIdMayBeSmi(Register result, Register object); | |
1528 | |
1529 void StoreIntoObject(Register object, // Object we are storing into. | |
1530 const Address& dest, // Where we are storing into. | |
1531 Register value, // Value we are storing. | |
1532 bool can_value_be_smi = true); | |
1533 void StoreIntoObjectOffset(Register object, | |
1534 int32_t offset, | |
1535 Register value, | |
1536 bool can_value_be_smi = true); | |
1537 | |
1538 void StoreIntoObjectNoBarrier(Register object, | |
1539 const Address& dest, | |
1540 Register value); | |
1541 void StoreIntoObjectNoBarrierOffset(Register object, | |
1542 int32_t offset, | |
1543 Register value); | |
1544 void StoreIntoObjectNoBarrier(Register object, | |
1545 const Address& dest, | |
1546 const Object& value); | |
1547 void StoreIntoObjectNoBarrierOffset(Register object, | |
1548 int32_t offset, | |
1549 const Object& value); | |
1550 | |
1551 void CallRuntime(const RuntimeEntry& entry, intptr_t argument_count); | |
1552 | |
1553 // Set up a Dart frame on entry with a frame pointer and PC information to | |
1554 // enable easy access to the RawInstruction object of code corresponding | |
1555 // to this frame. | |
1556 void EnterDartFrame(intptr_t frame_size); | |
1557 void LeaveDartFrame(RestorePP restore_pp = kRestoreCallerPP); | |
1558 void LeaveDartFrameAndReturn(Register ra = RA); | |
1559 | |
1560 // Set up a Dart frame for a function compiled for on-stack replacement. | |
1561 // The frame layout is a normal Dart frame, but the frame is partially set | |
1562 // up on entry (it is the frame of the unoptimized code). | |
1563 void EnterOsrFrame(intptr_t extra_size); | |
1564 | |
1565 Address ElementAddressForIntIndex(bool is_external, | |
1566 intptr_t cid, | |
1567 intptr_t index_scale, | |
1568 Register array, | |
1569 intptr_t index) const; | |
1570 void LoadElementAddressForIntIndex(Register address, | |
1571 bool is_external, | |
1572 intptr_t cid, | |
1573 intptr_t index_scale, | |
1574 Register array, | |
1575 intptr_t index); | |
1576 Address ElementAddressForRegIndex(bool is_load, | |
1577 bool is_external, | |
1578 intptr_t cid, | |
1579 intptr_t index_scale, | |
1580 Register array, | |
1581 Register index); | |
1582 void LoadElementAddressForRegIndex(Register address, | |
1583 bool is_load, | |
1584 bool is_external, | |
1585 intptr_t cid, | |
1586 intptr_t index_scale, | |
1587 Register array, | |
1588 Register index); | |
1589 | |
1590 void LoadHalfWordUnaligned(Register dst, Register addr, Register tmp); | |
1591 void LoadHalfWordUnsignedUnaligned(Register dst, Register addr, Register tmp); | |
1592 void StoreHalfWordUnaligned(Register src, Register addr, Register tmp); | |
1593 void LoadWordUnaligned(Register dst, Register addr, Register tmp); | |
1594 void StoreWordUnaligned(Register src, Register addr, Register tmp); | |
1595 | |
  // Address of the VM tag field in the current thread.
  static Address VMTagAddress() {
    return Address(THR, Thread::vm_tag_offset());
  }

  // On some other platforms, we draw a distinction between safe and unsafe
  // smis; on MIPS every object is "safe" and every Smi is a safe Smi.
  static bool IsSafe(const Object& object) { return true; }
  static bool IsSafeSmi(const Object& object) { return object.IsSmi(); }

  // Whether loads through the constant pool (PP) are currently permitted.
  bool constant_pool_allowed() const { return constant_pool_allowed_; }
  void set_constant_pool_allowed(bool b) { constant_pool_allowed_ = b; }
1607 | |
1608 private: | |
1609 AssemblerBuffer buffer_; | |
1610 ObjectPoolWrapper object_pool_wrapper_; | |
1611 | |
1612 intptr_t prologue_offset_; | |
1613 bool has_single_entry_point_; | |
1614 bool use_far_branches_; | |
1615 bool delay_slot_available_; | |
1616 bool in_delay_slot_; | |
1617 | |
  // A comment string associated with a particular pc offset in the emitted
  // code; the comment reference is stored, not copied, so it must outlive
  // this object (both are zone-allocated).
  class CodeComment : public ZoneAllocated {
   public:
    CodeComment(intptr_t pc_offset, const String& comment)
        : pc_offset_(pc_offset), comment_(comment) {}

    intptr_t pc_offset() const { return pc_offset_; }
    const String& comment() const { return comment_; }

   private:
    intptr_t pc_offset_;
    const String& comment_;

    DISALLOW_COPY_AND_ASSIGN(CodeComment);
  };
1632 | |
1633 GrowableArray<CodeComment*> comments_; | |
1634 | |
1635 bool constant_pool_allowed_; | |
1636 | |
1637 void BranchLink(const ExternalLabel* label); | |
1638 void BranchLink(const Code& code, Patchability patchable); | |
1639 | |
1640 bool CanLoadFromObjectPool(const Object& object) const; | |
1641 | |
1642 void LoadWordFromPoolOffset(Register rd, int32_t offset, Register pp = PP); | |
1643 void LoadObjectHelper(Register rd, const Object& object, bool is_unique); | |
1644 | |
  // Appends one raw 32-bit instruction word to the buffer.
  void Emit(int32_t value) {
    // Emitting an instruction clears the delay slot state.
    in_delay_slot_ = false;
    delay_slot_available_ = false;
    AssemblerBuffer::EnsureCapacity ensured(&buffer_);
    buffer_.Emit<int32_t>(value);
  }
1652 | |
  // Encode CPU instructions according to the types specified in
  // Figures 4-1, 4-2 and 4-3 in VolI-A.

  // I-type: opcode | rs | rt | 16-bit immediate.
  void EmitIType(Opcode opcode, Register rs, Register rt, uint16_t imm) {
    Emit(opcode << kOpcodeShift | rs << kRsShift | rt << kRtShift | imm);
  }

  // I-type load/store: base register and offset come pre-encoded in addr.
  void EmitLoadStore(Opcode opcode, Register rt, const Address& addr) {
    Emit(opcode << kOpcodeShift | rt << kRtShift | addr.encoding());
  }

  // FPU load/store: as EmitLoadStore, but the target is an FPU register.
  void EmitFpuLoadStore(Opcode opcode, FRegister ft, const Address& addr) {
    Emit(opcode << kOpcodeShift | ft << kFtShift | addr.encoding());
  }

  // REGIMM encoding: the rt field carries the branch-kind code.
  void EmitRegImmType(Opcode opcode, Register rs, RtRegImm code, uint16_t imm) {
    Emit(opcode << kOpcodeShift | rs << kRsShift | code << kRtShift | imm);
  }

  // J-type (26-bit jump target): not supported by this assembler.
  void EmitJType(Opcode opcode, uint32_t destination) { UNIMPLEMENTED(); }

  // R-type: opcode | rs | rt | rd | 5-bit shift amount | function.
  void EmitRType(Opcode opcode,
                 Register rs,
                 Register rt,
                 Register rd,
                 int sa,
                 SpecialFunction func) {
    ASSERT(Utils::IsUint(5, sa));
    Emit(opcode << kOpcodeShift | rs << kRsShift | rt << kRtShift |
         rd << kRdShift | sa << kSaShift | func << kFunctionShift);
  }

  // FPU R-type: opcode | format | ft | fs | fd | cop1 function.
  void EmitFpuRType(Opcode opcode,
                    Format fmt,
                    FRegister ft,
                    FRegister fs,
                    FRegister fd,
                    Cop1Function func) {
    Emit(opcode << kOpcodeShift | fmt << kFmtShift | ft << kFtShift |
         fs << kFsShift | fd << kFdShift | func << kCop1FnShift);
  }
1693 | |
1694 int32_t EncodeBranchOffset(int32_t offset, int32_t instr); | |
1695 | |
1696 void EmitFarJump(int32_t offset, bool link); | |
1697 void EmitFarBranch(Opcode b, Register rs, Register rt, int32_t offset); | |
1698 void EmitFarRegImmBranch(RtRegImm b, Register rs, int32_t offset); | |
1699 void EmitFarFpuBranch(bool kind, int32_t offset); | |
1700 void EmitBranch(Opcode b, Register rs, Register rt, Label* label); | |
1701 void EmitRegImmBranch(RtRegImm b, Register rs, Label* label); | |
1702 void EmitFpuBranch(bool kind, Label* label); | |
1703 | |
  // Fills the branch delay slot with a nop and records that the slot is
  // available for later replacement by a useful instruction (Emit itself
  // clears delay_slot_available_, so it is set after the emit).
  void EmitBranchDelayNop() {
    Emit(Instr::kNopInstruction);  // Branch delay NOP.
    delay_slot_available_ = true;
  }
1708 | |
1709 void StoreIntoObjectFilter(Register object, Register value, Label* no_update); | |
1710 | |
1711 // Shorter filtering sequence that assumes that value is not a smi. | |
1712 void StoreIntoObjectFilterNoSmi(Register object, | |
1713 Register value, | |
1714 Label* no_update); | |
1715 | |
1716 DISALLOW_ALLOCATION(); | |
1717 DISALLOW_COPY_AND_ASSIGN(Assembler); | |
1718 }; | |
1719 | |
1720 } // namespace dart | |
1721 | |
1722 #endif // RUNTIME_VM_ASSEMBLER_MIPS_H_ | |
OLD | NEW |