OLD | NEW |
| (Empty) |
1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file | |
2 // for details. All rights reserved. Use of this source code is governed by a | |
3 // BSD-style license that can be found in the LICENSE file. | |
4 | |
5 #include "vm/globals.h" // NOLINT | |
6 #if defined(TARGET_ARCH_MIPS) | |
7 | |
8 #include "vm/assembler.h" | |
9 #include "vm/longjump.h" | |
10 #include "vm/runtime_entry.h" | |
11 #include "vm/simulator.h" | |
12 #include "vm/stack_frame.h" | |
13 #include "vm/stub_code.h" | |
14 | |
15 namespace dart { | |
16 | |
17 DECLARE_FLAG(bool, check_code_pointer); | |
18 DECLARE_FLAG(bool, inline_alloc); | |
19 #if defined(USING_SIMULATOR) | |
20 DECLARE_FLAG(int, trace_sim_after); | |
21 #endif | |
22 | |
23 void Assembler::InitializeMemoryWithBreakpoints(uword data, intptr_t length) { | |
24 ASSERT(Utils::IsAligned(data, 4)); | |
25 ASSERT(Utils::IsAligned(length, 4)); | |
26 const uword end = data + length; | |
27 while (data < end) { | |
28 *reinterpret_cast<int32_t*>(data) = Instr::kBreakPointInstruction; | |
29 data += 4; | |
30 } | |
31 } | |
32 | |
33 | |
// Loads the address of the instruction following the emitted sequence into
// 'dest'. Uses a branch-and-link to the very next instruction, whose only
// effect is to set RA. RA is clobbered unless 'temp' is provided, in which
// case it is saved and restored around the trick.
void Assembler::GetNextPC(Register dest, Register temp) {
  if (temp != kNoRegister) {
    mov(temp, RA);  // Preserve the caller's return address.
  }
  // BGEZAL with rs == R0 always links: RA := address after the delay slot.
  EmitRegImmType(REGIMM, R0, BGEZAL, 1);
  mov(dest, RA);
  if (temp != kNoRegister) {
    mov(RA, temp);  // Restore the caller's return address.
  }
}
44 | |
45 | |
46 static bool CanEncodeBranchOffset(int32_t offset) { | |
47 ASSERT(Utils::IsAligned(offset, 4)); | |
48 return Utils::IsInt(18, offset); | |
49 } | |
50 | |
51 | |
// Patches the 16-bit offset field of branch instruction 'instr' with the
// (word-aligned) byte offset 'offset' and returns the patched encoding.
// When the offset does not fit and far branches are not enabled, aborts the
// current compilation attempt via a long jump so it can be retried with far
// branches turned on.
int32_t Assembler::EncodeBranchOffset(int32_t offset, int32_t instr) {
  if (!CanEncodeBranchOffset(offset)) {
    ASSERT(!use_far_branches());
    Thread::Current()->long_jump_base()->Jump(1, Object::branch_offset_error());
  }

  // Properly preserve only the bits supported in the instruction.
  offset >>= 2;  // Branch offsets are encoded in instruction units.
  offset &= kBranchOffsetMask;
  return (instr & ~kBranchOffsetMask) | offset;
}
63 | |
64 | |
65 static intptr_t DecodeBranchOffset(int32_t instr) { | |
66 // Sign-extend, left-shift by 2. | |
67 return (((instr & kBranchOffsetMask) << 16) >> 14); | |
68 } | |
69 | |
70 | |
71 static int32_t DecodeLoadImmediate(int32_t ori_instr, int32_t lui_instr) { | |
72 return (((lui_instr & kBranchOffsetMask) << 16) | | |
73 (ori_instr & kBranchOffsetMask)); | |
74 } | |
75 | |
76 | |
77 static int32_t EncodeLoadImmediate(int32_t dest, int32_t instr) { | |
78 return ((instr & ~kBranchOffsetMask) | (dest & kBranchOffsetMask)); | |
79 } | |
80 | |
81 | |
// Assembler fixup that finalizes a far jump once the code's final address
// is known: the lui/ori pair at the fixup position holds a buffer-relative
// offset, which is rewritten here into an absolute target address.
class PatchFarJump : public AssemblerFixup {
 public:
  PatchFarJump() {}

  void Process(const MemoryRegion& region, intptr_t position) {
    // Read the two instructions that materialize the target offset.
    const int32_t high = region.Load<int32_t>(position);
    const int32_t low = region.Load<int32_t>(position + Instr::kInstrSize);
    const int32_t offset = DecodeLoadImmediate(low, high);
    const int32_t dest = region.start() + offset;

    if ((Instr::At(reinterpret_cast<uword>(&high))->OpcodeField() == LUI) &&
        (Instr::At(reinterpret_cast<uword>(&low))->OpcodeField() == ORI)) {
      // Change the offset to the absolute value.
      const int32_t encoded_low =
          EncodeLoadImmediate(dest & kBranchOffsetMask, low);
      const int32_t encoded_high = EncodeLoadImmediate(dest >> 16, high);

      region.Store<int32_t>(position, encoded_high);
      region.Store<int32_t>(position + Instr::kInstrSize, encoded_low);
      return;
    }
    // If the offset loading instructions aren't there, we must have replaced
    // the far branch with a near one, and so these instructions should be NOPs.
    ASSERT((high == Instr::kNopInstruction) && (low == Instr::kNopInstruction));
  }

  virtual bool IsPointerOffset() const { return false; }
};
110 | |
111 | |
// Emits an absolute jump (optionally linking) to a buffer-relative offset.
// lui/ori materialize the offset into T9; a PatchFarJump fixup rewrites it
// into an absolute address when the final code location is known.
void Assembler::EmitFarJump(int32_t offset, bool link) {
  ASSERT(!in_delay_slot_);
  ASSERT(use_far_branches());
  const uint16_t low = Utils::Low16Bits(offset);
  const uint16_t high = Utils::High16Bits(offset);
  buffer_.EmitFixup(new PatchFarJump());
  lui(T9, Immediate(high));
  ori(T9, T9, Immediate(low));
  if (link) {
    EmitRType(SPECIAL, T9, R0, RA, 0, JALR);  // Jump-and-link through T9.
  } else {
    EmitRType(SPECIAL, T9, R0, R0, 0, JR);  // Plain jump through T9.
  }
}
126 | |
127 | |
128 static Opcode OppositeBranchOpcode(Opcode b) { | |
129 switch (b) { | |
130 case BEQ: | |
131 return BNE; | |
132 case BNE: | |
133 return BEQ; | |
134 case BGTZ: | |
135 return BLEZ; | |
136 case BLEZ: | |
137 return BGTZ; | |
138 case BEQL: | |
139 return BNEL; | |
140 case BNEL: | |
141 return BEQL; | |
142 case BGTZL: | |
143 return BLEZL; | |
144 case BLEZL: | |
145 return BGTZL; | |
146 default: | |
147 UNREACHABLE(); | |
148 break; | |
149 } | |
150 return BNE; | |
151 } | |
152 | |
153 | |
// Far form of a two-register conditional branch: a short branch (offset 4)
// hops over the far-jump sequence, which then jumps to the real target.
// Callers that resolve bound labels pass the *opposite* condition so the
// hop is taken exactly when the original branch would not be.
void Assembler::EmitFarBranch(Opcode b,
                              Register rs,
                              Register rt,
                              int32_t offset) {
  ASSERT(!in_delay_slot_);
  EmitIType(b, rs, rt, 4);  // Skip over the far-jump sequence.
  nop();  // Branch delay slot.
  EmitFarJump(offset, false);
}
163 | |
164 | |
165 static RtRegImm OppositeBranchNoLink(RtRegImm b) { | |
166 switch (b) { | |
167 case BLTZ: | |
168 return BGEZ; | |
169 case BGEZ: | |
170 return BLTZ; | |
171 case BLTZAL: | |
172 return BGEZ; | |
173 case BGEZAL: | |
174 return BLTZ; | |
175 default: | |
176 UNREACHABLE(); | |
177 break; | |
178 } | |
179 return BLTZ; | |
180 } | |
181 | |
182 | |
// Far form of a REGIMM (compare-against-zero) branch; same hop-over-jump
// pattern as EmitFarBranch. Linking conditions produce a linking far jump.
void Assembler::EmitFarRegImmBranch(RtRegImm b, Register rs, int32_t offset) {
  ASSERT(!in_delay_slot_);
  EmitRegImmType(REGIMM, rs, b, 4);  // Skip over the far-jump sequence.
  nop();  // Branch delay slot.
  EmitFarJump(offset, (b == BLTZAL) || (b == BGEZAL));
}
189 | |
190 | |
// Far form of an FPU condition-flag branch; bit 16 selects branch-on-true
// vs branch-on-false. Same hop-over-jump pattern as EmitFarBranch.
void Assembler::EmitFarFpuBranch(bool kind, int32_t offset) {
  ASSERT(!in_delay_slot_);
  const uint32_t b16 = kind ? (1 << 16) : 0;
  Emit(COP1 << kOpcodeShift | COP1_BC << kCop1SubShift | b16 | 4);
  nop();  // Branch delay slot.
  EmitFarJump(offset, false);
}
198 | |
199 | |
// Emits a conditional branch to 'label'. A bound label becomes a near
// pc-relative branch when the offset fits, otherwise a flipped branch
// around a far jump. An unbound label is threaded onto the label's link
// chain; Bind() patches the real offsets in later.
void Assembler::EmitBranch(Opcode b, Register rs, Register rt, Label* label) {
  ASSERT(!in_delay_slot_);
  if (label->IsBound()) {
    // Relative destination from an instruction after the branch.
    const int32_t dest =
        label->Position() - (buffer_.Size() + Instr::kInstrSize);
    if (use_far_branches() && !CanEncodeBranchOffset(dest)) {
      // Invert the condition so the near branch skips the far jump.
      EmitFarBranch(OppositeBranchOpcode(b), rs, rt, label->Position());
    } else {
      const uint16_t dest_off = EncodeBranchOffset(dest, 0);
      EmitIType(b, rs, rt, dest_off);
    }
  } else {
    const intptr_t position = buffer_.Size();
    if (use_far_branches()) {
      // The previous link in the chain rides in the far jump's offset words.
      const uint32_t dest_off = label->position_;
      EmitFarBranch(b, rs, rt, dest_off);
    } else {
      // The previous link in the chain rides in the branch offset field.
      const uint16_t dest_off = EncodeBranchOffset(label->position_, 0);
      EmitIType(b, rs, rt, dest_off);
    }
    label->LinkTo(position);
  }
}
224 | |
225 | |
// Same scheme as EmitBranch, for REGIMM (compare-against-zero) branches:
// near branch when bound and in range, flipped branch around a far jump
// otherwise, link-chain threading for unbound labels.
void Assembler::EmitRegImmBranch(RtRegImm b, Register rs, Label* label) {
  ASSERT(!in_delay_slot_);
  if (label->IsBound()) {
    // Relative destination from an instruction after the branch.
    const int32_t dest =
        label->Position() - (buffer_.Size() + Instr::kInstrSize);
    if (use_far_branches() && !CanEncodeBranchOffset(dest)) {
      // Invert the condition so the near branch skips the far jump.
      EmitFarRegImmBranch(OppositeBranchNoLink(b), rs, label->Position());
    } else {
      const uint16_t dest_off = EncodeBranchOffset(dest, 0);
      EmitRegImmType(REGIMM, rs, b, dest_off);
    }
  } else {
    const intptr_t position = buffer_.Size();
    if (use_far_branches()) {
      // The previous link in the chain rides in the far jump's offset words.
      const uint32_t dest_off = label->position_;
      EmitFarRegImmBranch(b, rs, dest_off);
    } else {
      const uint16_t dest_off = EncodeBranchOffset(label->position_, 0);
      EmitRegImmType(REGIMM, rs, b, dest_off);
    }
    label->LinkTo(position);
  }
}
250 | |
251 | |
// Same scheme as EmitBranch, for FPU condition-flag branches (bc1t/bc1f).
void Assembler::EmitFpuBranch(bool kind, Label* label) {
  ASSERT(!in_delay_slot_);
  const int32_t b16 = kind ? (1 << 16) : 0;  // Bit 16 set for branch on true.
  if (label->IsBound()) {
    // Relative destination from an instruction after the branch.
    const int32_t dest =
        label->Position() - (buffer_.Size() + Instr::kInstrSize);
    if (use_far_branches() && !CanEncodeBranchOffset(dest)) {
      EmitFarFpuBranch(kind, label->Position());
    } else {
      const uint16_t dest_off = EncodeBranchOffset(dest, 0);
      Emit(COP1 << kOpcodeShift | COP1_BC << kCop1SubShift | b16 | dest_off);
    }
  } else {
    const intptr_t position = buffer_.Size();
    if (use_far_branches()) {
      // The previous link in the chain rides in the far jump's offset words.
      const uint32_t dest_off = label->position_;
      EmitFarFpuBranch(kind, dest_off);
    } else {
      // The previous link in the chain rides in the branch offset field.
      const uint16_t dest_off = EncodeBranchOffset(label->position_, 0);
      Emit(COP1 << kOpcodeShift | COP1_BC << kCop1SubShift | b16 | dest_off);
    }
    label->LinkTo(position);
  }
}
277 | |
278 | |
// Inverts the condition of an encoded branch instruction: REGIMM branches
// via OppositeBranchNoLink, FPU branches by toggling the true/false bit
// (bit 16), everything else via OppositeBranchOpcode.
static int32_t FlipBranchInstruction(int32_t instr) {
  // Decode in place from a stack copy of the instruction bits.
  Instr* i = Instr::At(reinterpret_cast<uword>(&instr));
  if (i->OpcodeField() == REGIMM) {
    RtRegImm b = OppositeBranchNoLink(i->RegImmFnField());
    i->SetRegImmFnField(b);
    return i->InstructionBits();
  } else if (i->OpcodeField() == COP1) {
    return instr ^ (1 << 16);  // Toggle branch-on-true <-> branch-on-false.
  }
  Opcode b = OppositeBranchOpcode(i->OpcodeField());
  i->SetOpcodeField(b);
  return i->InstructionBits();
}
292 | |
293 | |
// Binds 'label' to the current buffer position and walks its link chain,
// back-patching every branch that targeted the label. Under far branches,
// each site is either completed (flip the guard branch and patch the
// lui/ori offset pair) or, if the distance turned out to be short,
// rewritten into a plain near branch with the far sequence NOP'd out.
void Assembler::Bind(Label* label) {
  ASSERT(!label->IsBound());
  intptr_t bound_pc = buffer_.Size();

  while (label->IsLinked()) {
    int32_t position = label->Position();
    int32_t dest = bound_pc - (position + Instr::kInstrSize);

    if (use_far_branches() && !CanEncodeBranchOffset(dest)) {
      // Far branches are enabled and we can't encode the branch offset.

      // Grab the branch instruction. We'll need to flip it later.
      const int32_t branch = buffer_.Load<int32_t>(position);

      // Grab instructions that load the offset.
      const int32_t high =
          buffer_.Load<int32_t>(position + 2 * Instr::kInstrSize);
      const int32_t low =
          buffer_.Load<int32_t>(position + 3 * Instr::kInstrSize);

      // Change from relative to the branch to relative to the assembler buffer.
      dest = buffer_.Size();
      const int32_t encoded_low =
          EncodeLoadImmediate(dest & kBranchOffsetMask, low);
      const int32_t encoded_high = EncodeLoadImmediate(dest >> 16, high);

      // Skip the unconditional far jump if the test fails by flipping the
      // sense of the branch instruction.
      buffer_.Store<int32_t>(position, FlipBranchInstruction(branch));
      buffer_.Store<int32_t>(position + 2 * Instr::kInstrSize, encoded_high);
      buffer_.Store<int32_t>(position + 3 * Instr::kInstrSize, encoded_low);
      // The old offset words held the link to the next chain entry.
      label->position_ = DecodeLoadImmediate(low, high);
    } else if (use_far_branches() && CanEncodeBranchOffset(dest)) {
      // We assembled a far branch, but we don't need it. Replace with a near
      // branch.

      // Grab the link to the next branch.
      const int32_t high =
          buffer_.Load<int32_t>(position + 2 * Instr::kInstrSize);
      const int32_t low =
          buffer_.Load<int32_t>(position + 3 * Instr::kInstrSize);

      // Grab the original branch instruction.
      int32_t branch = buffer_.Load<int32_t>(position);

      // Clear out the old (far) branch.
      for (int i = 0; i < 5; i++) {
        buffer_.Store<int32_t>(position + i * Instr::kInstrSize,
                               Instr::kNopInstruction);
      }

      // Calculate the new offset.
      dest = dest - 4 * Instr::kInstrSize;  // Branch now sits 4 words later.
      const int32_t encoded = EncodeBranchOffset(dest, branch);
      buffer_.Store<int32_t>(position + 4 * Instr::kInstrSize, encoded);
      label->position_ = DecodeLoadImmediate(low, high);
    } else {
      // Near branch: the offset field held the link to the next chain entry.
      const int32_t next = buffer_.Load<int32_t>(position);
      const int32_t encoded = EncodeBranchOffset(dest, next);
      buffer_.Store<int32_t>(position, encoded);
      label->position_ = DecodeBranchOffset(next);
    }
  }
  label->BindTo(bound_pc);
  // Disallow delay-slot filling across a branch target.
  delay_slot_available_ = false;
}
360 | |
361 | |
// Loads the word at 'offset' from the object pool (base register 'pp')
// into rd, synthesizing the high half with lui/addu when the offset does
// not fit in a single lw displacement.
void Assembler::LoadWordFromPoolOffset(Register rd,
                                       int32_t offset,
                                       Register pp) {
  ASSERT((pp != PP) || constant_pool_allowed());
  ASSERT(!in_delay_slot_);
  ASSERT(rd != pp);
  if (Address::CanHoldOffset(offset)) {
    lw(rd, Address(pp, offset));
  } else {
    const int16_t offset_low = Utils::Low16Bits(offset);  // Signed.
    offset -= offset_low;  // Remainder is an exact multiple of 0x10000.
    const uint16_t offset_high = Utils::High16Bits(offset);  // Unsigned.
    if (offset_high != 0) {
      lui(rd, Immediate(offset_high));
      addu(rd, rd, pp);
      lw(rd, Address(rd, offset_low));
    } else {
      lw(rd, Address(pp, offset_low));
    }
  }
}
383 | |
384 | |
// rd := rs + rt, with ro's sign bit set iff the signed addition overflowed:
// overflow = (result ^ rs) & (result ^ rt). TMP is clobbered; 'scratch' is
// only required when rd, rs and rt are all the same register.
void Assembler::AdduDetectOverflow(Register rd,
                                   Register rs,
                                   Register rt,
                                   Register ro,
                                   Register scratch) {
  ASSERT(!in_delay_slot_);
  ASSERT(rd != ro);
  ASSERT(rd != TMP);
  ASSERT(ro != TMP);
  ASSERT(ro != rs);
  ASSERT(ro != rt);

  if ((rs == rt) && (rd == rs)) {
    // All three registers alias: copy one operand aside first.
    ASSERT(scratch != kNoRegister);
    ASSERT(scratch != TMP);
    ASSERT(rd != scratch);
    ASSERT(ro != scratch);
    ASSERT(rs != scratch);
    ASSERT(rt != scratch);
    mov(scratch, rt);
    rt = scratch;
  }

  if (rd == rs) {
    mov(TMP, rs);  // Preserve rs.
    addu(rd, rs, rt);  // rs is overwritten.
    xor_(TMP, rd, TMP);  // Original rs.
    xor_(ro, rd, rt);
    and_(ro, ro, TMP);
  } else if (rd == rt) {
    mov(TMP, rt);  // Preserve rt.
    addu(rd, rs, rt);  // rt is overwritten.
    xor_(TMP, rd, TMP);  // Original rt.
    xor_(ro, rd, rs);
    and_(ro, ro, TMP);
  } else {
    addu(rd, rs, rt);
    xor_(ro, rd, rs);
    xor_(TMP, rd, rt);
    and_(ro, TMP, ro);
  }
}
427 | |
428 | |
429 void Assembler::SubuDetectOverflow(Register rd, | |
430 Register rs, | |
431 Register rt, | |
432 Register ro) { | |
433 ASSERT(!in_delay_slot_); | |
434 ASSERT(rd != ro); | |
435 ASSERT(rd != TMP); | |
436 ASSERT(ro != TMP); | |
437 ASSERT(ro != rs); | |
438 ASSERT(ro != rt); | |
439 ASSERT(rs != TMP); | |
440 ASSERT(rt != TMP); | |
441 | |
442 // This happens with some crankshaft code. Since Subu works fine if | |
443 // left == right, let's not make that restriction here. | |
444 if (rs == rt) { | |
445 mov(rd, ZR); | |
446 mov(ro, ZR); | |
447 return; | |
448 } | |
449 | |
450 if (rd == rs) { | |
451 mov(TMP, rs); // Preserve left. | |
452 subu(rd, rs, rt); // Left is overwritten. | |
453 xor_(ro, rd, TMP); // scratch is original left. | |
454 xor_(TMP, TMP, rs); // scratch is original left. | |
455 and_(ro, TMP, ro); | |
456 } else if (rd == rt) { | |
457 mov(TMP, rt); // Preserve right. | |
458 subu(rd, rs, rt); // Right is overwritten. | |
459 xor_(ro, rd, rs); | |
460 xor_(TMP, rs, TMP); // Original right. | |
461 and_(ro, TMP, ro); | |
462 } else { | |
463 subu(rd, rs, rt); | |
464 xor_(ro, rd, rs); | |
465 xor_(TMP, rs, rt); | |
466 and_(ro, TMP, ro); | |
467 } | |
468 } | |
469 | |
470 | |
// Debug-only sanity check (gated on --check_code_pointer) that CODE_REG
// holds the Code object for the instructions currently executing; traps
// with break_ on a mismatch. No-op in release builds.
void Assembler::CheckCodePointer() {
#ifdef DEBUG
  if (!FLAG_check_code_pointer) {
    return;
  }
  Comment("CheckCodePointer");
  Label cid_ok, instructions_ok;
  Push(CMPRES1);
  Push(CMPRES2);
  // First check: CODE_REG actually holds a Code object.
  LoadClassId(CMPRES1, CODE_REG);
  BranchEqual(CMPRES1, Immediate(kCodeCid), &cid_ok);
  break_(0);
  Bind(&cid_ok);
  // Second check: compute this code's Instructions start from the current
  // PC and compare against the Code object's saved instructions pointer.
  GetNextPC(CMPRES1, TMP);
  const intptr_t entry_offset = CodeSize() - Instr::kInstrSize +
                                Instructions::HeaderSize() - kHeapObjectTag;
  AddImmediate(CMPRES1, CMPRES1, -entry_offset);
  lw(CMPRES2, FieldAddress(CODE_REG, Code::saved_instructions_offset()));
  BranchEqual(CMPRES1, CMPRES2, &instructions_ok);
  break_(1);
  Bind(&instructions_ok);
  Pop(CMPRES2);
  Pop(CMPRES1);
#endif
}
496 | |
497 | |
// Reloads CODE_REG from the caller frame's PC-marker slot and (in debug
// mode) verifies it matches the running code.
void Assembler::RestoreCodePointer() {
  lw(CODE_REG, Address(FP, kPcMarkerSlotFromFp * kWordSize));
  CheckCodePointer();
}
502 | |
503 | |
// Tail-jumps to 'stub_entry': loads its Code object from the object pool
// (added as a patchable entry) into CODE_REG and jumps to its entry point.
void Assembler::Branch(const StubEntry& stub_entry, Register pp) {
  ASSERT(!in_delay_slot_);
  const Code& target_code = Code::ZoneHandle(stub_entry.code());
  const int32_t offset = ObjectPool::element_offset(
      object_pool_wrapper_.AddObject(target_code, kPatchable));
  LoadWordFromPoolOffset(CODE_REG, offset - kHeapObjectTag, pp);
  lw(TMP, FieldAddress(CODE_REG, Code::entry_point_offset()));
  jr(TMP);
}
513 | |
514 | |
// Calls the absolute address in 'label' via T9 (the conventional MIPS
// call-target register).
void Assembler::BranchLink(const ExternalLabel* label) {
  ASSERT(!in_delay_slot_);
  LoadImmediate(T9, label->address());
  jalr(T9);
}
520 | |
521 | |
// Calls 'target' through the object pool: loads its Code object into
// CODE_REG and jumps-and-links to its entry point via T9.
void Assembler::BranchLink(const Code& target, Patchability patchable) {
  ASSERT(!in_delay_slot_);
  const int32_t offset = ObjectPool::element_offset(
      object_pool_wrapper_.FindObject(target, patchable));
  LoadWordFromPoolOffset(CODE_REG, offset - kHeapObjectTag);
  lw(T9, FieldAddress(CODE_REG, Code::entry_point_offset()));
  jalr(T9);
  if (patchable == kPatchable) {
    delay_slot_available_ = false;  // CodePatcher expects a nop.
  }
}
533 | |
534 | |
// Calls the stub's code with the given patchability.
void Assembler::BranchLink(const StubEntry& stub_entry,
                           Patchability patchable) {
  BranchLink(Code::ZoneHandle(stub_entry.code()), patchable);
}


// Calls the stub's code as a patchable call site.
void Assembler::BranchLinkPatchable(const StubEntry& stub_entry) {
  BranchLink(Code::ZoneHandle(stub_entry.code()), kPatchable);
}
544 | |
545 | |
// Calls the runtime-call trampoline cached on the current Thread, loading
// its stub code into CODE_REG from the delay slot of the call itself.
void Assembler::BranchLinkToRuntime() {
  lw(T9, Address(THR, Thread::call_to_runtime_entry_point_offset()));
  jalr(T9);
  delay_slot()->lw(CODE_REG,
                   Address(THR, Thread::call_to_runtime_stub_offset()));
}
552 | |
553 | |
// Calls the stub's code through the object pool, registering 'equivalence'
// so pool deduplication may share the slot with an equivalent entry.
void Assembler::BranchLinkWithEquivalence(const StubEntry& stub_entry,
                                          const Object& equivalence) {
  const Code& target = Code::ZoneHandle(stub_entry.code());
  ASSERT(!in_delay_slot_);
  const int32_t offset = ObjectPool::element_offset(
      object_pool_wrapper_.FindObject(target, equivalence));
  LoadWordFromPoolOffset(CODE_REG, offset - kHeapObjectTag);
  lw(T9, FieldAddress(CODE_REG, Code::entry_point_offset()));
  jalr(T9);
  delay_slot_available_ = false;  // CodePatcher expects a nop.
}
565 | |
566 | |
// Whether 'object' may be materialized via an object-pool load: requires
// the constant pool to be in use, and the object to be an old-space,
// non-temporary handle (pool entries must not move under the code).
bool Assembler::CanLoadFromObjectPool(const Object& object) const {
  ASSERT(!object.IsICData() || ICData::Cast(object).IsOriginal());
  ASSERT(!object.IsField() || Field::Cast(object).IsOriginal());
  ASSERT(!Thread::CanLoadFromThread(object));
  if (!constant_pool_allowed()) {
    return false;
  }

  ASSERT(object.IsNotTemporaryScopedHandle());
  ASSERT(object.IsOld());
  return true;
}
579 | |
580 | |
// Loads 'object' into rd, choosing the cheapest available source: a
// Thread-cached constant, an immediate (Smis), or an object-pool slot
// (a fresh slot when 'is_unique', a shared one otherwise).
void Assembler::LoadObjectHelper(Register rd,
                                 const Object& object,
                                 bool is_unique) {
  ASSERT(!object.IsICData() || ICData::Cast(object).IsOriginal());
  ASSERT(!object.IsField() || Field::Cast(object).IsOriginal());
  ASSERT(!in_delay_slot_);
  if (Thread::CanLoadFromThread(object)) {
    // Load common VM constants from the thread. This works also in places where
    // no constant pool is set up (e.g. intrinsic code).
    lw(rd, Address(THR, Thread::OffsetFromThread(object)));
  } else if (object.IsSmi()) {
    // Relocation doesn't apply to Smis.
    LoadImmediate(rd, reinterpret_cast<int32_t>(object.raw()));
  } else if (CanLoadFromObjectPool(object)) {
    // Make sure that class CallPattern is able to decode this load from the
    // object pool.
    const int32_t offset = ObjectPool::element_offset(
        is_unique ? object_pool_wrapper_.AddObject(object)
                  : object_pool_wrapper_.FindObject(object));
    LoadWordFromPoolOffset(rd, offset - kHeapObjectTag);
  } else {
    UNREACHABLE();
  }
}
605 | |
606 | |
// Loads 'object' into rd, sharing an object-pool slot when one exists.
void Assembler::LoadObject(Register rd, const Object& object) {
  LoadObjectHelper(rd, object, false);
}


// Loads 'object' into rd from a dedicated (non-shared) pool slot, so the
// slot can be patched independently of other uses of the same object.
void Assembler::LoadUniqueObject(Register rd, const Object& object) {
  LoadObjectHelper(rd, object, true);
}
615 | |
616 | |
// Loads 'function' into dst from the callee's object pool (base register
// 'new_pp', which need not be the current PP).
void Assembler::LoadFunctionFromCalleePool(Register dst,
                                           const Function& function,
                                           Register new_pp) {
  const int32_t offset =
      ObjectPool::element_offset(object_pool_wrapper_.FindObject(function));
  LoadWordFromPoolOffset(dst, offset - kHeapObjectTag, new_pp);
}
624 | |
625 | |
// Loads the native-function entry for 'label' into rd via the object pool.
void Assembler::LoadNativeEntry(Register rd,
                                const ExternalLabel* label,
                                Patchability patchable) {
  const int32_t offset = ObjectPool::element_offset(
      object_pool_wrapper_.FindNativeEntry(label, patchable));
  LoadWordFromPoolOffset(rd, offset - kHeapObjectTag);
}
633 | |
634 | |
// Pushes 'object' on the stack, materializing it through TMP.
void Assembler::PushObject(const Object& object) {
  ASSERT(!object.IsICData() || ICData::Cast(object).IsOriginal());
  ASSERT(!object.IsField() || Field::Cast(object).IsOriginal());
  ASSERT(!in_delay_slot_);
  LoadObject(TMP, object);
  Push(TMP);
}
642 | |
643 | |
// Branches to 'no_update' when no write barrier is needed for storing
// 'value' (known not to be a Smi) into 'object'.
// Preserves object and value registers; clobbers TMP and CMPRES1.
void Assembler::StoreIntoObjectFilterNoSmi(Register object,
                                           Register value,
                                           Label* no_update) {
  ASSERT(!in_delay_slot_);
  COMPILE_ASSERT((kNewObjectAlignmentOffset == kWordSize) &&
                 (kOldObjectAlignmentOffset == 0));

  // Write-barrier triggers if the value is in the new space (has bit set) and
  // the object is in the old space (has bit cleared).
  // To check that, we compute value & ~object and skip the write barrier
  // if the bit is not set. We can't destroy the object.
  nor(TMP, ZR, object);  // TMP = ~object.
  and_(TMP, value, TMP);
  andi(CMPRES1, TMP, Immediate(kNewObjectAlignmentOffset));
  beq(CMPRES1, ZR, no_update);
}
661 | |
662 | |
// Branches to 'no_update' when no write barrier is needed for storing
// 'value' (which may be a Smi) into 'object'.
// Preserves object and value registers; clobbers TMP and CMPRES1.
void Assembler::StoreIntoObjectFilter(Register object,
                                      Register value,
                                      Label* no_update) {
  ASSERT(!in_delay_slot_);
  // For the value we are only interested in the new/old bit and the tag bit.
  // And the new bit with the tag bit. The resulting bit will be 0 for a Smi.
  sll(TMP, value, kObjectAlignmentLog2 - 1);
  and_(TMP, value, TMP);
  // And the result with the negated space bit of the object.
  nor(CMPRES1, ZR, object);  // CMPRES1 = ~object.
  and_(TMP, TMP, CMPRES1);
  andi(CMPRES1, TMP, Immediate(kNewObjectAlignmentOffset));
  beq(CMPRES1, ZR, no_update);
}
678 | |
679 | |
// Stores 'value' into 'dest' (a slot inside 'object') and, when required,
// records 'object' in the store buffer via the Thread's update entry point.
// The runtime call expects the object in T0, so T0 and RA are saved and
// restored around it.
void Assembler::StoreIntoObject(Register object,
                                const Address& dest,
                                Register value,
                                bool can_value_be_smi) {
  ASSERT(!in_delay_slot_);
  ASSERT(object != value);
  sw(value, dest);
  Label done;
  if (can_value_be_smi) {
    StoreIntoObjectFilter(object, value, &done);
  } else {
    StoreIntoObjectFilterNoSmi(object, value, &done);
  }
  // A store buffer update is required.
  if (value != T0) {
    // Preserve T0.
    addiu(SP, SP, Immediate(-2 * kWordSize));
    sw(T0, Address(SP, 1 * kWordSize));
  } else {
    // 'value' already lives in T0; only RA needs a slot.
    addiu(SP, SP, Immediate(-1 * kWordSize));
  }
  sw(RA, Address(SP, 0 * kWordSize));  // The call below clobbers RA.
  if (object != T0) {
    mov(T0, object);  // The store-buffer stub takes the object in T0.
  }
  lw(T9, Address(THR, Thread::update_store_buffer_entry_point_offset()));
  jalr(T9);
  delay_slot()->lw(CODE_REG,
                   Address(THR, Thread::update_store_buffer_code_offset()));
  lw(RA, Address(SP, 0 * kWordSize));
  if (value != T0) {
    // Restore T0.
    lw(T0, Address(SP, 1 * kWordSize));
    addiu(SP, SP, Immediate(2 * kWordSize));
  } else {
    addiu(SP, SP, Immediate(1 * kWordSize));
  }
  Bind(&done);
}
719 | |
720 | |
// Barrier store of 'value' at field 'offset' inside 'object'; falls back
// to computing the address in TMP when the offset is out of lw/sw range.
void Assembler::StoreIntoObjectOffset(Register object,
                                      int32_t offset,
                                      Register value,
                                      bool can_value_be_smi) {
  if (Address::CanHoldOffset(offset - kHeapObjectTag)) {
    StoreIntoObject(object, FieldAddress(object, offset), value,
                    can_value_be_smi);
  } else {
    AddImmediate(TMP, object, offset - kHeapObjectTag);
    StoreIntoObject(object, Address(TMP), value, can_value_be_smi);
  }
}
733 | |
734 | |
// Stores 'value' into 'dest' without a write barrier. Debug builds verify
// that a barrier was indeed unnecessary and stop otherwise.
void Assembler::StoreIntoObjectNoBarrier(Register object,
                                         const Address& dest,
                                         Register value) {
  ASSERT(!in_delay_slot_);
  sw(value, dest);
#if defined(DEBUG)
  Label done;
  StoreIntoObjectFilter(object, value, &done);
  Stop("Store buffer update is required");
  Bind(&done);
#endif  // defined(DEBUG)
  // No store buffer update.
}
748 | |
749 | |
// Barrier-less store of 'value' at field 'offset' inside 'object'; falls
// back to computing the address in TMP when the offset is out of range.
void Assembler::StoreIntoObjectNoBarrierOffset(Register object,
                                               int32_t offset,
                                               Register value) {
  if (Address::CanHoldOffset(offset - kHeapObjectTag)) {
    StoreIntoObjectNoBarrier(object, FieldAddress(object, offset), value);
  } else {
    AddImmediate(TMP, object, offset - kHeapObjectTag);
    StoreIntoObjectNoBarrier(object, Address(TMP), value);
  }
}
760 | |
761 | |
// Stores constant 'value' into 'dest' without a write barrier; legal only
// for values the GC never moves (Smis, VM-heap or old-space objects).
void Assembler::StoreIntoObjectNoBarrier(Register object,
                                         const Address& dest,
                                         const Object& value) {
  ASSERT(!value.IsICData() || ICData::Cast(value).IsOriginal());
  ASSERT(!value.IsField() || Field::Cast(value).IsOriginal());
  ASSERT(!in_delay_slot_);
  ASSERT(value.IsSmi() || value.InVMHeap() ||
         (value.IsOld() && value.IsNotTemporaryScopedHandle()));
  // No store buffer update.
  LoadObject(TMP, value);
  sw(TMP, dest);
}
774 | |
775 | |
// Barrier-less store of constant 'value' at field 'offset' inside
// 'object'; handles out-of-range offsets via TMP.
void Assembler::StoreIntoObjectNoBarrierOffset(Register object,
                                               int32_t offset,
                                               const Object& value) {
  if (Address::CanHoldOffset(offset - kHeapObjectTag)) {
    StoreIntoObjectNoBarrier(object, FieldAddress(object, offset), value);
  } else {
    AddImmediate(TMP, object, offset - kHeapObjectTag);
    StoreIntoObjectNoBarrier(object, Address(TMP), value);
  }
}
786 | |
787 | |
// Loads the current Isolate pointer (cached on the Thread) into 'result'.
void Assembler::LoadIsolate(Register result) {
  lw(result, Address(THR, Thread::isolate_offset()));
}
791 | |
792 | |
// Loads the 16-bit class id of heap object 'object' into 'result' with a
// single halfword load from the class-id portion of the tags word.
// NOTE(review): the byte offset math assumes the class-id halfword sits at
// tags_offset + pos/8, i.e. a little-endian layout -- confirm for target.
void Assembler::LoadClassId(Register result, Register object) {
  ASSERT(RawObject::kClassIdTagPos == 16);
  ASSERT(RawObject::kClassIdTagSize == 16);
  const intptr_t class_id_offset =
      Object::tags_offset() + RawObject::kClassIdTagPos / kBitsPerByte;
  lhu(result, FieldAddress(object, class_id_offset));
}
800 | |
801 | |
// Loads the Class object for 'class_id' into 'result' by indexing the
// isolate's class table (4 bytes per entry).
void Assembler::LoadClassById(Register result, Register class_id) {
  ASSERT(!in_delay_slot_);
  ASSERT(result != class_id);
  LoadIsolate(result);
  const intptr_t offset =
      Isolate::class_table_offset() + ClassTable::table_offset();
  lw(result, Address(result, offset));  // result = class table array.
  sll(TMP, class_id, 2);  // Scale the id to a word offset.
  addu(result, result, TMP);
  lw(result, Address(result));
}
813 | |
814 | |
// Loads the Class of heap object 'object' into 'result' (id via TMP).
void Assembler::LoadClass(Register result, Register object) {
  ASSERT(!in_delay_slot_);
  ASSERT(TMP != result);
  LoadClassId(TMP, object);
  LoadClassById(result, TMP);
}
821 | |
822 | |
// Loads the class id of 'object' into 'result', treating Smis specially:
// a tagged Smi yields kSmiCid without dereferencing the value.
void Assembler::LoadClassIdMayBeSmi(Register result, Register object) {
  Label heap_object, done;
  andi(CMPRES1, object, Immediate(kSmiTagMask));
  bne(CMPRES1, ZR, &heap_object);  // Tag bit set => real heap object.
  LoadImmediate(result, kSmiCid);
  b(&done);
  Bind(&heap_object);
  LoadClassId(result, object);
  Bind(&done);
}
833 | |
834 | |
// Like LoadClassIdMayBeSmi, but leaves the class id Smi-tagged in 'result'.
void Assembler::LoadTaggedClassIdMayBeSmi(Register result, Register object) {
  LoadClassIdMayBeSmi(result, object);
  SmiTag(result);
}
839 | |
840 | |
// Sets up a basic frame: pushes RA and the caller's FP, then makes FP
// point at the new frame.
void Assembler::EnterFrame() {
  ASSERT(!in_delay_slot_);
  addiu(SP, SP, Immediate(-2 * kWordSize));
  sw(RA, Address(SP, 1 * kWordSize));
  sw(FP, Address(SP, 0 * kWordSize));
  mov(FP, SP);
}
848 | |
849 | |
// Tears down the frame built by EnterFrame and returns; the stack pointer
// adjustment rides in the return jump's delay slot.
void Assembler::LeaveFrameAndReturn() {
  ASSERT(!in_delay_slot_);
  mov(SP, FP);  // Discard any locals below FP.
  lw(RA, Address(SP, 1 * kWordSize));
  lw(FP, Address(SP, 0 * kWordSize));
  Ret();
  delay_slot()->addiu(SP, SP, Immediate(2 * kWordSize));
}
858 | |
859 | |
// Stub frames share the Dart frame layout on MIPS; these simply delegate.
void Assembler::EnterStubFrame(intptr_t frame_size) {
  EnterDartFrame(frame_size);
}


void Assembler::LeaveStubFrame() {
  LeaveDartFrame();
}


void Assembler::LeaveStubFrameAndReturn(Register ra) {
  LeaveDartFrameAndReturn(ra);
}
873 | |
874 | |
// Emits the monomorphic checked entry sequence.
// T0 receiver, S5 guarded cid as Smi.
// On a class-id mismatch, jumps to the Thread's monomorphic-miss handler;
// otherwise falls through to the unchecked entry point. Far branches are
// temporarily disabled so the code size matches the fixed entry offsets
// asserted below.
void Assembler::MonomorphicCheckedEntry() {
  ASSERT(has_single_entry_point_);
  has_single_entry_point_ = false;
  bool saved_use_far_branches = use_far_branches();
  set_use_far_branches(false);

  Label have_cid, miss;
  Bind(&miss);
  lw(T9, Address(THR, Thread::monomorphic_miss_entry_offset()));
  jr(T9);

  Comment("MonomorphicCheckedEntry");
  ASSERT(CodeSize() == Instructions::kCheckedEntryOffset);
  SmiUntag(S5);  // S5 = expected class id (untagged).
  LoadClassIdMayBeSmi(S4, T0);  // S4 = receiver's class id.
  bne(S4, S5, &miss);

  // Fall through to unchecked entry.
  ASSERT(CodeSize() == Instructions::kUncheckedEntryOffset);

  set_use_far_branches(saved_use_far_branches);
}
898 | |
899 | |
900 #ifndef PRODUCT | |
// Jumps to 'trace' when allocation tracing is enabled for class 'cid',
// by testing the trace bit in the class's heap-stats state word.
// Clobbers temp_reg and CMPRES1.
void Assembler::MaybeTraceAllocation(intptr_t cid,
                                     Register temp_reg,
                                     Label* trace) {
  ASSERT(cid > 0);
  ASSERT(!in_delay_slot_);
  ASSERT(temp_reg != kNoRegister);
  ASSERT(temp_reg != TMP);
  intptr_t state_offset = ClassTable::StateOffsetFor(cid);
  LoadIsolate(temp_reg);
  intptr_t table_offset =
      Isolate::class_table_offset() + ClassTable::TableOffsetFor(cid);
  lw(temp_reg, Address(temp_reg, table_offset));
  AddImmediate(temp_reg, state_offset);
  lw(temp_reg, Address(temp_reg, 0));  // temp_reg = class state word.
  andi(CMPRES1, temp_reg, Immediate(ClassHeapStats::TraceAllocationMask()));
  bne(CMPRES1, ZR, trace);  // Trace bit set => take the slow path.
}
918 | |
919 | |
// Increments the per-class allocation counter for 'cid' in the isolate's
// class-table heap stats. Clobbers temp_reg and TMP.
void Assembler::UpdateAllocationStats(intptr_t cid,
                                      Register temp_reg,
                                      Heap::Space space) {
  ASSERT(!in_delay_slot_);
  ASSERT(temp_reg != kNoRegister);
  ASSERT(temp_reg != TMP);
  ASSERT(cid > 0);
  intptr_t counter_offset =
      ClassTable::CounterOffsetFor(cid, space == Heap::kNew);
  LoadIsolate(temp_reg);
  intptr_t table_offset =
      Isolate::class_table_offset() + ClassTable::TableOffsetFor(cid);
  lw(temp_reg, Address(temp_reg, table_offset));
  AddImmediate(temp_reg, counter_offset);
  // Counter += 1 (load, add, store).
  lw(TMP, Address(temp_reg, 0));
  AddImmediate(TMP, 1);
  sw(TMP, Address(temp_reg, 0));
}
938 | |
939 | |
// Increments the allocation counter for class 'cid' and adds 'size_reg'
// bytes to the allocated-size-since-GC counter for the given 'space'.
// Clobbers 'temp_reg' and TMP; 'size_reg' is only read.
void Assembler::UpdateAllocationStatsWithSize(intptr_t cid,
                                              Register size_reg,
                                              Register temp_reg,
                                              Heap::Space space) {
  ASSERT(!in_delay_slot_);
  ASSERT(temp_reg != kNoRegister);
  ASSERT(cid > 0);
  ASSERT(temp_reg != TMP);
  const uword class_offset = ClassTable::ClassOffsetFor(cid);
  const uword count_field_offset =
      (space == Heap::kNew)
          ? ClassHeapStats::allocated_since_gc_new_space_offset()
          : ClassHeapStats::allocated_since_gc_old_space_offset();
  const uword size_field_offset =
      (space == Heap::kNew)
          ? ClassHeapStats::allocated_size_since_gc_new_space_offset()
          : ClassHeapStats::allocated_size_since_gc_old_space_offset();
  LoadIsolate(temp_reg);
  intptr_t table_offset =
      Isolate::class_table_offset() + ClassTable::TableOffsetFor(cid);
  // temp_reg <- base of the ClassHeapStats entry for 'cid'.
  lw(temp_reg, Address(temp_reg, table_offset));
  AddImmediate(temp_reg, class_offset);
  // count += 1.
  lw(TMP, Address(temp_reg, count_field_offset));
  AddImmediate(TMP, 1);
  sw(TMP, Address(temp_reg, count_field_offset));
  // size += size_reg.
  lw(TMP, Address(temp_reg, size_field_offset));
  addu(TMP, TMP, size_reg);
  sw(TMP, Address(temp_reg, size_field_offset));
}
969 #endif // !PRODUCT | |
970 | |
971 | |
// Attempts to inline-allocate an instance of 'cls' in new space. On
// success, 'instance_reg' holds the tagged pointer to the new object with
// its tags word initialized (other fields are NOT initialized). Jumps to
// 'failure' when inline allocation is disabled, allocation for this class
// is traced, or the object does not fit in the remaining new space.
// Clobbers 'temp_reg' and TMP.
void Assembler::TryAllocate(const Class& cls,
                            Label* failure,
                            Register instance_reg,
                            Register temp_reg) {
  ASSERT(!in_delay_slot_);
  ASSERT(failure != NULL);
  if (FLAG_inline_alloc) {
    // If this allocation is traced, program will jump to failure path
    // (i.e. the allocation stub) which will allocate the object and trace the
    // allocation call site.
    NOT_IN_PRODUCT(MaybeTraceAllocation(cls.id(), temp_reg, failure));
    const intptr_t instance_size = cls.instance_size();
    Heap::Space space = Heap::kNew;
    lw(temp_reg, Address(THR, Thread::heap_offset()));
    lw(instance_reg, Address(temp_reg, Heap::TopOffset(space)));
    // TODO(koda): Protect against unsigned overflow here.
    AddImmediate(instance_reg, instance_size);

    // instance_reg: potential next object start.
    lw(TMP, Address(temp_reg, Heap::EndOffset(space)));
    // Fail if heap end unsigned less than or equal to instance_reg.
    BranchUnsignedLessEqual(TMP, instance_reg, failure);

    // Successfully allocated the object, now update top to point to
    // next object start and store the class in the class field of object.
    sw(instance_reg, Address(temp_reg, Heap::TopOffset(space)));

    // Back instance_reg up to the object's start and apply the heap tag.
    ASSERT(instance_size >= kHeapObjectTag);
    AddImmediate(instance_reg, -instance_size + kHeapObjectTag);
    NOT_IN_PRODUCT(UpdateAllocationStats(cls.id(), temp_reg, space));
    // Build and store the tags word (size + class id).
    uint32_t tags = 0;
    tags = RawObject::SizeTag::update(instance_size, tags);
    ASSERT(cls.id() != kIllegalCid);
    tags = RawObject::ClassIdTag::update(cls.id(), tags);
    LoadImmediate(TMP, tags);
    sw(TMP, FieldAddress(instance_reg, Object::tags_offset()));
  } else {
    // Inline allocation disabled: always take the slow path.
    b(failure);
  }
}
1012 | |
1013 | |
1014 void Assembler::TryAllocateArray(intptr_t cid, | |
1015 intptr_t instance_size, | |
1016 Label* failure, | |
1017 Register instance, | |
1018 Register end_address, | |
1019 Register temp1, | |
1020 Register temp2) { | |
1021 if (FLAG_inline_alloc) { | |
1022 // If this allocation is traced, program will jump to failure path | |
1023 // (i.e. the allocation stub) which will allocate the object and trace the | |
1024 // allocation call site. | |
1025 NOT_IN_PRODUCT(MaybeTraceAllocation(cid, temp1, failure)); | |
1026 Isolate* isolate = Isolate::Current(); | |
1027 Heap* heap = isolate->heap(); | |
1028 Heap::Space space = Heap::kNew; | |
1029 lw(temp1, Address(THR, Thread::heap_offset())); | |
1030 // Potential new object start. | |
1031 lw(instance, Address(temp1, heap->TopOffset(space))); | |
1032 // Potential next object start. | |
1033 AddImmediate(end_address, instance, instance_size); | |
1034 // Branch on unsigned overflow. | |
1035 BranchUnsignedLess(end_address, instance, failure); | |
1036 | |
1037 // Check if the allocation fits into the remaining space. | |
1038 // instance: potential new object start, /* inline_isolate = */ false. | |
1039 // end_address: potential next object start. | |
1040 lw(temp2, Address(temp1, Heap::EndOffset(space))); | |
1041 BranchUnsignedGreaterEqual(end_address, temp2, failure); | |
1042 | |
1043 // Successfully allocated the object(s), now update top to point to | |
1044 // next object start and initialize the object. | |
1045 sw(end_address, Address(temp1, Heap::TopOffset(space))); | |
1046 addiu(instance, instance, Immediate(kHeapObjectTag)); | |
1047 LoadImmediate(temp1, instance_size); | |
1048 NOT_IN_PRODUCT(UpdateAllocationStatsWithSize(cid, temp1, temp2, space)); | |
1049 | |
1050 // Initialize the tags. | |
1051 // instance: new object start as a tagged pointer. | |
1052 uint32_t tags = 0; | |
1053 tags = RawObject::ClassIdTag::update(cid, tags); | |
1054 tags = RawObject::SizeTag::update(instance_size, tags); | |
1055 LoadImmediate(temp1, tags); | |
1056 sw(temp1, FieldAddress(instance, Array::tags_offset())); // Store tags. | |
1057 } else { | |
1058 b(failure); | |
1059 } | |
1060 } | |
1061 | |
1062 | |
// Calls the given runtime entry with 'argument_count' arguments; the entry
// object emits the appropriate call sequence for its own calling convention.
void Assembler::CallRuntime(const RuntimeEntry& entry,
                            intptr_t argument_count) {
  entry.Call(this, argument_count);
}
1067 | |
1068 | |
// Sets up a Dart frame and reserves 'frame_size' bytes for locals.
// Frame layout after this (ascending addresses from SP before locals):
//   SP+0: PP  SP+1: CODE_REG  SP+2: caller FP  SP+3: RA
// FP is left pointing at the saved-FP slot, and PP is reloaded from the
// current code object's pool.
void Assembler::EnterDartFrame(intptr_t frame_size) {
  ASSERT(!in_delay_slot_);

  SetPrologueOffset();

  addiu(SP, SP, Immediate(-4 * kWordSize));
  sw(RA, Address(SP, 3 * kWordSize));
  sw(FP, Address(SP, 2 * kWordSize));
  sw(CODE_REG, Address(SP, 1 * kWordSize));
  sw(PP, Address(SP, 0 * kWordSize));

  // Set FP to the saved previous FP.
  addiu(FP, SP, Immediate(2 * kWordSize));

  LoadPoolPointer();

  // Reserve space for locals.
  AddImmediate(SP, -frame_size);
}
1088 | |
1089 | |
// On entry to a function compiled for OSR, the caller's frame pointer, the
// stack locals, and any copied parameters are already in place. The frame
// pointer is already set up. The PC marker is not correct for the
// optimized function and there may be extra space for spill slots to
// allocate. We must also set up the pool pointer for the function.
void Assembler::EnterOsrFrame(intptr_t extra_size) {
  ASSERT(!in_delay_slot_);
  Comment("EnterOsrFrame");

  // Restore return address (saved at FP + 1 word by the frame layout
  // established in EnterDartFrame).
  lw(RA, Address(FP, 1 * kWordSize));

  // Load the pool pointer. offset has already been subtracted from temp.
  RestoreCodePointer();
  LoadPoolPointer();

  // Reserve space for locals.
  AddImmediate(SP, -extra_size);
}
1109 | |
1110 | |
// Tears down a Dart frame created by EnterDartFrame: drops any locals,
// restores RA and FP (and optionally the caller's PP), then pops the four
// saved slots. The PC marker (CODE_REG slot) is not restored.
void Assembler::LeaveDartFrame(RestorePP restore_pp) {
  ASSERT(!in_delay_slot_);
  // Point SP at the lowest saved slot (PP), which is 2 words below FP.
  addiu(SP, FP, Immediate(-2 * kWordSize));

  lw(RA, Address(SP, 3 * kWordSize));
  lw(FP, Address(SP, 2 * kWordSize));
  if (restore_pp == kRestoreCallerPP) {
    lw(PP, Address(SP, 0 * kWordSize));
  }

  // Adjust SP for PC, RA, FP, PP pushed in EnterDartFrame.
  addiu(SP, SP, Immediate(4 * kWordSize));
}
1124 | |
1125 | |
// Like LeaveDartFrame (always restoring the caller's PP), but also returns
// through 'ra'. The final SP adjustment rides in the jump's delay slot so
// it executes after the jr is issued but before control transfers.
void Assembler::LeaveDartFrameAndReturn(Register ra) {
  ASSERT(!in_delay_slot_);
  // Point SP at the lowest saved slot (PP), which is 2 words below FP.
  addiu(SP, FP, Immediate(-2 * kWordSize));

  lw(RA, Address(SP, 3 * kWordSize));
  lw(FP, Address(SP, 2 * kWordSize));
  lw(PP, Address(SP, 0 * kWordSize));

  // Adjust SP for PC, RA, FP, PP pushed in EnterDartFrame, and return.
  jr(ra);
  delay_slot()->addiu(SP, SP, Immediate(4 * kWordSize));
}
1138 | |
1139 | |
// Reserves 'frame_space' bytes of stack and rounds SP down to the C++ ABI
// activation-frame alignment. Clobbers TMP.
void Assembler::ReserveAlignedFrameSpace(intptr_t frame_space) {
  ASSERT(!in_delay_slot_);
  // Reserve space for arguments and align frame before entering
  // the C++ world.
  AddImmediate(SP, -frame_space);
  if (OS::ActivationFrameAlignment() > 1) {
    // Mask SP down to the next lower alignment boundary.
    LoadImmediate(TMP, ~(OS::ActivationFrameAlignment() - 1));
    and_(SP, SP, TMP);
  }
}
1150 | |
1151 | |
// Sets up a frame for calling into the C++ runtime: saves all Dart-volatile
// CPU and FPU registers plus PP, FP, and RA, points FP at the saved area,
// and reserves 'frame_space' aligned bytes for outgoing arguments.
// Undone by LeaveCallRuntimeFrame, which relies on this exact layout.
void Assembler::EnterCallRuntimeFrame(intptr_t frame_space) {
  ASSERT(!in_delay_slot_);
  const intptr_t kPushedRegistersSize = kDartVolatileCpuRegCount * kWordSize +
                                        3 * kWordSize +  // PP, FP and RA.
                                        kDartVolatileFpuRegCount * kWordSize;

  SetPrologueOffset();

  Comment("EnterCallRuntimeFrame");

  // Save volatile CPU and FPU registers on the stack:
  // -------------
  // FPU Registers
  // CPU Registers
  // RA
  // FP
  // -------------
  // TODO(zra): It may be a problem for walking the stack that FP is below
  //            the saved registers. If it turns out to be a problem in the
  //            future, try pushing RA and FP before the volatile registers.
  addiu(SP, SP, Immediate(-kPushedRegistersSize));
  for (int i = kDartFirstVolatileFpuReg; i <= kDartLastVolatileFpuReg; i++) {
    // These go above the volatile CPU registers.
    const int slot =
        (i - kDartFirstVolatileFpuReg) + kDartVolatileCpuRegCount + 3;
    FRegister reg = static_cast<FRegister>(i);
    swc1(reg, Address(SP, slot * kWordSize));
  }
  for (int i = kDartFirstVolatileCpuReg; i <= kDartLastVolatileCpuReg; i++) {
    // + 3 because PP, FP, and RA occupy slots 0 through 2.
    const int slot = (i - kDartFirstVolatileCpuReg) + 3;
    Register reg = static_cast<Register>(i);
    sw(reg, Address(SP, slot * kWordSize));
  }
  sw(RA, Address(SP, 2 * kWordSize));
  sw(FP, Address(SP, 1 * kWordSize));
  sw(PP, Address(SP, 0 * kWordSize));
  LoadPoolPointer();

  // FP marks the base of the saved area so LeaveCallRuntimeFrame can find it
  // regardless of how SP was adjusted below.
  mov(FP, SP);

  ReserveAlignedFrameSpace(frame_space);
}
1195 | |
1196 | |
// Tears down a frame created by EnterCallRuntimeFrame: restores PP, FP, RA
// and all Dart-volatile CPU/FPU registers, then pops the saved area.
// The slot layout here must mirror EnterCallRuntimeFrame exactly.
void Assembler::LeaveCallRuntimeFrame() {
  ASSERT(!in_delay_slot_);
  const intptr_t kPushedRegistersSize = kDartVolatileCpuRegCount * kWordSize +
                                        3 * kWordSize +  // PP, FP and RA.
                                        kDartVolatileFpuRegCount * kWordSize;

  Comment("LeaveCallRuntimeFrame");

  // SP might have been modified to reserve space for arguments
  // and ensure proper alignment of the stack frame.
  // We need to restore it before restoring registers.
  mov(SP, FP);

  // Restore volatile CPU and FPU registers from the stack.
  lw(PP, Address(SP, 0 * kWordSize));
  lw(FP, Address(SP, 1 * kWordSize));
  lw(RA, Address(SP, 2 * kWordSize));
  for (int i = kDartFirstVolatileCpuReg; i <= kDartLastVolatileCpuReg; i++) {
    // + 3 because PP, FP, and RA occupy slots 0 through 2.
    const int slot = (i - kDartFirstVolatileCpuReg) + 3;
    Register reg = static_cast<Register>(i);
    lw(reg, Address(SP, slot * kWordSize));
  }
  for (int i = kDartFirstVolatileFpuReg; i <= kDartLastVolatileFpuReg; i++) {
    // These go above the volatile CPU registers.
    const int slot =
        (i - kDartFirstVolatileFpuReg) + kDartVolatileCpuRegCount + 3;
    FRegister reg = static_cast<FRegister>(i);
    lwc1(reg, Address(SP, slot * kWordSize));
  }
  addiu(SP, SP, Immediate(kPushedRegistersSize));
}
1229 | |
1230 | |
// Returns an Address for element 'index' (a compile-time constant) of an
// array of class 'cid' based at 'array'. For internal (non-external)
// typed data the payload offset is added and the heap-object tag removed,
// so 'array' may be a tagged pointer. The offset must fit the load/store
// immediate field (asserted).
Address Assembler::ElementAddressForIntIndex(bool is_external,
                                             intptr_t cid,
                                             intptr_t index_scale,
                                             Register array,
                                             intptr_t index) const {
  const int64_t offset =
      index * index_scale +
      (is_external ? 0 : (Instance::DataOffsetFor(cid) - kHeapObjectTag));
  ASSERT(Utils::IsInt(32, offset));
  ASSERT(Address::CanHoldOffset(offset));
  return Address(array, static_cast<int32_t>(offset));
}
1243 | |
1244 | |
// Materializes the address of element 'index' of an array of class 'cid'
// into 'address'. Unlike ElementAddressForIntIndex, the offset is added
// with AddImmediate, so it need not fit a single instruction immediate.
void Assembler::LoadElementAddressForIntIndex(Register address,
                                              bool is_external,
                                              intptr_t cid,
                                              intptr_t index_scale,
                                              Register array,
                                              intptr_t index) {
  const int64_t offset =
      index * index_scale +
      (is_external ? 0 : (Instance::DataOffsetFor(cid) - kHeapObjectTag));
  AddImmediate(address, array, offset);
}
1256 | |
1257 | |
// Returns an Address for the element at runtime 'index' (smi-tagged) of an
// array of class 'cid'. For loads the scaled base is computed into TMP;
// for stores 'index' itself is clobbered and reused as the base register —
// presumably so TMP remains free for the value being stored (callers must
// not rely on 'index' surviving a store).
Address Assembler::ElementAddressForRegIndex(bool is_load,
                                             bool is_external,
                                             intptr_t cid,
                                             intptr_t index_scale,
                                             Register array,
                                             Register index) {
  // Note that index is expected smi-tagged, (i.e, LSL 1) for all arrays.
  const intptr_t shift = Utils::ShiftForPowerOfTwo(index_scale) - kSmiTagShift;
  const int32_t offset =
      is_external ? 0 : (Instance::DataOffsetFor(cid) - kHeapObjectTag);
  ASSERT(array != TMP);
  ASSERT(index != TMP);
  const Register base = is_load ? TMP : index;
  if (shift < 0) {
    // index_scale smaller than the smi tag scale: shift right (arithmetic).
    ASSERT(shift == -1);
    sra(TMP, index, 1);
    addu(base, array, TMP);
  } else if (shift == 0) {
    addu(base, array, index);
  } else {
    sll(TMP, index, shift);
    addu(base, array, TMP);
  }
  ASSERT(Address::CanHoldOffset(offset));
  return Address(base, offset);
}
1284 | |
1285 | |
// Materializes the address of the element at runtime 'index' (smi-tagged)
// of an array of class 'cid' into 'address'. 'address' doubles as the
// scratch register, so 'array' and 'index' are preserved.
void Assembler::LoadElementAddressForRegIndex(Register address,
                                              bool is_load,
                                              bool is_external,
                                              intptr_t cid,
                                              intptr_t index_scale,
                                              Register array,
                                              Register index) {
  // Note that index is expected smi-tagged, (i.e, LSL 1) for all arrays.
  const intptr_t shift = Utils::ShiftForPowerOfTwo(index_scale) - kSmiTagShift;
  const int32_t offset =
      is_external ? 0 : (Instance::DataOffsetFor(cid) - kHeapObjectTag);
  if (shift < 0) {
    // index_scale smaller than the smi tag scale: shift right (arithmetic).
    ASSERT(shift == -1);
    sra(address, index, 1);
    addu(address, array, address);
  } else if (shift == 0) {
    addu(address, array, index);
  } else {
    sll(address, index, shift);
    addu(address, array, address);
  }
  if (offset != 0) {
    AddImmediate(address, offset);
  }
}
1311 | |
1312 | |
// Loads a potentially unaligned little-endian halfword at 'addr' into
// 'dst', sign-extended: low byte zero-extended (lbu), high byte
// sign-extended (lb) and merged in at bit 8. Clobbers 'tmp'.
void Assembler::LoadHalfWordUnaligned(Register dst,
                                      Register addr,
                                      Register tmp) {
  ASSERT(dst != addr);  // dst is written before addr is last read.
  lbu(dst, Address(addr, 0));
  lb(tmp, Address(addr, 1));
  sll(tmp, tmp, 8);
  or_(dst, dst, tmp);
}
1322 | |
1323 | |
// Loads a potentially unaligned little-endian halfword at 'addr' into
// 'dst', zero-extended (both bytes loaded with lbu). Clobbers 'tmp'.
void Assembler::LoadHalfWordUnsignedUnaligned(Register dst,
                                              Register addr,
                                              Register tmp) {
  ASSERT(dst != addr);  // dst is written before addr is last read.
  lbu(dst, Address(addr, 0));
  lbu(tmp, Address(addr, 1));
  sll(tmp, tmp, 8);
  or_(dst, dst, tmp);
}
1333 | |
1334 | |
// Stores the low halfword of 'src' to a potentially unaligned address,
// little-endian, one byte at a time. Clobbers 'tmp'; 'src' is preserved.
void Assembler::StoreHalfWordUnaligned(Register src,
                                       Register addr,
                                       Register tmp) {
  sb(src, Address(addr, 0));
  srl(tmp, src, 8);
  sb(tmp, Address(addr, 1));
}
1342 | |
1343 | |
// Loads a potentially unaligned little-endian 32-bit word at 'addr' into
// 'dst', assembling it byte by byte. Clobbers 'tmp'.
void Assembler::LoadWordUnaligned(Register dst, Register addr, Register tmp) {
  // TODO(rmacnak): LWL + LWR
  ASSERT(dst != addr);  // dst is written before addr is last read.
  lbu(dst, Address(addr, 0));
  lbu(tmp, Address(addr, 1));
  sll(tmp, tmp, 8);
  or_(dst, dst, tmp);
  lbu(tmp, Address(addr, 2));
  sll(tmp, tmp, 16);
  or_(dst, dst, tmp);
  lbu(tmp, Address(addr, 3));
  sll(tmp, tmp, 24);
  or_(dst, dst, tmp);
}
1358 | |
1359 | |
// Stores 'src' to a potentially unaligned address, little-endian, one byte
// at a time. Clobbers 'tmp'; 'src' is preserved.
void Assembler::StoreWordUnaligned(Register src, Register addr, Register tmp) {
  // TODO(rmacnak): SWL + SWR
  sb(src, Address(addr, 0));
  srl(tmp, src, 8);
  sb(tmp, Address(addr, 1));
  srl(tmp, src, 16);
  sb(tmp, Address(addr, 2));
  srl(tmp, src, 24);
  sb(tmp, Address(addr, 3));
}
1370 | |
1371 | |
// Printable CPU register names, indexed by Register enum value. These
// follow this port's register aliases (e.g. AT is used as "tmp") rather
// than the raw MIPS O32 names throughout.
static const char* cpu_reg_names[kNumberOfCpuRegisters] = {
    "zr", "tmp", "v0", "v1", "a0", "a1", "a2", "a3", "t0", "t1", "t2",
    "t3", "t4", "t5", "t6", "t7", "s0", "s1", "s2", "s3", "s4", "s5",
    "s6", "s7", "t8", "t9", "k0", "k1", "gp", "sp", "fp", "ra",
};
1377 | |
1378 | |
// Returns the printable name for 'reg' (e.g. for disassembly or debugging).
const char* Assembler::RegisterName(Register reg) {
  ASSERT((0 <= reg) && (reg < kNumberOfCpuRegisters));
  return cpu_reg_names[reg];
}
1383 | |
1384 | |
// Printable FPU register names, indexed by FpuRegister enum value (the
// double-precision register set d0..d15).
static const char* fpu_reg_names[kNumberOfFpuRegisters] = {
    "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
    "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
};
1389 | |
1390 | |
// Returns the printable name for FPU register 'reg'.
const char* Assembler::FpuRegisterName(FpuRegister reg) {
  ASSERT((0 <= reg) && (reg < kNumberOfFpuRegisters));
  return fpu_reg_names[reg];
}
1395 | |
1396 | |
// Emits a stop: a break instruction with 'message' made available to the
// trap handler. The raw C-string pointer is embedded in the instruction
// stream just before the break; the branch jumps over it so the data word
// is never executed. (The pointer fits in one instruction word because
// MIPS here is a 32-bit target.)
void Assembler::Stop(const char* message) {
  if (FLAG_print_stop_message) {
    UNIMPLEMENTED();
  }
  Label stop;
  b(&stop);
  // Embed the message pointer as data in the code stream.
  Emit(reinterpret_cast<int32_t>(message));
  Bind(&stop);
  break_(Instr::kStopMessageCode);
}
1407 | |
1408 } // namespace dart | |
1409 | |
1410 #endif // defined TARGET_ARCH_MIPS | |
OLD | NEW |