Chromium Code Reviews

Side by Side Diff: src/mips/macro-assembler-mips.cc

Issue 543161: Added support for MIPS in architecture independent files.... (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: '' Created 10 years, 10 months ago
1 // Copyright 2006-2008 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28
29
30 #include "v8.h"
31
32 #include "bootstrapper.h"
33 #include "codegen-inl.h"
34 #include "debug.h"
35 #include "runtime.h"
36
37 namespace v8 {
38 namespace internal {
39
40 MacroAssembler::MacroAssembler(void* buffer, int size)
41 : Assembler(buffer, size),
42 unresolved_(0),
43 generating_stub_(false),
44 allow_stub_calls_(true),
45 code_object_(Heap::undefined_value()) {
46 }
47
48
49
50 void MacroAssembler::Jump(Register target, Condition cond,
51 Register r1, const Operand& r2) {
52 Jump(Operand(target), cond, r1, r2);
53 }
54
55
56 void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
57 Condition cond, Register r1, const Operand& r2) {
58 Jump(Operand(target, rmode), cond, r1, r2);
59 }
60
61
62 void MacroAssembler::Jump(byte* target, RelocInfo::Mode rmode,
63 Condition cond, Register r1, const Operand& r2) {
64 ASSERT(!RelocInfo::IsCodeTarget(rmode));
65 Jump(reinterpret_cast<intptr_t>(target), rmode, cond, r1, r2);
66 }
67
68
69 void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
70 Condition cond, Register r1, const Operand& r2) {
71 ASSERT(RelocInfo::IsCodeTarget(rmode));
72 Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, r1, r2);
73 }
74
75
76 void MacroAssembler::Call(Register target,
77 Condition cond, Register r1, const Operand& r2) {
78 Call(Operand(target), cond, r1, r2);
79 }
80
81
82 void MacroAssembler::Call(intptr_t target, RelocInfo::Mode rmode,
83 Condition cond, Register r1, const Operand& r2) {
84 Call(Operand(target, rmode), cond, r1, r2);
85 }
86
87
88 void MacroAssembler::Call(byte* target, RelocInfo::Mode rmode,
89 Condition cond, Register r1, const Operand& r2) {
90 ASSERT(!RelocInfo::IsCodeTarget(rmode));
91 Call(reinterpret_cast<intptr_t>(target), rmode, cond, r1, r2);
92 }
93
94
95 void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
96 Condition cond, Register r1, const Operand& r2) {
97 ASSERT(RelocInfo::IsCodeTarget(rmode));
98 Call(reinterpret_cast<intptr_t>(code.location()), rmode, cond, r1, r2);
99 }
100
101
102 void MacroAssembler::Ret(Condition cond, Register r1, const Operand& r2) {
103 Jump(Operand(ra), cond, r1, r2);
104 }
105
106
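// s4 is expected to hold the base address of the root array, so each root
// can be loaded at the fixed offset index * kPointerSize from it.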
107 void MacroAssembler::LoadRoot(Register destination,
108 Heap::RootListIndex index) {
109 lw(destination, MemOperand(s4, index << kPointerSizeLog2));
110 }
111
112 void MacroAssembler::LoadRoot(Register destination,
113 Heap::RootListIndex index,
114 Condition cond,
115 Register src1, const Operand& src2) {
116 Branch(NegateCondition(cond), 2, src1, src2);
117 nop();
118 lw(destination, MemOperand(s4, index << kPointerSizeLog2));
119 }
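// Schematically, the conditional variant above emits:
//   b<!cond>  +2      (via Branch; may expand to an slt + branch pair)
//   nop               (branch delay slot)
//   lw   destination, MemOperand(s4, index * kPointerSize)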
120
121
122 void MacroAssembler::RecordWrite(Register object, Register offset,
123 Register scratch) {
124 UNIMPLEMENTED_MIPS();
125 }
126
127
128 // ---------------------------------------------------------------------------
129 // Instruction macros
130
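// Each macro below accepts either a register or an immediate operand.
// An immediate that fits in 16 bits and needs no relocation uses the
// immediate instruction form; otherwise the value is first materialized
// in the at register and the register form is used.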
131 void MacroAssembler::Add(Register rd, Register rs, const Operand& rt) {
132 if (rt.is_reg()) {
133 add(rd, rs, rt.rm());
134 } else {
135 if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
136 addi(rd, rs, rt.imm32_);
137 } else {
138 // li handles the relocation.
139 ASSERT(!rs.is(at));
140 li(at, rt);
141 add(rd, rs, at);
142 }
143 }
144 }
145
146
147 void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
148 if (rt.is_reg()) {
149 addu(rd, rs, rt.rm());
150 } else {
151 if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
152 addiu(rd, rs, rt.imm32_);
153 } else {
154 // li handles the relocation.
155 ASSERT(!rs.is(at));
156 li(at, rt);
157 addu(rd, rs, at);
158 }
159 }
160 }
161
162
163 void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
164 if (rt.is_reg()) {
165 mul(rd, rs, rt.rm());
166 } else {
167 // li handles the relocation.
168 ASSERT(!rs.is(at));
169 li(at, rt);
170 mul(rd, rs, at);
171 }
172 }
173
174
175 void MacroAssembler::Mult(Register rs, const Operand& rt) {
176 if (rt.is_reg()) {
177 mult(rs, rt.rm());
178 } else {
179 // li handles the relocation.
180 ASSERT(!rs.is(at));
181 li(at, rt);
182 mult(rs, at);
183 }
184 }
185
186
187 void MacroAssembler::Multu(Register rs, const Operand& rt) {
188 if (rt.is_reg()) {
189 multu(rs, rt.rm());
190 } else {
191 // li handles the relocation.
192 ASSERT(!rs.is(at));
193 li(at, rt);
194 multu(rs, at);
195 }
196 }
197
198
199 void MacroAssembler::Div(Register rs, const Operand& rt) {
200 if (rt.is_reg()) {
201 div(rs, rt.rm());
202 } else {
203 // li handles the relocation.
204 ASSERT(!rs.is(at));
205 li(at, rt);
206 div(rs, at);
207 }
208 }
209
210
211 void MacroAssembler::Divu(Register rs, const Operand& rt) {
212 if (rt.is_reg()) {
213 divu(rs, rt.rm());
214 } else {
215 // li handles the relocation.
216 ASSERT(!rs.is(at));
217 li(at, rt);
218 divu(rs, at);
219 }
220 }
221
222
223 void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
224 if (rt.is_reg()) {
225 and_(rd, rs, rt.rm());
226 } else {
227 if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
228 andi(rd, rs, rt.imm32_);
229 } else {
230 // li handles the relocation.
231 ASSERT(!rs.is(at));
232 li(at, rt);
233 and_(rd, rs, at);
234 }
235 }
236 }
237
238
239 void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
240 if (rt.is_reg()) {
241 or_(rd, rs, rt.rm());
242 } else {
243 if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
244 ori(rd, rs, rt.imm32_);
245 } else {
246 // li handles the relocation.
247 ASSERT(!rs.is(at));
248 li(at, rt);
249 or_(rd, rs, at);
250 }
251 }
252 }
253
254
255 void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
256 if (rt.is_reg()) {
257 xor_(rd, rs, rt.rm());
258 } else {
259 if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
260 xori(rd, rs, rt.imm32_);
261 } else {
262 // li handles the relocation.
263 ASSERT(!rs.is(at));
264 li(at, rt);
265 xor_(rd, rs, at);
266 }
267 }
268 }
269
270
271 void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
272 if (rt.is_reg()) {
273 nor(rd, rs, rt.rm());
274 } else {
275 // li handles the relocation.
276 ASSERT(!rs.is(at));
277 li(at, rt);
278 nor(rd, rs, at);
279 }
280 }
281
282
283 void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
284 if (rt.is_reg()) {
285 slt(rd, rs, rt.rm());
286 } else {
287 if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
288 slti(rd, rs, rt.imm32_);
289 } else {
290 // li handles the relocation.
291 ASSERT(!rs.is(at));
292 li(at, rt);
293 slt(rd, rs, at);
294 }
295 }
296 }
297
298
299 void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
300 if (rt.is_reg()) {
301 sltu(rd, rs, rt.rm());
302 } else {
303 if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
304 sltiu(rd, rs, rt.imm32_);
305 } else {
306 // li handles the relocation.
307 ASSERT(!rs.is(at));
308 li(at, rt);
309 sltu(rd, rs, at);
310 }
311 }
312 }
313
314
315 //------------Pseudo-instructions-------------
316
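// NOTE: Despite its name, the macro below emits rd = ~rt (an xor against an
// all-ones value), i.e. a bitwise NOT rather than a conditional move.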
317 void MacroAssembler::movn(Register rd, Register rt) {
318 addiu(at, zero_reg, -1); // Fill at with ones.
319 xor_(rd, rt, at);
320 }
321
322
323 // Load a 32-bit immediate value (word) into a register.
324 void MacroAssembler::li(Register rd, Operand j, bool gen2instr) {
325 ASSERT(!j.is_reg());
326
327 if (!MustUseAt(j.rmode_) && !gen2instr) {
328 // Normal load of an immediate value which does not need Relocation Info.
329 if (is_int16(j.imm32_)) {
330 addiu(rd, zero_reg, j.imm32_);
331 } else if (!(j.imm32_ & HIMask)) {
332 ori(rd, zero_reg, j.imm32_);
333 } else if (!(j.imm32_ & LOMask)) {
334 lui(rd, (HIMask & j.imm32_) >> 16);
335 } else {
336 lui(rd, (HIMask & j.imm32_) >> 16);
337 ori(rd, rd, (LOMask & j.imm32_));
338 }
339 } else if (MustUseAt(j.rmode_) || gen2instr) {
340 if (MustUseAt(j.rmode_)) {
341 RecordRelocInfo(j.rmode_, j.imm32_);
342 }
343 // We always emit the same number of instructions, because this code may
344 // later be patched in place with a value that needs two instructions.
345 if (is_int16(j.imm32_)) {
346 nop();
347 addiu(rd, zero_reg, j.imm32_);
348 } else if (!(j.imm32_ & HIMask)) {
349 nop();
350 ori(rd, zero_reg, j.imm32_);
351 } else if (!(j.imm32_ & LOMask)) {
352 nop();
353 lui(rd, (HIMask & j.imm32_) >> 16);
354 } else {
355 lui(rd, (HIMask & j.imm32_) >> 16);
356 ori(rd, rd, (LOMask & j.imm32_));
357 }
358 }
359 }
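// Illustrative examples for li() above (register choice is arbitrary):
//   li(t0, Operand(0x12345678))  emits  lui t0, 0x1234 / ori t0, t0, 0x5678
//   li(t0, Operand(42))          emits  addiu t0, zero_reg, 42
// unless gen2instr or a relocatable rmode forces the nop-padded
// two-instruction form so the code can later be patched with any value.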
360
361
362 // Exception-generating instructions and debugging support
363 void MacroAssembler::stop(const char* msg) {
364 // TO_UPGRADE: Just a break for now; this could be upgraded later.
365 // The 0x54321 break code makes this easy to spot when reading memory.
366 break_(0x54321);
367 }
368
369
370 void MacroAssembler::MultiPush(RegList regs) {
371 int16_t NumSaved = 0;
372 int16_t NumToPush = NumberOfBitsSet(regs);
373
374 addiu(sp, sp, -4*NumToPush);
375 for (int16_t i = 0; i < kNumRegisters; i++) {
376 if ((regs & (1 << i)) != 0) {
377 sw(ToRegister(i),
378 MemOperand(sp, 4*(NumToPush - ++NumSaved)));
379 }
380 }
381 }
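// For example, MultiPush(a0.bit() | a1.bit()) reserves 8 bytes and stores
// a0 at sp+4 and a1 at sp+0: lower-numbered registers end up at higher
// addresses, matching the order in which MultiPop restores them.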
382
383
384 void MacroAssembler::MultiPushReversed(RegList regs) {
385 int16_t NumSaved = 0;
386 int16_t NumToPush = NumberOfBitsSet(regs);
387
388 addiu(sp, sp, -4*NumToPush);
389 for (int16_t i = kNumRegisters; --i >= 0;) {
390 if ((regs & (1 << i)) != 0) {
391 sw(ToRegister(i),
392 MemOperand(sp, 4*(NumToPush - ++NumSaved)));
393 }
394 }
395 }
396
397
398 void MacroAssembler::MultiPop(RegList regs) {
399 int16_t NumSaved = 0;
400
401 for (int16_t i = kNumRegisters; --i >= 0;) {
402 if ((regs & (1 << i)) != 0) {
403 lw(ToRegister(i), MemOperand(sp, 4*(NumSaved++)));
404 }
405 }
406 addiu(sp, sp, 4*NumSaved);
407 }
408
409
410 void MacroAssembler::MultiPopReversed(RegList regs) {
411 int16_t NumSaved = 0;
412
413 for (int16_t i = 0; i < kNumRegisters; i++) {
414 if ((regs & (1 << i)) != 0) {
415 lw(ToRegister(i), MemOperand(sp, 4*(NumSaved++)));
416 }
417 }
418 addiu(sp, sp, 4*NumSaved);
419 }
420
421
422 // Emulated conditional branches do not emit a nop in the branch delay slot.
423
424 // Trashes the at register if no scratch register is provided.
425 void MacroAssembler::Branch(Condition cond, int16_t offset, Register rs,
426 const Operand& rt, Register scratch) {
427 Register r2;
428 if (rt.is_reg()) {
429 // We don't want any other register but scratch clobbered.
430 ASSERT(!scratch.is(rs) && !scratch.is(rt.rm_));
431 r2 = rt.rm_;
432 } else if (cond != cc_always) {
433 // We don't want any other register but scratch clobbered.
434 ASSERT(!scratch.is(rs));
435 r2 = scratch;
436 li(r2, rt);
437 }
438
439 switch (cond) {
440 case cc_always:
441 b(offset);
442 break;
443 case eq:
444 beq(rs, r2, offset);
445 break;
446 case ne:
447 bne(rs, r2, offset);
448 break;
449
450 // Signed comparison
451 case greater:
452 slt(scratch, r2, rs);
453 bne(scratch, zero_reg, offset);
454 break;
455 case greater_equal:
456 slt(scratch, rs, r2);
457 beq(scratch, zero_reg, offset);
458 break;
459 case less:
460 slt(scratch, rs, r2);
461 bne(scratch, zero_reg, offset);
462 break;
463 case less_equal:
464 slt(scratch, r2, rs);
465 beq(scratch, zero_reg, offset);
466 break;
467
468 // Unsigned comparison.
469 case Ugreater:
470 sltu(scratch, r2, rs);
471 bne(scratch, zero_reg, offset);
472 break;
473 case Ugreater_equal:
474 sltu(scratch, rs, r2);
475 beq(scratch, zero_reg, offset);
476 break;
477 case Uless:
478 sltu(scratch, rs, r2);
479 bne(scratch, zero_reg, offset);
480 break;
481 case Uless_equal:
482 sltu(scratch, r2, rs);
483 beq(scratch, zero_reg, offset);
484 break;
485
486 default:
487 UNREACHABLE();
488 }
489 }
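// For example (assuming the default scratch register, at), the sequence
// emitted for Branch(greater, offset, rs, Operand(t1)) is:
//   slt  at, t1, rs            // at = 1 iff rs > t1.
//   bne  at, zero_reg, offset
// As noted above, the caller supplies the delay-slot instruction.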
490
491
492 void MacroAssembler::Branch(Condition cond, Label* L, Register rs,
493 const Operand& rt, Register scratch) {
494 Register r2;
495 if (rt.is_reg()) {
496 r2 = rt.rm_;
497 } else if (cond != cc_always) {
498 r2 = scratch;
499 li(r2, rt);
500 }
501
502 // We pass shifted_branch_offset() inline as each branch instruction's
503 // argument so that it is evaluated immediately before the branch is emitted.
504
505 switch (cond) {
506 case cc_always:
507 b(shifted_branch_offset(L, false));
508 break;
509 case eq:
510 beq(rs, r2, shifted_branch_offset(L, false));
511 break;
512 case ne:
513 bne(rs, r2, shifted_branch_offset(L, false));
514 break;
515
516 // Signed comparison
517 case greater:
518 slt(scratch, r2, rs);
519 bne(scratch, zero_reg, shifted_branch_offset(L, false));
520 break;
521 case greater_equal:
522 slt(scratch, rs, r2);
523 beq(scratch, zero_reg, shifted_branch_offset(L, false));
524 break;
525 case less:
526 slt(scratch, rs, r2);
527 bne(scratch, zero_reg, shifted_branch_offset(L, false));
528 break;
529 case less_equal:
530 slt(scratch, r2, rs);
531 beq(scratch, zero_reg, shifted_branch_offset(L, false));
532 break;
533
534 // Unsigned comparison.
535 case Ugreater:
536 sltu(scratch, r2, rs);
537 bne(scratch, zero_reg, shifted_branch_offset(L, false));
538 break;
539 case Ugreater_equal:
540 sltu(scratch, rs, r2);
541 beq(scratch, zero_reg, shifted_branch_offset(L, false));
542 break;
543 case Uless:
544 sltu(scratch, rs, r2);
545 bne(scratch, zero_reg, shifted_branch_offset(L, false));
546 break;
547 case Uless_equal:
548 sltu(scratch, r2, rs);
549 beq(scratch, zero_reg, shifted_branch_offset(L, false));
550 break;
551
552 default:
553 UNREACHABLE();
554 }
555 }
556
557
558 // Trashes the at register if no scratch register is provided.
559 // We need bgezal or bltzal, but they cannot test slt's 0/1 result directly.
560 // sub or add could be used instead but would miss overflow cases, so we keep
561 // slt and follow it with addiu -1, mapping 0/1 to -1/0 for the sign test.
562 void MacroAssembler::BranchAndLink(Condition cond, int16_t offset, Register rs,
563 const Operand& rt, Register scratch) {
564 Register r2;
565 if (rt.is_reg()) {
566 r2 = rt.rm_;
567 } else if (cond != cc_always) {
568 r2 = scratch;
569 li(r2, rt);
570 }
571
572 switch (cond) {
573 case cc_always:
574 bal(offset);
575 break;
576 case eq:
577 bne(rs, r2, 2);
578 nop();
579 bal(offset);
580 break;
581 case ne:
582 beq(rs, r2, 2);
583 nop();
584 bal(offset);
585 break;
586
587 // Signed comparison
588 case greater:
589 slt(scratch, r2, rs);
590 addiu(scratch, scratch, -1);
591 bgezal(scratch, offset);
592 break;
593 case greater_equal:
594 slt(scratch, rs, r2);
595 addiu(scratch, scratch, -1);
596 bltzal(scratch, offset);
597 break;
598 case less:
599 slt(scratch, rs, r2);
600 addiu(scratch, scratch, -1);
601 bgezal(scratch, offset);
602 break;
603 case less_equal:
604 slt(scratch, r2, rs);
605 addiu(scratch, scratch, -1);
606 bltzal(scratch, offset);
607 break;
608
609 // Unsigned comparison.
610 case Ugreater:
611 sltu(scratch, r2, rs);
612 addiu(scratch, scratch, -1);
613 bgezal(scratch, offset);
614 break;
615 case Ugreater_equal:
616 sltu(scratch, rs, r2);
617 addiu(scratch, scratch, -1);
618 bltzal(scratch, offset);
619 break;
620 case Uless:
621 sltu(scratch, rs, r2);
622 addiu(scratch, scratch, -1);
623 bgezal(scratch, offset);
624 break;
625 case Uless_equal:
626 sltu(scratch, r2, rs);
627 addiu(scratch, scratch, -1);
628 bltzal(scratch, offset);
629 break;
630
631 default:
632 UNREACHABLE();
633 }
634 }
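// For example, BranchAndLink(eq, offset, rs, Operand(t1)) emits:
//   bne  rs, t1, 2      // Skip the linked branch when rs != t1.
//   nop                 // Branch delay slot.
//   bal  offset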
635
636
637 void MacroAssembler::BranchAndLink(Condition cond, Label* L, Register rs,
638 const Operand& rt, Register scratch) {
639 Register r2;
640 if (rt.is_reg()) {
641 r2 = rt.rm_;
642 } else if (cond != cc_always) {
643 r2 = scratch;
644 li(r2, rt);
645 }
646
647 switch (cond) {
648 case cc_always:
649 bal(shifted_branch_offset(L, false));
650 break;
651 case eq:
652 bne(rs, r2, 2);
653 nop();
654 bal(shifted_branch_offset(L, false));
655 break;
656 case ne:
657 beq(rs, r2, 2);
658 nop();
659 bal(shifted_branch_offset(L, false));
660 break;
661
662 // Signed comparison
663 case greater:
664 slt(scratch, r2, rs);
665 addiu(scratch, scratch, -1);
666 bgezal(scratch, shifted_branch_offset(L, false));
667 break;
668 case greater_equal:
669 slt(scratch, rs, r2);
670 addiu(scratch, scratch, -1);
671 bltzal(scratch, shifted_branch_offset(L, false));
672 break;
673 case less:
674 slt(scratch, rs, r2);
675 addiu(scratch, scratch, -1);
676 bgezal(scratch, shifted_branch_offset(L, false));
677 break;
678 case less_equal:
679 slt(scratch, r2, rs);
680 addiu(scratch, scratch, -1);
681 bltzal(scratch, shifted_branch_offset(L, false));
682 break;
683
684 // Unsigned comparison.
685 case Ugreater:
686 sltu(scratch, r2, rs);
687 addiu(scratch, scratch, -1);
688 bgezal(scratch, shifted_branch_offset(L, false));
689 break;
690 case Ugreater_equal:
691 sltu(scratch, rs, r2);
692 addiu(scratch, scratch, -1);
693 bltzal(scratch, shifted_branch_offset(L, false));
694 break;
695 case Uless:
696 sltu(scratch, rs, r2);
697 addiu(scratch, scratch, -1);
698 bgezal(scratch, shifted_branch_offset(L, false));
699 break;
700 case Uless_equal:
701 sltu(scratch, r2, rs);
702 addiu(scratch, scratch, -1);
703 bltzal(scratch, shifted_branch_offset(L, false));
704 break;
705
706 default:
707 UNREACHABLE();
708 }
709 }
710
711
712 void MacroAssembler::Jump(const Operand& target,
713 Condition cond, Register rs, const Operand& rt) {
714 if (target.is_reg()) {
715 if (cond == cc_always) {
716 jr(target.rm());
717 } else {
718 Branch(NegateCondition(cond), 2, rs, rt);
719 nop();
720 jr(target.rm());
721 }
722 } else { // !target.is_reg()
723 if (!MustUseAt(target.rmode_)) {
724 if (cond == cc_always) {
725 j(target.imm32_);
726 } else {
727 Branch(NegateCondition(cond), 2, rs, rt);
728 nop();
729 j(target.imm32_); // will generate only one instruction.
730 }
731 } else { // MustUseAt(target)
732 li(at, target);
733 if (cond == cc_always) {
734 jr(at);
735 } else {
736 Branch(NegateCondition(cond), 2, rs, rt);
737 nop();
738 jr(at); // will generate only one instruction.
739 }
740 }
741 }
742 }
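// Note: when the target requires relocation (MustUseAt), it is first
// materialized in at via li, which records the reloc info, and the jump is
// then performed with jr at, keeping the emitted sequence patchable.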
743
744
745 void MacroAssembler::Call(const Operand& target,
746 Condition cond, Register rs, const Operand& rt) {
747 if (target.is_reg()) {
748 if (cond == cc_always) {
749 jalr(target.rm());
750 } else {
751 Branch(NegateCondition(cond), 2, rs, rt);
752 nop();
753 jalr(target.rm());
754 }
755 } else { // !target.is_reg()
756 if (!MustUseAt(target.rmode_)) {
757 if (cond == cc_always) {
758 jal(target.imm32_);
759 } else {
760 Branch(NegateCondition(cond), 2, rs, rt);
761 nop();
762 jal(target.imm32_); // will generate only one instruction.
763 }
764 } else { // MustUseAt(target)
765 li(at, target);
766 if (cond == cc_always) {
767 jalr(at);
768 } else {
769 Branch(NegateCondition(cond), 2, rs, rt);
770 nop();
771 jalr(at); // will generate only one instruction.
772 }
773 }
774 }
775 }
776
777 void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) {
778 UNIMPLEMENTED_MIPS();
779 }
780
781
782 void MacroAssembler::Drop(int count, Condition cond) {
783 UNIMPLEMENTED_MIPS();
784 }
785
786
787 void MacroAssembler::Call(Label* target) {
788 UNIMPLEMENTED_MIPS();
789 }
790
791
792 // ---------------------------------------------------------------------------
793 // Exception handling
794
795 void MacroAssembler::PushTryHandler(CodeLocation try_location,
796 HandlerType type) {
797 UNIMPLEMENTED_MIPS();
798 }
799
800
801 void MacroAssembler::PopTryHandler() {
802 UNIMPLEMENTED_MIPS();
803 }
804
805
806
807 // ---------------------------------------------------------------------------
808 // Activation frames
809
810 void MacroAssembler::CallStub(CodeStub* stub, Condition cond,
811 Register r1, const Operand& r2) {
812 UNIMPLEMENTED_MIPS();
813 }
814
815
816 void MacroAssembler::StubReturn(int argc) {
817 UNIMPLEMENTED_MIPS();
818 }
819
820
821 void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
822 UNIMPLEMENTED_MIPS();
823 }
824
825
826 void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
827 UNIMPLEMENTED_MIPS();
828 }
829
830
831 void MacroAssembler::TailCallRuntime(const ExternalReference& ext,
832 int num_arguments,
833 int result_size) {
834 UNIMPLEMENTED_MIPS();
835 }
836
837
838 void MacroAssembler::JumpToRuntime(const ExternalReference& builtin) {
839 UNIMPLEMENTED_MIPS();
840 }
841
842
843 Handle<Code> MacroAssembler::ResolveBuiltin(Builtins::JavaScript id,
844 bool* resolved) {
845 UNIMPLEMENTED_MIPS();
846 return Handle<Code>(reinterpret_cast<Code*>(NULL)); // UNIMPLEMENTED RETURN
847 }
848
849
850 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
851 InvokeJSFlags flags) {
852 UNIMPLEMENTED_MIPS();
853 }
854
855
856 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
857 UNIMPLEMENTED_MIPS();
858 }
859
860
861 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
862 Register scratch1, Register scratch2) {
863 UNIMPLEMENTED_MIPS();
864 }
865
866
867 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
868 Register scratch1, Register scratch2) {
869 UNIMPLEMENTED_MIPS();
870 }
871
872
873 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
874 Register scratch1, Register scratch2) {
875 UNIMPLEMENTED_MIPS();
876 }
877
878
879
880 void MacroAssembler::Assert(Condition cc, const char* msg,
881 Register rs, Operand rt) {
882 UNIMPLEMENTED_MIPS();
883 }
884
885
886 void MacroAssembler::Check(Condition cc, const char* msg,
887 Register rs, Operand rt) {
888 UNIMPLEMENTED_MIPS();
889 }
890
891
892 void MacroAssembler::Abort(const char* msg) {
893 UNIMPLEMENTED_MIPS();
894 }
895
896 } } // namespace v8::internal
897