OLD | NEW |
1 // Copyright 2010 the V8 project authors. All rights reserved. | 1 // Copyright 2010 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 37 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
48 | 48 |
49 | 49 |
50 void MacroAssembler::Jump(Register target, Condition cond, | 50 void MacroAssembler::Jump(Register target, Condition cond, |
51 Register r1, const Operand& r2) { | 51 Register r1, const Operand& r2) { |
52 Jump(Operand(target), cond, r1, r2); | 52 Jump(Operand(target), cond, r1, r2); |
53 } | 53 } |
54 | 54 |
55 | 55 |
56 void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode, | 56 void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode, |
57 Condition cond, Register r1, const Operand& r2) { | 57 Condition cond, Register r1, const Operand& r2) { |
58 Jump(Operand(target), cond, r1, r2); | 58 Jump(Operand(target, rmode), cond, r1, r2); |
59 } | 59 } |
60 | 60 |
61 | 61 |
62 void MacroAssembler::Jump(byte* target, RelocInfo::Mode rmode, | 62 void MacroAssembler::Jump(byte* target, RelocInfo::Mode rmode, |
63 Condition cond, Register r1, const Operand& r2) { | 63 Condition cond, Register r1, const Operand& r2) { |
64 ASSERT(!RelocInfo::IsCodeTarget(rmode)); | 64 ASSERT(!RelocInfo::IsCodeTarget(rmode)); |
65 Jump(reinterpret_cast<intptr_t>(target), rmode, cond, r1, r2); | 65 Jump(reinterpret_cast<intptr_t>(target), rmode, cond, r1, r2); |
66 } | 66 } |
67 | 67 |
68 | 68 |
69 void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode, | 69 void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode, |
70 Condition cond, Register r1, const Operand& r2) { | 70 Condition cond, Register r1, const Operand& r2) { |
71 ASSERT(RelocInfo::IsCodeTarget(rmode)); | 71 ASSERT(RelocInfo::IsCodeTarget(rmode)); |
72 Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond); | 72 Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond); |
73 } | 73 } |
74 | 74 |
75 | 75 |
76 void MacroAssembler::Call(Register target, | 76 void MacroAssembler::Call(Register target, |
77 Condition cond, Register r1, const Operand& r2) { | 77 Condition cond, Register r1, const Operand& r2) { |
78 Call(Operand(target), cond, r1, r2); | 78 Call(Operand(target), cond, r1, r2); |
79 } | 79 } |
80 | 80 |
81 | 81 |
82 void MacroAssembler::Call(intptr_t target, RelocInfo::Mode rmode, | 82 void MacroAssembler::Call(intptr_t target, RelocInfo::Mode rmode, |
83 Condition cond, Register r1, const Operand& r2) { | 83 Condition cond, Register r1, const Operand& r2) { |
84 Call(Operand(target), cond, r1, r2); | 84 Call(Operand(target, rmode), cond, r1, r2); |
85 } | 85 } |
86 | 86 |
87 | 87 |
88 void MacroAssembler::Call(byte* target, RelocInfo::Mode rmode, | 88 void MacroAssembler::Call(byte* target, RelocInfo::Mode rmode, |
89 Condition cond, Register r1, const Operand& r2) { | 89 Condition cond, Register r1, const Operand& r2) { |
90 ASSERT(!RelocInfo::IsCodeTarget(rmode)); | 90 ASSERT(!RelocInfo::IsCodeTarget(rmode)); |
91 Call(reinterpret_cast<intptr_t>(target), rmode, cond, r1, r2); | 91 Call(reinterpret_cast<intptr_t>(target), rmode, cond, r1, r2); |
92 } | 92 } |
93 | 93 |
94 | 94 |
95 void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode, | 95 void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode, |
96 Condition cond, Register r1, const Operand& r2) { | 96 Condition cond, Register r1, const Operand& r2) { |
97 ASSERT(RelocInfo::IsCodeTarget(rmode)); | 97 ASSERT(RelocInfo::IsCodeTarget(rmode)); |
98 Call(reinterpret_cast<intptr_t>(code.location()), rmode, cond, r1, r2); | 98 Call(reinterpret_cast<intptr_t>(code.location()), rmode, cond, r1, r2); |
99 } | 99 } |
100 | 100 |
101 | 101 |
102 void MacroAssembler::Ret(Condition cond, Register r1, const Operand& r2) { | 102 void MacroAssembler::Ret(Condition cond, Register r1, const Operand& r2) { |
103 Jump(Operand(ra), cond, r1, r2); | 103 Jump(Operand(ra), cond, r1, r2); |
104 } | 104 } |
105 | 105 |
106 | 106 |
107 void MacroAssembler::LoadRoot(Register destination, | 107 void MacroAssembler::LoadRoot(Register destination, |
108 Heap::RootListIndex index) { | 108 Heap::RootListIndex index) { |
109 lw(destination, MemOperand(s4, index << kPointerSizeLog2)); | 109 lw(destination, MemOperand(s6, index << kPointerSizeLog2)); |
110 } | 110 } |
111 | 111 |
112 void MacroAssembler::LoadRoot(Register destination, | 112 void MacroAssembler::LoadRoot(Register destination, |
113 Heap::RootListIndex index, | 113 Heap::RootListIndex index, |
114 Condition cond, | 114 Condition cond, |
115 Register src1, const Operand& src2) { | 115 Register src1, const Operand& src2) { |
116 Branch(NegateCondition(cond), 2, src1, src2); | 116 Branch(NegateCondition(cond), 2, src1, src2); |
117 nop(); | 117 lw(destination, MemOperand(s6, index << kPointerSizeLog2)); |
118 lw(destination, MemOperand(s4, index << kPointerSizeLog2)); | |
119 } | 118 } |
120 | 119 |
121 | 120 |
122 void MacroAssembler::RecordWrite(Register object, Register offset, | 121 void MacroAssembler::RecordWrite(Register object, Register offset, |
123 Register scratch) { | 122 Register scratch) { |
124 UNIMPLEMENTED_MIPS(); | 123 UNIMPLEMENTED_MIPS(); |
125 } | 124 } |
126 | 125 |
127 | 126 |
128 // --------------------------------------------------------------------------- | 127 // --------------------------------------------------------------------------- |
(...skipping 184 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
313 | 312 |
314 | 313 |
315 //------------Pseudo-instructions------------- | 314 //------------Pseudo-instructions------------- |
316 | 315 |
317 void MacroAssembler::movn(Register rd, Register rt) { | 316 void MacroAssembler::movn(Register rd, Register rt) { |
318 addiu(at, zero_reg, -1); // Fill at with ones. | 317 addiu(at, zero_reg, -1); // Fill at with ones. |
319 xor_(rd, rt, at); | 318 xor_(rd, rt, at); |
320 } | 319 } |
321 | 320 |
322 | 321 |
323 // load word in a register | |
324 void MacroAssembler::li(Register rd, Operand j, bool gen2instr) { | 322 void MacroAssembler::li(Register rd, Operand j, bool gen2instr) { |
325 ASSERT(!j.is_reg()); | 323 ASSERT(!j.is_reg()); |
326 | 324 |
327 if (!MustUseAt(j.rmode_) && !gen2instr) { | 325 if (!MustUseAt(j.rmode_) && !gen2instr) { |
328 // Normal load of an immediate value which does not need Relocation Info. | 326 // Normal load of an immediate value which does not need Relocation Info. |
329 if (is_int16(j.imm32_)) { | 327 if (is_int16(j.imm32_)) { |
330 addiu(rd, zero_reg, j.imm32_); | 328 addiu(rd, zero_reg, j.imm32_); |
331 } else if (!(j.imm32_ & HIMask)) { | 329 } else if (!(j.imm32_ & HIMask)) { |
332 ori(rd, zero_reg, j.imm32_); | 330 ori(rd, zero_reg, j.imm32_); |
333 } else if (!(j.imm32_ & LOMask)) { | 331 } else if (!(j.imm32_ & LOMask)) { |
(...skipping 31 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
365 // We use the 0x54321 value to be able to find it easily when reading memory. | 363 // We use the 0x54321 value to be able to find it easily when reading memory. |
366 break_(0x54321); | 364 break_(0x54321); |
367 } | 365 } |
368 | 366 |
369 | 367 |
370 void MacroAssembler::MultiPush(RegList regs) { | 368 void MacroAssembler::MultiPush(RegList regs) { |
371 int16_t NumSaved = 0; | 369 int16_t NumSaved = 0; |
372 int16_t NumToPush = NumberOfBitsSet(regs); | 370 int16_t NumToPush = NumberOfBitsSet(regs); |
373 | 371 |
374 addiu(sp, sp, -4 * NumToPush); | 372 addiu(sp, sp, -4 * NumToPush); |
375 for (int16_t i = 0; i < kNumRegisters; i++) { | 373 for (int16_t i = kNumRegisters; i > 0; i--) { |
376 if ((regs & (1 << i)) != 0) { | 374 if ((regs & (1 << i)) != 0) { |
377 sw(ToRegister(i), MemOperand(sp, 4 * (NumToPush - ++NumSaved))); | 375 sw(ToRegister(i), MemOperand(sp, 4 * (NumToPush - ++NumSaved))); |
378 } | 376 } |
379 } | 377 } |
380 } | 378 } |
381 | 379 |
382 | 380 |
383 void MacroAssembler::MultiPushReversed(RegList regs) { | 381 void MacroAssembler::MultiPushReversed(RegList regs) { |
384 int16_t NumSaved = 0; | 382 int16_t NumSaved = 0; |
385 int16_t NumToPush = NumberOfBitsSet(regs); | 383 int16_t NumToPush = NumberOfBitsSet(regs); |
386 | 384 |
387 addiu(sp, sp, -4 * NumToPush); | 385 addiu(sp, sp, -4 * NumToPush); |
388 for (int16_t i = kNumRegisters; i > 0; i--) { | 386 for (int16_t i = 0; i < kNumRegisters; i++) { |
389 if ((regs & (1 << i)) != 0) { | 387 if ((regs & (1 << i)) != 0) { |
390 sw(ToRegister(i), MemOperand(sp, 4 * (NumToPush - ++NumSaved))); | 388 sw(ToRegister(i), MemOperand(sp, 4 * (NumToPush - ++NumSaved))); |
391 } | 389 } |
392 } | 390 } |
393 } | 391 } |
394 | 392 |
395 | 393 |
396 void MacroAssembler::MultiPop(RegList regs) { | 394 void MacroAssembler::MultiPop(RegList regs) { |
397 int16_t NumSaved = 0; | 395 int16_t NumSaved = 0; |
398 | 396 |
399 for (int16_t i = kNumRegisters; i > 0; i--) { | 397 for (int16_t i = 0; i < kNumRegisters; i++) { |
400 if ((regs & (1 << i)) != 0) { | 398 if ((regs & (1 << i)) != 0) { |
401 lw(ToRegister(i), MemOperand(sp, 4 * (NumSaved++))); | 399 lw(ToRegister(i), MemOperand(sp, 4 * (NumSaved++))); |
402 } | 400 } |
403 } | 401 } |
404 addiu(sp, sp, 4 * NumSaved); | 402 addiu(sp, sp, 4 * NumSaved); |
405 } | 403 } |
406 | 404 |
407 | 405 |
408 void MacroAssembler::MultiPopReversed(RegList regs) { | 406 void MacroAssembler::MultiPopReversed(RegList regs) { |
409 int16_t NumSaved = 0; | 407 int16_t NumSaved = 0; |
410 | 408 |
411 for (int16_t i = 0; i < kNumRegisters; i++) { | 409 for (int16_t i = kNumRegisters; i > 0; i--) { |
412 if ((regs & (1 << i)) != 0) { | 410 if ((regs & (1 << i)) != 0) { |
413 lw(ToRegister(i), MemOperand(sp, 4 * (NumSaved++))); | 411 lw(ToRegister(i), MemOperand(sp, 4 * (NumSaved++))); |
414 } | 412 } |
415 } | 413 } |
416 addiu(sp, sp, 4 * NumSaved); | 414 addiu(sp, sp, 4 * NumSaved); |
417 } | 415 } |
418 | 416 |
419 | 417 |
420 // Emulated conditional branches do not emit a nop in the branch delay slot. | 418 // Emulated conditional branches do not emit a nop in the branch delay slot. |
421 | 419 |
(...skipping 55 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
477 bne(scratch, zero_reg, offset); | 475 bne(scratch, zero_reg, offset); |
478 break; | 476 break; |
479 case Uless_equal: | 477 case Uless_equal: |
480 sltu(scratch, r2, rs); | 478 sltu(scratch, r2, rs); |
481 beq(scratch, zero_reg, offset); | 479 beq(scratch, zero_reg, offset); |
482 break; | 480 break; |
483 | 481 |
484 default: | 482 default: |
485 UNREACHABLE(); | 483 UNREACHABLE(); |
486 } | 484 } |
| 485 // Emit a nop in the branch delay slot. |
| 486 nop(); |
487 } | 487 } |
488 | 488 |
489 | 489 |
490 void MacroAssembler::Branch(Condition cond, Label* L, Register rs, | 490 void MacroAssembler::Branch(Condition cond, Label* L, Register rs, |
491 const Operand& rt, Register scratch) { | 491 const Operand& rt, Register scratch) { |
492 Register r2 = no_reg; | 492 Register r2 = no_reg; |
493 if (rt.is_reg()) { | 493 if (rt.is_reg()) { |
494 r2 = rt.rm_; | 494 r2 = rt.rm_; |
495 } else if (cond != cc_always) { | 495 } else if (cond != cc_always) { |
496 r2 = scratch; | 496 r2 = scratch; |
(...skipping 46 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
543 bne(scratch, zero_reg, shifted_branch_offset(L, false)); | 543 bne(scratch, zero_reg, shifted_branch_offset(L, false)); |
544 break; | 544 break; |
545 case Uless_equal: | 545 case Uless_equal: |
546 sltu(scratch, r2, rs); | 546 sltu(scratch, r2, rs); |
547 beq(scratch, zero_reg, shifted_branch_offset(L, false)); | 547 beq(scratch, zero_reg, shifted_branch_offset(L, false)); |
548 break; | 548 break; |
549 | 549 |
550 default: | 550 default: |
551 UNREACHABLE(); | 551 UNREACHABLE(); |
552 } | 552 } |
| 553 // Emit a nop in the branch delay slot. |
| 554 nop(); |
553 } | 555 } |
554 | 556 |
555 | 557 |
556 // Trashes the at register if no scratch register is provided. | 558 // Trashes the at register if no scratch register is provided. |
557 // We need to use a bgezal or bltzal, but they can't be used directly with the | 559 // We need to use a bgezal or bltzal, but they can't be used directly with the |
558 // slt instructions. We could use sub or add instead but we would miss overflow | 560 // slt instructions. We could use sub or add instead but we would miss overflow |
559 // cases, so we keep slt and add an intermediate third instruction. | 561 // cases, so we keep slt and add an intermediate third instruction. |
560 void MacroAssembler::BranchAndLink(Condition cond, int16_t offset, Register rs, | 562 void MacroAssembler::BranchAndLink(Condition cond, int16_t offset, Register rs, |
561 const Operand& rt, Register scratch) { | 563 const Operand& rt, Register scratch) { |
562 Register r2 = no_reg; | 564 Register r2 = no_reg; |
(...skipping 59 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
622 break; | 624 break; |
623 case Uless_equal: | 625 case Uless_equal: |
624 sltu(scratch, r2, rs); | 626 sltu(scratch, r2, rs); |
625 addiu(scratch, scratch, -1); | 627 addiu(scratch, scratch, -1); |
626 bltzal(scratch, offset); | 628 bltzal(scratch, offset); |
627 break; | 629 break; |
628 | 630 |
629 default: | 631 default: |
630 UNREACHABLE(); | 632 UNREACHABLE(); |
631 } | 633 } |
| 634 // Emit a nop in the branch delay slot. |
| 635 nop(); |
632 } | 636 } |
633 | 637 |
634 | 638 |
635 void MacroAssembler::BranchAndLink(Condition cond, Label* L, Register rs, | 639 void MacroAssembler::BranchAndLink(Condition cond, Label* L, Register rs, |
636 const Operand& rt, Register scratch) { | 640 const Operand& rt, Register scratch) { |
637 Register r2 = no_reg; | 641 Register r2 = no_reg; |
638 if (rt.is_reg()) { | 642 if (rt.is_reg()) { |
639 r2 = rt.rm_; | 643 r2 = rt.rm_; |
640 } else if (cond != cc_always) { | 644 } else if (cond != cc_always) { |
641 r2 = scratch; | 645 r2 = scratch; |
(...skipping 55 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
697 break; | 701 break; |
698 case Uless_equal: | 702 case Uless_equal: |
699 sltu(scratch, r2, rs); | 703 sltu(scratch, r2, rs); |
700 addiu(scratch, scratch, -1); | 704 addiu(scratch, scratch, -1); |
701 bltzal(scratch, shifted_branch_offset(L, false)); | 705 bltzal(scratch, shifted_branch_offset(L, false)); |
702 break; | 706 break; |
703 | 707 |
704 default: | 708 default: |
705 UNREACHABLE(); | 709 UNREACHABLE(); |
706 } | 710 } |
| 711 // Emit a nop in the branch delay slot. |
| 712 nop(); |
707 } | 713 } |
708 | 714 |
709 | 715 |
710 void MacroAssembler::Jump(const Operand& target, | 716 void MacroAssembler::Jump(const Operand& target, |
711 Condition cond, Register rs, const Operand& rt) { | 717 Condition cond, Register rs, const Operand& rt) { |
712 if (target.is_reg()) { | 718 if (target.is_reg()) { |
713 if (cond == cc_always) { | 719 if (cond == cc_always) { |
714 jr(target.rm()); | 720 jr(target.rm()); |
715 } else { | 721 } else { |
716 Branch(NegateCondition(cond), 2, rs, rt); | 722 Branch(NegateCondition(cond), 2, rs, rt); |
717 nop(); | |
718 jr(target.rm()); | 723 jr(target.rm()); |
719 } | 724 } |
720 } else { // !target.is_reg() | 725 } else { // !target.is_reg() |
721 if (!MustUseAt(target.rmode_)) { | 726 if (!MustUseAt(target.rmode_)) { |
722 if (cond == cc_always) { | 727 if (cond == cc_always) { |
723 j(target.imm32_); | 728 j(target.imm32_); |
724 } else { | 729 } else { |
725 Branch(NegateCondition(cond), 2, rs, rt); | 730 Branch(NegateCondition(cond), 2, rs, rt); |
726 nop(); | 731 j(target.imm32_); // Will generate only one instruction. |
727 j(target.imm32_); // will generate only one instruction. | |
728 } | 732 } |
729 } else { // MustUseAt(target) | 733 } else { // MustUseAt(target) |
730 li(at, rt); | 734 li(at, target); |
731 if (cond == cc_always) { | 735 if (cond == cc_always) { |
732 jr(at); | 736 jr(at); |
733 } else { | 737 } else { |
734 Branch(NegateCondition(cond), 2, rs, rt); | 738 Branch(NegateCondition(cond), 2, rs, rt); |
735 nop(); | 739 jr(at); // Will generate only one instruction. |
736 jr(at); // will generate only one instruction. | |
737 } | 740 } |
738 } | 741 } |
739 } | 742 } |
| 743 // Emit a nop in the branch delay slot. |
| 744 nop(); |
740 } | 745 } |
741 | 746 |
742 | 747 |
743 void MacroAssembler::Call(const Operand& target, | 748 void MacroAssembler::Call(const Operand& target, |
744 Condition cond, Register rs, const Operand& rt) { | 749 Condition cond, Register rs, const Operand& rt) { |
745 if (target.is_reg()) { | 750 if (target.is_reg()) { |
746 if (cond == cc_always) { | 751 if (cond == cc_always) { |
747 jalr(target.rm()); | 752 jalr(target.rm()); |
748 } else { | 753 } else { |
749 Branch(NegateCondition(cond), 2, rs, rt); | 754 Branch(NegateCondition(cond), 2, rs, rt); |
750 nop(); | |
751 jalr(target.rm()); | 755 jalr(target.rm()); |
752 } | 756 } |
753 } else { // !target.is_reg() | 757 } else { // !target.is_reg() |
754 if (!MustUseAt(target.rmode_)) { | 758 if (!MustUseAt(target.rmode_)) { |
755 if (cond == cc_always) { | 759 if (cond == cc_always) { |
756 jal(target.imm32_); | 760 jal(target.imm32_); |
757 } else { | 761 } else { |
758 Branch(NegateCondition(cond), 2, rs, rt); | 762 Branch(NegateCondition(cond), 2, rs, rt); |
759 nop(); | 763 jal(target.imm32_); // Will generate only one instruction. |
760 jal(target.imm32_); // will generate only one instruction. | |
761 } | 764 } |
762 } else { // MustUseAt(target) | 765 } else { // MustUseAt(target) |
763 li(at, rt); | 766 li(at, target); |
764 if (cond == cc_always) { | 767 if (cond == cc_always) { |
765 jalr(at); | 768 jalr(at); |
766 } else { | 769 } else { |
767 Branch(NegateCondition(cond), 2, rs, rt); | 770 Branch(NegateCondition(cond), 2, rs, rt); |
768 nop(); | 771 jalr(at); // Will generate only one instruction. |
769 jalr(at); // will generate only one instruction. | |
770 } | 772 } |
771 } | 773 } |
772 } | 774 } |
| 775 // Emit a nop in the branch delay slot. |
| 776 nop(); |
773 } | 777 } |
774 | 778 |
775 void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) { | 779 void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) { |
776 UNIMPLEMENTED_MIPS(); | 780 UNIMPLEMENTED_MIPS(); |
777 } | 781 } |
778 | 782 |
779 | 783 |
780 void MacroAssembler::Drop(int count, Condition cond) { | 784 void MacroAssembler::Drop(int count, Condition cond) { |
781 UNIMPLEMENTED_MIPS(); | 785 UNIMPLEMENTED_MIPS(); |
782 } | 786 } |
(...skipping 12 matching lines...) Expand all Loading... |
795 UNIMPLEMENTED_MIPS(); | 799 UNIMPLEMENTED_MIPS(); |
796 } | 800 } |
797 #endif | 801 #endif |
798 | 802 |
799 | 803 |
800 // --------------------------------------------------------------------------- | 804 // --------------------------------------------------------------------------- |
801 // Exception handling | 805 // Exception handling |
802 | 806 |
803 void MacroAssembler::PushTryHandler(CodeLocation try_location, | 807 void MacroAssembler::PushTryHandler(CodeLocation try_location, |
804 HandlerType type) { | 808 HandlerType type) { |
805 UNIMPLEMENTED_MIPS(); | 809 // Adjust this code if not the case. |
| 810 ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize); |
| 811 // The return address is passed in register ra. |
| 812 if (try_location == IN_JAVASCRIPT) { |
| 813 if (type == TRY_CATCH_HANDLER) { |
| 814 li(t0, Operand(StackHandler::TRY_CATCH)); |
| 815 } else { |
| 816 li(t0, Operand(StackHandler::TRY_FINALLY)); |
| 817 } |
| 818 ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize |
| 819 && StackHandlerConstants::kFPOffset == 2 * kPointerSize |
| 820 && StackHandlerConstants::kPCOffset == 3 * kPointerSize |
| 821 && StackHandlerConstants::kNextOffset == 0 * kPointerSize); |
| 822 // Save the current handler as the next handler. |
| 823 LoadExternalReference(t2, ExternalReference(Top::k_handler_address)); |
| 824 lw(t1, MemOperand(t2)); |
| 825 |
| 826 addiu(sp, sp, -StackHandlerConstants::kSize); |
| 827 sw(ra, MemOperand(sp, 12)); |
| 828 sw(fp, MemOperand(sp, 8)); |
| 829 sw(t0, MemOperand(sp, 4)); |
| 830 sw(t1, MemOperand(sp, 0)); |
| 831 |
| 832 // Link this handler as the new current one. |
| 833 sw(sp, MemOperand(t2)); |
| 834 |
| 835 } else { |
| 836 // Must preserve a0-a3, and s0 (argv). |
| 837 ASSERT(try_location == IN_JS_ENTRY); |
| 838 ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize |
| 839 && StackHandlerConstants::kFPOffset == 2 * kPointerSize |
| 840 && StackHandlerConstants::kPCOffset == 3 * kPointerSize |
| 841 && StackHandlerConstants::kNextOffset == 0 * kPointerSize); |
| 842 |
| 843 // The frame pointer does not point to a JS frame so we save NULL |
| 844 // for fp. We expect the code throwing an exception to check fp |
| 845 // before dereferencing it to restore the context. |
| 846 li(t0, Operand(StackHandler::ENTRY)); |
| 847 |
| 848 // Save the current handler as the next handler. |
| 849 LoadExternalReference(t2, ExternalReference(Top::k_handler_address)); |
| 850 lw(t1, MemOperand(t2)); |
| 851 |
| 852 addiu(sp, sp, -StackHandlerConstants::kSize); |
| 853 sw(ra, MemOperand(sp, 12)); |
| 854 sw(zero_reg, MemOperand(sp, 8)); |
| 855 sw(t0, MemOperand(sp, 4)); |
| 856 sw(t1, MemOperand(sp, 0)); |
| 857 |
| 858 // Link this handler as the new current one. |
| 859 sw(sp, MemOperand(t2)); |
| 860 } |
806 } | 861 } |
807 | 862 |
808 | 863 |
809 void MacroAssembler::PopTryHandler() { | 864 void MacroAssembler::PopTryHandler() { |
810 UNIMPLEMENTED_MIPS(); | 865 UNIMPLEMENTED_MIPS(); |
811 } | 866 } |
812 | 867 |
813 | 868 |
814 | 869 |
| 870 // ----------------------------------------------------------------------------- |
| 871 // Activation frames |
| 872 |
| 873 void MacroAssembler::SetupAlignedCall(Register scratch, int arg_count) { |
| 874 Label extra_push, end; |
| 875 |
| 876 andi(scratch, sp, 7); |
| 877 |
| 878 // We check for args and receiver size on the stack, all of them word sized. |
| 879 // We add one for sp, that we also want to store on the stack. |
| 880 if (((arg_count + 1) % kPointerSizeLog2) == 0) { |
| 881 Branch(ne, &extra_push, at, Operand(zero_reg)); |
| 882 } else { // ((arg_count + 1) % 2) == 1 |
| 883 Branch(eq, &extra_push, at, Operand(zero_reg)); |
| 884 } |
| 885 |
| 886 // Save sp on the stack. |
| 887 mov(scratch, sp); |
| 888 Push(scratch); |
| 889 b(&end); |
| 890 |
| 891 // Align before saving sp on the stack. |
| 892 bind(&extra_push); |
| 893 mov(scratch, sp); |
| 894 addiu(sp, sp, -8); |
| 895 sw(scratch, MemOperand(sp)); |
| 896 |
| 897 // The stack is aligned and sp is stored on the top. |
| 898 bind(&end); |
| 899 } |
| 900 |
| 901 |
| 902 void MacroAssembler::ReturnFromAlignedCall() { |
| 903 lw(sp, MemOperand(sp)); |
| 904 } |
| 905 |
| 906 |
| 907 // ----------------------------------------------------------------------------- |
| 908 // JavaScript invokes |
| 909 |
| 910 void MacroAssembler::InvokePrologue(const ParameterCount& expected, |
| 911 const ParameterCount& actual, |
| 912 Handle<Code> code_constant, |
| 913 Register code_reg, |
| 914 Label* done, |
| 915 InvokeFlag flag) { |
| 916 bool definitely_matches = false; |
| 917 Label regular_invoke; |
| 918 |
| 919 // Check whether the expected and actual arguments count match. If not, |
| 920 // setup registers according to contract with ArgumentsAdaptorTrampoline: |
| 921 // a0: actual arguments count |
| 922 // a1: function (passed through to callee) |
| 923 // a2: expected arguments count |
| 924 // a3: callee code entry |
| 925 |
| 926 // The code below is made a lot easier because the calling code already sets |
| 927 // up actual and expected registers according to the contract if values are |
| 928 // passed in registers. |
| 929 ASSERT(actual.is_immediate() || actual.reg().is(a0)); |
| 930 ASSERT(expected.is_immediate() || expected.reg().is(a2)); |
| 931 ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(a3)); |
| 932 |
| 933 if (expected.is_immediate()) { |
| 934 ASSERT(actual.is_immediate()); |
| 935 if (expected.immediate() == actual.immediate()) { |
| 936 definitely_matches = true; |
| 937 } else { |
| 938 li(a0, Operand(actual.immediate())); |
| 939 const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel; |
| 940 if (expected.immediate() == sentinel) { |
| 941 // Don't worry about adapting arguments for builtins that |
| 942 // don't want that done. Skip adaptation code by making it look |
| 943 // like we have a match between expected and actual number of |
| 944 // arguments. |
| 945 definitely_matches = true; |
| 946 } else { |
| 947 li(a2, Operand(expected.immediate())); |
| 948 } |
| 949 } |
| 950 } else if (actual.is_immediate()) { |
| 951 Branch(eq, ®ular_invoke, expected.reg(), Operand(actual.immediate())); |
| 952 li(a0, Operand(actual.immediate())); |
| 953 } else { |
| 954 Branch(eq, ®ular_invoke, expected.reg(), Operand(actual.reg())); |
| 955 } |
| 956 |
| 957 if (!definitely_matches) { |
| 958 if (!code_constant.is_null()) { |
| 959 li(a3, Operand(code_constant)); |
| 960 addiu(a3, a3, Code::kHeaderSize - kHeapObjectTag); |
| 961 } |
| 962 |
| 963 ExternalReference adaptor(Builtins::ArgumentsAdaptorTrampoline); |
| 964 if (flag == CALL_FUNCTION) { |
| 965 CallBuiltin(adaptor); |
| 966 b(done); |
| 967 nop(); |
| 968 } else { |
| 969 JumpToBuiltin(adaptor); |
| 970 } |
| 971 bind(®ular_invoke); |
| 972 } |
| 973 } |
| 974 |
| 975 void MacroAssembler::InvokeCode(Register code, |
| 976 const ParameterCount& expected, |
| 977 const ParameterCount& actual, |
| 978 InvokeFlag flag) { |
| 979 Label done; |
| 980 |
| 981 InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag); |
| 982 if (flag == CALL_FUNCTION) { |
| 983 Call(code); |
| 984 } else { |
| 985 ASSERT(flag == JUMP_FUNCTION); |
| 986 Jump(code); |
| 987 } |
| 988 // Continue here if InvokePrologue does handle the invocation due to |
| 989 // mismatched parameter counts. |
| 990 bind(&done); |
| 991 } |
| 992 |
| 993 |
| 994 void MacroAssembler::InvokeCode(Handle<Code> code, |
| 995 const ParameterCount& expected, |
| 996 const ParameterCount& actual, |
| 997 RelocInfo::Mode rmode, |
| 998 InvokeFlag flag) { |
| 999 Label done; |
| 1000 |
| 1001 InvokePrologue(expected, actual, code, no_reg, &done, flag); |
| 1002 if (flag == CALL_FUNCTION) { |
| 1003 Call(code, rmode); |
| 1004 } else { |
| 1005 Jump(code, rmode); |
| 1006 } |
| 1007 // Continue here if InvokePrologue does handle the invocation due to |
| 1008 // mismatched parameter counts. |
| 1009 bind(&done); |
| 1010 } |
| 1011 |
| 1012 |
| 1013 void MacroAssembler::InvokeFunction(Register function, |
| 1014 const ParameterCount& actual, |
| 1015 InvokeFlag flag) { |
| 1016 // Contract with called JS functions requires that function is passed in a1. |
| 1017 ASSERT(function.is(a1)); |
| 1018 Register expected_reg = a2; |
| 1019 Register code_reg = a3; |
| 1020 |
| 1021 lw(code_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); |
| 1022 lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); |
| 1023 lw(expected_reg, |
| 1024 FieldMemOperand(code_reg, |
| 1025 SharedFunctionInfo::kFormalParameterCountOffset)); |
| 1026 lw(code_reg, |
| 1027 MemOperand(code_reg, SharedFunctionInfo::kCodeOffset - kHeapObjectTag)); |
| 1028 addiu(code_reg, code_reg, Code::kHeaderSize - kHeapObjectTag); |
| 1029 |
| 1030 ParameterCount expected(expected_reg); |
| 1031 InvokeCode(code_reg, expected, actual, flag); |
| 1032 } |
| 1033 |
| 1034 |
815 // --------------------------------------------------------------------------- | 1035 // --------------------------------------------------------------------------- |
816 // Activation frames | 1036 // Support functions. |
| 1037 |
| 1038 void MacroAssembler::GetObjectType(Register function, |
| 1039 Register map, |
| 1040 Register type_reg) { |
| 1041 lw(map, FieldMemOperand(function, HeapObject::kMapOffset)); |
| 1042 lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset)); |
| 1043 } |
| 1044 |
| 1045 |
| 1046 void MacroAssembler::CallBuiltin(ExternalReference builtin_entry) { |
| 1047 // Load builtin address. |
| 1048 LoadExternalReference(t9, builtin_entry); |
| 1049 lw(t9, MemOperand(t9)); // Deref address. |
| 1050 addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag); |
| 1051 // Call and allocate argument slots. |
| 1052 jalr(t9); |
| 1053 // Use the branch delay slot to allocate argument slots. |
| 1054 addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize); |
| 1055 addiu(sp, sp, StandardFrameConstants::kRArgsSlotsSize); |
| 1056 } |
| 1057 |
| 1058 |
| 1059 void MacroAssembler::CallBuiltin(Register target) { |
| 1060 // Target already holds target address. |
| 1061 // Call and allocate argument slots. |
| 1062 jalr(target); |
| 1063 // Use the branch delay slot to allocate argument slots. |
| 1064 addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize); |
| 1065 addiu(sp, sp, StandardFrameConstants::kRArgsSlotsSize); |
| 1066 } |
| 1067 |
| 1068 |
| 1069 void MacroAssembler::JumpToBuiltin(ExternalReference builtin_entry) { |
| 1070 // Load builtin address. |
| 1071 LoadExternalReference(t9, builtin_entry); |
| 1072 lw(t9, MemOperand(t9)); // Deref address. |
| 1073 addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag); |
| 1074 // Jump and allocate argument slots. |
| 1075 jr(t9); |
| 1076 // Use the branch delay slot to allocate argument slots. |
| 1077 addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize); |
| 1078 } |
| 1079 |
| 1080 |
| 1081 void MacroAssembler::JumpToBuiltin(Register target) { |
| 1082 // t9 already holds target address. |
| 1083 // Jump and allocate argument slots. |
| 1084 jr(t9); |
| 1085 // Use the branch delay slot to allocate argument slots. |
| 1086 addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize); |
| 1087 } |
| 1088 |
| 1089 |
| 1090 // ----------------------------------------------------------------------------- |
| 1091 // Runtime calls |
817 | 1092 |
818 void MacroAssembler::CallStub(CodeStub* stub, Condition cond, | 1093 void MacroAssembler::CallStub(CodeStub* stub, Condition cond, |
819 Register r1, const Operand& r2) { | 1094 Register r1, const Operand& r2) { |
820 UNIMPLEMENTED_MIPS(); | 1095 ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs. |
821 } | 1096 Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2); |
822 | 1097 } |
823 | 1098 |
| 1099 |
824 void MacroAssembler::StubReturn(int argc) { | 1100 void MacroAssembler::StubReturn(int argc) { |
825 UNIMPLEMENTED_MIPS(); | 1101 UNIMPLEMENTED_MIPS(); |
826 } | 1102 } |
827 | 1103 |
828 | 1104 |
| 1105 void MacroAssembler::IllegalOperation(int num_arguments) { |
| 1106 if (num_arguments > 0) { |
| 1107 addiu(sp, sp, num_arguments * kPointerSize); |
| 1108 } |
| 1109 LoadRoot(v0, Heap::kUndefinedValueRootIndex); |
| 1110 } |
| 1111 |
| 1112 |
829 void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) { | 1113 void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) { |
830 UNIMPLEMENTED_MIPS(); | 1114 // All parameters are on the stack. v0 has the return value after call. |
| 1115 |
| 1116 // If the expected number of arguments of the runtime function is |
| 1117 // constant, we check that the actual number of arguments match the |
| 1118 // expectation. |
| 1119 if (f->nargs >= 0 && f->nargs != num_arguments) { |
| 1120 IllegalOperation(num_arguments); |
| 1121 return; |
| 1122 } |
| 1123 |
| 1124 // TODO(1236192): Most runtime routines don't need the number of |
| 1125 // arguments passed in because it is constant. At some point we |
| 1126 // should remove this need and make the runtime routine entry code |
| 1127 // smarter. |
| 1128 li(a0, num_arguments); |
| 1129 LoadExternalReference(a1, ExternalReference(f)); |
| 1130 CEntryStub stub(1); |
| 1131 CallStub(&stub); |
831 } | 1132 } |
832 | 1133 |
833 | 1134 |
834 void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) { | 1135 void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) { |
835 UNIMPLEMENTED_MIPS(); | 1136 CallRuntime(Runtime::FunctionForId(fid), num_arguments); |
836 } | 1137 } |
837 | 1138 |
838 | 1139 |
839 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext, | 1140 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext, |
840 int num_arguments, | 1141 int num_arguments, |
841 int result_size) { | 1142 int result_size) { |
842 UNIMPLEMENTED_MIPS(); | 1143 UNIMPLEMENTED_MIPS(); |
843 } | 1144 } |
844 | 1145 |
845 | 1146 |
(...skipping 38 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
884 UNIMPLEMENTED_MIPS(); | 1185 UNIMPLEMENTED_MIPS(); |
885 } | 1186 } |
886 | 1187 |
887 | 1188 |
888 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value, | 1189 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value, |
889 Register scratch1, Register scratch2) { | 1190 Register scratch1, Register scratch2) { |
890 UNIMPLEMENTED_MIPS(); | 1191 UNIMPLEMENTED_MIPS(); |
891 } | 1192 } |
892 | 1193 |
893 | 1194 |
| 1195 // ----------------------------------------------------------------------------- |
| 1196 // Debugging |
894 | 1197 |
895 void MacroAssembler::Assert(Condition cc, const char* msg, | 1198 void MacroAssembler::Assert(Condition cc, const char* msg, |
896 Register rs, Operand rt) { | 1199 Register rs, Operand rt) { |
897 UNIMPLEMENTED_MIPS(); | 1200 UNIMPLEMENTED_MIPS(); |
898 } | 1201 } |
899 | 1202 |
900 | 1203 |
901 void MacroAssembler::Check(Condition cc, const char* msg, | 1204 void MacroAssembler::Check(Condition cc, const char* msg, |
902 Register rs, Operand rt) { | 1205 Register rs, Operand rt) { |
903 UNIMPLEMENTED_MIPS(); | 1206 UNIMPLEMENTED_MIPS(); |
904 } | 1207 } |
905 | 1208 |
906 | 1209 |
907 void MacroAssembler::Abort(const char* msg) { | 1210 void MacroAssembler::Abort(const char* msg) { |
908 UNIMPLEMENTED_MIPS(); | 1211 UNIMPLEMENTED_MIPS(); |
909 } | 1212 } |
910 | 1213 |
| 1214 |
| 1215 void MacroAssembler::EnterFrame(StackFrame::Type type) { |
| 1216 addiu(sp, sp, -5 * kPointerSize); |
| 1217 li(t0, Operand(Smi::FromInt(type))); |
| 1218 li(t1, Operand(CodeObject())); |
| 1219 sw(ra, MemOperand(sp, 4 * kPointerSize)); |
| 1220 sw(fp, MemOperand(sp, 3 * kPointerSize)); |
| 1221 sw(cp, MemOperand(sp, 2 * kPointerSize)); |
| 1222 sw(t0, MemOperand(sp, 1 * kPointerSize)); |
| 1223 sw(t1, MemOperand(sp, 0 * kPointerSize)); |
| 1224 addiu(fp, sp, 3 * kPointerSize); |
| 1225 } |
| 1226 |
| 1227 |
| 1228 void MacroAssembler::LeaveFrame(StackFrame::Type type) { |
| 1229 mov(sp, fp); |
| 1230 lw(fp, MemOperand(sp, 0 * kPointerSize)); |
| 1231 lw(ra, MemOperand(sp, 1 * kPointerSize)); |
| 1232 addiu(sp, sp, 2 * kPointerSize); |
| 1233 } |
| 1234 |
| 1235 |
| 1236 void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode, |
| 1237 Register hold_argc, |
| 1238 Register hold_argv, |
| 1239 Register hold_function) { |
| 1240 // Compute the argv pointer and keep it in a callee-saved register. |
| 1241 // a0 is argc. |
| 1242 sll(t0, a0, kPointerSizeLog2); |
| 1243 add(hold_argv, sp, t0); |
| 1244 addi(hold_argv, hold_argv, -kPointerSize); |
| 1245 |
| 1246 // Compute the callee's stack pointer before making changes and save |
| 1247 // it in register t1 so that it is restored as the sp register on |
| 1248 // exit, thereby popping the args. |
| 1249 // t1 = sp + kPointerSize * #args |
| 1250 add(t1, sp, t0); |
| 1251 |
| 1252 // Align the stack at this point. |
| 1253 AlignStack(0); |
| 1254 |
| 1255 // Save registers. |
| 1256 addiu(sp, sp, -12); |
| 1257 sw(t1, MemOperand(sp, 8)); |
| 1258 sw(ra, MemOperand(sp, 4)); |
| 1259 sw(fp, MemOperand(sp, 0)); |
| 1260 mov(fp, sp); // Setup new frame pointer. |
| 1261 |
| 1262 // Push debug marker. |
| 1263 if (mode == ExitFrame::MODE_DEBUG) { |
| 1264 Push(zero_reg); |
| 1265 } else { |
| 1266 li(t0, Operand(CodeObject())); |
| 1267 Push(t0); |
| 1268 } |
| 1269 |
| 1270 // Save the frame pointer and the context in top. |
| 1271 LoadExternalReference(t0, ExternalReference(Top::k_c_entry_fp_address)); |
| 1272 sw(fp, MemOperand(t0)); |
| 1273 LoadExternalReference(t0, ExternalReference(Top::k_context_address)); |
| 1274 sw(cp, MemOperand(t0)); |
| 1275 |
| 1276 // Setup argc and the builtin function in callee-saved registers. |
| 1277 mov(hold_argc, a0); |
| 1278 mov(hold_function, a1); |
| 1279 } |
| 1280 |
| 1281 |
| 1282 void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode) { |
| 1283 // Clear top frame. |
| 1284 LoadExternalReference(t0, ExternalReference(Top::k_c_entry_fp_address)); |
| 1285 sw(zero_reg, MemOperand(t0)); |
| 1286 |
| 1287 // Restore current context from top and clear it in debug mode. |
| 1288 LoadExternalReference(t0, ExternalReference(Top::k_context_address)); |
| 1289 lw(cp, MemOperand(t0)); |
| 1290 #ifdef DEBUG |
| 1291 sw(a3, MemOperand(t0)); |
| 1292 #endif |
| 1293 |
| 1294 // Pop the arguments, restore registers, and return. |
| 1295 mov(sp, fp); // Respect ABI stack constraint. |
| 1296 lw(fp, MemOperand(sp, 0)); |
| 1297 lw(ra, MemOperand(sp, 4)); |
| 1298 lw(sp, MemOperand(sp, 8)); |
| 1299 jr(ra); |
| 1300 nop(); // Branch delay slot nop. |
| 1301 } |
| 1302 |
| 1303 |
| 1304 void MacroAssembler::AlignStack(int offset) { |
| 1305 // On MIPS an offset of 0 aligns to 0 modulo 8 bytes, |
| 1306 // and an offset of 1 aligns to 4 modulo 8 bytes. |
| 1307 int activation_frame_alignment = OS::ActivationFrameAlignment(); |
| 1308 if (activation_frame_alignment != kPointerSize) { |
| 1309 // This code needs to be made more general if this assert doesn't hold. |
| 1310 ASSERT(activation_frame_alignment == 2 * kPointerSize); |
| 1311 if (offset == 0) { |
| 1312 andi(t0, sp, activation_frame_alignment - 1); |
| 1313 Push(zero_reg, eq, t0, zero_reg); |
| 1314 } else { |
| 1315 andi(t0, sp, activation_frame_alignment - 1); |
| 1316 addiu(t0, t0, -4); |
| 1317 Push(zero_reg, eq, t0, zero_reg); |
| 1318 } |
| 1319 } |
| 1320 } |
| 1321 |
911 } } // namespace v8::internal | 1322 } } // namespace v8::internal |
912 | 1323 |
OLD | NEW |