OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 750 matching lines...)
761 srl(rd, rs, 0); | 761 srl(rd, rs, 0); |
762 } else { | 762 } else { |
763 srl(at, rs, rt.imm32_); | 763 srl(at, rs, rt.imm32_); |
764 sll(rd, rs, (0x20 - rt.imm32_) & 0x1f); | 764 sll(rd, rs, (0x20 - rt.imm32_) & 0x1f); |
765 or_(rd, rd, at); | 765 or_(rd, rd, at); |
766 } | 766 } |
767 } | 767 } |
768 } | 768 } |
769 } | 769 } |
770 | 770 |
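The srl/sll/or_ sequence just above emulates a rotate-right for MIPS variants without a native rotr instruction: shift the low bits out to the right, shift the same bits back in from the left, and OR the two halves together. A minimal C++ sketch of the same bit manipulation (RotateRight32 is a hypothetical helper, not a V8 function):

    #include <cstdint>

    // Rotate a 32-bit value right by `count` bits using only shifts and OR,
    // mirroring the emitted srl/sll/or_ sequence. The emitted code
    // special-cases count == 0 so that a single srl is enough.
    uint32_t RotateRight32(uint32_t value, uint32_t count) {
      count &= 0x1f;                                   // MIPS shift amounts are mod 32.
      uint32_t low = value >> count;                   // srl(at, rs, imm)
      uint32_t high = value << ((32 - count) & 0x1f);  // sll(rd, rs, (0x20 - imm) & 0x1f)
      return high | low;                               // or_(rd, rd, at)
    }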
| 771 |
771 //------------Pseudo-instructions------------- | 772 //------------Pseudo-instructions------------- |
772 | 773 |
773 void MacroAssembler::li(Register rd, Operand j, LiFlags mode) { | 774 void MacroAssembler::li(Register rd, Operand j, LiFlags mode) { |
774 ASSERT(!j.is_reg()); | 775 ASSERT(!j.is_reg()); |
775 BlockTrampolinePoolScope block_trampoline_pool(this); | 776 BlockTrampolinePoolScope block_trampoline_pool(this); |
776 if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) { | 777 if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) { |
777 // Normal load of an immediate value which does not need Relocation Info. | 778 // Normal load of an immediate value which does not need Relocation Info. |
778 if (is_int16(j.imm32_)) { | 779 if (is_int16(j.imm32_)) { |
779 addiu(rd, zero_reg, j.imm32_); | 780 addiu(rd, zero_reg, j.imm32_); |
780 } else if (!(j.imm32_ & kHiMask)) { | 781 } else if (!(j.imm32_ & kHiMask)) { |
(...skipping 233 matching lines...)
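For context on the li() cases visible before the skipped region: when no relocation info is required, a 32-bit immediate is loaded with a single instruction if possible and otherwise split into 16-bit halves. A rough sketch of that decision, assuming kHiMask is 0xffff0000 and kImm16Mask is 0xffff as in V8's MIPS constants (SelectLiSequence is a hypothetical helper, not part of V8):

    #include <cstdint>

    enum LiSequence { kAddiu, kOri, kLuiOri };

    // Mirrors the three li() cases: a sign-extendable 16-bit value loads with
    // addiu from zero_reg, a value whose high half is zero loads with ori, and
    // everything else takes lui of the high half followed by ori of the low half.
    LiSequence SelectLiSequence(int32_t imm32) {
      if (imm32 >= -32768 && imm32 <= 32767) return kAddiu;  // is_int16(j.imm32_)
      if ((imm32 & 0xffff0000) == 0) return kOri;            // !(j.imm32_ & kHiMask)
      return kLuiOri;  // lui((imm32 & kHiMask) >> kLuiShift); ori(imm32 & kImm16Mask)
    }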
1014 } | 1015 } |
1015 | 1016 |
1016 | 1017 |
1017 void MacroAssembler::Trunc_uw_d(FPURegister fd, | 1018 void MacroAssembler::Trunc_uw_d(FPURegister fd, |
1018 FPURegister fs, | 1019 FPURegister fs, |
1019 FPURegister scratch) { | 1020 FPURegister scratch) { |
1020 Trunc_uw_d(fs, t8, scratch); | 1021 Trunc_uw_d(fs, t8, scratch); |
1021 mtc1(t8, fd); | 1022 mtc1(t8, fd); |
1022 } | 1023 } |
1023 | 1024 |
| 1025 |
1024 void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) { | 1026 void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) { |
1025 if (kArchVariant == kLoongson && fd.is(fs)) { | 1027 if (kArchVariant == kLoongson && fd.is(fs)) { |
1026 mfc1(t8, FPURegister::from_code(fs.code() + 1)); | 1028 mfc1(t8, FPURegister::from_code(fs.code() + 1)); |
1027 trunc_w_d(fd, fs); | 1029 trunc_w_d(fd, fs); |
1028 mtc1(t8, FPURegister::from_code(fs.code() + 1)); | 1030 mtc1(t8, FPURegister::from_code(fs.code() + 1)); |
1029 } else { | 1031 } else { |
1030 trunc_w_d(fd, fs); | 1032 trunc_w_d(fd, fs); |
1031 } | 1033 } |
1032 } | 1034 } |
1033 | 1035 |
| 1036 |
1034 void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) { | 1037 void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) { |
1035 if (kArchVariant == kLoongson && fd.is(fs)) { | 1038 if (kArchVariant == kLoongson && fd.is(fs)) { |
1036 mfc1(t8, FPURegister::from_code(fs.code() + 1)); | 1039 mfc1(t8, FPURegister::from_code(fs.code() + 1)); |
1037 round_w_d(fd, fs); | 1040 round_w_d(fd, fs); |
1038 mtc1(t8, FPURegister::from_code(fs.code() + 1)); | 1041 mtc1(t8, FPURegister::from_code(fs.code() + 1)); |
1039 } else { | 1042 } else { |
1040 round_w_d(fd, fs); | 1043 round_w_d(fd, fs); |
1041 } | 1044 } |
1042 } | 1045 } |
1043 | 1046 |
(...skipping 1588 matching lines...)
2632 lui(at, (imm32 & kHiMask) >> kLuiShift); | 2635 lui(at, (imm32 & kHiMask) >> kLuiShift); |
2633 ori(at, at, (imm32 & kImm16Mask)); | 2636 ori(at, at, (imm32 & kImm16Mask)); |
2634 } | 2637 } |
2635 jalr(at); | 2638 jalr(at); |
2636 | 2639 |
2637 // Emit a nop in the branch delay slot if required. | 2640 // Emit a nop in the branch delay slot if required. |
2638 if (bdslot == PROTECT) | 2641 if (bdslot == PROTECT) |
2639 nop(); | 2642 nop(); |
2640 } | 2643 } |
2641 | 2644 |
| 2645 |
2642 void MacroAssembler::DropAndRet(int drop) { | 2646 void MacroAssembler::DropAndRet(int drop) { |
2643 Ret(USE_DELAY_SLOT); | 2647 Ret(USE_DELAY_SLOT); |
2644 addiu(sp, sp, drop * kPointerSize); | 2648 addiu(sp, sp, drop * kPointerSize); |
2645 } | 2649 } |
2646 | 2650 |
2647 void MacroAssembler::DropAndRet(int drop, | 2651 void MacroAssembler::DropAndRet(int drop, |
2648 Condition cond, | 2652 Condition cond, |
2649 Register r1, | 2653 Register r1, |
2650 const Operand& r2) { | 2654 const Operand& r2) { |
2651 // Both Drop and Ret need to be conditional. | 2655 // Both Drop and Ret need to be conditional. |
(...skipping 2905 matching lines...)
5557 opcode == BGTZL); | 5561 opcode == BGTZL); |
5558 opcode = (cond == eq) ? BEQ : BNE; | 5562 opcode = (cond == eq) ? BEQ : BNE; |
5559 instr = (instr & ~kOpcodeMask) | opcode; | 5563 instr = (instr & ~kOpcodeMask) | opcode; |
5560 masm_.emit(instr); | 5564 masm_.emit(instr); |
5561 } | 5565 } |
5562 | 5566 |
5563 | 5567 |
5564 } } // namespace v8::internal | 5568 } } // namespace v8::internal |
5565 | 5569 |
5566 #endif // V8_TARGET_ARCH_MIPS | 5570 #endif // V8_TARGET_ARCH_MIPS |