OLD | NEW |
1 // Copyright (c) 1994-2006 Sun Microsystems Inc. | 1 // Copyright (c) 1994-2006 Sun Microsystems Inc. |
2 // All Rights Reserved. | 2 // All Rights Reserved. |
3 // | 3 // |
4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
6 // met: | 6 // met: |
7 // | 7 // |
8 // - Redistributions of source code must retain the above copyright notice, | 8 // - Redistributions of source code must retain the above copyright notice, |
9 // this list of conditions and the following disclaimer. | 9 // this list of conditions and the following disclaimer. |
10 // | 10 // |
(...skipping 218 matching lines...)
229 MemOperand::MemOperand(Register rm, int32_t unit, int32_t multiplier, | 229 MemOperand::MemOperand(Register rm, int32_t unit, int32_t multiplier, |
230 OffsetAddend offset_addend) | 230 OffsetAddend offset_addend) |
231 : Operand(rm) { | 231 : Operand(rm) { |
232 offset_ = unit * multiplier + offset_addend; | 232 offset_ = unit * multiplier + offset_addend; |
233 } | 233 } |
234 | 234 |
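As a quick illustration of what this constructor computes (it is unchanged by this CL), here is a minimal standalone sketch. MemOperand, Register and OffsetAddend are V8 types; the bare struct and names below are local stand-ins invented for the example, mirroring only the offset arithmetic.

#include <cstdint>
#include <cstdio>

// Local stand-in for V8's MemOperand; only the offset arithmetic is mirrored.
struct FakeMemOperand {
  int32_t offset;
  FakeMemOperand(int32_t unit, int32_t multiplier, int32_t offset_addend = 0)
      : offset(unit * multiplier + offset_addend) {}
};

int main() {
  // E.g. the 3rd 8-byte stack slot plus a 4-byte addend: 3 * 8 + 4 = 28.
  FakeMemOperand op(3, 8, 4);
  std::printf("offset = %d\n", op.offset);
}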
235 | 235 |
236 // ----------------------------------------------------------------------------- | 236 // ----------------------------------------------------------------------------- |
237 // Specific instructions, constants, and masks. | 237 // Specific instructions, constants, and masks. |
238 | 238 |
239 static const int kNegOffset = 0x00008000; | |
240 // daddiu(sp, sp, 8) aka Pop() operation or part of Pop(r) | 239 // daddiu(sp, sp, 8) aka Pop() operation or part of Pop(r) |
241 // operations as post-increment of sp. | 240 // operations as post-increment of sp. |
242 const Instr kPopInstruction = DADDIU | (Register::kCode_sp << kRsShift) | | 241 const Instr kPopInstruction = DADDIU | (Register::kCode_sp << kRsShift) | |
243 (Register::kCode_sp << kRtShift) | | 242 (Register::kCode_sp << kRtShift) | |
244 (kPointerSize & kImm16Mask); // NOLINT | 243 (kPointerSize & kImm16Mask); // NOLINT |
245 // daddiu(sp, sp, -8) part of Push(r) operation as pre-decrement of sp. | 244 // daddiu(sp, sp, -8) part of Push(r) operation as pre-decrement of sp. |
246 const Instr kPushInstruction = DADDIU | (Register::kCode_sp << kRsShift) | | 245 const Instr kPushInstruction = DADDIU | (Register::kCode_sp << kRsShift) | |
247 (Register::kCode_sp << kRtShift) | | 246 (Register::kCode_sp << kRtShift) | |
248 (-kPointerSize & kImm16Mask); // NOLINT | 247 (-kPointerSize & kImm16Mask); // NOLINT |
249 // sd(r, MemOperand(sp, 0)) | 248 // Sd(r, MemOperand(sp, 0)) |
250 const Instr kPushRegPattern = | 249 const Instr kPushRegPattern = |
251 SD | (Register::kCode_sp << kRsShift) | (0 & kImm16Mask); // NOLINT | 250 SD | (Register::kCode_sp << kRsShift) | (0 & kImm16Mask); // NOLINT |
252 // ld(r, MemOperand(sp, 0)) | 251 // Ld(r, MemOperand(sp, 0)) |
253 const Instr kPopRegPattern = | 252 const Instr kPopRegPattern = |
254 LD | (Register::kCode_sp << kRsShift) | (0 & kImm16Mask); // NOLINT | 253 LD | (Register::kCode_sp << kRsShift) | (0 & kImm16Mask); // NOLINT |
255 | 254 |
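For readers less familiar with MIPS encodings, here is a minimal sketch of how the push/pop pattern constants above compose. It assumes the standard MIPS64 field layout (major opcode at bit 26, rs at bit 21, rt at bit 16, imm16 at bit 0), a DADDIU opcode value of 0x19 and sp being register 29; the V8 constants (kRsShift, kImm16Mask, ...) are mirrored locally so the snippet builds on its own.

#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t kDaddiuOpcode = 0x19u << 26;  // DADDIU major opcode field.
  const int kRsShift = 21, kRtShift = 16;      // Standard MIPS rs/rt fields.
  const uint32_t kImm16Mask = 0xFFFF;
  const uint32_t sp = 29;                      // Stack pointer register number.
  const int32_t kPointerSize = 8;              // 64-bit target.

  // Pre-decrement of sp, as in Push(r): daddiu sp, sp, -8.
  uint32_t push_instr = kDaddiuOpcode | (sp << kRsShift) | (sp << kRtShift) |
                        (static_cast<uint32_t>(-kPointerSize) & kImm16Mask);
  // Post-increment of sp, as in Pop(r): daddiu sp, sp, 8.
  uint32_t pop_instr = kDaddiuOpcode | (sp << kRsShift) | (sp << kRtShift) |
                       (static_cast<uint32_t>(kPointerSize) & kImm16Mask);

  std::printf("push: 0x%08x\n", push_instr);  // 0x67bdfff8
  std::printf("pop:  0x%08x\n", pop_instr);   // 0x67bd0008
}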
256 const Instr kLwRegFpOffsetPattern = | 255 const Instr kLwRegFpOffsetPattern = |
257 LW | (Register::kCode_fp << kRsShift) | (0 & kImm16Mask); // NOLINT | 256 LW | (Register::kCode_fp << kRsShift) | (0 & kImm16Mask); // NOLINT |
258 | 257 |
259 const Instr kSwRegFpOffsetPattern = | 258 const Instr kSwRegFpOffsetPattern = |
260 SW | (Register::kCode_fp << kRsShift) | (0 & kImm16Mask); // NOLINT | 259 SW | (Register::kCode_fp << kRsShift) | (0 & kImm16Mask); // NOLINT |
261 | 260 |
262 const Instr kLwRegFpNegOffsetPattern = LW | (Register::kCode_fp << kRsShift) | | 261 const Instr kLwRegFpNegOffsetPattern = LW | (Register::kCode_fp << kRsShift) | |
(...skipping 1820 matching lines...)
2083 | 2082 |
2084 daui(at, src.rm(), hi); | 2083 daui(at, src.rm(), hi); |
2085 daddiu(at, at, src.offset_ & kImm16Mask); | 2084 daddiu(at, at, src.offset_ & kImm16Mask); |
2086 } else { | 2085 } else { |
2087 lui(at, (src.offset_ >> kLuiShift) & kImm16Mask); | 2086 lui(at, (src.offset_ >> kLuiShift) & kImm16Mask); |
2088 ori(at, at, src.offset_ & kImm16Mask); // Load 32-bit offset. | 2087 ori(at, at, src.offset_ & kImm16Mask); // Load 32-bit offset. |
2089 daddu(at, at, src.rm()); // Add base register. | 2088 daddu(at, at, src.rm()); // Add base register. |
2090 } | 2089 } |
2091 } | 2090 } |
2092 | 2091 |
2093 // Helper for base-reg + upper part of offset, when the offset does not fit |
2094 // in int16. Loads the upper part of the offset into the AT register. |
2095 // Returns the lower part of the offset, to be used as the immediate offset |
2096 // in the load/store instruction. |
2097 int32_t Assembler::LoadRegPlusUpperOffsetPartToAt(const MemOperand& src) { | |
2098 DCHECK(!src.rm().is(at)); | |
2099 DCHECK(is_int32(src.offset_)); | |
2100 int32_t hi = (src.offset_ >> kLuiShift) & kImm16Mask; | |
2101 // If the highest bit of the lower part of the offset is 1, this would make | |
2102 // the offset in the load/store instruction negative. We need to compensate | |
2103 // for this by adding 1 to the upper part of the offset. | |
2104 if (src.offset_ & kNegOffset) { | |
2105 if ((hi & kNegOffset) != ((hi + 1) & kNegOffset)) { | |
2106 LoadRegPlusOffsetToAt(src); | |
2107 return 0; | |
2108 } | |
2109 | |
2110 hi += 1; | |
2111 } | |
2112 | |
2113 if (kArchVariant == kMips64r6) { | |
2114 daui(at, src.rm(), hi); | |
2115 } else { | |
2116 lui(at, hi); | |
2117 daddu(at, at, src.rm()); | |
2118 } | |
2119 return (src.offset_ & kImm16Mask); | |
2120 } | |
2121 | |
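The sign compensation in the helper removed above is easiest to see on a concrete value. Below is a minimal standalone sketch with the relevant V8 constants (kLuiShift, kImm16Mask, kNegOffset) mirrored locally; it deliberately ignores the corner case where bumping hi would flip its own sign bit, which the original handles by falling back to LoadRegPlusOffsetToAt.

#include <cassert>
#include <cstdint>
#include <cstdio>

int main() {
  const int kLuiShift = 16;
  const int32_t kImm16Mask = 0xFFFF;
  const int32_t kNegOffset = 0x00008000;

  int32_t offset = 0x12348000;  // Lower half has bit 15 set.

  int32_t hi = (offset >> kLuiShift) & kImm16Mask;         // 0x1234
  if (offset & kNegOffset) hi += 1;                        // compensate: 0x1235
  int16_t lo = static_cast<int16_t>(offset & kImm16Mask);  // -0x8000

  // What "AT = base + (hi << 16); access at offset lo(AT)" adds to the base:
  int64_t rebuilt = (static_cast<int64_t>(hi) << 16) + lo;
  std::printf("hi=0x%x lo=%d rebuilt=0x%llx\n", static_cast<unsigned>(hi), lo,
              static_cast<unsigned long long>(rebuilt));
  assert(rebuilt == offset);  // 0x12350000 - 0x8000 == 0x12348000
}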
2122 void Assembler::lb(Register rd, const MemOperand& rs) { | 2092 void Assembler::lb(Register rd, const MemOperand& rs) { |
2123 if (is_int16(rs.offset_)) { | 2093 GenInstrImmediate(LB, rs.rm(), rd, rs.offset_); |
2124 GenInstrImmediate(LB, rs.rm(), rd, rs.offset_); | |
2125 } else { // Offset > 16 bits, use multiple instructions to load. | |
2126 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs); | |
2127 GenInstrImmediate(LB, at, rd, off16); | |
2128 } | |
2129 } | 2094 } |
2130 | 2095 |
2131 | 2096 |
2132 void Assembler::lbu(Register rd, const MemOperand& rs) { | 2097 void Assembler::lbu(Register rd, const MemOperand& rs) { |
2133 if (is_int16(rs.offset_)) { | 2098 GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_); |
2134 GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_); | |
2135 } else { // Offset > 16 bits, use multiple instructions to load. | |
2136 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs); | |
2137 GenInstrImmediate(LBU, at, rd, off16); | |
2138 } | |
2139 } | 2099 } |
2140 | 2100 |
2141 | 2101 |
2142 void Assembler::lh(Register rd, const MemOperand& rs) { | 2102 void Assembler::lh(Register rd, const MemOperand& rs) { |
2143 if (is_int16(rs.offset_)) { | 2103 GenInstrImmediate(LH, rs.rm(), rd, rs.offset_); |
2144 GenInstrImmediate(LH, rs.rm(), rd, rs.offset_); | |
2145 } else { // Offset > 16 bits, use multiple instructions to load. | |
2146 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs); | |
2147 GenInstrImmediate(LH, at, rd, off16); | |
2148 } | |
2149 } | 2104 } |
2150 | 2105 |
2151 | 2106 |
2152 void Assembler::lhu(Register rd, const MemOperand& rs) { | 2107 void Assembler::lhu(Register rd, const MemOperand& rs) { |
2153 if (is_int16(rs.offset_)) { | 2108 GenInstrImmediate(LHU, rs.rm(), rd, rs.offset_); |
2154 GenInstrImmediate(LHU, rs.rm(), rd, rs.offset_); | |
2155 } else { // Offset > 16 bits, use multiple instructions to load. | |
2156 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs); | |
2157 GenInstrImmediate(LHU, at, rd, off16); | |
2158 } | |
2159 } | 2109 } |
2160 | 2110 |
2161 | 2111 |
2162 void Assembler::lw(Register rd, const MemOperand& rs) { | 2112 void Assembler::lw(Register rd, const MemOperand& rs) { |
2163 if (is_int16(rs.offset_)) { | 2113 GenInstrImmediate(LW, rs.rm(), rd, rs.offset_); |
2164 GenInstrImmediate(LW, rs.rm(), rd, rs.offset_); | |
2165 } else { // Offset > 16 bits, use multiple instructions to load. | |
2166 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs); | |
2167 GenInstrImmediate(LW, at, rd, off16); | |
2168 } | |
2169 } | 2114 } |
2170 | 2115 |
2171 | 2116 |
2172 void Assembler::lwu(Register rd, const MemOperand& rs) { | 2117 void Assembler::lwu(Register rd, const MemOperand& rs) { |
2173 if (is_int16(rs.offset_)) { | 2118 GenInstrImmediate(LWU, rs.rm(), rd, rs.offset_); |
2174 GenInstrImmediate(LWU, rs.rm(), rd, rs.offset_); | |
2175 } else { // Offset > 16 bits, use multiple instructions to load. | |
2176 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs); | |
2177 GenInstrImmediate(LWU, at, rd, off16); | |
2178 } | |
2179 } | 2119 } |
2180 | 2120 |
2181 | 2121 |
2182 void Assembler::lwl(Register rd, const MemOperand& rs) { | 2122 void Assembler::lwl(Register rd, const MemOperand& rs) { |
2183 DCHECK(is_int16(rs.offset_)); | 2123 DCHECK(is_int16(rs.offset_)); |
2184 DCHECK(kArchVariant == kMips64r2); | 2124 DCHECK(kArchVariant == kMips64r2); |
2185 GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_); | 2125 GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_); |
2186 } | 2126 } |
2187 | 2127 |
2188 | 2128 |
2189 void Assembler::lwr(Register rd, const MemOperand& rs) { | 2129 void Assembler::lwr(Register rd, const MemOperand& rs) { |
2190 DCHECK(is_int16(rs.offset_)); | 2130 DCHECK(is_int16(rs.offset_)); |
2191 DCHECK(kArchVariant == kMips64r2); | 2131 DCHECK(kArchVariant == kMips64r2); |
2192 GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_); | 2132 GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_); |
2193 } | 2133 } |
2194 | 2134 |
2195 | 2135 |
2196 void Assembler::sb(Register rd, const MemOperand& rs) { | 2136 void Assembler::sb(Register rd, const MemOperand& rs) { |
2197 if (is_int16(rs.offset_)) { | 2137 GenInstrImmediate(SB, rs.rm(), rd, rs.offset_); |
2198 GenInstrImmediate(SB, rs.rm(), rd, rs.offset_); | |
2199 } else { // Offset > 16 bits, use multiple instructions to store. | |
2200 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs); | |
2201 GenInstrImmediate(SB, at, rd, off16); | |
2202 } | |
2203 } | 2138 } |
2204 | 2139 |
2205 | 2140 |
2206 void Assembler::sh(Register rd, const MemOperand& rs) { | 2141 void Assembler::sh(Register rd, const MemOperand& rs) { |
2207 if (is_int16(rs.offset_)) { | 2142 GenInstrImmediate(SH, rs.rm(), rd, rs.offset_); |
2208 GenInstrImmediate(SH, rs.rm(), rd, rs.offset_); | |
2209 } else { // Offset > 16 bits, use multiple instructions to store. | |
2210 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs); | |
2211 GenInstrImmediate(SH, at, rd, off16); | |
2212 } | |
2213 } | 2143 } |
2214 | 2144 |
2215 | 2145 |
2216 void Assembler::sw(Register rd, const MemOperand& rs) { | 2146 void Assembler::sw(Register rd, const MemOperand& rs) { |
2217 if (is_int16(rs.offset_)) { | 2147 GenInstrImmediate(SW, rs.rm(), rd, rs.offset_); |
2218 GenInstrImmediate(SW, rs.rm(), rd, rs.offset_); | |
2219 } else { // Offset > 16 bits, use multiple instructions to store. | |
2220 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs); | |
2221 GenInstrImmediate(SW, at, rd, off16); | |
2222 } | |
2223 } | 2148 } |
2224 | 2149 |
2225 | 2150 |
2226 void Assembler::swl(Register rd, const MemOperand& rs) { | 2151 void Assembler::swl(Register rd, const MemOperand& rs) { |
2227 DCHECK(is_int16(rs.offset_)); | 2152 DCHECK(is_int16(rs.offset_)); |
2228 DCHECK(kArchVariant == kMips64r2); | 2153 DCHECK(kArchVariant == kMips64r2); |
2229 GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_); | 2154 GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_); |
2230 } | 2155 } |
2231 | 2156 |
2232 | 2157 |
(...skipping 59 matching lines...)
2292 | 2217 |
2293 | 2218 |
2294 void Assembler::sdr(Register rd, const MemOperand& rs) { | 2219 void Assembler::sdr(Register rd, const MemOperand& rs) { |
2295 DCHECK(is_int16(rs.offset_)); | 2220 DCHECK(is_int16(rs.offset_)); |
2296 DCHECK(kArchVariant == kMips64r2); | 2221 DCHECK(kArchVariant == kMips64r2); |
2297 GenInstrImmediate(SDR, rs.rm(), rd, rs.offset_); | 2222 GenInstrImmediate(SDR, rs.rm(), rd, rs.offset_); |
2298 } | 2223 } |
2299 | 2224 |
2300 | 2225 |
2301 void Assembler::ld(Register rd, const MemOperand& rs) { | 2226 void Assembler::ld(Register rd, const MemOperand& rs) { |
2302 if (is_int16(rs.offset_)) { | 2227 GenInstrImmediate(LD, rs.rm(), rd, rs.offset_); |
2303 GenInstrImmediate(LD, rs.rm(), rd, rs.offset_); | |
2304 } else { // Offset > 16 bits, use multiple instructions to load. | |
2305 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs); | |
2306 GenInstrImmediate(LD, at, rd, off16); | |
2307 } | |
2308 } | 2228 } |
2309 | 2229 |
2310 | 2230 |
2311 void Assembler::sd(Register rd, const MemOperand& rs) { | 2231 void Assembler::sd(Register rd, const MemOperand& rs) { |
2312 if (is_int16(rs.offset_)) { | 2232 GenInstrImmediate(SD, rs.rm(), rd, rs.offset_); |
2313 GenInstrImmediate(SD, rs.rm(), rd, rs.offset_); | |
2314 } else { // Offset > 16 bits, use multiple instructions to store. | |
2315 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs); | |
2316 GenInstrImmediate(SD, at, rd, off16); | |
2317 } | |
2318 } | 2233 } |
2319 | 2234 |
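After this change the raw emitters above only accept offsets that fit in a signed 16-bit immediate; judging by the comment updates (sd -> Sd, ld -> Ld), the large-offset splitting presumably moves into higher-level Ld/Sd-style wrappers. The sketch below is a hypothetical illustration of such a wrapper, not the actual V8 MacroAssembler code: the emitters only print what they would emit, and it skips the MIPS64r6 daui variant and the hi-overflow corner case handled by the removed helper.

#include <cstdint>
#include <cstdio>

namespace {

struct MemOp { int base_reg; int32_t offset; };

bool IsInt16(int32_t x) { return x >= -32768 && x <= 32767; }

// Stand-in for the raw lw emitter: just prints the instruction text.
void EmitLw(int rd, int base, int32_t off16) {
  std::printf("lw r%d, %d(r%d)\n", rd, off16, base);
}

// Hypothetical large-offset path, mirroring the removed
// LoadRegPlusUpperOffsetPartToAt: prints the instructions that would build
// base + (hi << 16) in a scratch register and returns the residual lo part.
int32_t LoadUpperOffsetToScratch(const MemOp& src, int scratch) {
  int32_t hi = (src.offset >> 16) & 0xFFFF;
  if (src.offset & 0x8000) hi += 1;  // Compensate for sign-extended lo part.
  std::printf("lui r%d, 0x%x\n", scratch, static_cast<unsigned>(hi));
  std::printf("daddu r%d, r%d, r%d\n", scratch, scratch, src.base_reg);
  return static_cast<int16_t>(src.offset & 0xFFFF);
}

// Wrapper: one instruction when the offset fits, three otherwise.
void LoadWord(int rd, const MemOp& src) {
  if (IsInt16(src.offset)) {
    EmitLw(rd, src.base_reg, src.offset);
  } else {
    const int kScratch = 1;  // The MIPS "at" register.
    int32_t lo = LoadUpperOffsetToScratch(src, kScratch);
    EmitLw(rd, kScratch, lo);
  }
}

}  // namespace

int main() {
  LoadWord(2, {29, 16});          // Small offset from sp: single lw.
  LoadWord(2, {29, 0x12348000});  // Large offset: lui + daddu + lw.
}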
2320 | 2235 |
2321 // ---------PC-Relative instructions----------- | 2236 // ---------PC-Relative instructions----------- |
2322 | 2237 |
2323 void Assembler::addiupc(Register rs, int32_t imm19) { | 2238 void Assembler::addiupc(Register rs, int32_t imm19) { |
2324 DCHECK(kArchVariant == kMips64r6); | 2239 DCHECK(kArchVariant == kMips64r6); |
2325 DCHECK(rs.is_valid() && is_int19(imm19)); | 2240 DCHECK(rs.is_valid() && is_int19(imm19)); |
2326 uint32_t imm21 = ADDIUPC << kImm19Bits | (imm19 & kImm19Mask); | 2241 uint32_t imm21 = ADDIUPC << kImm19Bits | (imm19 & kImm19Mask); |
2327 GenInstrImmediate(PCREL, rs, imm21); | 2242 GenInstrImmediate(PCREL, rs, imm21); |
(...skipping 377 matching lines...)
2705 | 2620 |
2706 void Assembler::seb(Register rd, Register rt) { | 2621 void Assembler::seb(Register rd, Register rt) { |
2707 DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6); | 2622 DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6); |
2708 GenInstrRegister(SPECIAL3, zero_reg, rt, rd, SEB, BSHFL); | 2623 GenInstrRegister(SPECIAL3, zero_reg, rt, rd, SEB, BSHFL); |
2709 } | 2624 } |
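seb sign-extends the least-significant byte of rt into rd. A minimal C++ sketch of the equivalent operation, assuming the usual MIPS SEB semantics (shown at 32 bits for brevity; on MIPS64 the result is further sign-extended to 64 bits). This is illustrative code, not V8 code.

#include <cstdint>
#include <cstdio>

// Sign-extend the low byte of a 32-bit value, as MIPS SEB does.
int32_t SignExtendByte(uint32_t rt) {
  return static_cast<int8_t>(rt & 0xFF);
}

int main() {
  std::printf("%d\n", SignExtendByte(0x000000F0));  // -16
  std::printf("%d\n", SignExtendByte(0x00000070));  //  112
}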
2710 | 2625 |
2711 // --------Coprocessor-instructions---------------- | 2626 // --------Coprocessor-instructions---------------- |
2712 | 2627 |
2713 // Load, store, move. | 2628 // Load, store, move. |
2714 void Assembler::lwc1(FPURegister fd, const MemOperand& src) { | 2629 void Assembler::lwc1(FPURegister fd, const MemOperand& src) { |
2715 if (is_int16(src.offset_)) { | 2630 GenInstrImmediate(LWC1, src.rm(), fd, src.offset_); |
2716 GenInstrImmediate(LWC1, src.rm(), fd, src.offset_); | |
2717 } else { // Offset > 16 bits, use multiple instructions to load. | |
2718 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src); | |
2719 GenInstrImmediate(LWC1, at, fd, off16); | |
2720 } | |
2721 } | 2631 } |
2722 | 2632 |
2723 | 2633 |
2724 void Assembler::ldc1(FPURegister fd, const MemOperand& src) { | 2634 void Assembler::ldc1(FPURegister fd, const MemOperand& src) { |
2725 if (is_int16(src.offset_)) { | 2635 GenInstrImmediate(LDC1, src.rm(), fd, src.offset_); |
2726 GenInstrImmediate(LDC1, src.rm(), fd, src.offset_); | 2636 } |
2727 } else { // Offset > 16 bits, use multiple instructions to load. | 2637 |
2728 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src); | 2638 void Assembler::swc1(FPURegister fs, const MemOperand& src) { |
2729 GenInstrImmediate(LDC1, at, fd, off16); | 2639 GenInstrImmediate(SWC1, src.rm(), fs, src.offset_); |
2730 } | 2640 } |
| 2641 |
| 2642 void Assembler::sdc1(FPURegister fs, const MemOperand& src) { |
| 2643 GenInstrImmediate(SDC1, src.rm(), fs, src.offset_); |
2731 } | 2644 } |
2732 | 2645 |
2733 | 2646 |
2734 void Assembler::swc1(FPURegister fd, const MemOperand& src) { | |
2735 if (is_int16(src.offset_)) { | |
2736 GenInstrImmediate(SWC1, src.rm(), fd, src.offset_); | |
2737 } else { // Offset > 16 bits, use multiple instructions to store. |
2738 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src); | |
2739 GenInstrImmediate(SWC1, at, fd, off16); | |
2740 } | |
2741 } | |
2742 | |
2743 | |
2744 void Assembler::sdc1(FPURegister fd, const MemOperand& src) { | |
2745 DCHECK(!src.rm().is(at)); | |
2746 if (is_int16(src.offset_)) { | |
2747 GenInstrImmediate(SDC1, src.rm(), fd, src.offset_); | |
2748 } else { // Offset > 16 bits, use multiple instructions to store. |
2749 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src); | |
2750 GenInstrImmediate(SDC1, at, fd, off16); | |
2751 } | |
2752 } | |
2753 | |
2754 | |
2755 void Assembler::mtc1(Register rt, FPURegister fs) { | 2647 void Assembler::mtc1(Register rt, FPURegister fs) { |
2756 GenInstrRegister(COP1, MTC1, rt, fs, f0); | 2648 GenInstrRegister(COP1, MTC1, rt, fs, f0); |
2757 } | 2649 } |
2758 | 2650 |
2759 | 2651 |
2760 void Assembler::mthc1(Register rt, FPURegister fs) { | 2652 void Assembler::mthc1(Register rt, FPURegister fs) { |
2761 GenInstrRegister(COP1, MTHC1, rt, fs, f0); | 2653 GenInstrRegister(COP1, MTHC1, rt, fs, f0); |
2762 } | 2654 } |
2763 | 2655 |
2764 | 2656 |
(...skipping 1389 matching lines...)
4154 | 4046 |
4155 if (icache_flush_mode != SKIP_ICACHE_FLUSH) { | 4047 if (icache_flush_mode != SKIP_ICACHE_FLUSH) { |
4156 Assembler::FlushICache(isolate, pc, 4 * Assembler::kInstrSize); | 4048 Assembler::FlushICache(isolate, pc, 4 * Assembler::kInstrSize); |
4157 } | 4049 } |
4158 } | 4050 } |
4159 | 4051 |
4160 } // namespace internal | 4052 } // namespace internal |
4161 } // namespace v8 | 4053 } // namespace v8 |
4162 | 4054 |
4163 #endif // V8_TARGET_ARCH_MIPS64 | 4055 #endif // V8_TARGET_ARCH_MIPS64 |