| OLD | NEW |
| 1 // Copyright (c) 1994-2006 Sun Microsystems Inc. | 1 // Copyright (c) 1994-2006 Sun Microsystems Inc. |
| 2 // All Rights Reserved. | 2 // All Rights Reserved. |
| 3 // | 3 // |
| 4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
| 5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
| 6 // met: | 6 // met: |
| 7 // | 7 // |
| 8 // - Redistributions of source code must retain the above copyright notice, | 8 // - Redistributions of source code must retain the above copyright notice, |
| 9 // this list of conditions and the following disclaimer. | 9 // this list of conditions and the following disclaimer. |
| 10 // | 10 // |
| (...skipping 46 matching lines...) | |
| 57 // generation even when generating snapshots. This won't work for cross | 57 // generation even when generating snapshots. This won't work for cross |
| 58 // compilation. | 58 // compilation. |
| 59 #if defined(__mips__) && defined(__mips_hard_float) && __mips_hard_float != 0 | 59 #if defined(__mips__) && defined(__mips_hard_float) && __mips_hard_float != 0 |
| 60 answer |= 1u << FPU; | 60 answer |= 1u << FPU; |
| 61 #endif | 61 #endif |
| 62 | 62 |
| 63 return answer; | 63 return answer; |
| 64 } | 64 } |
| 65 | 65 |
| 66 | 66 |
| 67 const char* DoubleRegister::AllocationIndexToString(int index) { | |
| 68 DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters); | |
| 69 const char* const names[] = { | |
| 70 "f0", | |
| 71 "f2", | |
| 72 "f4", | |
| 73 "f6", | |
| 74 "f8", | |
| 75 "f10", | |
| 76 "f12", | |
| 77 "f14", | |
| 78 "f16", | |
| 79 "f18", | |
| 80 "f20", | |
| 81 "f22", | |
| 82 "f24", | |
| 83 "f26" | |
| 84 }; | |
| 85 return names[index]; | |
| 86 } | |
| 87 | |
| 88 | |
| 89 void CpuFeatures::ProbeImpl(bool cross_compile) { | 67 void CpuFeatures::ProbeImpl(bool cross_compile) { |
| 90 supported_ |= CpuFeaturesImpliedByCompiler(); | 68 supported_ |= CpuFeaturesImpliedByCompiler(); |
| 91 | 69 |
| 92 // Only use statically determined features for cross compile (snapshot). | 70 // Only use statically determined features for cross compile (snapshot). |
| 93 if (cross_compile) return; | 71 if (cross_compile) return; |
| 94 | 72 |
| 95 // If the compiler is allowed to use fpu then we can use fpu too in our | 73 // If the compiler is allowed to use fpu then we can use fpu too in our |
| 96 // code generation. | 74 // code generation. |
| 97 #ifndef __mips__ | 75 #ifndef __mips__ |
| 98 // For the simulator build, use FPU. | 76 // For the simulator build, use FPU. |
| (...skipping 123 matching lines...) | |
| 222 offset_ = unit * multiplier + offset_addend; | 200 offset_ = unit * multiplier + offset_addend; |
| 223 } | 201 } |
| 224 | 202 |
| 225 | 203 |
| 226 // ----------------------------------------------------------------------------- | 204 // ----------------------------------------------------------------------------- |
| 227 // Specific instructions, constants, and masks. | 205 // Specific instructions, constants, and masks. |
| 228 | 206 |
| 229 static const int kNegOffset = 0x00008000; | 207 static const int kNegOffset = 0x00008000; |
| 230 // daddiu(sp, sp, 8) aka Pop() operation or part of Pop(r) | 208 // daddiu(sp, sp, 8) aka Pop() operation or part of Pop(r) |
| 231 // operations as post-increment of sp. | 209 // operations as post-increment of sp. |
| 232 const Instr kPopInstruction = DADDIU | (kRegister_sp_Code << kRsShift) | 210 const Instr kPopInstruction = DADDIU | (Register::kCode_sp << kRsShift) | |
| 233 | (kRegister_sp_Code << kRtShift) | 211 (Register::kCode_sp << kRtShift) | |
| 234 | (kPointerSize & kImm16Mask); // NOLINT | 212 (kPointerSize & kImm16Mask); // NOLINT |
| 235 // daddiu(sp, sp, -8) part of Push(r) operation as pre-decrement of sp. | 213 // daddiu(sp, sp, -8) part of Push(r) operation as pre-decrement of sp. |
| 236 const Instr kPushInstruction = DADDIU | (kRegister_sp_Code << kRsShift) | 214 const Instr kPushInstruction = DADDIU | (Register::kCode_sp << kRsShift) | |
| 237 | (kRegister_sp_Code << kRtShift) | 215 (Register::kCode_sp << kRtShift) | |
| 238 | (-kPointerSize & kImm16Mask); // NOLINT | 216 (-kPointerSize & kImm16Mask); // NOLINT |
| 239 // sd(r, MemOperand(sp, 0)) | 217 // sd(r, MemOperand(sp, 0)) |
| 240 const Instr kPushRegPattern = SD | (kRegister_sp_Code << kRsShift) | 218 const Instr kPushRegPattern = |
| 241 | (0 & kImm16Mask); // NOLINT | 219 SD | (Register::kCode_sp << kRsShift) | (0 & kImm16Mask); // NOLINT |
| 242 // ld(r, MemOperand(sp, 0)) | 220 // ld(r, MemOperand(sp, 0)) |
| 243 const Instr kPopRegPattern = LD | (kRegister_sp_Code << kRsShift) | 221 const Instr kPopRegPattern = |
| 244 | (0 & kImm16Mask); // NOLINT | 222 LD | (Register::kCode_sp << kRsShift) | (0 & kImm16Mask); // NOLINT |
| 245 | 223 |
| 246 const Instr kLwRegFpOffsetPattern = LW | (kRegister_fp_Code << kRsShift) | 224 const Instr kLwRegFpOffsetPattern = |
| 247 | (0 & kImm16Mask); // NOLINT | 225 LW | (Register::kCode_fp << kRsShift) | (0 & kImm16Mask); // NOLINT |
| 248 | 226 |
| 249 const Instr kSwRegFpOffsetPattern = SW | (kRegister_fp_Code << kRsShift) | 227 const Instr kSwRegFpOffsetPattern = |
| 250 | (0 & kImm16Mask); // NOLINT | 228 SW | (Register::kCode_fp << kRsShift) | (0 & kImm16Mask); // NOLINT |
| 251 | 229 |
| 252 const Instr kLwRegFpNegOffsetPattern = LW | (kRegister_fp_Code << kRsShift) | 230 const Instr kLwRegFpNegOffsetPattern = LW | (Register::kCode_fp << kRsShift) | |
| 253 | (kNegOffset & kImm16Mask); // NOLINT | 231 (kNegOffset & kImm16Mask); // NOLINT |
| 254 | 232 |
| 255 const Instr kSwRegFpNegOffsetPattern = SW | (kRegister_fp_Code << kRsShift) | 233 const Instr kSwRegFpNegOffsetPattern = SW | (Register::kCode_fp << kRsShift) | |
| 256 | (kNegOffset & kImm16Mask); // NOLINT | 234 (kNegOffset & kImm16Mask); // NOLINT |
| 257 // A mask for the Rt register for push, pop, lw, sw instructions. | 235 // A mask for the Rt register for push, pop, lw, sw instructions. |
| 258 const Instr kRtMask = kRtFieldMask; | 236 const Instr kRtMask = kRtFieldMask; |
| 259 const Instr kLwSwInstrTypeMask = 0xffe00000; | 237 const Instr kLwSwInstrTypeMask = 0xffe00000; |
| 260 const Instr kLwSwInstrArgumentMask = ~kLwSwInstrTypeMask; | 238 const Instr kLwSwInstrArgumentMask = ~kLwSwInstrTypeMask; |
| 261 const Instr kLwSwOffsetMask = kImm16Mask; | 239 const Instr kLwSwOffsetMask = kImm16Mask; |
| 262 | 240 |
| 263 | 241 |
| 264 Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size) | 242 Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size) |
| 265 : AssemblerBase(isolate, buffer, buffer_size), | 243 : AssemblerBase(isolate, buffer, buffer_size), |
| 266 recorded_ast_id_(TypeFeedbackId::None()), | 244 recorded_ast_id_(TypeFeedbackId::None()), |
| (...skipping 40 matching lines...) | |
| 307 | 285 |
| 308 void Assembler::CodeTargetAlign() { | 286 void Assembler::CodeTargetAlign() { |
| 309 // No advantage to aligning branch/call targets to more than | 287 // No advantage to aligning branch/call targets to more than |
| 310 // single instruction, that I am aware of. | 288 // single instruction, that I am aware of. |
| 311 Align(4); | 289 Align(4); |
| 312 } | 290 } |
| 313 | 291 |
| 314 | 292 |
| 315 Register Assembler::GetRtReg(Instr instr) { | 293 Register Assembler::GetRtReg(Instr instr) { |
| 316 Register rt; | 294 Register rt; |
| 317 rt.code_ = (instr & kRtFieldMask) >> kRtShift; | 295 rt.reg_code = (instr & kRtFieldMask) >> kRtShift; |
| 318 return rt; | 296 return rt; |
| 319 } | 297 } |
| 320 | 298 |
| 321 | 299 |
| 322 Register Assembler::GetRsReg(Instr instr) { | 300 Register Assembler::GetRsReg(Instr instr) { |
| 323 Register rs; | 301 Register rs; |
| 324 rs.code_ = (instr & kRsFieldMask) >> kRsShift; | 302 rs.reg_code = (instr & kRsFieldMask) >> kRsShift; |
| 325 return rs; | 303 return rs; |
| 326 } | 304 } |
| 327 | 305 |
| 328 | 306 |
| 329 Register Assembler::GetRdReg(Instr instr) { | 307 Register Assembler::GetRdReg(Instr instr) { |
| 330 Register rd; | 308 Register rd; |
| 331 rd.code_ = (instr & kRdFieldMask) >> kRdShift; | 309 rd.reg_code = (instr & kRdFieldMask) >> kRdShift; |
| 332 return rd; | 310 return rd; |
| 333 } | 311 } |
| 334 | 312 |
| 335 | 313 |
| 336 uint32_t Assembler::GetRt(Instr instr) { | 314 uint32_t Assembler::GetRt(Instr instr) { |
| 337 return (instr & kRtFieldMask) >> kRtShift; | 315 return (instr & kRtFieldMask) >> kRtShift; |
| 338 } | 316 } |
| 339 | 317 |
| 340 | 318 |
| 341 uint32_t Assembler::GetRtField(Instr instr) { | 319 uint32_t Assembler::GetRtField(Instr instr) { |
| (...skipping 1862 matching lines...) | |
| 2204 } | 2182 } |
| 2205 | 2183 |
| 2206 | 2184 |
| 2207 void Assembler::movn(Register rd, Register rs, Register rt) { | 2185 void Assembler::movn(Register rd, Register rs, Register rt) { |
| 2208 GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVN); | 2186 GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVN); |
| 2209 } | 2187 } |
| 2210 | 2188 |
| 2211 | 2189 |
| 2212 void Assembler::movt(Register rd, Register rs, uint16_t cc) { | 2190 void Assembler::movt(Register rd, Register rs, uint16_t cc) { |
| 2213 Register rt; | 2191 Register rt; |
| 2214 rt.code_ = (cc & 0x0007) << 2 | 1; | 2192 rt.reg_code = (cc & 0x0007) << 2 | 1; |
| 2215 GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI); | 2193 GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI); |
| 2216 } | 2194 } |
| 2217 | 2195 |
| 2218 | 2196 |
| 2219 void Assembler::movf(Register rd, Register rs, uint16_t cc) { | 2197 void Assembler::movf(Register rd, Register rs, uint16_t cc) { |
| 2220 Register rt; | 2198 Register rt; |
| 2221 rt.code_ = (cc & 0x0007) << 2 | 0; | 2199 rt.reg_code = (cc & 0x0007) << 2 | 0; |
| 2222 GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI); | 2200 GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI); |
| 2223 } | 2201 } |
| 2224 | 2202 |
| 2225 | 2203 |
| 2226 void Assembler::min_s(FPURegister fd, FPURegister fs, FPURegister ft) { | 2204 void Assembler::min_s(FPURegister fd, FPURegister fs, FPURegister ft) { |
| 2227 min(S, fd, fs, ft); | 2205 min(S, fd, fs, ft); |
| 2228 } | 2206 } |
| 2229 | 2207 |
| 2230 | 2208 |
| 2231 void Assembler::min_d(FPURegister fd, FPURegister fs, FPURegister ft) { | 2209 void Assembler::min_d(FPURegister fd, FPURegister fs, FPURegister ft) { |
| (...skipping 281 matching lines...) | |
| 2513 | 2491 |
| 2514 void Assembler::movz_d(FPURegister fd, FPURegister fs, Register rt) { | 2492 void Assembler::movz_d(FPURegister fd, FPURegister fs, Register rt) { |
| 2515 DCHECK(kArchVariant == kMips64r2); | 2493 DCHECK(kArchVariant == kMips64r2); |
| 2516 GenInstrRegister(COP1, D, rt, fs, fd, MOVZ_C); | 2494 GenInstrRegister(COP1, D, rt, fs, fd, MOVZ_C); |
| 2517 } | 2495 } |
| 2518 | 2496 |
| 2519 | 2497 |
| 2520 void Assembler::movt_s(FPURegister fd, FPURegister fs, uint16_t cc) { | 2498 void Assembler::movt_s(FPURegister fd, FPURegister fs, uint16_t cc) { |
| 2521 DCHECK(kArchVariant == kMips64r2); | 2499 DCHECK(kArchVariant == kMips64r2); |
| 2522 FPURegister ft; | 2500 FPURegister ft; |
| 2523 ft.code_ = (cc & 0x0007) << 2 | 1; | 2501 ft.reg_code = (cc & 0x0007) << 2 | 1; |
| 2524 GenInstrRegister(COP1, S, ft, fs, fd, MOVF); | 2502 GenInstrRegister(COP1, S, ft, fs, fd, MOVF); |
| 2525 } | 2503 } |
| 2526 | 2504 |
| 2527 | 2505 |
| 2528 void Assembler::movt_d(FPURegister fd, FPURegister fs, uint16_t cc) { | 2506 void Assembler::movt_d(FPURegister fd, FPURegister fs, uint16_t cc) { |
| 2529 DCHECK(kArchVariant == kMips64r2); | 2507 DCHECK(kArchVariant == kMips64r2); |
| 2530 FPURegister ft; | 2508 FPURegister ft; |
| 2531 ft.code_ = (cc & 0x0007) << 2 | 1; | 2509 ft.reg_code = (cc & 0x0007) << 2 | 1; |
| 2532 GenInstrRegister(COP1, D, ft, fs, fd, MOVF); | 2510 GenInstrRegister(COP1, D, ft, fs, fd, MOVF); |
| 2533 } | 2511 } |
| 2534 | 2512 |
| 2535 | 2513 |
| 2536 void Assembler::movf_s(FPURegister fd, FPURegister fs, uint16_t cc) { | 2514 void Assembler::movf_s(FPURegister fd, FPURegister fs, uint16_t cc) { |
| 2537 DCHECK(kArchVariant == kMips64r2); | 2515 DCHECK(kArchVariant == kMips64r2); |
| 2538 FPURegister ft; | 2516 FPURegister ft; |
| 2539 ft.code_ = (cc & 0x0007) << 2 | 0; | 2517 ft.reg_code = (cc & 0x0007) << 2 | 0; |
| 2540 GenInstrRegister(COP1, S, ft, fs, fd, MOVF); | 2518 GenInstrRegister(COP1, S, ft, fs, fd, MOVF); |
| 2541 } | 2519 } |
| 2542 | 2520 |
| 2543 | 2521 |
| 2544 void Assembler::movf_d(FPURegister fd, FPURegister fs, uint16_t cc) { | 2522 void Assembler::movf_d(FPURegister fd, FPURegister fs, uint16_t cc) { |
| 2545 DCHECK(kArchVariant == kMips64r2); | 2523 DCHECK(kArchVariant == kMips64r2); |
| 2546 FPURegister ft; | 2524 FPURegister ft; |
| 2547 ft.code_ = (cc & 0x0007) << 2 | 0; | 2525 ft.reg_code = (cc & 0x0007) << 2 | 0; |
| 2548 GenInstrRegister(COP1, D, ft, fs, fd, MOVF); | 2526 GenInstrRegister(COP1, D, ft, fs, fd, MOVF); |
| 2549 } | 2527 } |
| 2550 | 2528 |
| 2551 | 2529 |
| 2552 void Assembler::movn_s(FPURegister fd, FPURegister fs, Register rt) { | 2530 void Assembler::movn_s(FPURegister fd, FPURegister fs, Register rt) { |
| 2553 DCHECK(kArchVariant == kMips64r2); | 2531 DCHECK(kArchVariant == kMips64r2); |
| 2554 GenInstrRegister(COP1, S, rt, fs, fd, MOVN_C); | 2532 GenInstrRegister(COP1, S, rt, fs, fd, MOVN_C); |
| 2555 } | 2533 } |
| 2556 | 2534 |
| 2557 | 2535 |
| (...skipping 718 matching lines...) | |
| 3276 if (icache_flush_mode != SKIP_ICACHE_FLUSH) { | 3254 if (icache_flush_mode != SKIP_ICACHE_FLUSH) { |
| 3277 CpuFeatures::FlushICache(pc, 4 * Assembler::kInstrSize); | 3255 CpuFeatures::FlushICache(pc, 4 * Assembler::kInstrSize); |
| 3278 } | 3256 } |
| 3279 } | 3257 } |
| 3280 | 3258 |
| 3281 | 3259 |
| 3282 } // namespace internal | 3260 } // namespace internal |
| 3283 } // namespace v8 | 3261 } // namespace v8 |
| 3284 | 3262 |
| 3285 #endif // V8_TARGET_ARCH_MIPS64 | 3263 #endif // V8_TARGET_ARCH_MIPS64 |
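The push/pop pattern constants in this diff are built by OR-ing an opcode with register and immediate fields; the CL only renames the register-code accessors (`kRegister_sp_Code` to `Register::kCode_sp`), it does not change the resulting encodings. Below is a minimal standalone sketch, not part of this CL or of V8, that reconstructs `kPopInstruction` and `kPushInstruction` from the standard MIPS64 field layout. All constant values here (DADDIU opcode, field shifts, sp = register 29, `kPointerSize` = 8) are assumptions for illustration, not values read from the file.

```cpp
// Standalone sketch: rebuild the push/pop instruction patterns from assumed
// MIPS64 encoding constants (opcode in bits 31..26, rs in 25..21, rt in
// 20..16, signed 16-bit immediate in 15..0).
#include <cstdint>
#include <cstdio>

using Instr = uint32_t;

constexpr Instr DADDIU = 25u << 26;    // assumed DADDIU opcode (0b011001)
constexpr int kRsShift = 21;           // rs field position
constexpr int kRtShift = 16;           // rt field position
constexpr Instr kImm16Mask = 0xffff;   // 16-bit immediate field
constexpr int kCode_sp = 29;           // assumed $sp register number
constexpr int kPointerSize = 8;        // 64-bit pointers on MIPS64

// daddiu sp, sp, 8  -- Pop(): post-increment sp by one pointer.
constexpr Instr kPopInstruction = DADDIU | (kCode_sp << kRsShift) |
                                  (kCode_sp << kRtShift) |
                                  (kPointerSize & kImm16Mask);

// daddiu sp, sp, -8 -- Push(): pre-decrement sp by one pointer.
constexpr Instr kPushInstruction = DADDIU | (kCode_sp << kRsShift) |
                                   (kCode_sp << kRtShift) |
                                   (-kPointerSize & kImm16Mask);

int main() {
  std::printf("pop:  0x%08x\n", kPopInstruction);   // 0x67bd0008
  std::printf("push: 0x%08x\n", kPushInstruction);  // 0x67bdfff8
  return 0;
}
```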