Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(552)

Side by Side Diff: src/mips64/macro-assembler-mips64.cc

Issue 1534183002: MIPS64: r6 compact branch optimization. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Rebasing master to include the new changes Created 4 years, 11 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « src/mips64/macro-assembler-mips64.h ('k') | src/mips64/simulator-mips64.h » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include <limits.h> // For LONG_MIN, LONG_MAX. 5 #include <limits.h> // For LONG_MIN, LONG_MAX.
6 6
7 #if V8_TARGET_ARCH_MIPS64 7 #if V8_TARGET_ARCH_MIPS64
8 8
9 #include "src/base/division-by-constant.h" 9 #include "src/base/division-by-constant.h"
10 #include "src/bootstrapper.h" 10 #include "src/bootstrapper.h"
(...skipping 1837 matching lines...) Expand 10 before | Expand all | Expand 10 after
1848 DCHECK(nan || target); 1848 DCHECK(nan || target);
1849 // Check for unordered (NaN) cases. 1849 // Check for unordered (NaN) cases.
1850 if (nan) { 1850 if (nan) {
1851 bool long_branch = nan->is_bound() ? is_near(nan) : is_trampoline_emitted(); 1851 bool long_branch = nan->is_bound() ? is_near(nan) : is_trampoline_emitted();
1852 if (kArchVariant != kMips64r6) { 1852 if (kArchVariant != kMips64r6) {
1853 if (long_branch) { 1853 if (long_branch) {
1854 Label skip; 1854 Label skip;
1855 c(UN, sizeField, cmp1, cmp2); 1855 c(UN, sizeField, cmp1, cmp2);
1856 bc1f(&skip); 1856 bc1f(&skip);
1857 nop(); 1857 nop();
1858 J(nan, bd); 1858 BranchLong(nan, bd);
1859 bind(&skip); 1859 bind(&skip);
1860 } else { 1860 } else {
1861 c(UN, sizeField, cmp1, cmp2); 1861 c(UN, sizeField, cmp1, cmp2);
1862 bc1t(nan); 1862 bc1t(nan);
1863 if (bd == PROTECT) { 1863 if (bd == PROTECT) {
1864 nop(); 1864 nop();
1865 } 1865 }
1866 } 1866 }
1867 } else { 1867 } else {
1868 // Use kDoubleCompareReg for comparison result. It has to be unavailable 1868 // Use kDoubleCompareReg for comparison result. It has to be unavailable
1869 // to lithium 1869 // to lithium
1870 // register allocator. 1870 // register allocator.
1871 DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg)); 1871 DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
1872 if (long_branch) { 1872 if (long_branch) {
1873 Label skip; 1873 Label skip;
1874 cmp(UN, sizeField, kDoubleCompareReg, cmp1, cmp2); 1874 cmp(UN, sizeField, kDoubleCompareReg, cmp1, cmp2);
1875 bc1eqz(&skip, kDoubleCompareReg); 1875 bc1eqz(&skip, kDoubleCompareReg);
1876 nop(); 1876 nop();
1877 J(nan, bd); 1877 BranchLong(nan, bd);
1878 bind(&skip); 1878 bind(&skip);
1879 } else { 1879 } else {
1880 cmp(UN, sizeField, kDoubleCompareReg, cmp1, cmp2); 1880 cmp(UN, sizeField, kDoubleCompareReg, cmp1, cmp2);
1881 bc1nez(nan, kDoubleCompareReg); 1881 bc1nez(nan, kDoubleCompareReg);
1882 if (bd == PROTECT) { 1882 if (bd == PROTECT) {
1883 nop(); 1883 nop();
1884 } 1884 }
1885 } 1885 }
1886 } 1886 }
1887 } 1887 }
1888 1888
1889 if (target) { 1889 if (target) {
1890 bool long_branch = 1890 bool long_branch =
1891 target->is_bound() ? is_near(target) : is_trampoline_emitted(); 1891 target->is_bound() ? is_near(target) : is_trampoline_emitted();
1892 if (long_branch) { 1892 if (long_branch) {
1893 Label skip; 1893 Label skip;
1894 Condition neg_cond = NegateFpuCondition(cond); 1894 Condition neg_cond = NegateFpuCondition(cond);
1895 BranchShortF(sizeField, &skip, neg_cond, cmp1, cmp2, bd); 1895 BranchShortF(sizeField, &skip, neg_cond, cmp1, cmp2, bd);
1896 J(target, bd); 1896 BranchLong(target, bd);
1897 bind(&skip); 1897 bind(&skip);
1898 } else { 1898 } else {
1899 BranchShortF(sizeField, target, cond, cmp1, cmp2, bd); 1899 BranchShortF(sizeField, target, cond, cmp1, cmp2, bd);
1900 } 1900 }
1901 } 1901 }
1902 } 1902 }
1903 1903
1904 1904
1905 void MacroAssembler::BranchShortF(SecondaryField sizeField, Label* target, 1905 void MacroAssembler::BranchShortF(SecondaryField sizeField, Label* target,
1906 Condition cc, FPURegister cmp1, 1906 Condition cc, FPURegister cmp1,
(...skipping 398 matching lines...) Expand 10 before | Expand all | Expand 10 after
2305 2305
2306 2306
2307 // Emulated conditional branches do not emit a nop in the branch delay slot. 2307 // Emulated conditional branches do not emit a nop in the branch delay slot.
2308 // 2308 //
2309 // BRANCH_ARGS_CHECK checks that conditional jump arguments are correct. 2309 // BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
2310 #define BRANCH_ARGS_CHECK(cond, rs, rt) DCHECK( \ 2310 #define BRANCH_ARGS_CHECK(cond, rs, rt) DCHECK( \
2311 (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) || \ 2311 (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) || \
2312 (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg)))) 2312 (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
2313 2313
2314 2314
2315 void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) { 2315 void MacroAssembler::Branch(int32_t offset, BranchDelaySlot bdslot) {
2316 DCHECK(kArchVariant == kMips64r6 ? is_int26(offset) : is_int16(offset));
2316 BranchShort(offset, bdslot); 2317 BranchShort(offset, bdslot);
2317 } 2318 }
2318 2319
2319 2320
2320 void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs, 2321 void MacroAssembler::Branch(int32_t offset, Condition cond, Register rs,
2321 const Operand& rt, 2322 const Operand& rt, BranchDelaySlot bdslot) {
2322 BranchDelaySlot bdslot) { 2323 bool is_near = BranchShortCheck(offset, nullptr, cond, rs, rt, bdslot);
2323 BranchShort(offset, cond, rs, rt, bdslot); 2324 DCHECK(is_near);
2325 USE(is_near);
2324 } 2326 }
2325 2327
2326 2328
2327 void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) { 2329 void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
2328 if (L->is_bound()) { 2330 if (L->is_bound()) {
2329 if (is_near(L)) { 2331 if (is_near_branch(L)) {
2330 BranchShort(L, bdslot); 2332 BranchShort(L, bdslot);
2331 } else { 2333 } else {
2332 J(L, bdslot); 2334 BranchLong(L, bdslot);
2333 } 2335 }
2334 } else { 2336 } else {
2335 if (is_trampoline_emitted()) { 2337 if (is_trampoline_emitted()) {
2336 J(L, bdslot); 2338 BranchLong(L, bdslot);
2337 } else { 2339 } else {
2338 BranchShort(L, bdslot); 2340 BranchShort(L, bdslot);
2339 } 2341 }
2340 } 2342 }
2341 } 2343 }
2342 2344
2343 2345
2344 void MacroAssembler::Branch(Label* L, Condition cond, Register rs, 2346 void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
2345 const Operand& rt, 2347 const Operand& rt,
2346 BranchDelaySlot bdslot) { 2348 BranchDelaySlot bdslot) {
2347 if (L->is_bound()) { 2349 if (L->is_bound()) {
2348 if (is_near(L)) { 2350 if (!BranchShortCheck(0, L, cond, rs, rt, bdslot)) {
2349 BranchShort(L, cond, rs, rt, bdslot);
2350 } else {
2351 if (cond != cc_always) { 2351 if (cond != cc_always) {
2352 Label skip; 2352 Label skip;
2353 Condition neg_cond = NegateCondition(cond); 2353 Condition neg_cond = NegateCondition(cond);
2354 BranchShort(&skip, neg_cond, rs, rt); 2354 BranchShort(&skip, neg_cond, rs, rt);
2355 J(L, bdslot); 2355 BranchLong(L, bdslot);
2356 bind(&skip); 2356 bind(&skip);
2357 } else { 2357 } else {
2358 J(L, bdslot); 2358 BranchLong(L, bdslot);
2359 } 2359 }
2360 } 2360 }
2361 } else { 2361 } else {
2362 if (is_trampoline_emitted()) { 2362 if (is_trampoline_emitted()) {
2363 if (cond != cc_always) { 2363 if (cond != cc_always) {
2364 Label skip; 2364 Label skip;
2365 Condition neg_cond = NegateCondition(cond); 2365 Condition neg_cond = NegateCondition(cond);
2366 BranchShort(&skip, neg_cond, rs, rt); 2366 BranchShort(&skip, neg_cond, rs, rt);
2367 J(L, bdslot); 2367 BranchLong(L, bdslot);
2368 bind(&skip); 2368 bind(&skip);
2369 } else { 2369 } else {
2370 J(L, bdslot); 2370 BranchLong(L, bdslot);
2371 } 2371 }
2372 } else { 2372 } else {
2373 BranchShort(L, cond, rs, rt, bdslot); 2373 BranchShort(L, cond, rs, rt, bdslot);
2374 } 2374 }
2375 } 2375 }
2376 } 2376 }
2377 2377
2378 2378
2379 void MacroAssembler::Branch(Label* L, 2379 void MacroAssembler::Branch(Label* L,
2380 Condition cond, 2380 Condition cond,
2381 Register rs, 2381 Register rs,
2382 Heap::RootListIndex index, 2382 Heap::RootListIndex index,
2383 BranchDelaySlot bdslot) { 2383 BranchDelaySlot bdslot) {
2384 LoadRoot(at, index); 2384 LoadRoot(at, index);
2385 Branch(L, cond, rs, Operand(at), bdslot); 2385 Branch(L, cond, rs, Operand(at), bdslot);
2386 } 2386 }
2387 2387
2388 2388
2389 void MacroAssembler::BranchShort(int16_t offset, BranchDelaySlot bdslot) { 2389 void MacroAssembler::BranchShortHelper(int16_t offset, Label* L,
2390 BranchDelaySlot bdslot) {
2391 DCHECK(L == nullptr || offset == 0);
2392 offset = GetOffset(offset, L, OffsetSize::kOffset16);
2390 b(offset); 2393 b(offset);
2391 2394
2392 // Emit a nop in the branch delay slot if required. 2395 // Emit a nop in the branch delay slot if required.
2393 if (bdslot == PROTECT) 2396 if (bdslot == PROTECT)
2394 nop(); 2397 nop();
2395 } 2398 }
2396 2399
2397 2400
2398 void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs, 2401 void MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L) {
2399 const Operand& rt, 2402 DCHECK(L == nullptr || offset == 0);
2400 BranchDelaySlot bdslot) { 2403 offset = GetOffset(offset, L, OffsetSize::kOffset26);
2401 BRANCH_ARGS_CHECK(cond, rs, rt); 2404 bc(offset);
2402 DCHECK(!rs.is(zero_reg)); 2405 }
2406
2407
2408 void MacroAssembler::BranchShort(int32_t offset, BranchDelaySlot bdslot) {
2409 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
2410 DCHECK(is_int26(offset));
2411 BranchShortHelperR6(offset, nullptr);
2412 } else {
2413 DCHECK(is_int16(offset));
2414 BranchShortHelper(offset, nullptr, bdslot);
2415 }
2416 }
2417
2418
2419 void MacroAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
2420 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
2421 BranchShortHelperR6(0, L);
2422 } else {
2423 BranchShortHelper(0, L, bdslot);
2424 }
2425 }
2426
2427
2428 static inline bool IsZero(const Operand& rt) {
2429 if (rt.is_reg()) {
2430 return rt.rm().is(zero_reg);
2431 } else {
2432 return rt.immediate() == 0;
2433 }
2434 }
2435
2436
2437 int32_t MacroAssembler::GetOffset(int32_t offset, Label* L, OffsetSize bits) {
2438 if (L) {
2439 offset = branch_offset_helper(L, bits) >> 2;
2440 } else {
2441 DCHECK(is_intn(offset, bits));
2442 }
2443 return offset;
2444 }
2445
2446
2447 Register MacroAssembler::GetRtAsRegisterHelper(const Operand& rt,
2448 Register scratch) {
2403 Register r2 = no_reg; 2449 Register r2 = no_reg;
2404 Register scratch = at;
2405
2406 if (rt.is_reg()) { 2450 if (rt.is_reg()) {
2407 // NOTE: 'at' can be clobbered by Branch but it is legal to use it as rs or 2451 r2 = rt.rm_;
2408 // rt. 2452 } else {
2453 r2 = scratch;
2454 li(r2, rt);
2455 }
2456
2457 return r2;
2458 }
2459
2460
2461 bool MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L,
2462 Condition cond, Register rs,
2463 const Operand& rt) {
2464 DCHECK(L == nullptr || offset == 0);
2465 Register scratch = rs.is(at) ? t8 : at;
2466 OffsetSize bits = OffsetSize::kOffset16;
2467
2468 // Be careful to always use shifted_branch_offset only just before the
2469 // branch instruction, as the location will be remembered for patching the
2470 // target.
2471 {
2409 BlockTrampolinePoolScope block_trampoline_pool(this); 2472 BlockTrampolinePoolScope block_trampoline_pool(this);
2410 r2 = rt.rm_;
2411 switch (cond) { 2473 switch (cond) {
2412 case cc_always: 2474 case cc_always:
2413 b(offset); 2475 bits = OffsetSize::kOffset26;
2476 if (!is_near(L, bits)) return false;
2477 offset = GetOffset(offset, L, bits);
2478 bc(offset);
2414 break; 2479 break;
2415 case eq: 2480 case eq:
2416 beq(rs, r2, offset); 2481 if (rs.code() == rt.rm_.reg_code) {
2482 // Pre R6 beq is used here to make the code patchable. Otherwise bc
2483 // should be used which has no condition field so is not patchable.
2484 bits = OffsetSize::kOffset16;
2485 if (!is_near(L, bits)) return false;
2486 scratch = GetRtAsRegisterHelper(rt, scratch);
2487 offset = GetOffset(offset, L, bits);
2488 beq(rs, scratch, offset);
2489 nop();
2490 } else if (IsZero(rt)) {
2491 bits = OffsetSize::kOffset21;
2492 if (!is_near(L, bits)) return false;
2493 offset = GetOffset(offset, L, bits);
2494 beqzc(rs, offset);
2495 } else {
2496 // We don't want any other register but scratch clobbered.
2497 bits = OffsetSize::kOffset16;
2498 if (!is_near(L, bits)) return false;
2499 scratch = GetRtAsRegisterHelper(rt, scratch);
2500 offset = GetOffset(offset, L, bits);
2501 beqc(rs, scratch, offset);
2502 }
2417 break; 2503 break;
2418 case ne: 2504 case ne:
2419 bne(rs, r2, offset); 2505 if (rs.code() == rt.rm_.reg_code) {
2420 break; 2506 // Pre R6 bne is used here to make the code patchable. Otherwise we
2507 // should not generate any instruction.
2508 bits = OffsetSize::kOffset16;
2509 if (!is_near(L, bits)) return false;
2510 scratch = GetRtAsRegisterHelper(rt, scratch);
2511 offset = GetOffset(offset, L, bits);
2512 bne(rs, scratch, offset);
2513 nop();
2514 } else if (IsZero(rt)) {
2515 bits = OffsetSize::kOffset21;
2516 if (!is_near(L, bits)) return false;
2517 offset = GetOffset(offset, L, bits);
2518 bnezc(rs, offset);
2519 } else {
2520 // We don't want any other register but scratch clobbered.
2521 bits = OffsetSize::kOffset16;
2522 if (!is_near(L, bits)) return false;
2523 scratch = GetRtAsRegisterHelper(rt, scratch);
2524 offset = GetOffset(offset, L, bits);
2525 bnec(rs, scratch, offset);
2526 }
2527 break;
2528
2421 // Signed comparison. 2529 // Signed comparison.
2422 case greater: 2530 case greater:
2423 if (r2.is(zero_reg)) { 2531 // rs > rt
2424 bgtz(rs, offset); 2532 if (rs.code() == rt.rm_.reg_code) {
2425 } else { 2533 break; // No code needs to be emitted.
2426 slt(scratch, r2, rs); 2534 } else if (rs.is(zero_reg)) {
2427 bne(scratch, zero_reg, offset); 2535 bits = OffsetSize::kOffset16;
2536 if (!is_near(L, bits)) return false;
2537 scratch = GetRtAsRegisterHelper(rt, scratch);
2538 offset = GetOffset(offset, L, bits);
2539 bltzc(scratch, offset);
2540 } else if (IsZero(rt)) {
2541 bits = OffsetSize::kOffset16;
2542 if (!is_near(L, bits)) return false;
2543 offset = GetOffset(offset, L, bits);
2544 bgtzc(rs, offset);
2545 } else {
2546 bits = OffsetSize::kOffset16;
2547 if (!is_near(L, bits)) return false;
2548 scratch = GetRtAsRegisterHelper(rt, scratch);
2549 DCHECK(!rs.is(scratch));
2550 offset = GetOffset(offset, L, bits);
2551 bltc(scratch, rs, offset);
2428 } 2552 }
2429 break; 2553 break;
2430 case greater_equal: 2554 case greater_equal:
2431 if (r2.is(zero_reg)) { 2555 // rs >= rt
2432 bgez(rs, offset); 2556 if (rs.code() == rt.rm_.reg_code) {
2433 } else { 2557 bits = OffsetSize::kOffset26;
2434 slt(scratch, rs, r2); 2558 if (!is_near(L, bits)) return false;
2435 beq(scratch, zero_reg, offset); 2559 offset = GetOffset(offset, L, bits);
2560 bc(offset);
2561 } else if (rs.is(zero_reg)) {
2562 bits = OffsetSize::kOffset16;
2563 if (!is_near(L, bits)) return false;
2564 scratch = GetRtAsRegisterHelper(rt, scratch);
2565 offset = GetOffset(offset, L, bits);
2566 blezc(scratch, offset);
2567 } else if (IsZero(rt)) {
2568 bits = OffsetSize::kOffset16;
2569 if (!is_near(L, bits)) return false;
2570 offset = GetOffset(offset, L, bits);
2571 bgezc(rs, offset);
2572 } else {
2573 bits = OffsetSize::kOffset16;
2574 if (!is_near(L, bits)) return false;
2575 scratch = GetRtAsRegisterHelper(rt, scratch);
2576 DCHECK(!rs.is(scratch));
2577 offset = GetOffset(offset, L, bits);
2578 bgec(rs, scratch, offset);
2436 } 2579 }
2437 break; 2580 break;
2438 case less: 2581 case less:
2439 if (r2.is(zero_reg)) { 2582 // rs < rt
2440 bltz(rs, offset); 2583 if (rs.code() == rt.rm_.reg_code) {
2441 } else { 2584 break; // No code needs to be emitted.
2442 slt(scratch, rs, r2); 2585 } else if (rs.is(zero_reg)) {
2443 bne(scratch, zero_reg, offset); 2586 bits = OffsetSize::kOffset16;
2587 if (!is_near(L, bits)) return false;
2588 scratch = GetRtAsRegisterHelper(rt, scratch);
2589 offset = GetOffset(offset, L, bits);
2590 bgtzc(scratch, offset);
2591 } else if (IsZero(rt)) {
2592 bits = OffsetSize::kOffset16;
2593 if (!is_near(L, bits)) return false;
2594 offset = GetOffset(offset, L, bits);
2595 bltzc(rs, offset);
2596 } else {
2597 bits = OffsetSize::kOffset16;
2598 if (!is_near(L, bits)) return false;
2599 scratch = GetRtAsRegisterHelper(rt, scratch);
2600 DCHECK(!rs.is(scratch));
2601 offset = GetOffset(offset, L, bits);
2602 bltc(rs, scratch, offset);
2444 } 2603 }
2445 break; 2604 break;
2446 case less_equal: 2605 case less_equal:
2447 if (r2.is(zero_reg)) { 2606 // rs <= rt
2448 blez(rs, offset); 2607 if (rs.code() == rt.rm_.reg_code) {
2449 } else { 2608 bits = OffsetSize::kOffset26;
2450 slt(scratch, r2, rs); 2609 if (!is_near(L, bits)) return false;
2451 beq(scratch, zero_reg, offset); 2610 offset = GetOffset(offset, L, bits);
2452 } 2611 bc(offset);
2453 break; 2612 } else if (rs.is(zero_reg)) {
2613 bits = OffsetSize::kOffset16;
2614 if (!is_near(L, bits)) return false;
2615 scratch = GetRtAsRegisterHelper(rt, scratch);
2616 offset = GetOffset(offset, L, bits);
2617 bgezc(scratch, offset);
2618 } else if (IsZero(rt)) {
2619 bits = OffsetSize::kOffset16;
2620 if (!is_near(L, bits)) return false;
2621 offset = GetOffset(offset, L, bits);
2622 blezc(rs, offset);
2623 } else {
2624 bits = OffsetSize::kOffset16;
2625 if (!is_near(L, bits)) return false;
2626 scratch = GetRtAsRegisterHelper(rt, scratch);
2627 DCHECK(!rs.is(scratch));
2628 offset = GetOffset(offset, L, bits);
2629 bgec(scratch, rs, offset);
2630 }
2631 break;
2632
2454 // Unsigned comparison. 2633 // Unsigned comparison.
2455 case Ugreater: 2634 case Ugreater:
2456 if (r2.is(zero_reg)) { 2635 // rs > rt
2457 bne(rs, zero_reg, offset); 2636 if (rs.code() == rt.rm_.reg_code) {
2458 } else { 2637 break; // No code needs to be emitted.
2459 sltu(scratch, r2, rs); 2638 } else if (rs.is(zero_reg)) {
2460 bne(scratch, zero_reg, offset); 2639 bits = OffsetSize::kOffset21;
2640 if (!is_near(L, bits)) return false;
2641 scratch = GetRtAsRegisterHelper(rt, scratch);
2642 offset = GetOffset(offset, L, bits);
2643 bnezc(scratch, offset);
2644 } else if (IsZero(rt)) {
2645 bits = OffsetSize::kOffset21;
2646 if (!is_near(L, bits)) return false;
2647 offset = GetOffset(offset, L, bits);
2648 bnezc(rs, offset);
2649 } else {
2650 bits = OffsetSize::kOffset16;
2651 if (!is_near(L, bits)) return false;
2652 scratch = GetRtAsRegisterHelper(rt, scratch);
2653 DCHECK(!rs.is(scratch));
2654 offset = GetOffset(offset, L, bits);
2655 bltuc(scratch, rs, offset);
2461 } 2656 }
2462 break; 2657 break;
2463 case Ugreater_equal: 2658 case Ugreater_equal:
2464 if (r2.is(zero_reg)) { 2659 // rs >= rt
2465 b(offset); 2660 if (rs.code() == rt.rm_.reg_code) {
2466 } else { 2661 bits = OffsetSize::kOffset26;
2467 sltu(scratch, rs, r2); 2662 if (!is_near(L, bits)) return false;
2468 beq(scratch, zero_reg, offset); 2663 offset = GetOffset(offset, L, bits);
2664 bc(offset);
2665 } else if (rs.is(zero_reg)) {
2666 bits = OffsetSize::kOffset21;
2667 if (!is_near(L, bits)) return false;
2668 scratch = GetRtAsRegisterHelper(rt, scratch);
2669 offset = GetOffset(offset, L, bits);
2670 beqzc(scratch, offset);
2671 } else if (IsZero(rt)) {
2672 bits = OffsetSize::kOffset26;
2673 if (!is_near(L, bits)) return false;
2674 offset = GetOffset(offset, L, bits);
2675 bc(offset);
2676 } else {
2677 bits = OffsetSize::kOffset16;
2678 if (!is_near(L, bits)) return false;
2679 scratch = GetRtAsRegisterHelper(rt, scratch);
2680 DCHECK(!rs.is(scratch));
2681 offset = GetOffset(offset, L, bits);
2682 bgeuc(rs, scratch, offset);
2469 } 2683 }
2470 break; 2684 break;
2471 case Uless: 2685 case Uless:
2472 if (r2.is(zero_reg)) { 2686 // rs < rt
2473 // No code needs to be emitted. 2687 if (rs.code() == rt.rm_.reg_code) {
2474 return; 2688 break; // No code needs to be emitted.
2475 } else { 2689 } else if (rs.is(zero_reg)) {
2476 sltu(scratch, rs, r2); 2690 bits = OffsetSize::kOffset21;
2477 bne(scratch, zero_reg, offset); 2691 if (!is_near(L, bits)) return false;
2692 scratch = GetRtAsRegisterHelper(rt, scratch);
2693 offset = GetOffset(offset, L, bits);
2694 bnezc(scratch, offset);
2695 } else if (IsZero(rt)) {
2696 break; // No code needs to be emitted.
2697 } else {
2698 bits = OffsetSize::kOffset16;
2699 if (!is_near(L, bits)) return false;
2700 scratch = GetRtAsRegisterHelper(rt, scratch);
2701 DCHECK(!rs.is(scratch));
2702 offset = GetOffset(offset, L, bits);
2703 bltuc(rs, scratch, offset);
2478 } 2704 }
2479 break; 2705 break;
2480 case Uless_equal: 2706 case Uless_equal:
2481 if (r2.is(zero_reg)) { 2707 // rs <= rt
2482 beq(rs, zero_reg, offset); 2708 if (rs.code() == rt.rm_.reg_code) {
2483 } else { 2709 bits = OffsetSize::kOffset26;
2484 sltu(scratch, r2, rs); 2710 if (!is_near(L, bits)) return false;
2485 beq(scratch, zero_reg, offset); 2711 offset = GetOffset(offset, L, bits);
2712 bc(offset);
2713 } else if (rs.is(zero_reg)) {
2714 bits = OffsetSize::kOffset26;
2715 if (!is_near(L, bits)) return false;
2716 scratch = GetRtAsRegisterHelper(rt, scratch);
2717 offset = GetOffset(offset, L, bits);
2718 bc(offset);
2719 } else if (IsZero(rt)) {
2720 bits = OffsetSize::kOffset21;
2721 if (!is_near(L, bits)) return false;
2722 offset = GetOffset(offset, L, bits);
2723 beqzc(rs, offset);
2724 } else {
2725 bits = OffsetSize::kOffset16;
2726 if (!is_near(L, bits)) return false;
2727 scratch = GetRtAsRegisterHelper(rt, scratch);
2728 DCHECK(!rs.is(scratch));
2729 offset = GetOffset(offset, L, bits);
2730 bgeuc(scratch, rs, offset);
2486 } 2731 }
2487 break; 2732 break;
2488 default: 2733 default:
2489 UNREACHABLE(); 2734 UNREACHABLE();
2490 } 2735 }
2491 } else { 2736 }
2492 // Be careful to always use shifted_branch_offset only just before the 2737 CheckTrampolinePoolQuick(1);
2493 // branch instruction, as the location will be remembered for patching the 2738 return true;
2494 // target. 2739 }
2740
2741
2742 bool MacroAssembler::BranchShortHelper(int16_t offset, Label* L, Condition cond,
2743 Register rs, const Operand& rt,
2744 BranchDelaySlot bdslot) {
2745 DCHECK(L == nullptr || offset == 0);
2746 if (!is_near(L, OffsetSize::kOffset16)) return false;
2747
2748 Register scratch = at;
2749 int32_t offset32;
2750
2751 // Be careful to always use shifted_branch_offset only just before the
2752 // branch instruction, as the location will be remembered for patching the
2753 // target.
2754 {
2495 BlockTrampolinePoolScope block_trampoline_pool(this); 2755 BlockTrampolinePoolScope block_trampoline_pool(this);
2496 switch (cond) { 2756 switch (cond) {
2497 case cc_always: 2757 case cc_always:
2498 b(offset); 2758 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2759 b(offset32);
2499 break; 2760 break;
2500 case eq: 2761 case eq:
2501 if (rt.imm64_ == 0) { 2762 if (IsZero(rt)) {
2502 beq(rs, zero_reg, offset); 2763 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2764 beq(rs, zero_reg, offset32);
2503 } else { 2765 } else {
2504 // We don't want any other register but scratch clobbered. 2766 // We don't want any other register but scratch clobbered.
2505 DCHECK(!scratch.is(rs)); 2767 scratch = GetRtAsRegisterHelper(rt, scratch);
2506 r2 = scratch; 2768 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2507 li(r2, rt); 2769 beq(rs, scratch, offset32);
2508 beq(rs, r2, offset);
2509 } 2770 }
2510 break; 2771 break;
2511 case ne: 2772 case ne:
2512 if (rt.imm64_ == 0) { 2773 if (IsZero(rt)) {
2513 bne(rs, zero_reg, offset); 2774 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2775 bne(rs, zero_reg, offset32);
2514 } else { 2776 } else {
2515 // We don't want any other register but scratch clobbered. 2777 // We don't want any other register but scratch clobbered.
2516 DCHECK(!scratch.is(rs)); 2778 scratch = GetRtAsRegisterHelper(rt, scratch);
2517 r2 = scratch; 2779 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2518 li(r2, rt); 2780 bne(rs, scratch, offset32);
2519 bne(rs, r2, offset); 2781 }
2520 } 2782 break;
2521 break; 2783
2522 // Signed comparison. 2784 // Signed comparison.
2523 case greater: 2785 case greater:
2524 if (rt.imm64_ == 0) { 2786 if (IsZero(rt)) {
2525 bgtz(rs, offset); 2787 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2526 } else { 2788 bgtz(rs, offset32);
2527 r2 = scratch; 2789 } else {
2528 li(r2, rt); 2790 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
2529 slt(scratch, r2, rs); 2791 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2530 bne(scratch, zero_reg, offset); 2792 bne(scratch, zero_reg, offset32);
2531 } 2793 }
2532 break; 2794 break;
2533 case greater_equal: 2795 case greater_equal:
2534 if (rt.imm64_ == 0) { 2796 if (IsZero(rt)) {
2535 bgez(rs, offset); 2797 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2536 } else if (is_int16(rt.imm64_)) { 2798 bgez(rs, offset32);
2537 slti(scratch, rs, static_cast<int32_t>(rt.imm64_)); 2799 } else {
2538 beq(scratch, zero_reg, offset); 2800 Slt(scratch, rs, rt);
2539 } else { 2801 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2540 r2 = scratch; 2802 beq(scratch, zero_reg, offset32);
2541 li(r2, rt);
2542 slt(scratch, rs, r2);
2543 beq(scratch, zero_reg, offset);
2544 } 2803 }
2545 break; 2804 break;
2546 case less: 2805 case less:
2547 if (rt.imm64_ == 0) { 2806 if (IsZero(rt)) {
2548 bltz(rs, offset); 2807 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2549 } else if (is_int16(rt.imm64_)) { 2808 bltz(rs, offset32);
2550 slti(scratch, rs, static_cast<int32_t>(rt.imm64_)); 2809 } else {
2551 bne(scratch, zero_reg, offset); 2810 Slt(scratch, rs, rt);
2552 } else { 2811 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2553 r2 = scratch; 2812 bne(scratch, zero_reg, offset32);
2554 li(r2, rt);
2555 slt(scratch, rs, r2);
2556 bne(scratch, zero_reg, offset);
2557 } 2813 }
2558 break; 2814 break;
2559 case less_equal: 2815 case less_equal:
2560 if (rt.imm64_ == 0) { 2816 if (IsZero(rt)) {
2561 blez(rs, offset); 2817 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2562 } else { 2818 blez(rs, offset32);
2563 r2 = scratch; 2819 } else {
2564 li(r2, rt); 2820 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
2565 slt(scratch, r2, rs); 2821 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2566 beq(scratch, zero_reg, offset); 2822 beq(scratch, zero_reg, offset32);
2567 } 2823 }
2568 break; 2824 break;
2825
2569 // Unsigned comparison. 2826 // Unsigned comparison.
2570 case Ugreater: 2827 case Ugreater:
2571 if (rt.imm64_ == 0) { 2828 if (IsZero(rt)) {
2572 bne(rs, zero_reg, offset); 2829 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2573 } else { 2830 bne(rs, zero_reg, offset32);
2574 r2 = scratch; 2831 } else {
2575 li(r2, rt); 2832 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
2576 sltu(scratch, r2, rs); 2833 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2577 bne(scratch, zero_reg, offset); 2834 bne(scratch, zero_reg, offset32);
2578 } 2835 }
2579 break; 2836 break;
2580 case Ugreater_equal: 2837 case Ugreater_equal:
2581 if (rt.imm64_ == 0) { 2838 if (IsZero(rt)) {
2582 b(offset); 2839 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2583 } else if (is_int16(rt.imm64_)) { 2840 b(offset32);
2584 sltiu(scratch, rs, static_cast<int32_t>(rt.imm64_)); 2841 } else {
2585 beq(scratch, zero_reg, offset); 2842 Sltu(scratch, rs, rt);
2586 } else { 2843 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2587 r2 = scratch; 2844 beq(scratch, zero_reg, offset32);
2588 li(r2, rt);
2589 sltu(scratch, rs, r2);
2590 beq(scratch, zero_reg, offset);
2591 } 2845 }
2592 break; 2846 break;
2593 case Uless: 2847 case Uless:
2594 if (rt.imm64_ == 0) { 2848 if (IsZero(rt)) {
2595 // No code needs to be emitted. 2849 return true; // No code needs to be emitted.
2596 return; 2850 } else {
2597 } else if (is_int16(rt.imm64_)) { 2851 Sltu(scratch, rs, rt);
2598 sltiu(scratch, rs, static_cast<int32_t>(rt.imm64_)); 2852 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2599 bne(scratch, zero_reg, offset); 2853 bne(scratch, zero_reg, offset32);
2600 } else {
2601 r2 = scratch;
2602 li(r2, rt);
2603 sltu(scratch, rs, r2);
2604 bne(scratch, zero_reg, offset);
2605 } 2854 }
2606 break; 2855 break;
2607 case Uless_equal: 2856 case Uless_equal:
2608 if (rt.imm64_ == 0) { 2857 if (IsZero(rt)) {
2609 beq(rs, zero_reg, offset); 2858 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2610 } else { 2859 beq(rs, zero_reg, offset32);
2611 r2 = scratch; 2860 } else {
2612 li(r2, rt); 2861 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
2613 sltu(scratch, r2, rs); 2862 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
2614 beq(scratch, zero_reg, offset); 2863 beq(scratch, zero_reg, offset32);
2615 } 2864 }
2616 break; 2865 break;
2617 default: 2866 default:
2618 UNREACHABLE(); 2867 UNREACHABLE();
2619 } 2868 }
2620 } 2869 }
2870
2621 // Emit a nop in the branch delay slot if required. 2871 // Emit a nop in the branch delay slot if required.
2622 if (bdslot == PROTECT) 2872 if (bdslot == PROTECT)
2623 nop(); 2873 nop();
2624 } 2874
2625 2875 return true;
2626 2876 }
2627 void MacroAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) { 2877
2628 // We use branch_offset as an argument for the branch instructions to be sure 2878
2629 // it is called just before generating the branch instruction, as needed. 2879 bool MacroAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond,
2630 2880 Register rs, const Operand& rt,
2631 b(shifted_branch_offset(L, false)); 2881 BranchDelaySlot bdslot) {
2632 2882 BRANCH_ARGS_CHECK(cond, rs, rt);
2633 // Emit a nop in the branch delay slot if required. 2883
2634 if (bdslot == PROTECT) 2884 if (!L) {
2635 nop(); 2885 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
2886 DCHECK(is_int26(offset));
2887 return BranchShortHelperR6(offset, nullptr, cond, rs, rt);
2888 } else {
2889 DCHECK(is_int16(offset));
2890 return BranchShortHelper(offset, nullptr, cond, rs, rt, bdslot);
2891 }
2892 } else {
2893 DCHECK(offset == 0);
2894 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
2895 return BranchShortHelperR6(0, L, cond, rs, rt);
2896 } else {
2897 return BranchShortHelper(0, L, cond, rs, rt, bdslot);
2898 }
2899 }
2900 return false;
2901 }
2902
2903
2904 void MacroAssembler::BranchShort(int32_t offset, Condition cond, Register rs,
2905 const Operand& rt, BranchDelaySlot bdslot) {
2906 BranchShortCheck(offset, nullptr, cond, rs, rt, bdslot);
2636 } 2907 }
2637 2908
2638 2909
2639 void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs, 2910 void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
2640 const Operand& rt, 2911 const Operand& rt, BranchDelaySlot bdslot) {
2641 BranchDelaySlot bdslot) { 2912 BranchShortCheck(0, L, cond, rs, rt, bdslot);
2642 BRANCH_ARGS_CHECK(cond, rs, rt); 2913 }
2643 2914
2644 int32_t offset = 0; 2915
2645 Register r2 = no_reg; 2916 void MacroAssembler::BranchAndLink(int32_t offset, BranchDelaySlot bdslot) {
2646 Register scratch = at;
2647 if (rt.is_reg()) {
2648 BlockTrampolinePoolScope block_trampoline_pool(this);
2649 r2 = rt.rm_;
2650 // Be careful to always use shifted_branch_offset only just before the
2651 // branch instruction, as the location will be remember for patching the
2652 // target.
2653 switch (cond) {
2654 case cc_always:
2655 offset = shifted_branch_offset(L, false);
2656 b(offset);
2657 break;
2658 case eq:
2659 offset = shifted_branch_offset(L, false);
2660 beq(rs, r2, offset);
2661 break;
2662 case ne:
2663 offset = shifted_branch_offset(L, false);
2664 bne(rs, r2, offset);
2665 break;
2666 // Signed comparison.
2667 case greater:
2668 if (r2.is(zero_reg)) {
2669 offset = shifted_branch_offset(L, false);
2670 bgtz(rs, offset);
2671 } else {
2672 slt(scratch, r2, rs);
2673 offset = shifted_branch_offset(L, false);
2674 bne(scratch, zero_reg, offset);
2675 }
2676 break;
2677 case greater_equal:
2678 if (r2.is(zero_reg)) {
2679 offset = shifted_branch_offset(L, false);
2680 bgez(rs, offset);
2681 } else {
2682 slt(scratch, rs, r2);
2683 offset = shifted_branch_offset(L, false);
2684 beq(scratch, zero_reg, offset);
2685 }
2686 break;
2687 case less:
2688 if (r2.is(zero_reg)) {
2689 offset = shifted_branch_offset(L, false);
2690 bltz(rs, offset);
2691 } else {
2692 slt(scratch, rs, r2);
2693 offset = shifted_branch_offset(L, false);
2694 bne(scratch, zero_reg, offset);
2695 }
2696 break;
2697 case less_equal:
2698 if (r2.is(zero_reg)) {
2699 offset = shifted_branch_offset(L, false);
2700 blez(rs, offset);
2701 } else {
2702 slt(scratch, r2, rs);
2703 offset = shifted_branch_offset(L, false);
2704 beq(scratch, zero_reg, offset);
2705 }
2706 break;
2707 // Unsigned comparison.
2708 case Ugreater:
2709 if (r2.is(zero_reg)) {
2710 offset = shifted_branch_offset(L, false);
2711 bne(rs, zero_reg, offset);
2712 } else {
2713 sltu(scratch, r2, rs);
2714 offset = shifted_branch_offset(L, false);
2715 bne(scratch, zero_reg, offset);
2716 }
2717 break;
2718 case Ugreater_equal:
2719 if (r2.is(zero_reg)) {
2720 offset = shifted_branch_offset(L, false);
2721 b(offset);
2722 } else {
2723 sltu(scratch, rs, r2);
2724 offset = shifted_branch_offset(L, false);
2725 beq(scratch, zero_reg, offset);
2726 }
2727 break;
2728 case Uless:
2729 if (r2.is(zero_reg)) {
2730 // No code needs to be emitted.
2731 return;
2732 } else {
2733 sltu(scratch, rs, r2);
2734 offset = shifted_branch_offset(L, false);
2735 bne(scratch, zero_reg, offset);
2736 }
2737 break;
2738 case Uless_equal:
2739 if (r2.is(zero_reg)) {
2740 offset = shifted_branch_offset(L, false);
2741 beq(rs, zero_reg, offset);
2742 } else {
2743 sltu(scratch, r2, rs);
2744 offset = shifted_branch_offset(L, false);
2745 beq(scratch, zero_reg, offset);
2746 }
2747 break;
2748 default:
2749 UNREACHABLE();
2750 }
2751 } else {
2752 // Be careful to always use shifted_branch_offset only just before the
2753 // branch instruction, as the location will be remember for patching the
2754 // target.
2755 BlockTrampolinePoolScope block_trampoline_pool(this);
2756 switch (cond) {
2757 case cc_always:
2758 offset = shifted_branch_offset(L, false);
2759 b(offset);
2760 break;
2761 case eq:
2762 if (rt.imm64_ == 0) {
2763 offset = shifted_branch_offset(L, false);
2764 beq(rs, zero_reg, offset);
2765 } else {
2766 DCHECK(!scratch.is(rs));
2767 r2 = scratch;
2768 li(r2, rt);
2769 offset = shifted_branch_offset(L, false);
2770 beq(rs, r2, offset);
2771 }
2772 break;
2773 case ne:
2774 if (rt.imm64_ == 0) {
2775 offset = shifted_branch_offset(L, false);
2776 bne(rs, zero_reg, offset);
2777 } else {
2778 DCHECK(!scratch.is(rs));
2779 r2 = scratch;
2780 li(r2, rt);
2781 offset = shifted_branch_offset(L, false);
2782 bne(rs, r2, offset);
2783 }
2784 break;
2785 // Signed comparison.
2786 case greater:
2787 if (rt.imm64_ == 0) {
2788 offset = shifted_branch_offset(L, false);
2789 bgtz(rs, offset);
2790 } else {
2791 DCHECK(!scratch.is(rs));
2792 r2 = scratch;
2793 li(r2, rt);
2794 slt(scratch, r2, rs);
2795 offset = shifted_branch_offset(L, false);
2796 bne(scratch, zero_reg, offset);
2797 }
2798 break;
2799 case greater_equal:
2800 if (rt.imm64_ == 0) {
2801 offset = shifted_branch_offset(L, false);
2802 bgez(rs, offset);
2803 } else if (is_int16(rt.imm64_)) {
2804 slti(scratch, rs, static_cast<int32_t>(rt.imm64_));
2805 offset = shifted_branch_offset(L, false);
2806 beq(scratch, zero_reg, offset);
2807 } else {
2808 DCHECK(!scratch.is(rs));
2809 r2 = scratch;
2810 li(r2, rt);
2811 slt(scratch, rs, r2);
2812 offset = shifted_branch_offset(L, false);
2813 beq(scratch, zero_reg, offset);
2814 }
2815 break;
2816 case less:
2817 if (rt.imm64_ == 0) {
2818 offset = shifted_branch_offset(L, false);
2819 bltz(rs, offset);
2820 } else if (is_int16(rt.imm64_)) {
2821 slti(scratch, rs, static_cast<int32_t>(rt.imm64_));
2822 offset = shifted_branch_offset(L, false);
2823 bne(scratch, zero_reg, offset);
2824 } else {
2825 DCHECK(!scratch.is(rs));
2826 r2 = scratch;
2827 li(r2, rt);
2828 slt(scratch, rs, r2);
2829 offset = shifted_branch_offset(L, false);
2830 bne(scratch, zero_reg, offset);
2831 }
2832 break;
2833 case less_equal:
2834 if (rt.imm64_ == 0) {
2835 offset = shifted_branch_offset(L, false);
2836 blez(rs, offset);
2837 } else {
2838 DCHECK(!scratch.is(rs));
2839 r2 = scratch;
2840 li(r2, rt);
2841 slt(scratch, r2, rs);
2842 offset = shifted_branch_offset(L, false);
2843 beq(scratch, zero_reg, offset);
2844 }
2845 break;
2846 // Unsigned comparison.
2847 case Ugreater:
2848 if (rt.imm64_ == 0) {
2849 offset = shifted_branch_offset(L, false);
2850 bne(rs, zero_reg, offset);
2851 } else {
2852 DCHECK(!scratch.is(rs));
2853 r2 = scratch;
2854 li(r2, rt);
2855 sltu(scratch, r2, rs);
2856 offset = shifted_branch_offset(L, false);
2857 bne(scratch, zero_reg, offset);
2858 }
2859 break;
2860 case Ugreater_equal:
2861 if (rt.imm64_ == 0) {
2862 offset = shifted_branch_offset(L, false);
2863 b(offset);
2864 } else if (is_int16(rt.imm64_)) {
2865 sltiu(scratch, rs, static_cast<int32_t>(rt.imm64_));
2866 offset = shifted_branch_offset(L, false);
2867 beq(scratch, zero_reg, offset);
2868 } else {
2869 DCHECK(!scratch.is(rs));
2870 r2 = scratch;
2871 li(r2, rt);
2872 sltu(scratch, rs, r2);
2873 offset = shifted_branch_offset(L, false);
2874 beq(scratch, zero_reg, offset);
2875 }
2876 break;
2877 case Uless:
2878 if (rt.imm64_ == 0) {
2879 // No code needs to be emitted.
2880 return;
2881 } else if (is_int16(rt.imm64_)) {
2882 sltiu(scratch, rs, static_cast<int32_t>(rt.imm64_));
2883 offset = shifted_branch_offset(L, false);
2884 bne(scratch, zero_reg, offset);
2885 } else {
2886 DCHECK(!scratch.is(rs));
2887 r2 = scratch;
2888 li(r2, rt);
2889 sltu(scratch, rs, r2);
2890 offset = shifted_branch_offset(L, false);
2891 bne(scratch, zero_reg, offset);
2892 }
2893 break;
2894 case Uless_equal:
2895 if (rt.imm64_ == 0) {
2896 offset = shifted_branch_offset(L, false);
2897 beq(rs, zero_reg, offset);
2898 } else {
2899 DCHECK(!scratch.is(rs));
2900 r2 = scratch;
2901 li(r2, rt);
2902 sltu(scratch, r2, rs);
2903 offset = shifted_branch_offset(L, false);
2904 beq(scratch, zero_reg, offset);
2905 }
2906 break;
2907 default:
2908 UNREACHABLE();
2909 }
2910 }
2911 // Check that offset could actually hold on an int16_t.
2912 DCHECK(is_int16(offset));
2913 // Emit a nop in the branch delay slot if required.
2914 if (bdslot == PROTECT)
2915 nop();
2916 }
2917
2918
2919 void MacroAssembler::BranchAndLink(int16_t offset, BranchDelaySlot bdslot) {
2920 BranchAndLinkShort(offset, bdslot); 2917 BranchAndLinkShort(offset, bdslot);
2921 } 2918 }
2922 2919
2923 2920
2924 void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs, 2921 void MacroAssembler::BranchAndLink(int32_t offset, Condition cond, Register rs,
2925 const Operand& rt, 2922 const Operand& rt, BranchDelaySlot bdslot) {
2926 BranchDelaySlot bdslot) { 2923 bool is_near = BranchAndLinkShortCheck(offset, nullptr, cond, rs, rt, bdslot);
2927 BranchAndLinkShort(offset, cond, rs, rt, bdslot); 2924 DCHECK(is_near);
2925 USE(is_near);
2928 } 2926 }
2929 2927
2930 2928
2931 void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) { 2929 void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
2932 if (L->is_bound()) { 2930 if (L->is_bound()) {
2933 if (is_near(L)) { 2931 if (is_near_branch(L)) {
2934 BranchAndLinkShort(L, bdslot); 2932 BranchAndLinkShort(L, bdslot);
2935 } else { 2933 } else {
2936 Jal(L, bdslot); 2934 BranchAndLinkLong(L, bdslot);
2937 } 2935 }
2938 } else { 2936 } else {
2939 if (is_trampoline_emitted()) { 2937 if (is_trampoline_emitted()) {
2940 Jal(L, bdslot); 2938 BranchAndLinkLong(L, bdslot);
2941 } else { 2939 } else {
2942 BranchAndLinkShort(L, bdslot); 2940 BranchAndLinkShort(L, bdslot);
2943 } 2941 }
2944 } 2942 }
2945 } 2943 }
2946 2944
2947 2945
2948 void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs, 2946 void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
2949 const Operand& rt, 2947 const Operand& rt,
2950 BranchDelaySlot bdslot) { 2948 BranchDelaySlot bdslot) {
2951 if (L->is_bound()) { 2949 if (L->is_bound()) {
2952 if (is_near(L)) { 2950 if (!BranchAndLinkShortCheck(0, L, cond, rs, rt, bdslot)) {
2953 BranchAndLinkShort(L, cond, rs, rt, bdslot);
2954 } else {
2955 Label skip; 2951 Label skip;
2956 Condition neg_cond = NegateCondition(cond); 2952 Condition neg_cond = NegateCondition(cond);
2957 BranchShort(&skip, neg_cond, rs, rt); 2953 BranchShort(&skip, neg_cond, rs, rt);
2958 Jal(L, bdslot); 2954 BranchAndLinkLong(L, bdslot);
2959 bind(&skip); 2955 bind(&skip);
2960 } 2956 }
2961 } else { 2957 } else {
2962 if (is_trampoline_emitted()) { 2958 if (is_trampoline_emitted()) {
2963 Label skip; 2959 Label skip;
2964 Condition neg_cond = NegateCondition(cond); 2960 Condition neg_cond = NegateCondition(cond);
2965 BranchShort(&skip, neg_cond, rs, rt); 2961 BranchShort(&skip, neg_cond, rs, rt);
2966 Jal(L, bdslot); 2962 BranchAndLinkLong(L, bdslot);
2967 bind(&skip); 2963 bind(&skip);
2968 } else { 2964 } else {
2969 BranchAndLinkShort(L, cond, rs, rt, bdslot); 2965 BranchAndLinkShortCheck(0, L, cond, rs, rt, bdslot);
2970 } 2966 }
2971 } 2967 }
2972 } 2968 }
2973 2969
2974 2970
2975 // We need to use a bgezal or bltzal, but they can't be used directly with the 2971 void MacroAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
2976 // slt instructions. We could use sub or add instead but we would miss overflow 2972 BranchDelaySlot bdslot) {
2977 // cases, so we keep slt and add an intermediate third instruction. 2973 DCHECK(L == nullptr || offset == 0);
2978 void MacroAssembler::BranchAndLinkShort(int16_t offset, 2974 offset = GetOffset(offset, L, OffsetSize::kOffset16);
2979 BranchDelaySlot bdslot) {
2980 bal(offset); 2975 bal(offset);
2981 2976
2982 // Emit a nop in the branch delay slot if required. 2977 // Emit a nop in the branch delay slot if required.
2983 if (bdslot == PROTECT) 2978 if (bdslot == PROTECT)
2984 nop(); 2979 nop();
2985 } 2980 }
2986 2981
2987 2982
2988 void MacroAssembler::BranchAndLinkShort(int16_t offset, Condition cond, 2983 void MacroAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L) {
2989 Register rs, const Operand& rt, 2984 DCHECK(L == nullptr || offset == 0);
2985 offset = GetOffset(offset, L, OffsetSize::kOffset26);
2986 balc(offset);
2987 }
2988
2989
2990 void MacroAssembler::BranchAndLinkShort(int32_t offset,
2990 BranchDelaySlot bdslot) { 2991 BranchDelaySlot bdslot) {
2991 BRANCH_ARGS_CHECK(cond, rs, rt); 2992 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
2992 Register r2 = no_reg; 2993 DCHECK(is_int26(offset));
2993 Register scratch = at; 2994 BranchAndLinkShortHelperR6(offset, nullptr);
2994 2995 } else {
2995 if (rt.is_reg()) { 2996 DCHECK(is_int16(offset));
2996 r2 = rt.rm_; 2997 BranchAndLinkShortHelper(offset, nullptr, bdslot);
2997 } else if (cond != cc_always) { 2998 }
2998 r2 = scratch; 2999 }
2999 li(r2, rt); 3000
3000 } 3001
3001 3002 void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
3002 { 3003 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
3003 BlockTrampolinePoolScope block_trampoline_pool(this); 3004 BranchAndLinkShortHelperR6(0, L);
3004 switch (cond) { 3005 } else {
3005 case cc_always: 3006 BranchAndLinkShortHelper(0, L, bdslot);
3006 bal(offset); 3007 }
3007 break; 3008 }
3008 case eq: 3009
3009 bne(rs, r2, 2); 3010
3010 nop(); 3011 bool MacroAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
3011 bal(offset); 3012 Condition cond, Register rs,
3012 break; 3013 const Operand& rt) {
3013 case ne: 3014 DCHECK(L == nullptr || offset == 0);
3014 beq(rs, r2, 2); 3015 Register scratch = rs.is(at) ? t8 : at;
3015 nop(); 3016 OffsetSize bits = OffsetSize::kOffset16;
3016 bal(offset); 3017
3017 break; 3018 BlockTrampolinePoolScope block_trampoline_pool(this);
3018 3019 DCHECK((cond == cc_always && is_int26(offset)) || is_int16(offset));
3019 // Signed comparison. 3020 switch (cond) {
3020 case greater: 3021 case cc_always:
3021 // rs > rt 3022 bits = OffsetSize::kOffset26;
3022 slt(scratch, r2, rs); 3023 if (!is_near(L, bits)) return false;
3023 beq(scratch, zero_reg, 2); 3024 offset = GetOffset(offset, L, bits);
3024 nop(); 3025 balc(offset);
3025 bal(offset); 3026 break;
3026 break; 3027 case eq:
3027 case greater_equal: 3028 if (!is_near(L, bits)) return false;
3028 // rs >= rt 3029 Subu(scratch, rs, rt);
3029 slt(scratch, rs, r2); 3030 offset = GetOffset(offset, L, bits);
3030 bne(scratch, zero_reg, 2); 3031 beqzalc(scratch, offset);
3031 nop(); 3032 break;
3032 bal(offset); 3033 case ne:
3033 break; 3034 if (!is_near(L, bits)) return false;
3034 case less: 3035 Subu(scratch, rs, rt);
3035 // rs < r2 3036 offset = GetOffset(offset, L, bits);
3036 slt(scratch, rs, r2); 3037 bnezalc(scratch, offset);
3037 bne(scratch, zero_reg, 2); 3038 break;
3038 nop(); 3039
3039 bal(offset); 3040 // Signed comparison.
3040 break; 3041 case greater:
3041 case less_equal: 3042 // rs > rt
3042 // rs <= r2 3043 if (rs.code() == rt.rm_.reg_code) {
3043 slt(scratch, r2, rs); 3044 break; // No code needs to be emitted.
3044 bne(scratch, zero_reg, 2); 3045 } else if (rs.is(zero_reg)) {
3045 nop(); 3046 if (!is_near(L, bits)) return false;
3046 bal(offset); 3047 scratch = GetRtAsRegisterHelper(rt, scratch);
3047 break; 3048 offset = GetOffset(offset, L, bits);
3048 3049 bltzalc(scratch, offset);
3049 3050 } else if (IsZero(rt)) {
3050 // Unsigned comparison. 3051 if (!is_near(L, bits)) return false;
3051 case Ugreater: 3052 offset = GetOffset(offset, L, bits);
3052 // rs > rt 3053 bgtzalc(rs, offset);
3053 sltu(scratch, r2, rs); 3054 } else {
3054 beq(scratch, zero_reg, 2); 3055 if (!is_near(L, bits)) return false;
3055 nop(); 3056 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3056 bal(offset); 3057 offset = GetOffset(offset, L, bits);
3057 break; 3058 bnezalc(scratch, offset);
3058 case Ugreater_equal: 3059 }
3059 // rs >= rt 3060 break;
3060 sltu(scratch, rs, r2); 3061 case greater_equal:
3061 bne(scratch, zero_reg, 2); 3062 // rs >= rt
3062 nop(); 3063 if (rs.code() == rt.rm_.reg_code) {
3063 bal(offset); 3064 bits = OffsetSize::kOffset26;
3064 break; 3065 if (!is_near(L, bits)) return false;
3065 case Uless: 3066 offset = GetOffset(offset, L, bits);
3066 // rs < r2 3067 balc(offset);
3067 sltu(scratch, rs, r2); 3068 } else if (rs.is(zero_reg)) {
3068 bne(scratch, zero_reg, 2); 3069 if (!is_near(L, bits)) return false;
3069 nop(); 3070 scratch = GetRtAsRegisterHelper(rt, scratch);
3070 bal(offset); 3071 offset = GetOffset(offset, L, bits);
3071 break; 3072 blezalc(scratch, offset);
3072 case Uless_equal: 3073 } else if (IsZero(rt)) {
3073 // rs <= r2 3074 if (!is_near(L, bits)) return false;
3074 sltu(scratch, r2, rs); 3075 offset = GetOffset(offset, L, bits);
3075 bne(scratch, zero_reg, 2); 3076 bgezalc(rs, offset);
3076 nop(); 3077 } else {
3077 bal(offset); 3078 if (!is_near(L, bits)) return false;
3078 break; 3079 Slt(scratch, rs, rt);
3079 default: 3080 offset = GetOffset(offset, L, bits);
3080 UNREACHABLE(); 3081 beqzalc(scratch, offset);
3081 } 3082 }
3082 } 3083 break;
3084 case less:
3085 // rs < rt
3086 if (rs.code() == rt.rm_.reg_code) {
3087 break; // No code needs to be emitted.
3088 } else if (rs.is(zero_reg)) {
3089 if (!is_near(L, bits)) return false;
3090 scratch = GetRtAsRegisterHelper(rt, scratch);
3091 offset = GetOffset(offset, L, bits);
3092 bgtzalc(scratch, offset);
3093 } else if (IsZero(rt)) {
3094 if (!is_near(L, bits)) return false;
3095 offset = GetOffset(offset, L, bits);
3096 bltzalc(rs, offset);
3097 } else {
3098 if (!is_near(L, bits)) return false;
3099 Slt(scratch, rs, rt);
3100 offset = GetOffset(offset, L, bits);
3101 bnezalc(scratch, offset);
3102 }
3103 break;
3104 case less_equal:
3105 // rs <= r2
3106 if (rs.code() == rt.rm_.reg_code) {
3107 bits = OffsetSize::kOffset26;
3108 if (!is_near(L, bits)) return false;
3109 offset = GetOffset(offset, L, bits);
3110 balc(offset);
3111 } else if (rs.is(zero_reg)) {
3112 if (!is_near(L, bits)) return false;
3113 scratch = GetRtAsRegisterHelper(rt, scratch);
3114 offset = GetOffset(offset, L, bits);
3115 bgezalc(scratch, offset);
3116 } else if (IsZero(rt)) {
3117 if (!is_near(L, bits)) return false;
3118 offset = GetOffset(offset, L, bits);
3119 blezalc(rs, offset);
3120 } else {
3121 if (!is_near(L, bits)) return false;
3122 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3123 offset = GetOffset(offset, L, bits);
3124 beqzalc(scratch, offset);
3125 }
3126 break;
3127
3128
3129 // Unsigned comparison.
3130 case Ugreater:
3131 // rs > r2
3132 if (!is_near(L, bits)) return false;
3133 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3134 offset = GetOffset(offset, L, bits);
3135 bnezalc(scratch, offset);
3136 break;
3137 case Ugreater_equal:
3138 // rs >= r2
3139 if (!is_near(L, bits)) return false;
3140 Sltu(scratch, rs, rt);
3141 offset = GetOffset(offset, L, bits);
3142 beqzalc(scratch, offset);
3143 break;
3144 case Uless:
3145 // rs < r2
3146 if (!is_near(L, bits)) return false;
3147 Sltu(scratch, rs, rt);
3148 offset = GetOffset(offset, L, bits);
3149 bnezalc(scratch, offset);
3150 break;
3151 case Uless_equal:
3152 // rs <= r2
3153 if (!is_near(L, bits)) return false;
3154 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3155 offset = GetOffset(offset, L, bits);
3156 beqzalc(scratch, offset);
3157 break;
3158 default:
3159 UNREACHABLE();
3160 }
3161 return true;
3162 }
3163
3164
3165 // Pre r6 we need to use a bgezal or bltzal, but they can't be used directly
3166 // with the slt instructions. We could use sub or add instead but we would miss
3167 // overflow cases, so we keep slt and add an intermediate third instruction.
3168 bool MacroAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
3169 Condition cond, Register rs,
3170 const Operand& rt,
3171 BranchDelaySlot bdslot) {
3172 DCHECK(L == nullptr || offset == 0);
3173 if (!is_near(L, OffsetSize::kOffset16)) return false;
3174
3175 Register scratch = t8;
3176 BlockTrampolinePoolScope block_trampoline_pool(this);
3177
3178 switch (cond) {
3179 case cc_always:
3180 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3181 bal(offset);
3182 break;
3183 case eq:
3184 bne(rs, GetRtAsRegisterHelper(rt, scratch), 2);
3185 nop();
3186 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3187 bal(offset);
3188 break;
3189 case ne:
3190 beq(rs, GetRtAsRegisterHelper(rt, scratch), 2);
3191 nop();
3192 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3193 bal(offset);
3194 break;
3195
3196 // Signed comparison.
3197 case greater:
3198 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3199 addiu(scratch, scratch, -1);
3200 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3201 bgezal(scratch, offset);
3202 break;
3203 case greater_equal:
3204 Slt(scratch, rs, rt);
3205 addiu(scratch, scratch, -1);
3206 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3207 bltzal(scratch, offset);
3208 break;
3209 case less:
3210 Slt(scratch, rs, rt);
3211 addiu(scratch, scratch, -1);
3212 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3213 bgezal(scratch, offset);
3214 break;
3215 case less_equal:
3216 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3217 addiu(scratch, scratch, -1);
3218 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3219 bltzal(scratch, offset);
3220 break;
3221
3222 // Unsigned comparison.
3223 case Ugreater:
3224 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3225 addiu(scratch, scratch, -1);
3226 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3227 bgezal(scratch, offset);
3228 break;
3229 case Ugreater_equal:
3230 Sltu(scratch, rs, rt);
3231 addiu(scratch, scratch, -1);
3232 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3233 bltzal(scratch, offset);
3234 break;
3235 case Uless:
3236 Sltu(scratch, rs, rt);
3237 addiu(scratch, scratch, -1);
3238 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3239 bgezal(scratch, offset);
3240 break;
3241 case Uless_equal:
3242 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3243 addiu(scratch, scratch, -1);
3244 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3245 bltzal(scratch, offset);
3246 break;
3247
3248 default:
3249 UNREACHABLE();
3250 }
3251
3083 // Emit a nop in the branch delay slot if required. 3252 // Emit a nop in the branch delay slot if required.
3084 if (bdslot == PROTECT) 3253 if (bdslot == PROTECT)
3085 nop(); 3254 nop();
3086 } 3255
3087 3256 return true;
3088 3257 }
3089 void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) { 3258
3090 bal(shifted_branch_offset(L, false)); 3259
3091 3260 bool MacroAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L,
3092 // Emit a nop in the branch delay slot if required. 3261 Condition cond, Register rs,
3093 if (bdslot == PROTECT) 3262 const Operand& rt,
3094 nop(); 3263 BranchDelaySlot bdslot) {
3095 }
3096
3097
3098 void MacroAssembler::BranchAndLinkShort(Label* L, Condition cond, Register rs,
3099 const Operand& rt,
3100 BranchDelaySlot bdslot) {
3101 BRANCH_ARGS_CHECK(cond, rs, rt); 3264 BRANCH_ARGS_CHECK(cond, rs, rt);
3102 3265
3103 int32_t offset = 0; 3266 if (!L) {
3104 Register r2 = no_reg; 3267 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
3105 Register scratch = at; 3268 DCHECK(is_int26(offset));
3106 if (rt.is_reg()) { 3269 return BranchAndLinkShortHelperR6(offset, nullptr, cond, rs, rt);
3107 r2 = rt.rm_; 3270 } else {
3108 } else if (cond != cc_always) { 3271 DCHECK(is_int16(offset));
3109 r2 = scratch; 3272 return BranchAndLinkShortHelper(offset, nullptr, cond, rs, rt, bdslot);
3110 li(r2, rt);
3111 }
3112
3113 {
3114 BlockTrampolinePoolScope block_trampoline_pool(this);
3115 switch (cond) {
3116 case cc_always:
3117 offset = shifted_branch_offset(L, false);
3118 bal(offset);
3119 break;
3120 case eq:
3121 bne(rs, r2, 2);
3122 nop();
3123 offset = shifted_branch_offset(L, false);
3124 bal(offset);
3125 break;
3126 case ne:
3127 beq(rs, r2, 2);
3128 nop();
3129 offset = shifted_branch_offset(L, false);
3130 bal(offset);
3131 break;
3132
3133 // Signed comparison.
3134 case greater:
3135 // rs > rt
3136 slt(scratch, r2, rs);
3137 beq(scratch, zero_reg, 2);
3138 nop();
3139 offset = shifted_branch_offset(L, false);
3140 bal(offset);
3141 break;
3142 case greater_equal:
3143 // rs >= rt
3144 slt(scratch, rs, r2);
3145 bne(scratch, zero_reg, 2);
3146 nop();
3147 offset = shifted_branch_offset(L, false);
3148 bal(offset);
3149 break;
3150 case less:
3151 // rs < r2
3152 slt(scratch, rs, r2);
3153 bne(scratch, zero_reg, 2);
3154 nop();
3155 offset = shifted_branch_offset(L, false);
3156 bal(offset);
3157 break;
3158 case less_equal:
3159 // rs <= r2
3160 slt(scratch, r2, rs);
3161 bne(scratch, zero_reg, 2);
3162 nop();
3163 offset = shifted_branch_offset(L, false);
3164 bal(offset);
3165 break;
3166
3167
3168 // Unsigned comparison.
3169 case Ugreater:
3170 // rs > rt
3171 sltu(scratch, r2, rs);
3172 beq(scratch, zero_reg, 2);
3173 nop();
3174 offset = shifted_branch_offset(L, false);
3175 bal(offset);
3176 break;
3177 case Ugreater_equal:
3178 // rs >= rt
3179 sltu(scratch, rs, r2);
3180 bne(scratch, zero_reg, 2);
3181 nop();
3182 offset = shifted_branch_offset(L, false);
3183 bal(offset);
3184 break;
3185 case Uless:
3186 // rs < r2
3187 sltu(scratch, rs, r2);
3188 bne(scratch, zero_reg, 2);
3189 nop();
3190 offset = shifted_branch_offset(L, false);
3191 bal(offset);
3192 break;
3193 case Uless_equal:
3194 // rs <= r2
3195 sltu(scratch, r2, rs);
3196 bne(scratch, zero_reg, 2);
3197 nop();
3198 offset = shifted_branch_offset(L, false);
3199 bal(offset);
3200 break;
3201
3202 default:
3203 UNREACHABLE();
3204 } 3273 }
3205 } 3274 } else {
3206 // Check that offset could actually hold on an int16_t. 3275 DCHECK(offset == 0);
3207 DCHECK(is_int16(offset)); 3276 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
3208 3277 return BranchAndLinkShortHelperR6(0, L, cond, rs, rt);
3209 // Emit a nop in the branch delay slot if required. 3278 } else {
3210 if (bdslot == PROTECT) 3279 return BranchAndLinkShortHelper(0, L, cond, rs, rt, bdslot);
3211 nop(); 3280 }
3212 } 3281 }
3213 3282 return false;
3214 3283 }
3284
3285
3215 void MacroAssembler::Jump(Register target, 3286 void MacroAssembler::Jump(Register target,
3216 Condition cond, 3287 Condition cond,
3217 Register rs, 3288 Register rs,
3218 const Operand& rt, 3289 const Operand& rt,
3219 BranchDelaySlot bd) { 3290 BranchDelaySlot bd) {
3220 BlockTrampolinePoolScope block_trampoline_pool(this); 3291 BlockTrampolinePoolScope block_trampoline_pool(this);
3221 if (cond == cc_always) { 3292 if (cond == cc_always) {
3222 jr(target); 3293 jr(target);
3223 } else { 3294 } else {
3224 BRANCH_ARGS_CHECK(cond, rs, rt); 3295 BRANCH_ARGS_CHECK(cond, rs, rt);
(...skipping 66 matching lines...) Expand 10 before | Expand all | Expand 10 after
3291 return size * kInstrSize; 3362 return size * kInstrSize;
3292 } 3363 }
3293 3364
3294 3365
3295 // Note: To call gcc-compiled C code on mips, you must call thru t9. 3366 // Note: To call gcc-compiled C code on mips, you must call thru t9.
3296 void MacroAssembler::Call(Register target, 3367 void MacroAssembler::Call(Register target,
3297 Condition cond, 3368 Condition cond,
3298 Register rs, 3369 Register rs,
3299 const Operand& rt, 3370 const Operand& rt,
3300 BranchDelaySlot bd) { 3371 BranchDelaySlot bd) {
3372 #ifdef DEBUG
3373 int size = IsPrevInstrCompactBranch() ? kInstrSize : 0;
3374 #endif
3375
3301 BlockTrampolinePoolScope block_trampoline_pool(this); 3376 BlockTrampolinePoolScope block_trampoline_pool(this);
3302 Label start; 3377 Label start;
3303 bind(&start); 3378 bind(&start);
3304 if (cond == cc_always) { 3379 if (cond == cc_always) {
3305 jalr(target); 3380 jalr(target);
3306 } else { 3381 } else {
3307 BRANCH_ARGS_CHECK(cond, rs, rt); 3382 BRANCH_ARGS_CHECK(cond, rs, rt);
3308 Branch(2, NegateCondition(cond), rs, rt); 3383 Branch(2, NegateCondition(cond), rs, rt);
3309 jalr(target); 3384 jalr(target);
3310 } 3385 }
3311 // Emit a nop in the branch delay slot if required. 3386 // Emit a nop in the branch delay slot if required.
3312 if (bd == PROTECT) 3387 if (bd == PROTECT)
3313 nop(); 3388 nop();
3314 3389
3315 DCHECK_EQ(CallSize(target, cond, rs, rt, bd), 3390 #ifdef DEBUG
3316 SizeOfCodeGeneratedSince(&start)); 3391 CHECK_EQ(size + CallSize(target, cond, rs, rt, bd),
3392 SizeOfCodeGeneratedSince(&start));
3393 #endif
3317 } 3394 }
3318 3395
3319 3396
3320 int MacroAssembler::CallSize(Address target, 3397 int MacroAssembler::CallSize(Address target,
3321 RelocInfo::Mode rmode, 3398 RelocInfo::Mode rmode,
3322 Condition cond, 3399 Condition cond,
3323 Register rs, 3400 Register rs,
3324 const Operand& rt, 3401 const Operand& rt,
3325 BranchDelaySlot bd) { 3402 BranchDelaySlot bd) {
3326 int size = CallSize(t9, cond, rs, rt, bd); 3403 int size = CallSize(t9, cond, rs, rt, bd);
(...skipping 57 matching lines...) Expand 10 before | Expand all | Expand 10 after
3384 3461
3385 3462
3386 void MacroAssembler::Ret(Condition cond, 3463 void MacroAssembler::Ret(Condition cond,
3387 Register rs, 3464 Register rs,
3388 const Operand& rt, 3465 const Operand& rt,
3389 BranchDelaySlot bd) { 3466 BranchDelaySlot bd) {
3390 Jump(ra, cond, rs, rt, bd); 3467 Jump(ra, cond, rs, rt, bd);
3391 } 3468 }
3392 3469
3393 3470
3394 void MacroAssembler::J(Label* L, BranchDelaySlot bdslot) { 3471 void MacroAssembler::BranchLong(Label* L, BranchDelaySlot bdslot) {
3395 BlockTrampolinePoolScope block_trampoline_pool(this); 3472 if (kArchVariant == kMips64r6 && bdslot == PROTECT &&
3396 { 3473 (!L->is_bound() || is_near_r6(L))) {
3397 BlockGrowBufferScope block_buf_growth(this); 3474 BranchShortHelperR6(0, L);
3398 // Buffer growth (and relocation) must be blocked for internal references 3475 } else {
3399 // until associated instructions are emitted and available to be patched. 3476 EmitForbiddenSlotInstruction();
3400 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED); 3477 BlockTrampolinePoolScope block_trampoline_pool(this);
3401 j(L); 3478 {
3479 BlockGrowBufferScope block_buf_growth(this);
3480 // Buffer growth (and relocation) must be blocked for internal references
3481 // until associated instructions are emitted and available to be patched.
3482 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
3483 j(L);
3484 }
3485 // Emit a nop in the branch delay slot if required.
3486 if (bdslot == PROTECT) nop();
3402 } 3487 }
3403 // Emit a nop in the branch delay slot if required.
3404 if (bdslot == PROTECT) nop();
3405 } 3488 }
3406 3489
3407 3490
3408 void MacroAssembler::Jal(Label* L, BranchDelaySlot bdslot) { 3491 void MacroAssembler::BranchAndLinkLong(Label* L, BranchDelaySlot bdslot) {
3409 BlockTrampolinePoolScope block_trampoline_pool(this); 3492 if (kArchVariant == kMips64r6 && bdslot == PROTECT &&
3410 { 3493 (!L->is_bound() || is_near_r6(L))) {
3411 BlockGrowBufferScope block_buf_growth(this); 3494 BranchAndLinkShortHelperR6(0, L);
3412 // Buffer growth (and relocation) must be blocked for internal references 3495 } else {
3413 // until associated instructions are emitted and available to be patched. 3496 EmitForbiddenSlotInstruction();
3414 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED); 3497 BlockTrampolinePoolScope block_trampoline_pool(this);
3415 jal(L); 3498 {
3499 BlockGrowBufferScope block_buf_growth(this);
3500 // Buffer growth (and relocation) must be blocked for internal references
3501 // until associated instructions are emitted and available to be patched.
3502 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
3503 jal(L);
3504 }
3505 // Emit a nop in the branch delay slot if required.
3506 if (bdslot == PROTECT) nop();
3416 } 3507 }
3417 // Emit a nop in the branch delay slot if required.
3418 if (bdslot == PROTECT) nop();
3419 } 3508 }
3420 3509
3421 3510
3422 void MacroAssembler::Jr(Label* L, BranchDelaySlot bdslot) { 3511 void MacroAssembler::Jr(Label* L, BranchDelaySlot bdslot) {
3423 BlockTrampolinePoolScope block_trampoline_pool(this); 3512 BlockTrampolinePoolScope block_trampoline_pool(this);
3424 3513
3425 uint64_t imm64; 3514 uint64_t imm64;
3426 imm64 = jump_address(L); 3515 imm64 = jump_address(L);
3427 { BlockGrowBufferScope block_buf_growth(this); 3516 { BlockGrowBufferScope block_buf_growth(this);
3428 // Buffer growth (and relocation) must be blocked for internal references 3517 // Buffer growth (and relocation) must be blocked for internal references
(...skipping 2915 matching lines...) Expand 10 before | Expand all | Expand 10 after
6344 void CodePatcher::Emit(Instr instr) { 6433 void CodePatcher::Emit(Instr instr) {
6345 masm()->emit(instr); 6434 masm()->emit(instr);
6346 } 6435 }
6347 6436
6348 6437
6349 void CodePatcher::Emit(Address addr) { 6438 void CodePatcher::Emit(Address addr) {
6350 // masm()->emit(reinterpret_cast<Instr>(addr)); 6439 // masm()->emit(reinterpret_cast<Instr>(addr));
6351 } 6440 }
6352 6441
6353 6442
6354 void CodePatcher::ChangeBranchCondition(Condition cond) { 6443 void CodePatcher::ChangeBranchCondition(Instr current_instr,
6355 Instr instr = Assembler::instr_at(masm_.pc_); 6444 uint32_t new_opcode) {
6356 DCHECK(Assembler::IsBranch(instr)); 6445 current_instr = (current_instr & ~kOpcodeMask) | new_opcode;
6357 uint32_t opcode = Assembler::GetOpcodeField(instr); 6446 masm_.emit(current_instr);
6358 // Currently only the 'eq' and 'ne' cond values are supported and the simple
6359 // branch instructions (with opcode being the branch type).
6360 // There are some special cases (see Assembler::IsBranch()) so extending this
6361 // would be tricky.
6362 DCHECK(opcode == BEQ ||
6363 opcode == BNE ||
6364 opcode == BLEZ ||
6365 opcode == BGTZ ||
6366 opcode == BEQL ||
6367 opcode == BNEL ||
6368 opcode == BLEZL ||
6369 opcode == BGTZL);
6370 opcode = (cond == eq) ? BEQ : BNE;
6371 instr = (instr & ~kOpcodeMask) | opcode;
6372 masm_.emit(instr);
6373 } 6447 }
6374 6448
6375 6449
6376 void MacroAssembler::TruncatingDiv(Register result, 6450 void MacroAssembler::TruncatingDiv(Register result,
6377 Register dividend, 6451 Register dividend,
6378 int32_t divisor) { 6452 int32_t divisor) {
6379 DCHECK(!dividend.is(result)); 6453 DCHECK(!dividend.is(result));
6380 DCHECK(!dividend.is(at)); 6454 DCHECK(!dividend.is(at));
6381 DCHECK(!result.is(at)); 6455 DCHECK(!result.is(at));
6382 base::MagicNumbersForDivision<uint32_t> mag = 6456 base::MagicNumbersForDivision<uint32_t> mag =
(...skipping 10 matching lines...) Expand all
6393 if (mag.shift > 0) sra(result, result, mag.shift); 6467 if (mag.shift > 0) sra(result, result, mag.shift);
6394 srl(at, dividend, 31); 6468 srl(at, dividend, 31);
6395 Addu(result, result, Operand(at)); 6469 Addu(result, result, Operand(at));
6396 } 6470 }
6397 6471
6398 6472
6399 } // namespace internal 6473 } // namespace internal
6400 } // namespace v8 6474 } // namespace v8
6401 6475
6402 #endif // V8_TARGET_ARCH_MIPS64 6476 #endif // V8_TARGET_ARCH_MIPS64
OLDNEW
« no previous file with comments | « src/mips64/macro-assembler-mips64.h ('k') | src/mips64/simulator-mips64.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698