OLD | NEW |
1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
2 // | 2 // |
3 // Redistribution and use in source and binary forms, with or without | 3 // Redistribution and use in source and binary forms, with or without |
4 // modification, are permitted provided that the following conditions are | 4 // modification, are permitted provided that the following conditions are |
5 // met: | 5 // met: |
6 // | 6 // |
7 // * Redistributions of source code must retain the above copyright | 7 // * Redistributions of source code must retain the above copyright |
8 // notice, this list of conditions and the following disclaimer. | 8 // notice, this list of conditions and the following disclaimer. |
9 // * Redistributions in binary form must reproduce the above | 9 // * Redistributions in binary form must reproduce the above |
10 // copyright notice, this list of conditions and the following | 10 // copyright notice, this list of conditions and the following |
(...skipping 2086 matching lines...)
2097 | 2097 |
2098 return (bit7 | bit6 | bit5_to_0) << ImmFP_offset; | 2098 return (bit7 | bit6 | bit5_to_0) << ImmFP_offset; |
2099 } | 2099 } |
2100 | 2100 |
2101 | 2101 |
2102 // Code generation helpers. | 2102 // Code generation helpers. |
2103 void Assembler::MoveWide(const Register& rd, | 2103 void Assembler::MoveWide(const Register& rd, |
2104 uint64_t imm, | 2104 uint64_t imm, |
2105 int shift, | 2105 int shift, |
2106 MoveWideImmediateOp mov_op) { | 2106 MoveWideImmediateOp mov_op) { |
| 2107 // Ignore the top 32 bits of an immediate if we're moving to a W register. |
| 2108 if (rd.Is32Bits()) { |
| 2109 // Check that the top 32 bits are zero (a positive 32-bit number) or top |
| 2110 // 33 bits are one (a negative 32-bit number, sign extended to 64 bits). |
| 2111 ASSERT(((imm >> kWRegSizeInBits) == 0) || |
| 2112 ((imm >> (kWRegSizeInBits - 1)) == 0x1ffffffff)); |
| 2113 imm &= kWRegMask; |
| 2114 } |
| 2115 |
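
A minimal sketch of the two cases that ASSERT accepts, using constants of my own choosing (not from the patch):

    #include <cstdint>
    #include <cassert>

    void CheckWImmediate(uint64_t imm) {
      // Positive 32-bit value: the top 32 bits are zero.
      // Negative 32-bit value sign-extended to 64 bits: the top 33 bits are all
      // ones, so shifting right by kWRegSizeInBits - 1 == 31 yields 0x1ffffffff.
      assert(((imm >> 32) == 0) || ((imm >> 31) == 0x1ffffffffUL));
    }

    // CheckWImmediate(5) and CheckWImmediate(uint64_t{0} - 2) both pass;
    // CheckWImmediate(0x123456789UL) does not, because its top 32 bits are
    // neither all zero nor part of a sign extension.
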
2107 if (shift >= 0) { | 2116 if (shift >= 0) { |
2108 // Explicit shift specified. | 2117 // Explicit shift specified. |
2109 ASSERT((shift == 0) || (shift == 16) || (shift == 32) || (shift == 48)); | 2118 ASSERT((shift == 0) || (shift == 16) || (shift == 32) || (shift == 48)); |
2110 ASSERT(rd.Is64Bits() || (shift == 0) || (shift == 16)); | 2119 ASSERT(rd.Is64Bits() || (shift == 0) || (shift == 16)); |
2111 shift /= 16; | 2120 shift /= 16; |
2112 } else { | 2121 } else { |
2113 // Calculate a new immediate and shift combination to encode the immediate | 2122 // Calculate a new immediate and shift combination to encode the immediate |
2114 // argument. | 2123 // argument. |
2115 shift = 0; | 2124 shift = 0; |
2116 if ((imm & ~0xffffUL) == 0) { | 2125 if ((imm & ~0xffffUL) == 0) { |
(...skipping 385 matching lines...)
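
A hedged sketch of the halfword search that MoveWide's implicit-shift branch above performs (the 16-, 32- and 48-bit cases fall in the elided lines); the helper name and structure are mine, not the patch's:

    #include <cstdint>

    // Returns true if 'imm' fits entirely in one 16-bit halfword at a
    // 16-bit-aligned position, writing the encoded 'hw' field (0..3) to *hw.
    bool FitsSingleHalfword(uint64_t imm, int* hw) {
      for (int s = 0; s < 64; s += 16) {
        if ((imm & ~(uint64_t{0xffff} << s)) == 0) {
          *hw = s / 16;
          return true;
        }
      }
      return false;  // needs a MOVZ/MOVK sequence or a literal-pool load instead
    }
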
2502 // If it can not be encoded, the function returns false, and the values pointed | 2511 // If it can not be encoded, the function returns false, and the values pointed |
2503 // to by n, imm_s and imm_r are undefined. | 2512 // to by n, imm_s and imm_r are undefined. |
2504 bool Assembler::IsImmLogical(uint64_t value, | 2513 bool Assembler::IsImmLogical(uint64_t value, |
2505 unsigned width, | 2514 unsigned width, |
2506 unsigned* n, | 2515 unsigned* n, |
2507 unsigned* imm_s, | 2516 unsigned* imm_s, |
2508 unsigned* imm_r) { | 2517 unsigned* imm_r) { |
2509 ASSERT((n != NULL) && (imm_s != NULL) && (imm_r != NULL)); | 2518 ASSERT((n != NULL) && (imm_s != NULL) && (imm_r != NULL)); |
2510 ASSERT((width == kWRegSizeInBits) || (width == kXRegSizeInBits)); | 2519 ASSERT((width == kWRegSizeInBits) || (width == kXRegSizeInBits)); |
2511 | 2520 |
| 2521 bool negate = false; |
| 2522 |
2512 // Logical immediates are encoded using parameters n, imm_s and imm_r using | 2523 // Logical immediates are encoded using parameters n, imm_s and imm_r using |
2513 // the following table: | 2524 // the following table: |
2514 // | 2525 // |
2515 // N imms immr size S R | 2526 // N imms immr size S R |
2516 // 1 ssssss rrrrrr 64 UInt(ssssss) UInt(rrrrrr) | 2527 // 1 ssssss rrrrrr 64 UInt(ssssss) UInt(rrrrrr) |
2517 // 0 0sssss xrrrrr 32 UInt(sssss) UInt(rrrrr) | 2528 // 0 0sssss xrrrrr 32 UInt(sssss) UInt(rrrrr) |
2518 // 0 10ssss xxrrrr 16 UInt(ssss) UInt(rrrr) | 2529 // 0 10ssss xxrrrr 16 UInt(ssss) UInt(rrrr) |
2519 // 0 110sss xxxrrr 8 UInt(sss) UInt(rrr) | 2530 // 0 110sss xxxrrr 8 UInt(sss) UInt(rrr) |
2520 // 0 1110ss xxxxrr 4 UInt(ss) UInt(rr) | 2531 // 0 1110ss xxxxrr 4 UInt(ss) UInt(rr) |
2521 // 0 11110s xxxxxr 2 UInt(s) UInt(r) | 2532 // 0 11110s xxxxxr 2 UInt(s) UInt(r) |
2522 // (s bits must not be all set) | 2533 // (s bits must not be all set) |
2523 // | 2534 // |
2524 // A pattern is constructed of size bits, where the least significant S+1 | 2535 // A pattern is constructed of size bits, where the least significant S+1 bits |
2525 // bits are set. The pattern is rotated right by R, and repeated across a | 2536 // are set. The pattern is rotated right by R, and repeated across a 32 or |
2526 // 32 or 64-bit value, depending on destination register width. | 2537 // 64-bit value, depending on destination register width. |
2527 // | 2538 // |
2528 // To test if an arbitary immediate can be encoded using this scheme, an | 2539 // Put another way: the basic format of a logical immediate is a single |
2529 // iterative algorithm is used. | 2540 // contiguous stretch of 1 bits, repeated across the whole word at intervals |
| 2541 // given by a power of 2. To identify them quickly, we first locate the |
| 2542 // lowest stretch of 1 bits, then the next 1 bit above that; that combination |
| 2543 // is different for every logical immediate, so it gives us all the |
| 2544 // information we need to identify the only logical immediate that our input |
| 2545 // could be, and then we simply check if that's the value we actually have. |
2530 // | 2546 // |
2531 // TODO(mcapewel) This code does not consider using X/W register overlap to | 2547 // (The rotation parameter does give the possibility of the stretch of 1 bits |
2532 // support 64-bit immediates where the top 32-bits are zero, and the bottom | 2548 // going 'round the end' of the word. To deal with that, we observe that in |
2533 // 32-bits are an encodable logical immediate. | 2549 // any situation where that happens the bitwise NOT of the value is also a |
| 2550 // valid logical immediate. So we simply invert the input whenever its low bit |
| 2551 // is set, and then we know that the rotated case can't arise.) |
2534 | 2552 |
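
A small illustration of the inversion trick, with a constant chosen for the example (not taken from the patch):

    uint64_t value = 0xE000000000000003UL;  // five 1 bits wrapping around bit 63
    bool negate = (value & 1) != 0;         // low bit set, so invert
    if (negate) value = ~value;             // 0x1FFFFFFFFFFFFFFC
    // The inverted value is a single non-wrapping run of 59 set bits, which the
    // analysis below can handle; the (negate == true) adjustment at the end of
    // the function converts its encoding back to one for the original input.
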
2535 // 1. If the value has all set or all clear bits, it can't be encoded. | 2553 if (value & 1) { |
2536 if ((value == 0) || (value == 0xffffffffffffffffUL) || | 2554 // If the low bit is 1, negate the value, and set a flag to remember that we |
2537 ((width == kWRegSizeInBits) && (value == 0xffffffff))) { | 2555 // did (so that we can adjust the return values appropriately). |
| 2556 negate = true; |
| 2557 value = ~value; |
| 2558 } |
| 2559 |
| 2560 if (width == kWRegSizeInBits) { |
| 2561 // To handle 32-bit logical immediates, the very easiest thing is to repeat |
| 2562 // the input value twice to make a 64-bit word. The correct encoding of that |
| 2563 // as a logical immediate will also be the correct encoding of the 32-bit |
| 2564 // value. |
| 2565 |
| 2566 // The most-significant 32 bits may be non-zero (e.g. if negate is true), |
| 2567 // so shift the value left before duplicating it. |
| 2568 value <<= kWRegSizeInBits; |
| 2569 value |= value >> kWRegSizeInBits; |
| 2570 } |
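
A concrete instance of the widening step, assuming an illustrative W-register immediate of 0x00ff00ff:

    uint64_t value = 0x00ff00ffUL;  // example 32-bit input
    value <<= 32;                   // 0x00ff00ff00000000
    value |= value >> 32;           // 0x00ff00ff00ff00ff
    // Whatever n/imm_s/imm_r the rest of the function finds for this duplicated
    // 64-bit word is also the correct encoding of the original 32-bit value.
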
| 2571 |
| 2572 // The basic analysis idea: imagine our input word looks like this. |
| 2573 // |
| 2574 // 0011111000111110001111100011111000111110001111100011111000111110 |
| 2575 // c b a |
| 2576 // |<--d-->| |
| 2577 // |
| 2578 // We find the lowest set bit (as an actual power-of-2 value, not its index) |
| 2579 // and call it a. Then we add a to our original number, which wipes out the |
| 2580 // bottommost stretch of set bits and replaces it with a 1 carried into the |
| 2581 // next zero bit. Then we look for the new lowest set bit, which is in |
| 2582 // position b, and subtract it, so now our number is just like the original |
| 2583 // but with the lowest stretch of set bits completely gone. Now we find the |
| 2584 // lowest set bit again, which is position c in the diagram above. Then we'll |
| 2585 // measure the distance d between bit positions a and c (using CLZ), and that |
| 2586 // tells us that the only valid logical immediate that could possibly be equal |
| 2587 // to this number is the one in which a stretch of bits running from a to just |
| 2588 // below b is replicated every d bits. |
| 2589 uint64_t a = LargestPowerOf2Divisor(value); |
| 2590 uint64_t value_plus_a = value + a; |
| 2591 uint64_t b = LargestPowerOf2Divisor(value_plus_a); |
| 2592 uint64_t value_plus_a_minus_b = value_plus_a - b; |
| 2593 uint64_t c = LargestPowerOf2Divisor(value_plus_a_minus_b); |
| 2594 |
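
Running the comment's own pattern through these steps as a worked example (here LargestPowerOf2Divisor is assumed to behave like value & -value, i.e. isolate the lowest set bit):

    uint64_t v   = 0x3E3E3E3E3E3E3E3EUL;  // the pattern drawn in the diagram
    uint64_t a   = v & (0 - v);           // 0x2   (lowest set bit, position a)
    uint64_t vpa = v + a;                 // 0x3E3E3E3E3E3E3E40
    uint64_t b   = vpa & (0 - vpa);       // 0x40  (first bit above the run, b)
    uint64_t vab = vpa - b;               // 0x3E3E3E3E3E3E3E00
    uint64_t c   = vab & (0 - vab);       // 0x200 (start of the next run, c)
    // CountLeadingZeros gives clz(a) = 62 and clz(c) = 54, so d = 8: the stretch
    // of set bits repeats every eight bits, exactly as the diagram shows.
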
| 2595 int d, clz_a, out_n; |
| 2596 uint64_t mask; |
| 2597 |
| 2598 if (c != 0) { |
| 2599 // The general case, in which there is more than one stretch of set bits. |
| 2600 // Compute the repeat distance d, and set up a bitmask covering the basic |
| 2601 // unit of repetition (i.e. a word with the bottom d bits set). Also, in all |
| 2602 // of these cases the N bit of the output will be zero. |
| 2603 clz_a = CountLeadingZeros(a, kXRegSizeInBits); |
| 2604 int clz_c = CountLeadingZeros(c, kXRegSizeInBits); |
| 2605 d = clz_a - clz_c; |
| 2606 mask = ((V8_UINT64_C(1) << d) - 1); |
| 2607 out_n = 0; |
| 2608 } else { |
| 2609 // Handle degenerate cases. |
| 2610 // |
| 2611 // If any of those 'find lowest set bit' operations didn't find a set bit at |
| 2612 // all, then the word will have been zero thereafter, so in particular the |
| 2613 // last lowest_set_bit operation will have returned zero. So we can test for |
| 2614 // all the special case conditions in one go by seeing if c is zero. |
| 2615 if (a == 0) { |
| 2616 // The input was zero (or all 1 bits, which also ends up here after the |
| 2617 // inversion at the start of the function), for which we just return |
| 2618 // false. |
| 2619 return false; |
| 2620 } else { |
| 2621 // Otherwise, if c was zero but a was not, then there's just one stretch |
| 2622 // of set bits in our word, meaning that we have the trivial case of |
| 2623 // d == 64 and only one 'repetition'. Set up all the same variables as in |
| 2624 // the general case above, and set the N bit in the output. |
| 2625 clz_a = CountLeadingZeros(a, kXRegSizeInBits); |
| 2626 d = 64; |
| 2627 mask = ~V8_UINT64_C(0); |
| 2628 out_n = 1; |
| 2629 } |
| 2630 } |
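
For comparison, an illustrative single-run value such as 0x00000000000000f0 takes the degenerate path:

    uint64_t v   = 0xf0;
    uint64_t a   = v & (0 - v);                  // 0x10
    uint64_t vpa = v + a;                        // 0x100: adding a removed the only run
    uint64_t b   = vpa & (0 - vpa);              // 0x100
    uint64_t c   = (vpa - b) & (0 - (vpa - b));  // 0: nothing is left above the run
    // c == 0 with a != 0 lands in the else branch above: d = 64, mask = ~0, and
    // the N bit of the eventual encoding is 1.
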
| 2631 |
| 2632 // If the repeat period d is not a power of two, it can't be encoded. |
| 2633 if (!IS_POWER_OF_TWO(d)) { |
2538 return false; | 2634 return false; |
2539 } | 2635 } |
2540 | 2636 |
2541 unsigned lead_zero = CountLeadingZeros(value, width); | 2637 if (((b - a) & ~mask) != 0) { |
2542 unsigned lead_one = CountLeadingZeros(~value, width); | 2638 // If the bit stretch (b - a) does not fit within the mask derived from the |
2543 unsigned trail_zero = CountTrailingZeros(value, width); | 2639 // repeat period, then fail. |
2544 unsigned trail_one = CountTrailingZeros(~value, width); | |
2545 unsigned set_bits = CountSetBits(value, width); | |
2546 | |
2547 // The fixed bits in the immediate s field. | |
2548 // If width == 64 (X reg), start at 0xFFFFFF80. | |
2549 // If width == 32 (W reg), start at 0xFFFFFFC0, as the iteration for 64-bit | |
2550 // widths won't be executed. | |
2551 int imm_s_fixed = (width == kXRegSizeInBits) ? -128 : -64; | |
2552 int imm_s_mask = 0x3F; | |
2553 | |
2554 for (;;) { | |
2555 // 2. If the value is two bits wide, it can be encoded. | |
2556 if (width == 2) { | |
2557 *n = 0; | |
2558 *imm_s = 0x3C; | |
2559 *imm_r = (value & 3) - 1; | |
2560 return true; | |
2561 } | |
2562 | |
2563 *n = (width == 64) ? 1 : 0; | |
2564 *imm_s = ((imm_s_fixed | (set_bits - 1)) & imm_s_mask); | |
2565 if ((lead_zero + set_bits) == width) { | |
2566 *imm_r = 0; | |
2567 } else { | |
2568 *imm_r = (lead_zero > 0) ? (width - trail_zero) : lead_one; | |
2569 } | |
2570 | |
2571 // 3. If the sum of leading zeros, trailing zeros and set bits is equal to | |
2572 // the bit width of the value, it can be encoded. | |
2573 if (lead_zero + trail_zero + set_bits == width) { | |
2574 return true; | |
2575 } | |
2576 | |
2577 // 4. If the sum of leading ones, trailing ones and unset bits in the | |
2578 // value is equal to the bit width of the value, it can be encoded. | |
2579 if (lead_one + trail_one + (width - set_bits) == width) { | |
2580 return true; | |
2581 } | |
2582 | |
2583 // 5. If the most-significant half of the bitwise value is equal to the | |
2584 // least-significant half, return to step 2 using the least-significant | |
2585 // half of the value. | |
2586 uint64_t mask = (1UL << (width >> 1)) - 1; | |
2587 if ((value & mask) == ((value >> (width >> 1)) & mask)) { | |
2588 width >>= 1; | |
2589 set_bits >>= 1; | |
2590 imm_s_fixed >>= 1; | |
2591 continue; | |
2592 } | |
2593 | |
2594 // 6. Otherwise, the value can't be encoded. | |
2595 return false; | 2640 return false; |
2596 } | 2641 } |
| 2642 |
| 2643 // The only possible option is b - a repeated every d bits. Now we're going to |
| 2644 // actually construct the valid logical immediate derived from that |
| 2645 // specification, and see if it equals our original input. |
| 2646 // |
| 2647 // To repeat a value every d bits, we multiply it by a number of the form |
| 2648 // (1 + 2^d + 2^(2d) + ...), i.e. 0x0001000100010001 or similar. These can |
| 2649 // be derived using a table lookup on CLZ(d). |
| 2650 static const uint64_t multipliers[] = { |
| 2651 0x0000000000000001UL, |
| 2652 0x0000000100000001UL, |
| 2653 0x0001000100010001UL, |
| 2654 0x0101010101010101UL, |
| 2655 0x1111111111111111UL, |
| 2656 0x5555555555555555UL, |
| 2657 }; |
| 2658 int multiplier_idx = CountLeadingZeros(d, kXRegSizeInBits) - 57; |
| 2659 // Ensure that the index to the multipliers array is within bounds. |
| 2660 ASSERT((multiplier_idx >= 0) && |
| 2661 (static_cast<size_t>(multiplier_idx) < |
| 2662 (sizeof(multipliers) / sizeof(multipliers[0])))); |
| 2663 uint64_t multiplier = multipliers[multiplier_idx]; |
| 2664 uint64_t candidate = (b - a) * multiplier; |
| 2665 |
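
Continuing the diagram's worked example from earlier:

    //   b - a          = 0x40 - 0x2 = 0x3E   (the five-bit stretch, moved to bit 0)
    //   d              = 8, so CountLeadingZeros(8, 64) = 60 and
    //   multiplier_idx = 60 - 57 = 3, selecting 0x0101010101010101
    //   candidate      = 0x3E * 0x0101010101010101 = 0x3E3E3E3E3E3E3E3E
    // which equals the input, so the value is indeed a valid logical immediate.
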
| 2666 if (value != candidate) { |
| 2667 // The candidate pattern doesn't match our input value, so fail. |
| 2668 return false; |
| 2669 } |
| 2670 |
| 2671 // We have a match! This is a valid logical immediate, so now we have to |
| 2672 // construct the bits and pieces of the instruction encoding that generates |
| 2673 // it. |
| 2674 |
| 2675 // Count the set bits in our basic stretch. The special case of clz(0) == -1 |
| 2676 // makes the answer come out right for stretches that reach the very top of |
| 2677 // the word (e.g. numbers like 0xffffc00000000000). |
| 2678 int clz_b = (b == 0) ? -1 : CountLeadingZeros(b, kXRegSizeInBits); |
| 2679 int s = clz_a - clz_b; |
| 2680 |
| 2681 // Decide how many bits to rotate right by, to put the low bit of that basic |
| 2682 // stretch in position a. |
| 2683 int r; |
| 2684 if (negate) { |
| 2685 // If we inverted the input right at the start of this function, here's |
| 2686 // where we compensate: the number of set bits becomes the number of clear |
| 2687 // bits, and the rotation count is based on position b rather than position |
| 2688 // a (since b is the location of the 'lowest' 1 bit after inversion). |
| 2689 s = d - s; |
| 2690 r = (clz_b + 1) & (d - 1); |
| 2691 } else { |
| 2692 r = (clz_a + 1) & (d - 1); |
| 2693 } |
| 2694 |
| 2695 // Now we're done, except for having to encode the S output in such a way that |
| 2696 // it gives both the number of set bits and the length of the repeated |
| 2697 // segment. The s field is encoded like this: |
| 2698 // |
| 2699 // imms size S |
| 2700 // ssssss 64 UInt(ssssss) |
| 2701 // 0sssss 32 UInt(sssss) |
| 2702 // 10ssss 16 UInt(ssss) |
| 2703 // 110sss 8 UInt(sss) |
| 2704 // 1110ss 4 UInt(ss) |
| 2705 // 11110s 2 UInt(s) |
| 2706 // |
| 2707 // So we 'or' (-d << 1) with (s - 1) to form imms. |
| 2708 *n = out_n; |
| 2709 *imm_s = ((-d << 1) | (s - 1)) & 0x3f; |
| 2710 *imm_r = r; |
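
Finishing the worked example (tracing the code above for 0x3E3E3E3E3E3E3E3E):

    //   s    = clz_a - clz_b = 62 - 57 = 5 set bits in the basic stretch
    //   r    = (clz_a + 1) & (d - 1) = 63 & 7 = 7   (negate is false here)
    //   imms = ((-8 << 1) | (5 - 1)) & 0x3f = 0b110100, the "110sss" row of the
    //          table with sss = 4
    // so the outputs are n = 0, imm_s = 0x34 and imm_r = 7: five set bits
    // repeated every eight bits, rotated right by seven.
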
| 2711 |
| 2712 return true; |
2597 } | 2713 } |
2598 | 2714 |
2599 | 2715 |
2600 bool Assembler::IsImmConditionalCompare(int64_t immediate) { | 2716 bool Assembler::IsImmConditionalCompare(int64_t immediate) { |
2601 return is_uint5(immediate); | 2717 return is_uint5(immediate); |
2602 } | 2718 } |
2603 | 2719 |
2604 | 2720 |
2605 bool Assembler::IsImmFP32(float imm) { | 2721 bool Assembler::IsImmFP32(float imm) { |
2606 // Valid values will have the form: | 2722 // Valid values will have the form: |
(...skipping 431 matching lines...)
3038 adr(rd, 0); | 3154 adr(rd, 0); |
3039 MovInt64(scratch, target_offset); | 3155 MovInt64(scratch, target_offset); |
3040 add(rd, rd, scratch); | 3156 add(rd, rd, scratch); |
3041 } | 3157 } |
3042 } | 3158 } |
3043 | 3159 |
3044 | 3160 |
3045 } } // namespace v8::internal | 3161 } } // namespace v8::internal |
3046 | 3162 |
3047 #endif // V8_TARGET_ARCH_ARM64 | 3163 #endif // V8_TARGET_ARCH_ARM64 |