OLD | NEW |
1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #if V8_TARGET_ARCH_ARM64 | 5 #if V8_TARGET_ARCH_ARM64 |
6 | 6 |
7 #include "src/arm64/utils-arm64.h" | 7 #include "src/arm64/utils-arm64.h" |
8 | 8 |
9 | 9 |
10 namespace v8 { | 10 namespace v8 { |
11 namespace internal { | 11 namespace internal { |
12 | 12 |
13 #define __ assm-> | 13 #define __ assm-> |
14 | 14 |
| 15 uint32_t float_sign(float val) { |
| 16 uint32_t bits = bit_cast<uint32_t>(val); |
| 17 return unsigned_bitextract_32(31, 31, bits); |
| 18 } |
| 19 |
| 20 uint32_t float_exp(float val) { |
| 21 uint32_t bits = bit_cast<uint32_t>(val); |
| 22 return unsigned_bitextract_32(30, 23, bits); |
| 23 } |
| 24 |
| 25 uint32_t float_mantissa(float val) { |
| 26 uint32_t bits = bit_cast<uint32_t>(val); |
| 27 return unsigned_bitextract_32(22, 0, bits); |
| 28 } |
| 29 |
| 30 uint32_t double_sign(double val) { |
| 31 uint64_t bits = bit_cast<uint64_t>(val); |
| 32 return static_cast<uint32_t>(unsigned_bitextract_64(63, 63, bits)); |
| 33 } |
| 34 |
| 35 uint32_t double_exp(double val) { |
| 36 uint64_t bits = bit_cast<uint64_t>(val); |
| 37 return static_cast<uint32_t>(unsigned_bitextract_64(62, 52, bits)); |
| 38 } |
| 39 |
| 40 uint64_t double_mantissa(double val) { |
| 41 uint64_t bits = bit_cast<uint64_t>(val); |
| 42 return unsigned_bitextract_64(51, 0, bits); |
| 43 } |
| 44 |
| 45 float float_pack(uint32_t sign, uint32_t exp, uint32_t mantissa) { |
| 46 uint32_t bits = sign << kFloatExponentBits | exp; |
| 47 return bit_cast<float>((bits << kFloatMantissaBits) | mantissa); |
| 48 } |
| 49 |
| 50 double double_pack(uint64_t sign, uint64_t exp, uint64_t mantissa) { |
| 51 uint64_t bits = sign << kDoubleExponentBits | exp; |
| 52 return bit_cast<double>((bits << kDoubleMantissaBits) | mantissa); |
| 53 } |
| 54 |
| 55 int float16classify(float16 value) { |
| 56 const uint16_t exponent_max = (1 << kFloat16ExponentBits) - 1; |
| 57 const uint16_t exponent_mask = exponent_max << kFloat16MantissaBits; |
| 58 const uint16_t mantissa_mask = (1 << kFloat16MantissaBits) - 1; |
| 59 |
| 60 const uint16_t exponent = (value & exponent_mask) >> kFloat16MantissaBits; |
| 61 const uint16_t mantissa = value & mantissa_mask; |
| 62 if (exponent == 0) { |
| 63 if (mantissa == 0) { |
| 64 return FP_ZERO; |
| 65 } |
| 66 return FP_SUBNORMAL; |
| 67 } else if (exponent == exponent_max) { |
| 68 if (mantissa == 0) { |
| 69 return FP_INFINITE; |
| 70 } |
| 71 return FP_NAN; |
| 72 } |
| 73 return FP_NORMAL; |
| 74 } |
15 | 75 |
16 int CountLeadingZeros(uint64_t value, int width) { | 76 int CountLeadingZeros(uint64_t value, int width) { |
17 // TODO(jbramley): Optimize this for ARM64 hosts. | 77 DCHECK(base::bits::IsPowerOfTwo32(width) && (width <= 64)); |
18 DCHECK((width == 32) || (width == 64)); | 78 if (value == 0) { |
19 int count = 0; | 79 return width; |
20 uint64_t bit_test = 1UL << (width - 1); | |
21 while ((count < width) && ((bit_test & value) == 0)) { | |
22 count++; | |
23 bit_test >>= 1; | |
24 } | 80 } |
25 return count; | 81 return base::bits::CountLeadingZeros64(value << (64 - width)); |
26 } | 82 } |
27 | 83 |
28 | 84 |
29 int CountLeadingSignBits(int64_t value, int width) { | 85 int CountLeadingSignBits(int64_t value, int width) { |
30 // TODO(jbramley): Optimize this for ARM64 hosts. | 86 DCHECK(base::bits::IsPowerOfTwo32(width) && (width <= 64)); |
31 DCHECK((width == 32) || (width == 64)); | |
32 if (value >= 0) { | 87 if (value >= 0) { |
33 return CountLeadingZeros(value, width) - 1; | 88 return CountLeadingZeros(value, width) - 1; |
34 } else { | 89 } else { |
35 return CountLeadingZeros(~value, width) - 1; | 90 return CountLeadingZeros(~value, width) - 1; |
36 } | 91 } |
37 } | 92 } |
38 | 93 |
39 | 94 |
40 int CountTrailingZeros(uint64_t value, int width) { | 95 int CountTrailingZeros(uint64_t value, int width) { |
41 // TODO(jbramley): Optimize this for ARM64 hosts. | |
42 DCHECK((width == 32) || (width == 64)); | 96 DCHECK((width == 32) || (width == 64)); |
43 int count = 0; | 97 if (width == 64) { |
44 while ((count < width) && (((value >> count) & 1) == 0)) { | 98 return static_cast<int>(base::bits::CountTrailingZeros64(value)); |
45 count++; | |
46 } | 99 } |
47 return count; | 100 return static_cast<int>(base::bits::CountTrailingZeros32( |
| 101 static_cast<uint32_t>(value & 0xffffffff))); |
48 } | 102 } |
49 | 103 |
50 | 104 |
51 int CountSetBits(uint64_t value, int width) { | 105 int CountSetBits(uint64_t value, int width) { |
52 // TODO(jbramley): Would it be useful to allow other widths? The | |
53 // implementation already supports them. | |
54 DCHECK((width == 32) || (width == 64)); | 106 DCHECK((width == 32) || (width == 64)); |
| 107 if (width == 64) { |
| 108 return static_cast<int>(base::bits::CountPopulation64(value)); |
| 109 } |
| 110 return static_cast<int>(base::bits::CountPopulation32( |
| 111 static_cast<uint32_t>(value & 0xffffffff))); |
| 112 } |
55 | 113 |
56 // Mask out unused bits to ensure that they are not counted. | 114 int LowestSetBitPosition(uint64_t value) { |
57 value &= (0xffffffffffffffffUL >> (64-width)); | 115 DCHECK_NE(value, 0U); |
| 116 return CountTrailingZeros(value, 64) + 1; |
| 117 } |
58 | 118 |
59 // Add up the set bits. | 119 int HighestSetBitPosition(uint64_t value) { |
60 // The algorithm works by adding pairs of bit fields together iteratively, | 120 DCHECK_NE(value, 0U); |
61 // where the size of each bit field doubles each time. | 121 return 63 - CountLeadingZeros(value, 64); |
62 // An example for an 8-bit value: | |
63 // Bits: h g f e d c b a | |
64 // \ | \ | \ | \ | | |
65 // value = h+g f+e d+c b+a | |
66 // \ | \ | | |
67 // value = h+g+f+e d+c+b+a | |
68 // \ | | |
69 // value = h+g+f+e+d+c+b+a | |
70 value = ((value >> 1) & 0x5555555555555555) + (value & 0x5555555555555555); | |
71 value = ((value >> 2) & 0x3333333333333333) + (value & 0x3333333333333333); | |
72 value = ((value >> 4) & 0x0f0f0f0f0f0f0f0f) + (value & 0x0f0f0f0f0f0f0f0f); | |
73 value = ((value >> 8) & 0x00ff00ff00ff00ff) + (value & 0x00ff00ff00ff00ff); | |
74 value = ((value >> 16) & 0x0000ffff0000ffff) + (value & 0x0000ffff0000ffff); | |
75 value = ((value >> 32) & 0x00000000ffffffff) + (value & 0x00000000ffffffff); | |
76 | |
77 return static_cast<int>(value); | |
78 } | 122 } |
79 | 123 |
80 | 124 |
81 uint64_t LargestPowerOf2Divisor(uint64_t value) { | 125 uint64_t LargestPowerOf2Divisor(uint64_t value) { |
82 return value & -value; | 126 return value & -value; |
83 } | 127 } |
84 | 128 |
85 | 129 |
86 int MaskToBit(uint64_t mask) { | 130 int MaskToBit(uint64_t mask) { |
87 DCHECK(CountSetBits(mask, 64) == 1); | 131 DCHECK_EQ(CountSetBits(mask, 64), 1); |
88 return CountTrailingZeros(mask, 64); | 132 return CountTrailingZeros(mask, 64); |
89 } | 133 } |
90 | 134 |
91 | 135 |
92 } // namespace internal | 136 } // namespace internal |
93 } // namespace v8 | 137 } // namespace v8 |
94 | 138 |
95 #endif // V8_TARGET_ARCH_ARM64 | 139 #endif // V8_TARGET_ARCH_ARM64 |
OLD | NEW |