/*
 * Copyright 2014
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "SkTextureCompressor.h"
#include "SkTextureCompression_opts.h"

#include <arm_neon.h>

// Converts the index in each byte of the register from
// 0, 1, 2, 3, 4, 5, 6, 7
// to
// 3, 2, 1, 0, 4, 5, 6, 7
//
// A more detailed explanation can be found in SkTextureCompressor::convert_indices
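//
// In scalar terms, each byte is mapped as: index' = (index < 4) ? 3 - index : index.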
static inline uint8x16_t convert_indices(const uint8x16_t &x) {
    static const int8x16_t kThree = {
        0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
        0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
    };

    static const int8x16_t kZero = {
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    };

    // Reinterpret the indices as signed bytes (the top three bits were
    // already isolated by make_index_row)
    int8x16_t sx = vreinterpretq_s8_u8(x);

    // Negate ...
    sx = vnegq_s8(sx);

    // Add three...
    sx = vaddq_s8(sx, kThree);

    // Generate a mask of the lanes that went negative, i.e. the lanes whose
    // original index was greater than three
    const int8x16_t mask = vreinterpretq_s8_u8(vcltq_s8(sx, kZero));

    // Absolute value
    sx = vabsq_s8(sx);

    // Add three back to the values that were negative...
    return vreinterpretq_u8_s8(vaddq_s8(sx, vandq_s8(mask, kThree)));
}

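// Delta swap: exchanges the bits selected by `mask` with the bits `shift`
// positions above them, independently in each 64-bit lane. Scalar equivalent:
//   t = mask & (x ^ (x >> shift)); x ^= t ^ (t << shift);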
template<unsigned shift>
static inline uint64x2_t shift_swap(const uint64x2_t &x, const uint64x2_t &mask) {
    uint64x2_t t = vandq_u64(mask, veorq_u64(x, vshrq_n_u64(x, shift)));
    return veorq_u64(x, veorq_u64(t, vshlq_n_u64(t, shift)));
}

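// Packs the 3-bit indices of two 4x4 blocks (one block per 64-bit lane, texels
// labeled a..p in the diagrams below; "00" marks two zero padding bits) into
// the 48-bit index layout of an R11 EAC block, then ORs in the fixed header.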
static inline uint64x2_t pack_indices(const uint64x2_t &x) {
    // x: 00 a e 00 b f 00 c g 00 d h 00 i m 00 j n 00 k o 00 l p

    static const uint64x2_t kMask1 = { 0x3FC0003FC00000ULL, 0x3FC0003FC00000ULL };
    uint64x2_t ret = shift_swap<10>(x, kMask1);

    // x: b f 00 00 00 a e c g i m 00 00 00 d h j n 00 k o 00 l p
    static const uint64x2_t kMask2 = { (0x3FULL << 52), (0x3FULL << 52) };
    static const uint64x2_t kMask3 = { (0x3FULL << 28), (0x3FULL << 28) };
    const uint64x2_t x1 = vandq_u64(vshlq_n_u64(ret, 52), kMask2);
    const uint64x2_t x2 = vandq_u64(vshlq_n_u64(ret, 20), kMask3);
    ret = vshrq_n_u64(vorrq_u64(ret, vorrq_u64(x1, x2)), 16);

    // x: 00 00 00 00 00 00 00 00 b f l p a e c g i m k o d h j n

    static const uint64x2_t kMask4 = { 0xFC0000ULL, 0xFC0000ULL };
    ret = shift_swap<6>(ret, kMask4);

#if defined (SK_CPU_BENDIAN)
    // x: 00 00 00 00 00 00 00 00 b f l p a e i m c g k o d h j n

    static const uint64x2_t kMask5 = { 0x3FULL, 0x3FULL };
    ret = shift_swap<36>(ret, kMask5);

    // x: 00 00 00 00 00 00 00 00 b f j n a e i m c g k o d h l p

    static const uint64x2_t kMask6 = { 0xFFF000000ULL, 0xFFF000000ULL };
    ret = shift_swap<12>(ret, kMask6);
#else
    // x: 00 00 00 00 00 00 00 00 c g i m d h l p b f j n a e k o

    static const uint64x2_t kMask5 = { 0xFC0ULL, 0xFC0ULL };
    ret = shift_swap<36>(ret, kMask5);

    // x: 00 00 00 00 00 00 00 00 a e i m d h l p b f j n c g k o

    static const uint64x2_t kMask6 = { (0xFFFULL << 36), (0xFFFULL << 36) };
    static const uint64x2_t kMask7 = { 0xFFFFFFULL, 0xFFFFFFULL };
    static const uint64x2_t kMask8 = { 0xFFFULL, 0xFFFULL };
    const uint64x2_t y1 = vandq_u64(ret, kMask6);
    const uint64x2_t y2 = vshlq_n_u64(vandq_u64(ret, kMask7), 12);
    const uint64x2_t y3 = vandq_u64(vshrq_n_u64(ret, 24), kMask8);
    ret = vorrq_u64(y1, vorrq_u64(y2, y3));
#endif

    // x: 00 00 00 00 00 00 00 00 a e i m b f j n c g k o d h l p

    // Set the header
    static const uint64x2_t kHeader = { 0x8490000000000000ULL, 0x8490000000000000ULL };
    return vorrq_u64(kHeader, ret);
}

// Takes a row of alpha values and places the most significant three bits of each
// byte into the least significant bits of the same byte
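// (scalar equivalent per byte: index = (alpha & 0xE0) >> 5, i.e. alpha >> 5)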
static inline uint8x16_t make_index_row(const uint8x16_t &x) {
    static const uint8x16_t kTopThreeMask = {
        0xE0, 0xE0, 0xE0, 0xE0, 0xE0, 0xE0, 0xE0, 0xE0,
        0xE0, 0xE0, 0xE0, 0xE0, 0xE0, 0xE0, 0xE0, 0xE0,
    };
    return vshrq_n_u8(vandq_u8(x, kTopThreeMask), 5);
}

// Returns true if all of the bits in x are 0.
static inline bool is_zero(uint8x16_t x) {
    // First experiments suggest that this is much slower than just examining
    // the lanes, but it might merit a little more investigation.
#if 0
    // This code path tests the system register for overflow. We trigger
    // overflow by adding x to a register with all of its bits set. The
    // first instruction sets the bits.
    int reg;
    asm ("VTST.8   %%q0, %q1, %q1\n"
         "VQADD.u8 %q1, %%q0\n"
         "VMRS     %0, FPSCR\n"
         : "=r"(reg) : "w"(vreinterpretq_f32_u8(x)) : "q0", "q1");

    // Bit 21 corresponds to the overflow flag.
    return reg & (0x1 << 21);
#else
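    // Fold the vector into two 64-bit lanes and require both to be zero.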
    const uint64x2_t cvt = vreinterpretq_u64_u8(x);
    const uint64_t l1 = vgetq_lane_u64(cvt, 0);
    return (l1 == 0) && (l1 == vgetq_lane_u64(cvt, 1));
#endif
}

#if defined (SK_CPU_BENDIAN)
static inline uint64x2_t fix_endianness(uint64x2_t x) {
    return x;
}
#else
static inline uint64x2_t fix_endianness(uint64x2_t x) {
    return vreinterpretq_u64_u8(vrev64q_u8(vreinterpretq_u8_u64(x)));
}
#endif

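// Compresses a 4x16 strip of 8-bit alpha values (four 4x4 blocks side by side,
// with rows rowBytes apart in src) into four 64-bit R11 EAC blocks, all of
// which share the fixed header applied in pack_indices.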
static void compress_r11eac_blocks(uint64_t* dst, const uint8_t* src, int rowBytes) {

    // Try to avoid switching between vector and non-vector ops...
    const uint8_t *const src1 = src;
    const uint8_t *const src2 = src + rowBytes;
    const uint8_t *const src3 = src + 2*rowBytes;
    const uint8_t *const src4 = src + 3*rowBytes;
    uint64_t *const dst1 = dst;
    uint64_t *const dst2 = dst + 2;

    const uint8x16_t alphaRow1 = vld1q_u8(src1);
    const uint8x16_t alphaRow2 = vld1q_u8(src2);
    const uint8x16_t alphaRow3 = vld1q_u8(src3);
    const uint8x16_t alphaRow4 = vld1q_u8(src4);

    const uint8x16_t cmp12 = vceqq_u8(alphaRow1, alphaRow2);
    const uint8x16_t cmp34 = vceqq_u8(alphaRow3, alphaRow4);
    const uint8x16_t cmp13 = vceqq_u8(alphaRow1, alphaRow3);

    const uint8x16_t cmp = vandq_u8(vandq_u8(cmp12, cmp34), cmp13);
    const uint8x16_t ncmp = vmvnq_u8(cmp);
    const uint8x16_t nAlphaRow1 = vmvnq_u8(alphaRow1);
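    // If ncmp is zero, every column holds the same value in all four rows, so
    // fully transparent and fully opaque strips can be emitted directly.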
    if (is_zero(ncmp)) {
        if (is_zero(alphaRow1)) {
            static const uint64x2_t kTransparent = { 0x0020000000002000ULL,
                                                     0x0020000000002000ULL };
            vst1q_u64(dst1, kTransparent);
            vst1q_u64(dst2, kTransparent);
            return;
        } else if (is_zero(nAlphaRow1)) {
            vst1q_u64(dst1, vreinterpretq_u64_u8(cmp));
            vst1q_u64(dst2, vreinterpretq_u64_u8(cmp));
            return;
        }
    }

    const uint8x16_t indexRow1 = convert_indices(make_index_row(alphaRow1));
    const uint8x16_t indexRow2 = convert_indices(make_index_row(alphaRow2));
    const uint8x16_t indexRow3 = convert_indices(make_index_row(alphaRow3));
    const uint8x16_t indexRow4 = convert_indices(make_index_row(alphaRow4));

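    // Pair up adjacent rows: each byte now holds two 3-bit indices (the upper
    // row in bits 3..5, the lower row in bits 0..2).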
    const uint64x2_t indexRow12 = vreinterpretq_u64_u8(
        vorrq_u8(vshlq_n_u8(indexRow1, 3), indexRow2));
    const uint64x2_t indexRow34 = vreinterpretq_u64_u8(
        vorrq_u8(vshlq_n_u8(indexRow3, 3), indexRow4));

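    // Transpose 32-bit lanes so that all sixteen indices of a given 4x4 block
    // end up in the same 64-bit lane.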
    const uint32x4x2_t blockIndices = vtrnq_u32(vreinterpretq_u32_u64(indexRow12),
                                                vreinterpretq_u32_u64(indexRow34));
    const uint64x2_t blockIndicesLeft = vreinterpretq_u64_u32(vrev64q_u32(blockIndices.val[0]));
    const uint64x2_t blockIndicesRight = vreinterpretq_u64_u32(vrev64q_u32(blockIndices.val[1]));

    const uint64x2_t indicesLeft = fix_endianness(pack_indices(blockIndicesLeft));
    const uint64x2_t indicesRight = fix_endianness(pack_indices(blockIndicesRight));

    const uint64x2_t d1 = vcombine_u64(vget_low_u64(indicesLeft), vget_low_u64(indicesRight));
    const uint64x2_t d2 = vcombine_u64(vget_high_u64(indicesLeft), vget_high_u64(indicesRight));
    vst1q_u64(dst1, d1);
    vst1q_u64(dst2, d2);
}

static bool compress_a8_to_r11eac(uint8_t* dst, const uint8_t* src,
                                  int width, int height, int rowBytes) {

    // Since we're going to operate on 4 blocks at a time, the src width
    // must be a multiple of 16. However, the height only needs to be a
    // multiple of 4.
    if (0 == width || 0 == height || (width % 16) != 0 || (height % 4) != 0) {
        return SkTextureCompressor::CompressBufferToFormat(
            dst, src,
            kAlpha_8_SkColorType,
            width, height, rowBytes,
            SkTextureCompressor::kR11_EAC_Format, false);
    }

    const int blocksX = width >> 2;
    const int blocksY = height >> 2;

    SkASSERT((blocksX % 4) == 0);

    uint64_t* encPtr = reinterpret_cast<uint64_t*>(dst);
    for (int y = 0; y < blocksY; ++y) {
        for (int x = 0; x < blocksX; x += 4) {
            // Compress four horizontally adjacent 4x4 blocks in one pass
            compress_r11eac_blocks(encPtr, src + 4*x, rowBytes);
            encPtr += 4;
        }
        src += 4 * rowBytes;
    }
    return true;
}

SkTextureCompressor::CompressionProc
SkTextureCompressorGetPlatformProc(SkColorType colorType, SkTextureCompressor::Format fmt) {
    switch (colorType) {
        case kAlpha_8_SkColorType:
        {
            switch (fmt) {
                case SkTextureCompressor::kR11_EAC_Format:
                    return compress_a8_to_r11eac;
                default:
                    return NULL;
            }
        }
        break;

        default:
            return NULL;
    }
}