/*
 * Copyright 2014 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "SkTextureCompressor_LATC.h"
#include "SkTextureCompressor_Blitter.h"
#include "SkTextureCompressor_Utils.h"

(...skipping 63 matching lines...)

}

static bool is_extremal(uint8_t pixel) {
    return 0 == pixel || 255 == pixel;
}

typedef uint64_t (*A84x4To64BitProc)(const uint8_t block[]);

// This function is used by both R11 EAC and LATC to compress 4x4 blocks
// of 8-bit alpha into 64-bit values that comprise the compressed data.
// For both formats, we need to make sure that the dimensions of the
// src pixels are divisible by 4, and copy 4x4 blocks one at a time
// for compression.
static bool compress_4x4_a8_to_64bit(uint8_t* dst, const uint8_t* src,
                                     int width, int height, size_t rowBytes,
                                     A84x4To64BitProc proc) {
    // Make sure that our data is well-formed enough to be considered for compression
    if (0 == width || 0 == height || (width % 4) != 0 || (height % 4) != 0) {
        return false;
    }

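    // Illustrative sketch (an assumption, not the elided Skia code): the rest
    // of this function typically walks the image in 4x4 tiles, hands each
    // tile to `proc`, and appends the resulting 64-bit encoding to `dst`,
    // roughly:
    //
    //   uint64_t* encPtr = reinterpret_cast<uint64_t*>(dst);
    //   for (int y = 0; y < height; y += 4) {
    //       for (int x = 0; x < width; x += 4) {
    //           uint8_t block[16];
    //           // Gather one 4x4 tile, row by row, from the strided source.
    //           for (int k = 0; k < 4; ++k) {
    //               memcpy(block + 4*k, src + (y + k)*rowBytes + x, 4);
    //           }
    //           *encPtr++ = proc(block);
    //       }
    //   }
    //   return true;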

(...skipping 182 matching lines...)

        if (!is_extremal(uniquePixels[i])) {
            ++nonExtremalPixels;
        }
    }

    // If all of the pixels are non-extremal (neither 0 nor 255), then compute
    // the palette using the bounding box of all the pixels.
    if (nonExtremalPixels == nUniquePixels) {
        // This is really just for correctness; in all of my tests we
        // never take this step. We don't lose too much perf here because
        // most of the processing in this function is worth it for the
        // 1 == nUniquePixels optimization.
        return compress_latc_block_bb(pixels);
    } else {
        return compress_latc_block_bb_ignore_extremal(pixels);
    }
}

#endif // COMPRESS_LATC_SLOW
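
// Background sketch, assumed from the LATC/BC4 spec rather than taken from
// this file: a block's two 8-bit endpoints decode to an 8-entry palette. When
// lum0 > lum1 all eight entries are interpolated; otherwise six entries are
// interpolated and the last two are the extremal values 0 and 255, which is
// why the compressor above treats extremal pixels specially. The helper name
// below is hypothetical.
static inline void sketch_latc_palette(uint8_t palette[8], uint8_t lum0, uint8_t lum1) {
    palette[0] = lum0;
    palette[1] = lum1;
    if (lum0 > lum1) {
        // Eight-entry mode: six interpolants between the endpoints,
        // rounded to nearest.
        for (int i = 1; i < 7; ++i) {
            palette[i + 1] = ((7 - i) * lum0 + i * lum1 + 3) / 7;
        }
    } else {
        // Six-entry mode: four interpolants plus the extremal values 0 and 255.
        for (int i = 1; i < 5; ++i) {
            palette[i + 1] = ((5 - i) * lum0 + i * lum1 + 2) / 5;
        }
        palette[6] = 0;
        palette[7] = 255;
    }
}
// With lum0 = 255 and lum1 = 0 (the fixed endpoints written by
// compress_a8_latc_block below), this reproduces the palette
// 255, 0, 219, 182, 146, 109, 73, 36 quoted in convert_index.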

////////////////////////////////////////////////////////////////////////////////

(...skipping 15 matching lines...)

           (x & 0x7) |
           ((x >> 5) & 0x38) |
           ((x >> 10) & 0x1C0) |
           ((x >> 15) & 0xE00);
#endif
}
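
// For example (illustrative): if each byte of x already holds a 3-bit index,
// say x = 0x04030201 (indices 1, 2, 3, 4 from low byte to high), the packed
// result is 1 | (2 << 3) | (3 << 6) | (4 << 9) = 0x8D1.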

// Converts each 8-bit byte in the integer into an LATC index, and then packs
// the indices into the low 12 bits of the integer.
static inline uint32_t convert_index(uint32_t x) {
    // Since the palette is
    // 255, 0, 219, 182, 146, 109, 73, 36
    // we need to map the high three bits of each byte in the integer
    // from
    // 0 1 2 3 4 5 6 7
    // to
    // 1 7 6 5 4 3 2 0
    //
    // This first operation takes the mapping from
    // 0 1 2 3 4 5 6 7  -->  7 6 5 4 3 2 1 0
    x = 0x07070707 - SkTextureCompressor::ConvertToThreeBitIndex(x);

    // mask is 1 if index is non-zero
    const uint32_t mask = (x | (x >> 1) | (x >> 2)) & 0x01010101;

    // add mask:
    // 7 6 5 4 3 2 1 0  -->  8 7 6 5 4 3 2 0
    x = (x + mask);

    // Handle overflow:
    // 8 7 6 5 4 3 2 0  -->  9 7 6 5 4 3 2 0
    x |= (x >> 3) & 0x01010101;

    // Mask out high bits:
    // 9 7 6 5 4 3 2 0  -->  1 7 6 5 4 3 2 0
    x &= 0x07070707;

    return pack_index(x);
}
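
// Worked trace (illustrative), following one byte whose three-bit value after
// ConvertToThreeBitIndex is 0:
//   7 - 0 = 7;  mask = 1;  7 + 1 = 8;  the overflow fold turns 8 into 9;
//   9 & 7 = 1
// so bucket 0 selects palette entry 1 (value 0), while a bucket of 7 stays at
// 7 - 7 = 0 throughout, selecting entry 0 (value 255).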

typedef uint64_t (*PackIndicesProc)(const uint8_t* alpha, size_t rowBytes);
template<PackIndicesProc packIndicesProc>
static void compress_a8_latc_block(uint8_t** dstPtr, const uint8_t* src, size_t rowBytes) {
    *(reinterpret_cast<uint64_t*>(*dstPtr)) =
        SkEndian_SwapLE64(0xFF | (packIndicesProc(src, rowBytes) << 16));
    *dstPtr += 8;
}
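
// Note (from the LATC block layout): the low two bytes of the 64-bit block
// hold the endpoints, so the 0xFF above sets lum0 = 255 and lum1 = 0, fixing
// the palette quoted in convert_index; the shift by 16 lands the 48 bits of
// packed 3-bit indices in the remaining six bytes.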

(...skipping 150 matching lines...)

    for (int j = 0; j < height; j += 4) {
        for (int i = 0; i < width; i += 4) {
            decompress_latc_block(dst + i, dstRowBytes, src);
            src += 8;
        }
        dst += 4 * dstRowBytes;
    }
}
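
// Each 4x4 block consumes eight bytes of compressed data, so a width x height
// image is decoded from (width / 4) * (height / 4) * 8 bytes of src.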

} // SkTextureCompressor