Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright 2014 Google Inc. | 2 * Copyright 2014 Google Inc. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
| 6 */ | 6 */ |
| 7 | 7 |
| 8 #include "SkTextureCompressor.h" | 8 #include "SkTextureCompressor.h" |
| 9 | 9 |
| 10 #include "SkBitmap.h" | 10 #include "SkBitmap.h" |
| (...skipping 11 matching lines...) Expand all Loading... | |
| 22 template <typename T> inline T abs_diff(const T &a, const T &b) { | 22 template <typename T> inline T abs_diff(const T &a, const T &b) { |
| 23 return (a > b) ? (a - b) : (b - a); | 23 return (a > b) ? (a - b) : (b - a); |
| 24 } | 24 } |
| 25 | 25 |
| 26 //////////////////////////////////////////////////////////////////////////////// | 26 //////////////////////////////////////////////////////////////////////////////// |
| 27 // | 27 // |
| 28 // LATC compressor | 28 // LATC compressor |
| 29 // | 29 // |
| 30 //////////////////////////////////////////////////////////////////////////////// | 30 //////////////////////////////////////////////////////////////////////////////// |
| 31 | 31 |
| 32 // Return the squared minimum error cost of approximating 'pixel' using the | 32 // LATC compressed texels down into square 4x4 blocks |
| 33 // provided palette. Return this in the middle 16 bits of the integer. Return | 33 static const int kPaletteSize = 8; |
| 34 // the best index in the palette for this pixel in the bottom 8 bits. | 34 static const int kLATCBlockSize = 4; |
| 35 static uint32_t compute_error(uint8_t pixel, uint8_t palette[8]) { | 35 static const int kPixelsPerBlock = kLATCBlockSize * kLATCBlockSize; |
| 36 int minIndex = 0; | |
| 37 uint8_t error = abs_diff(palette[0], pixel); | |
| 38 for (int i = 1; i < 8; ++i) { | |
| 39 uint8_t diff = abs_diff(palette[i], pixel); | |
| 40 if (diff < error) { | |
| 41 minIndex = i; | |
| 42 error = diff; | |
| 43 } | |
| 44 } | |
| 45 uint16_t errSq = static_cast<uint16_t>(error) * static_cast<uint16_t>(error); | |
| 46 SkASSERT(minIndex >= 0 && minIndex < 8); | |
| 47 return (static_cast<uint32_t>(errSq) << 8) | static_cast<uint32_t>(minIndex); | |
| 48 } | |
| 49 | 36 |
| 50 // Compress LATC block. Each 4x4 block of pixels is decompressed by LATC from two | 37 // Generates an LATC palette. LATC constructs |
| 51 // values LUM0 and LUM1, and an index into the generated palette. LATC constructs | |
| 52 // a palette of eight colors from LUM0 and LUM1 using the algorithm: | 38 // a palette of eight colors from LUM0 and LUM1 using the algorithm: |
| 53 // | 39 // |
| 54 // LUM0, if lum0 > lum1 and code(x,y) == 0 | 40 // LUM0, if lum0 > lum1 and code(x,y) == 0 |
| 55 // LUM1, if lum0 > lum1 and code(x,y) == 1 | 41 // LUM1, if lum0 > lum1 and code(x,y) == 1 |
| 56 // (6*LUM0+ LUM1)/7, if lum0 > lum1 and code(x,y) == 2 | 42 // (6*LUM0+ LUM1)/7, if lum0 > lum1 and code(x,y) == 2 |
| 57 // (5*LUM0+2*LUM1)/7, if lum0 > lum1 and code(x,y) == 3 | 43 // (5*LUM0+2*LUM1)/7, if lum0 > lum1 and code(x,y) == 3 |
| 58 // (4*LUM0+3*LUM1)/7, if lum0 > lum1 and code(x,y) == 4 | 44 // (4*LUM0+3*LUM1)/7, if lum0 > lum1 and code(x,y) == 4 |
| 59 // (3*LUM0+4*LUM1)/7, if lum0 > lum1 and code(x,y) == 5 | 45 // (3*LUM0+4*LUM1)/7, if lum0 > lum1 and code(x,y) == 5 |
| 60 // (2*LUM0+5*LUM1)/7, if lum0 > lum1 and code(x,y) == 6 | 46 // (2*LUM0+5*LUM1)/7, if lum0 > lum1 and code(x,y) == 6 |
| 61 // ( LUM0+6*LUM1)/7, if lum0 > lum1 and code(x,y) == 7 | 47 // ( LUM0+6*LUM1)/7, if lum0 > lum1 and code(x,y) == 7 |
| 62 // | 48 // |
| 63 // LUM0, if lum0 <= lum1 and code(x,y) == 0 | 49 // LUM0, if lum0 <= lum1 and code(x,y) == 0 |
| 64 // LUM1, if lum0 <= lum1 and code(x,y) == 1 | 50 // LUM1, if lum0 <= lum1 and code(x,y) == 1 |
| 65 // (4*LUM0+ LUM1)/5, if lum0 <= lum1 and code(x,y) == 2 | 51 // (4*LUM0+ LUM1)/5, if lum0 <= lum1 and code(x,y) == 2 |
| 66 // (3*LUM0+2*LUM1)/5, if lum0 <= lum1 and code(x,y) == 3 | 52 // (3*LUM0+2*LUM1)/5, if lum0 <= lum1 and code(x,y) == 3 |
| 67 // (2*LUM0+3*LUM1)/5, if lum0 <= lum1 and code(x,y) == 4 | 53 // (2*LUM0+3*LUM1)/5, if lum0 <= lum1 and code(x,y) == 4 |
| 68 // ( LUM0+4*LUM1)/5, if lum0 <= lum1 and code(x,y) == 5 | 54 // ( LUM0+4*LUM1)/5, if lum0 <= lum1 and code(x,y) == 5 |
| 69 // 0, if lum0 <= lum1 and code(x,y) == 6 | 55 // 0, if lum0 <= lum1 and code(x,y) == 6 |
| 70 // 255, if lum0 <= lum1 and code(x,y) == 7 | 56 // 255, if lum0 <= lum1 and code(x,y) == 7 |
| 57 | |
| 58 static void generatePalette(uint8_t palette[], uint8_t lum0, uint8_t lum1) { | |
| 59 palette[0] = lum0; | |
| 60 palette[1] = lum1; | |
| 61 if (lum0 > lum1) { | |
| 62 for (int i = 1; i < 7; i++) { | |
| 63 palette[i+1] = ((7-i)*lum0 + i*lum1) / 7; | |
| 64 } | |
| 65 } else { | |
| 66 for (int i = 1; i < 5; i++) { | |
| 67 palette[i+1] = ((5-i)*lum0 + i*lum1) / 5; | |
| 68 } | |
| 69 palette[6] = 0; | |
| 70 palette[7] = 255; | |
| 71 } | |
| 72 } | |
| 73 | |
| 74 static bool isExtremal(uint8_t pixel) { | |
| 75 return pixel == 0 || pixel == 255; | |
|
bsalomon
2014/06/19 19:19:05
we usually write "<rvalue> == <lvalue>"
krajcevski
2014/06/19 20:56:56
Done.
| |
| 76 } | |
| 77 | |
| 78 // Compress a block by using the bounding box of the pixels. It is assumed that | |
| 79 // there are no extremal pixels in this block otherwise we would have used | |
| 80 // compressBlockBBIgnoreExtremal. | |
| 81 static uint64_t compressBlockBB(const uint8_t pixels[]) { | |
| 82 uint8_t minVal = 255; | |
| 83 uint8_t maxVal = 0; | |
| 84 for (int i = 0; i < kPixelsPerBlock; ++i) { | |
| 85 minVal = SkTMin(pixels[i], minVal); | |
| 86 maxVal = SkTMax(pixels[i], maxVal); | |
| 87 } | |
| 88 | |
| 89 SkASSERT(!isExtremal(minVal)); | |
| 90 SkASSERT(!isExtremal(maxVal)); | |
| 91 | |
| 92 uint8_t palette[kPaletteSize]; | |
| 93 generatePalette(palette, maxVal, minVal); | |
| 94 | |
| 95 uint64_t indices = 0; | |
| 96 for (int i = kPixelsPerBlock - 1; i >= 0; --i) { | |
| 97 | |
| 98 // Find the best palette index | |
| 99 uint8_t bestError = abs_diff(pixels[i], palette[0]); | |
| 100 uint8_t idx = 0; | |
| 101 for (int j = 1; j < kPaletteSize; ++j) { | |
| 102 uint8_t error = abs_diff(pixels[i], palette[j]); | |
| 103 if (error < bestError) { | |
| 104 bestError = error; | |
| 105 idx = j; | |
| 106 } | |
| 107 } | |
| 108 | |
| 109 indices <<= 3; | |
| 110 indices |= idx; | |
| 111 } | |
| 112 | |
| 113 return | |
| 114 SkEndian_SwapLE64( | |
| 115 static_cast<uint64_t>(maxVal) | | |
| 116 (static_cast<uint64_t>(minVal) << 8) | | |
| 117 (indices << 16)); | |
| 118 } | |
| 119 | |
| 120 // Compress a block by using the bounding box of the pixels without taking into | |
| 121 // account the extremal values. The generated palette will contain extremal values | |
| 122 // and fewer points along the line segment to interpolate. | |
| 123 static uint64_t compressBlockBBIgnoreExtremal(const uint8_t pixels[]) { | |
| 124 uint8_t minVal = 255; | |
| 125 uint8_t maxVal = 0; | |
| 126 for (int i = 0; i < kPixelsPerBlock; ++i) { | |
| 127 minVal = SkTMin(pixels[i]-1, minVal-1)+1; | |
| 128 maxVal = SkTMax(pixels[i]+1, maxVal+1)-1; | |
| 129 } | |
| 130 | |
| 131 SkASSERT(!isExtremal(minVal)); | |
| 132 SkASSERT(!isExtremal(maxVal)); | |
| 133 | |
| 134 uint8_t palette[kPaletteSize]; | |
| 135 generatePalette(palette, minVal, maxVal); | |
| 136 | |
| 137 uint64_t indices = 0; | |
| 138 for (int i = kPixelsPerBlock - 1; i >= 0; --i) { | |
| 139 | |
| 140 // Find the best palette index | |
| 141 uint8_t idx = 0; | |
| 142 if (isExtremal(pixels[i])) { | |
| 143 if (0xFF == pixels[i]) { | |
| 144 idx = 7; | |
| 145 } else if (0 == pixels[i]) { | |
| 146 idx = 6; | |
| 147 } else { | |
| 148 SkFAIL("Pixel is extremal but not really?!"); | |
| 149 } | |
| 150 } else { | |
| 151 uint8_t bestError = abs_diff(pixels[i], palette[0]); | |
| 152 for (int j = 1; j < kPaletteSize - 2; ++j) { | |
| 153 uint8_t error = abs_diff(pixels[i], palette[j]); | |
| 154 if (error < bestError) { | |
| 155 bestError = error; | |
| 156 idx = j; | |
| 157 } | |
| 158 } | |
| 159 } | |
| 160 | |
| 161 indices <<= 3; | |
| 162 indices |= idx; | |
| 163 } | |
| 164 | |
| 165 return | |
| 166 SkEndian_SwapLE64( | |
| 167 static_cast<uint64_t>(minVal) | | |
| 168 (static_cast<uint64_t>(maxVal) << 8) | | |
| 169 (indices << 16)); | |
| 170 } | |
| 171 | |
| 172 | |
| 173 // Compress LATC block. Each 4x4 block of pixels is decompressed by LATC from two | |
| 174 // values LUM0 and LUM1, and an index into the generated palette. Details of how | |
| 175 // the palette is generated can be found in the comments of generatePalette above. | |
| 71 // | 176 // |
| 72 // We compute the LATC palette using the following simple algorithm: | 177 // We choose which palette type to use based on whether or not 'pixels' contains |
| 73 // 1. Choose the minimum and maximum values in the block as LUM0 and LUM1 | 178 // any extremal values (0 or 255). If there are extremal values, then we use the |
| 74 // 2. Figure out which of the two possible palettes is better. | 179 // palette that has the extremal values built in. Otherwise, we use the full bounding |
| 75 | 180 // box. |
| 76 static uint64_t compress_latc_block(uint8_t block[16]) { | 181 |
| 77 // Just do a simple min/max but choose which of the | 182 static uint64_t compressBlock(const uint8_t pixels[]) { |
|
bsalomon
2014/06/19 19:19:05
our (not consistently enforced) naming convention
krajcevski
2014/06/19 20:56:56
Done.
| |
| 78 // two palettes is better | 183 // Collect unique pixels |
| 79 uint8_t maxVal = 0; | 184 int nUniquePixels = 0; |
| 80 uint8_t minVal = 255; | 185 uint8_t uniquePixels[kPixelsPerBlock]; |
| 81 for (int i = 0; i < 16; ++i) { | 186 for (int i = 0; i < kPixelsPerBlock; ++i) { |
| 82 maxVal = SkMax32(maxVal, block[i]); | 187 bool foundPixel = false; |
| 83 minVal = SkMin32(minVal, block[i]); | 188 for (int j = 0; j < nUniquePixels; ++j) { |
| 84 } | 189 foundPixel = foundPixel || uniquePixels[j] == pixels[i]; |
| 85 | 190 } |
| 86 // Generate palettes | 191 |
| 87 uint8_t palettes[2][8]; | 192 if (!foundPixel) { |
| 88 | 193 uniquePixels[nUniquePixels] = pixels[i]; |
| 89 // Straight linear ramp | 194 ++nUniquePixels; |
| 90 palettes[0][0] = maxVal; | 195 } |
| 91 palettes[0][1] = minVal; | 196 } |
| 92 for (int i = 1; i < 7; ++i) { | 197 |
| 93 palettes[0][i+1] = ((7-i)*maxVal + i*minVal) / 7; | 198 // If there's only one unique pixel, then our compression is easy. |
| 94 } | 199 if (1 == nUniquePixels) { |
| 95 | 200 return SkEndian_SwapLE64(pixels[0] | (pixels[0] << 8)); |
| 96 // Smaller linear ramp with min and max byte values at the end. | 201 |
| 97 palettes[1][0] = minVal; | 202 // Similarly, if there are only two unique pixels, then our compression is |
| 98 palettes[1][1] = maxVal; | 203 // easy again: place the pixels in the block header, and assign the indices |
| 99 for (int i = 1; i < 5; ++i) { | 204 // with one or zero depending on which pixel they belong to. |
| 100 palettes[1][i+1] = ((5-i)*maxVal + i*minVal) / 5; | 205 } else if (2 == nUniquePixels) { |
| 101 } | 206 uint64_t outBlock = 0; |
| 102 palettes[1][6] = 0; | 207 for (int i = kPixelsPerBlock - 1; i >= 0; --i) { |
| 103 palettes[1][7] = 255; | 208 int idx = 0; |
| 104 | 209 if (pixels[i] == uniquePixels[1]) { |
| 105 // Figure out which of the two is better: | 210 idx = 1; |
| 106 // - accumError holds the accumulated error for each pixel from | 211 } |
| 107 // the associated palette | 212 |
| 108 // - indices holds the best indices for each palette in the | 213 outBlock <<= 3; |
| 109 // bottom 48 (16*3) bits. | 214 outBlock |= idx; |
| 110 uint32_t accumError[2] = { 0, 0 }; | 215 } |
| 111 uint64_t indices[2] = { 0, 0 }; | 216 outBlock <<= 16; |
| 112 for (int i = 15; i >= 0; --i) { | 217 outBlock |= (uniquePixels[0] | (uniquePixels[1] << 8)); |
| 113 // For each palette: | 218 return SkEndian_SwapLE64(outBlock); |
| 114 // 1. Retreive the result of this pixel | 219 } |
| 115 // 2. Store the error in accumError | 220 |
| 116 // 3. Store the minimum palette index in indices. | 221 // Count non-maximal pixel values |
| 117 for (int p = 0; p < 2; ++p) { | 222 int nonExtremalPixels = 0; |
| 118 uint32_t result = compute_error(block[i], palettes[p]); | 223 for (int i = 0; i < nUniquePixels; ++i) { |
| 119 accumError[p] += (result >> 8); | 224 if (!isExtremal(uniquePixels[i])) { |
| 120 indices[p] <<= 3; | 225 ++nonExtremalPixels; |
| 121 indices[p] |= result & 7; | 226 } |
| 122 } | 227 } |
| 123 } | 228 |
| 124 | 229 // If all the pixels are nonmaximal then compute the palette using |
| 125 SkASSERT(indices[0] < (static_cast<uint64_t>(1) << 48)); | 230 // the bounding box of all the pixels. |
| 126 SkASSERT(indices[1] < (static_cast<uint64_t>(1) << 48)); | 231 if (nonExtremalPixels == nUniquePixels) { |
| 127 | 232 // This is really just for correctness, in all of my tests we |
| 128 uint8_t paletteIdx = (accumError[0] > accumError[1]) ? 0 : 1; | 233 // never take this step. We don't lose too much perf here because |
| 129 | 234 // most of the processing in this function is worth it for the |
| 130 // Assemble the compressed block. | 235 // 1 == nUniquePixels optimization. |
| 131 uint64_t result = 0; | 236 return compressBlockBB(pixels); |
| 132 | 237 } else { |
| 133 // Jam the first two palette entries into the bottom 16 bits of | 238 return compressBlockBBIgnoreExtremal(pixels); |
| 134 // a 64 bit integer. Based on the palette that we chose, one will | 239 } |
| 135 // be larger than the other and it will select the proper palette. | 240 } |
| 136 result |= static_cast<uint64_t>(palettes[paletteIdx][0]); | 241 |
| 137 result |= static_cast<uint64_t>(palettes[paletteIdx][1]) << 8; | 242 static bool compress_a8_to_latc(uint8_t* dst, const uint8_t* src, |
| 138 | 243 int width, int height, int rowBytes) { |
| 139 // Jam the indices into the top 48 bits. | |
| 140 result |= indices[paletteIdx] << 16; | |
| 141 | |
| 142 // We assume everything is little endian, if it's not then make it so. | |
| 143 return SkEndian_SwapLE64(result); | |
| 144 } | |
| 145 | |
| 146 static SkData *compress_a8_to_latc(const SkBitmap &bm) { | |
| 147 // LATC compressed texels down into square 4x4 blocks | |
| 148 static const int kLATCBlockSize = 4; | |
| 149 | |
| 150 // Make sure that our data is well-formed enough to be | 244 // Make sure that our data is well-formed enough to be |
| 151 // considered for LATC compression | 245 // considered for LATC compression |
| 152 if (bm.width() == 0 || bm.height() == 0 || | 246 if (width == 0 || height == 0 || |
| 153 (bm.width() % kLATCBlockSize) != 0 || | 247 (width % kLATCBlockSize) != 0 || (height % kLATCBlockSize) != 0) { |
| 154 (bm.height() % kLATCBlockSize) != 0 || | 248 return false; |
| 155 (bm.colorType() != kAlpha_8_SkColorType)) { | 249 } |
| 156 return NULL; | 250 |
| 157 } | 251 int blocksX = width / kLATCBlockSize; |
| 158 | 252 int blocksY = height / kLATCBlockSize; |
| 159 // The LATC format is 64 bits per 4x4 block. | |
| 160 static const int kLATCEncodedBlockSize = 8; | |
| 161 | |
| 162 int blocksX = bm.width() / kLATCBlockSize; | |
| 163 int blocksY = bm.height() / kLATCBlockSize; | |
| 164 | |
| 165 int compressedDataSize = blocksX * blocksY * kLATCEncodedBlockSize; | |
| 166 uint64_t* dst = reinterpret_cast<uint64_t*>(sk_malloc_throw(compressedDataSize)); | |
| 167 | 253 |
| 168 uint8_t block[16]; | 254 uint8_t block[16]; |
| 169 const uint8_t* row = reinterpret_cast<const uint8_t*>(bm.getPixels()); | 255 uint64_t* encPtr = reinterpret_cast<uint64_t*>(dst); |
| 170 uint64_t* encPtr = dst; | |
| 171 for (int y = 0; y < blocksY; ++y) { | 256 for (int y = 0; y < blocksY; ++y) { |
| 172 for (int x = 0; x < blocksX; ++x) { | 257 for (int x = 0; x < blocksX; ++x) { |
| 173 memcpy(block, row + (kLATCBlockSize * x), 4); | 258 // Load block |
| 174 memcpy(block + 4, row + bm.rowBytes() + (kLATCBlockSize * x), 4); | 259 static const int kBS = kLATCBlockSize; |
| 175 memcpy(block + 8, row + 2*bm.rowBytes() + (kLATCBlockSize * x), 4); | 260 for (int k = 0; k < kBS; ++k) { |
| 176 memcpy(block + 12, row + 3*bm.rowBytes() + (kLATCBlockSize * x), 4); | 261 memcpy(block + k*kBS, src + k*rowBytes + (kBS * x), kBS); |
| 177 | 262 } |
| 178 *encPtr = compress_latc_block(block); | 263 |
| 264 // Compress it | |
| 265 *encPtr = compressBlock(block); | |
| 179 ++encPtr; | 266 ++encPtr; |
| 180 } | 267 } |
| 181 row += kLATCBlockSize * bm.rowBytes(); | 268 src += kLATCBlockSize * rowBytes; |
| 182 } | 269 } |
| 183 | 270 |
| 184 return SkData::NewFromMalloc(dst, compressedDataSize); | 271 return true; |
| 185 } | 272 } |
| 186 | 273 |
| 187 //////////////////////////////////////////////////////////////////////////////// | 274 //////////////////////////////////////////////////////////////////////////////// |
| 188 | 275 |
| 189 namespace SkTextureCompressor { | 276 namespace SkTextureCompressor { |
| 190 | 277 |
| 191 typedef SkData *(*CompressBitmapProc)(const SkBitmap &bitmap); | 278 static size_t get_compressed_data_size(Format fmt, int width, int height) { |
| 279 switch (fmt) { | |
| 280 case kLATC_Format: | |
| 281 { | |
| 282 // The LATC format is 64 bits per 4x4 block. | |
| 283 static const int kLATCEncodedBlockSize = 8; | |
| 284 | |
| 285 int blocksX = width / kLATCBlockSize; | |
|
bsalomon
2014/06/19 19:19:05
Are the width and height always multiples of the block size?
krajcevski
2014/06/19 20:56:56
I check that the width/height are the proper size
| |
| 286 int blocksY = height / kLATCBlockSize; | |
| 287 | |
| 288 return blocksX * blocksY * kLATCEncodedBlockSize; | |
| 289 } | |
| 290 | |
| 291 default: | |
| 292 SkFAIL("Unknown compressed format!"); | |
| 293 return 0; | |
| 294 } | |
| 295 } | |
| 296 | |
| 297 typedef bool (*CompressBitmapProc)(uint8_t* dst, const uint8_t* src, | |
| 298 int width, int height, int rowBytes); | |
| 299 | |
| | 300 bool CompressBufferToFormat(uint8_t* dst, const uint8_t* src, SkColorType srcColorType, |
| 301 int width, int height, int rowBytes, Format format) { | |
| 302 | |
| 303 CompressBitmapProc kProcMap[kFormatCnt][kLastEnum_SkColorType + 1]; | |
| 304 memset(kProcMap, 0, sizeof(kProcMap)); | |
| 305 | |
| 306 kProcMap[kLATC_Format][kAlpha_8_SkColorType] = compress_a8_to_latc; | |
| 307 | |
| 308 CompressBitmapProc proc = kProcMap[format][srcColorType]; | |
| 309 if (NULL != proc) { | |
| 310 return proc(dst, src, width, height, rowBytes); | |
| 311 } | |
| 312 | |
| 313 return false; | |
| 314 } | |
| 192 | 315 |
| 193 SkData *CompressBitmapToFormat(const SkBitmap &bitmap, Format format) { | 316 SkData *CompressBitmapToFormat(const SkBitmap &bitmap, Format format) { |
| 194 SkAutoLockPixels alp(bitmap); | 317 SkAutoLockPixels alp(bitmap); |
| 195 | 318 |
| 196 CompressBitmapProc kProcMap[kLastEnum_SkColorType + 1][kFormatCnt]; | 319 int compressedDataSize = get_compressed_data_size(format, bitmap.width(), bitmap.height()); |
| 197 memset(kProcMap, 0, sizeof(kProcMap)); | 320 const uint8_t* src = reinterpret_cast<const uint8_t*>(bitmap.getPixels()); |
| 198 | 321 uint8_t* dst = reinterpret_cast<uint8_t*>(sk_malloc_throw(compressedDataSize)); |
| 199 // Map available bitmap configs to compression functions | 322 if (CompressBufferToFormat(dst, src, bitmap.colorType(), bitmap.width(), bitmap.height(), |
| 200 kProcMap[kAlpha_8_SkColorType][kLATC_Format] = compress_a8_to_latc; | 323 bitmap.rowBytes(), format)) { |
| 201 | 324 return SkData::NewFromMalloc(dst, compressedDataSize); |
| 202 CompressBitmapProc proc = kProcMap[bitmap.colorType()][format]; | 325 } |
| 203 if (NULL != proc) { | 326 |
| 204 return proc(bitmap); | 327 sk_free(dst); |
| 205 } | |
| 206 | |
| 207 return NULL; | 328 return NULL; |
| 208 } | 329 } |
| 209 | 330 |
| 210 } // namespace SkTextureCompressor | 331 } // namespace SkTextureCompressor |
| OLD | NEW |