OLD | NEW |
(Empty) | |
| 1 // Copyright 2012 Google Inc. All Rights Reserved. |
| 2 // |
| 3 // Use of this source code is governed by a BSD-style license |
| 4 // that can be found in the COPYING file in the root of the source |
| 5 // tree. An additional intellectual property rights grant can be found |
| 6 // in the file PATENTS. All contributing project authors may |
| 7 // be found in the AUTHORS file in the root of the source tree. |
| 8 // ----------------------------------------------------------------------------- |
| 9 // |
| 10 // main entry for the decoder |
| 11 // |
| 12 // Authors: Vikas Arora (vikaas.arora@gmail.com) |
| 13 // Jyrki Alakuijala (jyrki@google.com) |
| 14 |
| 15 #include <stdlib.h> |
| 16 |
| 17 #include "./alphai.h" |
| 18 #include "./vp8li.h" |
| 19 #include "../dsp/dsp.h" |
| 20 #include "../dsp/lossless.h" |
| 21 #include "../dsp/yuv.h" |
| 22 #include "../utils/huffman.h" |
| 23 #include "../utils/utils.h" |
| 24 |
| 25 #define NUM_ARGB_CACHE_ROWS 16 |
| 26 |
| 27 static const int kCodeLengthLiterals = 16; |
| 28 static const int kCodeLengthRepeatCode = 16; |
| 29 static const int kCodeLengthExtraBits[3] = { 2, 3, 7 }; |
| 30 static const int kCodeLengthRepeatOffsets[3] = { 3, 3, 11 }; |
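|      // Code-length symbols 0..15 are literal code lengths. Symbol 16 repeats |
|      // the previous non-zero length, while 17 and 18 emit runs of zero |
|      // lengths; each reads kCodeLengthExtraBits[] extra bits and adds the |
|      // matching kCodeLengthRepeatOffsets[] base to the run length |
|      // (see ReadHuffmanCodeLengths() below). |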
| 31 |
| 32 // ----------------------------------------------------------------------------- |
| 33 // Five Huffman codes are used at each meta code: |
| 34 // 1. green + length prefix codes + color cache codes, |
| 35 // 2. alpha, |
| 36 // 3. red, |
| 37 // 4. blue, and, |
| 38 // 5. distance prefix codes. |
| 39 typedef enum { |
| 40 GREEN = 0, |
| 41 RED = 1, |
| 42 BLUE = 2, |
| 43 ALPHA = 3, |
| 44 DIST = 4 |
| 45 } HuffIndex; |
| 46 |
| 47 static const uint16_t kAlphabetSize[HUFFMAN_CODES_PER_META_CODE] = { |
| 48 NUM_LITERAL_CODES + NUM_LENGTH_CODES, |
| 49 NUM_LITERAL_CODES, NUM_LITERAL_CODES, NUM_LITERAL_CODES, |
| 50 NUM_DISTANCE_CODES |
| 51 }; |
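|      // With NUM_LITERAL_CODES = 256, NUM_LENGTH_CODES = 24 and |
|      // NUM_DISTANCE_CODES = 40, the base alphabet sizes are 280, 256, 256, |
|      // 256 and 40. A color cache adds (1 << cache_bits) extra symbols to |
|      // the GREEN alphabet (see ReadHuffmanCodes()). |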
| 52 |
| 53 |
| 54 #define NUM_CODE_LENGTH_CODES 19 |
| 55 static const uint8_t kCodeLengthCodeOrder[NUM_CODE_LENGTH_CODES] = { |
| 56 17, 18, 0, 1, 2, 3, 4, 5, 16, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 |
| 57 }; |
| 58 |
| 59 #define CODE_TO_PLANE_CODES 120 |
| 60 static const uint8_t kCodeToPlane[CODE_TO_PLANE_CODES] = { |
| 61 0x18, 0x07, 0x17, 0x19, 0x28, 0x06, 0x27, 0x29, 0x16, 0x1a, |
| 62 0x26, 0x2a, 0x38, 0x05, 0x37, 0x39, 0x15, 0x1b, 0x36, 0x3a, |
| 63 0x25, 0x2b, 0x48, 0x04, 0x47, 0x49, 0x14, 0x1c, 0x35, 0x3b, |
| 64 0x46, 0x4a, 0x24, 0x2c, 0x58, 0x45, 0x4b, 0x34, 0x3c, 0x03, |
| 65 0x57, 0x59, 0x13, 0x1d, 0x56, 0x5a, 0x23, 0x2d, 0x44, 0x4c, |
| 66 0x55, 0x5b, 0x33, 0x3d, 0x68, 0x02, 0x67, 0x69, 0x12, 0x1e, |
| 67 0x66, 0x6a, 0x22, 0x2e, 0x54, 0x5c, 0x43, 0x4d, 0x65, 0x6b, |
| 68 0x32, 0x3e, 0x78, 0x01, 0x77, 0x79, 0x53, 0x5d, 0x11, 0x1f, |
| 69 0x64, 0x6c, 0x42, 0x4e, 0x76, 0x7a, 0x21, 0x2f, 0x75, 0x7b, |
| 70 0x31, 0x3f, 0x63, 0x6d, 0x52, 0x5e, 0x00, 0x74, 0x7c, 0x41, |
| 71 0x4f, 0x10, 0x20, 0x62, 0x6e, 0x30, 0x73, 0x7d, 0x51, 0x5f, |
| 72 0x40, 0x72, 0x7e, 0x61, 0x6f, 0x50, 0x71, 0x7f, 0x60, 0x70 |
| 73 }; |
| 74 |
| 75 static int DecodeImageStream(int xsize, int ysize, |
| 76 int is_level0, |
| 77 VP8LDecoder* const dec, |
| 78 uint32_t** const decoded_data); |
| 79 |
| 80 //------------------------------------------------------------------------------ |
| 81 |
| 82 int VP8LCheckSignature(const uint8_t* const data, size_t size) { |
| 83 return (size >= VP8L_FRAME_HEADER_SIZE && |
| 84 data[0] == VP8L_MAGIC_BYTE && |
| 85 (data[4] >> 5) == 0); // version |
| 86 } |
| 87 |
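|      // Header layout: an 8-bit signature (VP8L_MAGIC_BYTE, 0x2f), then |
|      // (width - 1) and (height - 1) on VP8L_IMAGE_SIZE_BITS (14) bits each, |
|      // one alpha-hint bit, and a VP8L_VERSION_BITS (3) bit version field |
|      // that must be zero. |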
| 88 static int ReadImageInfo(VP8LBitReader* const br, |
| 89 int* const width, int* const height, |
| 90 int* const has_alpha) { |
| 91 if (VP8LReadBits(br, 8) != VP8L_MAGIC_BYTE) return 0; |
| 92 *width = VP8LReadBits(br, VP8L_IMAGE_SIZE_BITS) + 1; |
| 93 *height = VP8LReadBits(br, VP8L_IMAGE_SIZE_BITS) + 1; |
| 94 *has_alpha = VP8LReadBits(br, 1); |
| 95 if (VP8LReadBits(br, VP8L_VERSION_BITS) != 0) return 0; |
| 96 return 1; |
| 97 } |
| 98 |
| 99 int VP8LGetInfo(const uint8_t* data, size_t data_size, |
| 100 int* const width, int* const height, int* const has_alpha) { |
| 101 if (data == NULL || data_size < VP8L_FRAME_HEADER_SIZE) { |
| 102 return 0; // not enough data |
| 103 } else if (!VP8LCheckSignature(data, data_size)) { |
| 104 return 0; // bad signature |
| 105 } else { |
| 106 int w, h, a; |
| 107 VP8LBitReader br; |
| 108 VP8LInitBitReader(&br, data, data_size); |
| 109 if (!ReadImageInfo(&br, &w, &h, &a)) { |
| 110 return 0; |
| 111 } |
| 112 if (width != NULL) *width = w; |
| 113 if (height != NULL) *height = h; |
| 114 if (has_alpha != NULL) *has_alpha = a; |
| 115 return 1; |
| 116 } |
| 117 } |
| 118 |
| 119 //------------------------------------------------------------------------------ |
| 120 |
| 121 static WEBP_INLINE int GetCopyDistance(int distance_symbol, |
| 122 VP8LBitReader* const br) { |
| 123 int extra_bits, offset; |
| 124 if (distance_symbol < 4) { |
| 125 return distance_symbol + 1; |
| 126 } |
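|      // Symbols >= 4 use a prefix code with 'extra_bits' trailing bits. |
|      // e.g. distance_symbol == 6: extra_bits = (6 - 2) >> 1 = 2 and |
|      // offset = (2 + (6 & 1)) << 2 = 8, so the result lies in |
|      // 8 + [0..3] + 1 = [9..12]. |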
| 127 extra_bits = (distance_symbol - 2) >> 1; |
| 128 offset = (2 + (distance_symbol & 1)) << extra_bits; |
| 129 return offset + VP8LReadBits(br, extra_bits) + 1; |
| 130 } |
| 131 |
| 132 static WEBP_INLINE int GetCopyLength(int length_symbol, |
| 133 VP8LBitReader* const br) { |
| 134 // Length and distance prefixes are encoded the same way. |
| 135 return GetCopyDistance(length_symbol, br); |
| 136 } |
| 137 |
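|      // Plane codes 1..CODE_TO_PLANE_CODES address a small 2D neighborhood |
|      // around the current pixel: the high nibble of kCodeToPlane[] is the |
|      // row offset and (8 - low nibble) the column offset. e.g. plane_code 1 |
|      // maps to 0x18, i.e. (dy = 1, dx = 0): the pixel right above, at |
|      // distance xsize. Larger plane codes are plain 1-D distances |
|      // (plane_code - CODE_TO_PLANE_CODES). |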
| 138 static WEBP_INLINE int PlaneCodeToDistance(int xsize, int plane_code) { |
| 139 if (plane_code > CODE_TO_PLANE_CODES) { |
| 140 return plane_code - CODE_TO_PLANE_CODES; |
| 141 } else { |
| 142 const int dist_code = kCodeToPlane[plane_code - 1]; |
| 143 const int yoffset = dist_code >> 4; |
| 144 const int xoffset = 8 - (dist_code & 0xf); |
| 145 const int dist = yoffset * xsize + xoffset; |
| 146 return (dist >= 1) ? dist : 1; // dist<1 can happen if xsize is very small |
| 147 } |
| 148 } |
| 149 |
| 150 //------------------------------------------------------------------------------ |
| 151 // Decodes the next Huffman code from the bit-stream. |
| 152 // FillBitWindow(br) needs to be called at least every second call |
| 153 // to ReadSymbol, in order to pre-fetch enough bits. |
| 154 static WEBP_INLINE int ReadSymbol(const HuffmanTree* tree, |
| 155 VP8LBitReader* const br) { |
| 156 const HuffmanTreeNode* node = tree->root_; |
| 157 uint32_t bits = VP8LPrefetchBits(br); |
| 158 int bitpos = br->bit_pos_; |
| 159 // Check if we find the bit combination from the Huffman lookup table. |
| 160 const int lut_ix = bits & (HUFF_LUT - 1); |
| 161 const int lut_bits = tree->lut_bits_[lut_ix]; |
| 162 if (lut_bits <= HUFF_LUT_BITS) { |
| 163 VP8LSetBitPos(br, bitpos + lut_bits); |
| 164 return tree->lut_symbol_[lut_ix]; |
| 165 } |
| 166 node += tree->lut_jump_[lut_ix]; |
| 167 bitpos += HUFF_LUT_BITS; |
| 168 bits >>= HUFF_LUT_BITS; |
| 169 |
| 170 // Decode the value from a binary tree. |
| 171 assert(node != NULL); |
| 172 do { |
| 173 node = HuffmanTreeNextNode(node, bits & 1); |
| 174 bits >>= 1; |
| 175 ++bitpos; |
| 176 } while (HuffmanTreeNodeIsNotLeaf(node)); |
| 177 VP8LSetBitPos(br, bitpos); |
| 178 return node->symbol_; |
| 179 } |
| 180 |
| 181 static int ReadHuffmanCodeLengths( |
| 182 VP8LDecoder* const dec, const int* const code_length_code_lengths, |
| 183 int num_symbols, int* const code_lengths) { |
| 184 int ok = 0; |
| 185 VP8LBitReader* const br = &dec->br_; |
| 186 int symbol; |
| 187 int max_symbol; |
| 188 int prev_code_len = DEFAULT_CODE_LENGTH; |
| 189 HuffmanTree tree; |
| 190 int huff_codes[NUM_CODE_LENGTH_CODES] = { 0 }; |
| 191 |
| 192 if (!VP8LHuffmanTreeBuildImplicit(&tree, code_length_code_lengths, |
| 193 huff_codes, NUM_CODE_LENGTH_CODES)) { |
| 194 dec->status_ = VP8_STATUS_BITSTREAM_ERROR; |
| 195 return 0; |
| 196 } |
| 197 |
| 198 if (VP8LReadBits(br, 1)) { // use length |
| 199 const int length_nbits = 2 + 2 * VP8LReadBits(br, 3); |
| 200 max_symbol = 2 + VP8LReadBits(br, length_nbits); |
| 201 if (max_symbol > num_symbols) { |
| 202 dec->status_ = VP8_STATUS_BITSTREAM_ERROR; |
| 203 goto End; |
| 204 } |
| 205 } else { |
| 206 max_symbol = num_symbols; |
| 207 } |
| 208 |
| 209 symbol = 0; |
| 210 while (symbol < num_symbols) { |
| 211 int code_len; |
| 212 if (max_symbol-- == 0) break; |
| 213 VP8LFillBitWindow(br); |
| 214 code_len = ReadSymbol(&tree, br); |
| 215 if (code_len < kCodeLengthLiterals) { |
| 216 code_lengths[symbol++] = code_len; |
| 217 if (code_len != 0) prev_code_len = code_len; |
| 218 } else { |
| 219 const int use_prev = (code_len == kCodeLengthRepeatCode); |
| 220 const int slot = code_len - kCodeLengthLiterals; |
| 221 const int extra_bits = kCodeLengthExtraBits[slot]; |
| 222 const int repeat_offset = kCodeLengthRepeatOffsets[slot]; |
| 223 int repeat = VP8LReadBits(br, extra_bits) + repeat_offset; |
| 224 if (symbol + repeat > num_symbols) { |
| 225 dec->status_ = VP8_STATUS_BITSTREAM_ERROR; |
| 226 goto End; |
| 227 } else { |
| 228 const int length = use_prev ? prev_code_len : 0; |
| 229 while (repeat-- > 0) code_lengths[symbol++] = length; |
| 230 } |
| 231 } |
| 232 } |
| 233 ok = 1; |
| 234 |
| 235 End: |
| 236 VP8LHuffmanTreeFree(&tree); |
| 237 if (!ok) dec->status_ = VP8_STATUS_BITSTREAM_ERROR; |
| 238 return ok; |
| 239 } |
| 240 |
| 241 // 'code_lengths' is a pre-allocated temporary buffer, used for creating the |
| 242 // Huffman tree. |
| 243 static int ReadHuffmanCode(int alphabet_size, VP8LDecoder* const dec, |
| 244 int* const code_lengths, int* const huff_codes, |
| 245 HuffmanTree* const tree) { |
| 246 int ok = 0; |
| 247 VP8LBitReader* const br = &dec->br_; |
| 248 const int simple_code = VP8LReadBits(br, 1); |
| 249 |
| 250 if (simple_code) { // Read symbols, codes & code lengths directly. |
| 251 int symbols[2]; |
| 252 int codes[2]; |
| 253 const int num_symbols = VP8LReadBits(br, 1) + 1; |
| 254 const int first_symbol_len_code = VP8LReadBits(br, 1); |
| 255 // The first code is either a 1-bit or an 8-bit code. |
| 256 symbols[0] = VP8LReadBits(br, (first_symbol_len_code == 0) ? 1 : 8); |
| 257 codes[0] = 0; |
| 258 code_lengths[0] = num_symbols - 1; |
| 259 // The second code (if present) is always 8 bits long. |
| 260 if (num_symbols == 2) { |
| 261 symbols[1] = VP8LReadBits(br, 8); |
| 262 codes[1] = 1; |
| 263 code_lengths[1] = num_symbols - 1; |
| 264 } |
| 265 ok = VP8LHuffmanTreeBuildExplicit(tree, code_lengths, codes, symbols, |
| 266 alphabet_size, num_symbols); |
| 267 } else { // Decode Huffman-coded code lengths. |
| 268 int i; |
| 269 int code_length_code_lengths[NUM_CODE_LENGTH_CODES] = { 0 }; |
| 270 const int num_codes = VP8LReadBits(br, 4) + 4; |
| 271 if (num_codes > NUM_CODE_LENGTH_CODES) { |
| 272 dec->status_ = VP8_STATUS_BITSTREAM_ERROR; |
| 273 return 0; |
| 274 } |
| 275 |
| 276 memset(code_lengths, 0, alphabet_size * sizeof(*code_lengths)); |
| 277 |
| 278 for (i = 0; i < num_codes; ++i) { |
| 279 code_length_code_lengths[kCodeLengthCodeOrder[i]] = VP8LReadBits(br, 3); |
| 280 } |
| 281 ok = ReadHuffmanCodeLengths(dec, code_length_code_lengths, alphabet_size, |
| 282 code_lengths); |
| 283 ok = ok && VP8LHuffmanTreeBuildImplicit(tree, code_lengths, huff_codes, |
| 284 alphabet_size); |
| 285 } |
| 286 ok = ok && !br->error_; |
| 287 if (!ok) { |
| 288 dec->status_ = VP8_STATUS_BITSTREAM_ERROR; |
| 289 return 0; |
| 290 } |
| 291 return 1; |
| 292 } |
| 293 |
| 294 static int ReadHuffmanCodes(VP8LDecoder* const dec, int xsize, int ysize, |
| 295 int color_cache_bits, int allow_recursion) { |
| 296 int i, j; |
| 297 VP8LBitReader* const br = &dec->br_; |
| 298 VP8LMetadata* const hdr = &dec->hdr_; |
| 299 uint32_t* huffman_image = NULL; |
| 300 HTreeGroup* htree_groups = NULL; |
| 301 int num_htree_groups = 1; |
| 302 int max_alphabet_size = 0; |
| 303 int* code_lengths = NULL; |
| 304 int* huff_codes = NULL; |
| 305 |
| 306 if (allow_recursion && VP8LReadBits(br, 1)) { |
| 307 // use meta Huffman codes. |
| 308 const int huffman_precision = VP8LReadBits(br, 3) + 2; |
| 309 const int huffman_xsize = VP8LSubSampleSize(xsize, huffman_precision); |
| 310 const int huffman_ysize = VP8LSubSampleSize(ysize, huffman_precision); |
| 311 const int huffman_pixs = huffman_xsize * huffman_ysize; |
| 312 if (!DecodeImageStream(huffman_xsize, huffman_ysize, 0, dec, |
| 313 &huffman_image)) { |
| 314 dec->status_ = VP8_STATUS_BITSTREAM_ERROR; |
| 315 goto Error; |
| 316 } |
| 317 hdr->huffman_subsample_bits_ = huffman_precision; |
| 318 for (i = 0; i < huffman_pixs; ++i) { |
| 319 // The Huffman group index is stored in the red and green bytes. |
| 320 const int group = (huffman_image[i] >> 8) & 0xffff; |
| 321 huffman_image[i] = group; |
| 322 if (group >= num_htree_groups) { |
| 323 num_htree_groups = group + 1; |
| 324 } |
| 325 } |
| 326 } |
| 327 |
| 328 if (br->error_) goto Error; |
| 329 |
| 330 // Find maximum alphabet size for the htree group. |
| 331 for (j = 0; j < HUFFMAN_CODES_PER_META_CODE; ++j) { |
| 332 int alphabet_size = kAlphabetSize[j]; |
| 333 if (j == 0 && color_cache_bits > 0) { |
| 334 alphabet_size += 1 << color_cache_bits; |
| 335 } |
| 336 if (max_alphabet_size < alphabet_size) { |
| 337 max_alphabet_size = alphabet_size; |
| 338 } |
| 339 } |
| 340 |
| 341 htree_groups = VP8LHtreeGroupsNew(num_htree_groups); |
| 342 code_lengths = |
| 343 (int*)WebPSafeCalloc((uint64_t)max_alphabet_size, sizeof(*code_lengths)); |
| 344 huff_codes = |
| 345 (int*)WebPSafeMalloc((uint64_t)max_alphabet_size, sizeof(*huff_codes)); |
| 346 |
| 347 if (htree_groups == NULL || code_lengths == NULL || huff_codes == NULL) { |
| 348 dec->status_ = VP8_STATUS_OUT_OF_MEMORY; |
| 349 goto Error; |
| 350 } |
| 351 |
| 352 for (i = 0; i < num_htree_groups; ++i) { |
| 353 HuffmanTree* const htrees = htree_groups[i].htrees_; |
| 354 for (j = 0; j < HUFFMAN_CODES_PER_META_CODE; ++j) { |
| 355 int alphabet_size = kAlphabetSize[j]; |
| 356 HuffmanTree* const htree = htrees + j; |
| 357 if (j == 0 && color_cache_bits > 0) { |
| 358 alphabet_size += 1 << color_cache_bits; |
| 359 } |
| 360 if (!ReadHuffmanCode(alphabet_size, dec, code_lengths, huff_codes, |
| 361 htree)) { |
| 362 goto Error; |
| 363 } |
| 364 } |
| 365 } |
| 366 WebPSafeFree(huff_codes); |
| 367 WebPSafeFree(code_lengths); |
| 368 |
| 369 // All OK. Finalize pointers and return. |
| 370 hdr->huffman_image_ = huffman_image; |
| 371 hdr->num_htree_groups_ = num_htree_groups; |
| 372 hdr->htree_groups_ = htree_groups; |
| 373 return 1; |
| 374 |
| 375 Error: |
| 376 WebPSafeFree(huff_codes); |
| 377 WebPSafeFree(code_lengths); |
| 378 WebPSafeFree(huffman_image); |
| 379 VP8LHtreeGroupsFree(htree_groups, num_htree_groups); |
| 380 return 0; |
| 381 } |
| 382 |
| 383 //------------------------------------------------------------------------------ |
| 384 // Scaling. |
| 385 |
| 386 static int AllocateAndInitRescaler(VP8LDecoder* const dec, VP8Io* const io) { |
| 387 const int num_channels = 4; |
| 388 const int in_width = io->mb_w; |
| 389 const int out_width = io->scaled_width; |
| 390 const int in_height = io->mb_h; |
| 391 const int out_height = io->scaled_height; |
| 392 const uint64_t work_size = 2 * num_channels * (uint64_t)out_width; |
| 393 int32_t* work; // Rescaler work area. |
| 394 const uint64_t scaled_data_size = num_channels * (uint64_t)out_width; |
| 395 uint32_t* scaled_data; // Temporary storage for scaled BGRA data. |
| 396 const uint64_t memory_size = sizeof(*dec->rescaler) + |
| 397 work_size * sizeof(*work) + |
| 398 scaled_data_size * sizeof(*scaled_data); |
| 399 uint8_t* memory = (uint8_t*)WebPSafeCalloc(memory_size, sizeof(*memory)); |
| 400 if (memory == NULL) { |
| 401 dec->status_ = VP8_STATUS_OUT_OF_MEMORY; |
| 402 return 0; |
| 403 } |
| 404 assert(dec->rescaler_memory == NULL); |
| 405 dec->rescaler_memory = memory; |
| 406 |
| 407 dec->rescaler = (WebPRescaler*)memory; |
| 408 memory += sizeof(*dec->rescaler); |
| 409 work = (int32_t*)memory; |
| 410 memory += work_size * sizeof(*work); |
| 411 scaled_data = (uint32_t*)memory; |
| 412 |
| 413 WebPRescalerInit(dec->rescaler, in_width, in_height, (uint8_t*)scaled_data, |
| 414 out_width, out_height, 0, num_channels, |
| 415 in_width, out_width, in_height, out_height, work); |
| 416 return 1; |
| 417 } |
| 418 |
| 419 //------------------------------------------------------------------------------ |
| 420 // Export to ARGB |
| 421 |
| 422 // We have a special "export" function, since we need to convert from BGRA |
| 423 static int Export(WebPRescaler* const rescaler, WEBP_CSP_MODE colorspace, |
| 424 int rgba_stride, uint8_t* const rgba) { |
| 425 uint32_t* const src = (uint32_t*)rescaler->dst; |
| 426 const int dst_width = rescaler->dst_width; |
| 427 int num_lines_out = 0; |
| 428 while (WebPRescalerHasPendingOutput(rescaler)) { |
| 429 uint8_t* const dst = rgba + num_lines_out * rgba_stride; |
| 430 WebPRescalerExportRow(rescaler, 0); |
| 431 WebPMultARGBRow(src, dst_width, 1); |
| 432 VP8LConvertFromBGRA(src, dst_width, colorspace, dst); |
| 433 ++num_lines_out; |
| 434 } |
| 435 return num_lines_out; |
| 436 } |
| 437 |
| 438 // Emit scaled rows. |
| 439 static int EmitRescaledRowsRGBA(const VP8LDecoder* const dec, |
| 440 uint8_t* in, int in_stride, int mb_h, |
| 441 uint8_t* const out, int out_stride) { |
| 442 const WEBP_CSP_MODE colorspace = dec->output_->colorspace; |
| 443 int num_lines_in = 0; |
| 444 int num_lines_out = 0; |
| 445 while (num_lines_in < mb_h) { |
| 446 uint8_t* const row_in = in + num_lines_in * in_stride; |
| 447 uint8_t* const row_out = out + num_lines_out * out_stride; |
| 448 const int lines_left = mb_h - num_lines_in; |
| 449 const int needed_lines = WebPRescaleNeededLines(dec->rescaler, lines_left); |
| 450 assert(needed_lines > 0 && needed_lines <= lines_left); |
| 451 WebPMultARGBRows(row_in, in_stride, |
| 452 dec->rescaler->src_width, needed_lines, 0); |
| 453 WebPRescalerImport(dec->rescaler, lines_left, row_in, in_stride); |
| 454 num_lines_in += needed_lines; |
| 455 num_lines_out += Export(dec->rescaler, colorspace, out_stride, row_out); |
| 456 } |
| 457 return num_lines_out; |
| 458 } |
| 459 |
| 460 // Emit rows without any scaling. |
| 461 static int EmitRows(WEBP_CSP_MODE colorspace, |
| 462 const uint8_t* row_in, int in_stride, |
| 463 int mb_w, int mb_h, |
| 464 uint8_t* const out, int out_stride) { |
| 465 int lines = mb_h; |
| 466 uint8_t* row_out = out; |
| 467 while (lines-- > 0) { |
| 468 VP8LConvertFromBGRA((const uint32_t*)row_in, mb_w, colorspace, row_out); |
| 469 row_in += in_stride; |
| 470 row_out += out_stride; |
| 471 } |
| 472 return mb_h; // Num rows out == num rows in. |
| 473 } |
| 474 |
| 475 //------------------------------------------------------------------------------ |
| 476 // Export to YUVA |
| 477 |
| 478 // TODO(skal): should be in yuv.c |
| 479 static void ConvertToYUVA(const uint32_t* const src, int width, int y_pos, |
| 480 const WebPDecBuffer* const output) { |
| 481 const WebPYUVABuffer* const buf = &output->u.YUVA; |
| 482 // first, the luma plane |
| 483 { |
| 484 int i; |
| 485 uint8_t* const y = buf->y + y_pos * buf->y_stride; |
| 486 for (i = 0; i < width; ++i) { |
| 487 const uint32_t p = src[i]; |
| 488 y[i] = VP8RGBToY((p >> 16) & 0xff, (p >> 8) & 0xff, (p >> 0) & 0xff, |
| 489 YUV_HALF); |
| 490 } |
| 491 } |
| 492 |
| 493 // then U/V planes |
| 494 { |
| 495 uint8_t* const u = buf->u + (y_pos >> 1) * buf->u_stride; |
| 496 uint8_t* const v = buf->v + (y_pos >> 1) * buf->v_stride; |
| 497 const int uv_width = width >> 1; |
| 498 int i; |
| 499 for (i = 0; i < uv_width; ++i) { |
| 500 const uint32_t v0 = src[2 * i + 0]; |
| 501 const uint32_t v1 = src[2 * i + 1]; |
| 502 // VP8RGBToU/V expects four accumulated pixels. Hence we need to |
| 503 // scale the r/g/b values by a factor of 2. We just shift v0/v1 one bit less. |
| 504 const int r = ((v0 >> 15) & 0x1fe) + ((v1 >> 15) & 0x1fe); |
| 505 const int g = ((v0 >> 7) & 0x1fe) + ((v1 >> 7) & 0x1fe); |
| 506 const int b = ((v0 << 1) & 0x1fe) + ((v1 << 1) & 0x1fe); |
| 507 if (!(y_pos & 1)) { // even lines: store values |
| 508 u[i] = VP8RGBToU(r, g, b, YUV_HALF << 2); |
| 509 v[i] = VP8RGBToV(r, g, b, YUV_HALF << 2); |
| 510 } else { // odd lines: average with previous values |
| 511 const int tmp_u = VP8RGBToU(r, g, b, YUV_HALF << 2); |
| 512 const int tmp_v = VP8RGBToV(r, g, b, YUV_HALF << 2); |
| 513 // Approximate average-of-four, but the difference is acceptable. |
| 514 u[i] = (u[i] + tmp_u + 1) >> 1; |
| 515 v[i] = (v[i] + tmp_v + 1) >> 1; |
| 516 } |
| 517 } |
| 518 if (width & 1) { // last pixel |
| 519 const uint32_t v0 = src[2 * i + 0]; |
| 520 const int r = (v0 >> 14) & 0x3fc; |
| 521 const int g = (v0 >> 6) & 0x3fc; |
| 522 const int b = (v0 << 2) & 0x3fc; |
| 523 if (!(y_pos & 1)) { // even lines |
| 524 u[i] = VP8RGBToU(r, g, b, YUV_HALF << 2); |
| 525 v[i] = VP8RGBToV(r, g, b, YUV_HALF << 2); |
| 526 } else { // odd lines (note: we could just skip this) |
| 527 const int tmp_u = VP8RGBToU(r, g, b, YUV_HALF << 2); |
| 528 const int tmp_v = VP8RGBToV(r, g, b, YUV_HALF << 2); |
| 529 u[i] = (u[i] + tmp_u + 1) >> 1; |
| 530 v[i] = (v[i] + tmp_v + 1) >> 1; |
| 531 } |
| 532 } |
| 533 } |
| 534 // Lastly, store alpha if needed. |
| 535 if (buf->a != NULL) { |
| 536 int i; |
| 537 uint8_t* const a = buf->a + y_pos * buf->a_stride; |
| 538 for (i = 0; i < width; ++i) a[i] = (src[i] >> 24); |
| 539 } |
| 540 } |
| 541 |
| 542 static int ExportYUVA(const VP8LDecoder* const dec, int y_pos) { |
| 543 WebPRescaler* const rescaler = dec->rescaler; |
| 544 uint32_t* const src = (uint32_t*)rescaler->dst; |
| 545 const int dst_width = rescaler->dst_width; |
| 546 int num_lines_out = 0; |
| 547 while (WebPRescalerHasPendingOutput(rescaler)) { |
| 548 WebPRescalerExportRow(rescaler, 0); |
| 549 WebPMultARGBRow(src, dst_width, 1); |
| 550 ConvertToYUVA(src, dst_width, y_pos, dec->output_); |
| 551 ++y_pos; |
| 552 ++num_lines_out; |
| 553 } |
| 554 return num_lines_out; |
| 555 } |
| 556 |
| 557 static int EmitRescaledRowsYUVA(const VP8LDecoder* const dec, |
| 558 uint8_t* in, int in_stride, int mb_h) { |
| 559 int num_lines_in = 0; |
| 560 int y_pos = dec->last_out_row_; |
| 561 while (num_lines_in < mb_h) { |
| 562 const int lines_left = mb_h - num_lines_in; |
| 563 const int needed_lines = WebPRescaleNeededLines(dec->rescaler, lines_left); |
| 564 WebPMultARGBRows(in, in_stride, dec->rescaler->src_width, needed_lines, 0); |
| 565 WebPRescalerImport(dec->rescaler, lines_left, in, in_stride); |
| 566 num_lines_in += needed_lines; |
| 567 in += needed_lines * in_stride; |
| 568 y_pos += ExportYUVA(dec, y_pos); |
| 569 } |
| 570 return y_pos; |
| 571 } |
| 572 |
| 573 static int EmitRowsYUVA(const VP8LDecoder* const dec, |
| 574 const uint8_t* in, int in_stride, |
| 575 int mb_w, int num_rows) { |
| 576 int y_pos = dec->last_out_row_; |
| 577 while (num_rows-- > 0) { |
| 578 ConvertToYUVA((const uint32_t*)in, mb_w, y_pos, dec->output_); |
| 579 in += in_stride; |
| 580 ++y_pos; |
| 581 } |
| 582 return y_pos; |
| 583 } |
| 584 |
| 585 //------------------------------------------------------------------------------ |
| 586 // Cropping. |
| 587 |
| 588 // Sets io->mb_y, io->mb_h & io->mb_w according to start row, end row and |
| 589 // crop options. Also updates the input data pointer, so that it points to the |
| 590 // start of the cropped window. Note that pixels are in ARGB format even if |
| 591 // 'in_data' is uint8_t*. |
| 592 // Returns true if the crop window is not empty. |
| 593 static int SetCropWindow(VP8Io* const io, int y_start, int y_end, |
| 594 uint8_t** const in_data, int pixel_stride) { |
| 595 assert(y_start < y_end); |
| 596 assert(io->crop_left < io->crop_right); |
| 597 if (y_end > io->crop_bottom) { |
| 598 y_end = io->crop_bottom; // make sure we don't overflow on last row. |
| 599 } |
| 600 if (y_start < io->crop_top) { |
| 601 const int delta = io->crop_top - y_start; |
| 602 y_start = io->crop_top; |
| 603 *in_data += delta * pixel_stride; |
| 604 } |
| 605 if (y_start >= y_end) return 0; // Crop window is empty. |
| 606 |
| 607 *in_data += io->crop_left * sizeof(uint32_t); |
| 608 |
| 609 io->mb_y = y_start - io->crop_top; |
| 610 io->mb_w = io->crop_right - io->crop_left; |
| 611 io->mb_h = y_end - y_start; |
| 612 return 1; // Non-empty crop window. |
| 613 } |
| 614 |
| 615 //------------------------------------------------------------------------------ |
| 616 |
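|      // When a meta Huffman image is present, the image is split into tiles |
|      // of (1 << huffman_subsample_bits_) pixels on each side, and |
|      // huffman_image_ stores one htree-group index per tile. Without a |
|      // meta image (bits == 0), every pixel uses group 0. |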
| 617 static WEBP_INLINE int GetMetaIndex( |
| 618 const uint32_t* const image, int xsize, int bits, int x, int y) { |
| 619 if (bits == 0) return 0; |
| 620 return image[xsize * (y >> bits) + (x >> bits)]; |
| 621 } |
| 622 |
| 623 static WEBP_INLINE HTreeGroup* GetHtreeGroupForPos(VP8LMetadata* const hdr, |
| 624 int x, int y) { |
| 625 const int meta_index = GetMetaIndex(hdr->huffman_image_, hdr->huffman_xsize_, |
| 626 hdr->huffman_subsample_bits_, x, y); |
| 627 assert(meta_index < hdr->num_htree_groups_); |
| 628 return hdr->htree_groups_ + meta_index; |
| 629 } |
| 630 |
| 631 //------------------------------------------------------------------------------ |
| 632 // Main loop, with custom row-processing function |
| 633 |
| 634 typedef void (*ProcessRowsFunc)(VP8LDecoder* const dec, int row); |
| 635 |
| 636 static void ApplyInverseTransforms(VP8LDecoder* const dec, int num_rows, |
| 637 const uint32_t* const rows) { |
| 638 int n = dec->next_transform_; |
| 639 const int cache_pixs = dec->width_ * num_rows; |
| 640 const int start_row = dec->last_row_; |
| 641 const int end_row = start_row + num_rows; |
| 642 const uint32_t* rows_in = rows; |
| 643 uint32_t* const rows_out = dec->argb_cache_; |
| 644 |
| 645 // Inverse transforms. |
| 646 // TODO: most transforms only need to operate on the cropped region. |
| 647 memcpy(rows_out, rows_in, cache_pixs * sizeof(*rows_out)); |
| 648 while (n-- > 0) { |
| 649 VP8LTransform* const transform = &dec->transforms_[n]; |
| 650 VP8LInverseTransform(transform, start_row, end_row, rows_in, rows_out); |
| 651 rows_in = rows_out; |
| 652 } |
| 653 } |
| 654 |
| 655 // Special method for paletted alpha data. |
| 656 static void ApplyInverseTransformsAlpha(VP8LDecoder* const dec, int num_rows, |
| 657 const uint8_t* const rows) { |
| 658 const int start_row = dec->last_row_; |
| 659 const int end_row = start_row + num_rows; |
| 660 const uint8_t* rows_in = rows; |
| 661 uint8_t* rows_out = (uint8_t*)dec->io_->opaque + dec->io_->width * start_row; |
| 662 VP8LTransform* const transform = &dec->transforms_[0]; |
| 663 assert(dec->next_transform_ == 1); |
| 664 assert(transform->type_ == COLOR_INDEXING_TRANSFORM); |
| 665 VP8LColorIndexInverseTransformAlpha(transform, start_row, end_row, rows_in, |
| 666 rows_out); |
| 667 } |
| 668 |
| 669 // Processes (transforms, scales & color-converts) the rows decoded after the |
| 670 // last call. |
| 671 static void ProcessRows(VP8LDecoder* const dec, int row) { |
| 672 const uint32_t* const rows = dec->pixels_ + dec->width_ * dec->last_row_; |
| 673 const int num_rows = row - dec->last_row_; |
| 674 |
| 675 if (num_rows <= 0) return; // Nothing to be done. |
| 676 ApplyInverseTransforms(dec, num_rows, rows); |
| 677 |
| 678 // Emit output. |
| 679 { |
| 680 VP8Io* const io = dec->io_; |
| 681 uint8_t* rows_data = (uint8_t*)dec->argb_cache_; |
| 682 const int in_stride = io->width * sizeof(uint32_t); // in unit of RGBA |
| 683 if (!SetCropWindow(io, dec->last_row_, row, &rows_data, in_stride)) { |
| 684 // Nothing to output (this time). |
| 685 } else { |
| 686 const WebPDecBuffer* const output = dec->output_; |
| 687 if (output->colorspace < MODE_YUV) { // convert to RGBA |
| 688 const WebPRGBABuffer* const buf = &output->u.RGBA; |
| 689 uint8_t* const rgba = buf->rgba + dec->last_out_row_ * buf->stride; |
| 690 const int num_rows_out = io->use_scaling ? |
| 691 EmitRescaledRowsRGBA(dec, rows_data, in_stride, io->mb_h, |
| 692 rgba, buf->stride) : |
| 693 EmitRows(output->colorspace, rows_data, in_stride, |
| 694 io->mb_w, io->mb_h, rgba, buf->stride); |
| 695 // Update 'last_out_row_'. |
| 696 dec->last_out_row_ += num_rows_out; |
| 697 } else { // convert to YUVA |
| 698 dec->last_out_row_ = io->use_scaling ? |
| 699 EmitRescaledRowsYUVA(dec, rows_data, in_stride, io->mb_h) : |
| 700 EmitRowsYUVA(dec, rows_data, in_stride, io->mb_w, io->mb_h); |
| 701 } |
| 702 assert(dec->last_out_row_ <= output->height); |
| 703 } |
| 704 } |
| 705 |
| 706 // Update 'last_row_'. |
| 707 dec->last_row_ = row; |
| 708 assert(dec->last_row_ <= dec->height_); |
| 709 } |
| 710 |
| 711 // Row-processing for the special case when alpha data contains only one |
| 712 // transform (color indexing), and trivial non-green literals. |
| 713 static int Is8bOptimizable(const VP8LMetadata* const hdr) { |
| 714 int i; |
| 715 if (hdr->color_cache_size_ > 0) return 0; |
| 716 // When the Huffman tree contains only one symbol, we can skip the |
| 717 // call to ReadSymbol() for red/blue/alpha channels. |
| 718 for (i = 0; i < hdr->num_htree_groups_; ++i) { |
| 719 const HuffmanTree* const htrees = hdr->htree_groups_[i].htrees_; |
| 720 if (htrees[RED].num_nodes_ > 1) return 0; |
| 721 if (htrees[BLUE].num_nodes_ > 1) return 0; |
| 722 if (htrees[ALPHA].num_nodes_ > 1) return 0; |
| 723 } |
| 724 return 1; |
| 725 } |
| 726 |
| 727 static void ExtractPalettedAlphaRows(VP8LDecoder* const dec, int row) { |
| 728 const int num_rows = row - dec->last_row_; |
| 729 const uint8_t* const in = |
| 730 (uint8_t*)dec->pixels_ + dec->width_ * dec->last_row_; |
| 731 if (num_rows > 0) { |
| 732 ApplyInverseTransformsAlpha(dec, num_rows, in); |
| 733 } |
| 734 dec->last_row_ = dec->last_out_row_ = row; |
| 735 } |
| 736 |
| 737 static int DecodeAlphaData(VP8LDecoder* const dec, uint8_t* const data, |
| 738 int width, int height, int last_row) { |
| 739 int ok = 1; |
| 740 int row = dec->last_pixel_ / width; |
| 741 int col = dec->last_pixel_ % width; |
| 742 VP8LBitReader* const br = &dec->br_; |
| 743 VP8LMetadata* const hdr = &dec->hdr_; |
| 744 const HTreeGroup* htree_group = GetHtreeGroupForPos(hdr, col, row); |
| 745 int pos = dec->last_pixel_; // current position |
| 746 const int end = width * height; // End of data |
| 747 const int last = width * last_row; // Last pixel to decode |
| 748 const int len_code_limit = NUM_LITERAL_CODES + NUM_LENGTH_CODES; |
| 749 const int mask = hdr->huffman_mask_; |
| 750 assert(htree_group != NULL); |
| 751 assert(pos < end); |
| 752 assert(last_row <= height); |
| 753 assert(Is8bOptimizable(hdr)); |
| 754 |
| 755 while (!br->eos_ && pos < last) { |
| 756 int code; |
| 757 // Only update when changing tile. |
| 758 if ((col & mask) == 0) { |
| 759 htree_group = GetHtreeGroupForPos(hdr, col, row); |
| 760 } |
| 761 VP8LFillBitWindow(br); |
| 762 code = ReadSymbol(&htree_group->htrees_[GREEN], br); |
| 763 if (code < NUM_LITERAL_CODES) { // Literal |
| 764 data[pos] = code; |
| 765 ++pos; |
| 766 ++col; |
| 767 if (col >= width) { |
| 768 col = 0; |
| 769 ++row; |
| 770 if (row % NUM_ARGB_CACHE_ROWS == 0) { |
| 771 ExtractPalettedAlphaRows(dec, row); |
| 772 } |
| 773 } |
| 774 } else if (code < len_code_limit) { // Backward reference |
| 775 int dist_code, dist; |
| 776 const int length_sym = code - NUM_LITERAL_CODES; |
| 777 const int length = GetCopyLength(length_sym, br); |
| 778 const int dist_symbol = ReadSymbol(&htree_group->htrees_[DIST], br); |
| 779 VP8LFillBitWindow(br); |
| 780 dist_code = GetCopyDistance(dist_symbol, br); |
| 781 dist = PlaneCodeToDistance(width, dist_code); |
| 782 if (pos >= dist && end - pos >= length) { |
| 783 int i; |
| 784 for (i = 0; i < length; ++i) data[pos + i] = data[pos + i - dist]; |
| 785 } else { |
| 786 ok = 0; |
| 787 goto End; |
| 788 } |
| 789 pos += length; |
| 790 col += length; |
| 791 while (col >= width) { |
| 792 col -= width; |
| 793 ++row; |
| 794 if (row % NUM_ARGB_CACHE_ROWS == 0) { |
| 795 ExtractPalettedAlphaRows(dec, row); |
| 796 } |
| 797 } |
| 798 if (pos < last && (col & mask)) { |
| 799 htree_group = GetHtreeGroupForPos(hdr, col, row); |
| 800 } |
| 801 } else { // Not reached |
| 802 ok = 0; |
| 803 goto End; |
| 804 } |
| 805 assert(br->eos_ == VP8LIsEndOfStream(br)); |
| 806 ok = !br->error_; |
| 807 if (!ok) goto End; |
| 808 } |
| 809 // Process the remaining rows corresponding to last row-block. |
| 810 ExtractPalettedAlphaRows(dec, row); |
| 811 |
| 812 End: |
| 813 if (br->error_ || !ok || (br->eos_ && pos < end)) { |
| 814 ok = 0; |
| 815 dec->status_ = br->eos_ ? VP8_STATUS_SUSPENDED |
| 816 : VP8_STATUS_BITSTREAM_ERROR; |
| 817 } else { |
| 818 dec->last_pixel_ = (int)pos; |
| 819 if (pos == end) dec->state_ = READ_DATA; |
| 820 } |
| 821 return ok; |
| 822 } |
| 823 |
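|      // Note on the color cache: decoded pixels are inserted lazily through |
|      // 'last_cached', i.e. whenever a row ends, right after a backward |
|      // reference is copied, or just before a cache lookup, so every pixel |
|      // already written is in the cache by the time it can be referenced. |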
| 824 static int DecodeImageData(VP8LDecoder* const dec, uint32_t* const data, |
| 825 int width, int height, int last_row, |
| 826 ProcessRowsFunc process_func) { |
| 827 int ok = 1; |
| 828 int row = dec->last_pixel_ / width; |
| 829 int col = dec->last_pixel_ % width; |
| 830 VP8LBitReader* const br = &dec->br_; |
| 831 VP8LMetadata* const hdr = &dec->hdr_; |
| 832 HTreeGroup* htree_group = GetHtreeGroupForPos(hdr, col, row); |
| 833 uint32_t* src = data + dec->last_pixel_; |
| 834 uint32_t* last_cached = src; |
| 835 uint32_t* const src_end = data + width * height; // End of data |
| 836 uint32_t* const src_last = data + width * last_row; // Last pixel to decode |
| 837 const int len_code_limit = NUM_LITERAL_CODES + NUM_LENGTH_CODES; |
| 838 const int color_cache_limit = len_code_limit + hdr->color_cache_size_; |
| 839 VP8LColorCache* const color_cache = |
| 840 (hdr->color_cache_size_ > 0) ? &hdr->color_cache_ : NULL; |
| 841 const int mask = hdr->huffman_mask_; |
| 842 assert(htree_group != NULL); |
| 843 assert(src < src_end); |
| 844 assert(src_last <= src_end); |
| 845 |
| 846 while (!br->eos_ && src < src_last) { |
| 847 int code; |
| 848 // Only update when changing tile. Note we could use this test: |
| 849 // if "((((prev_col ^ col) | prev_row ^ row)) > mask)" -> tile changed |
| 850 // but that's actually slower and needs storing the previous col/row. |
| 851 if ((col & mask) == 0) { |
| 852 htree_group = GetHtreeGroupForPos(hdr, col, row); |
| 853 } |
| 854 VP8LFillBitWindow(br); |
| 855 code = ReadSymbol(&htree_group->htrees_[GREEN], br); |
| 856 if (code < NUM_LITERAL_CODES) { // Literal |
| 857 int red, green, blue, alpha; |
| 858 red = ReadSymbol(&htree_group->htrees_[RED], br); |
| 859 green = code; |
| 860 VP8LFillBitWindow(br); |
| 861 blue = ReadSymbol(&htree_group->htrees_[BLUE], br); |
| 862 alpha = ReadSymbol(&htree_group->htrees_[ALPHA], br); |
| 863 *src = ((uint32_t)alpha << 24) | (red << 16) | (green << 8) | blue; |
| 864 AdvanceByOne: |
| 865 ++src; |
| 866 ++col; |
| 867 if (col >= width) { |
| 868 col = 0; |
| 869 ++row; |
| 870 if ((row % NUM_ARGB_CACHE_ROWS == 0) && (process_func != NULL)) { |
| 871 process_func(dec, row); |
| 872 } |
| 873 if (color_cache != NULL) { |
| 874 while (last_cached < src) { |
| 875 VP8LColorCacheInsert(color_cache, *last_cached++); |
| 876 } |
| 877 } |
| 878 } |
| 879 } else if (code < len_code_limit) { // Backward reference |
| 880 int dist_code, dist; |
| 881 const int length_sym = code - NUM_LITERAL_CODES; |
| 882 const int length = GetCopyLength(length_sym, br); |
| 883 const int dist_symbol = ReadSymbol(&htree_group->htrees_[DIST], br); |
| 884 VP8LFillBitWindow(br); |
| 885 dist_code = GetCopyDistance(dist_symbol, br); |
| 886 dist = PlaneCodeToDistance(width, dist_code); |
| 887 if (src - data < (ptrdiff_t)dist || src_end - src < (ptrdiff_t)length) { |
| 888 ok = 0; |
| 889 goto End; |
| 890 } else { |
| 891 int i; |
| 892 for (i = 0; i < length; ++i) src[i] = src[i - dist]; |
| 893 src += length; |
| 894 } |
| 895 col += length; |
| 896 while (col >= width) { |
| 897 col -= width; |
| 898 ++row; |
| 899 if ((row % NUM_ARGB_CACHE_ROWS == 0) && (process_func != NULL)) { |
| 900 process_func(dec, row); |
| 901 } |
| 902 } |
| 903 if (src < src_end) { |
| 904 if (col & mask) htree_group = GetHtreeGroupForPos(hdr, col, row); |
| 905 if (color_cache != NULL) { |
| 906 while (last_cached < src) { |
| 907 VP8LColorCacheInsert(color_cache, *last_cached++); |
| 908 } |
| 909 } |
| 910 } |
| 911 } else if (code < color_cache_limit) { // Color cache |
| 912 const int key = code - len_code_limit; |
| 913 assert(color_cache != NULL); |
| 914 while (last_cached < src) { |
| 915 VP8LColorCacheInsert(color_cache, *last_cached++); |
| 916 } |
| 917 *src = VP8LColorCacheLookup(color_cache, key); |
| 918 goto AdvanceByOne; |
| 919 } else { // Not reached |
| 920 ok = 0; |
| 921 goto End; |
| 922 } |
| 923 assert(br->eos_ == VP8LIsEndOfStream(br)); |
| 924 ok = !br->error_; |
| 925 if (!ok) goto End; |
| 926 } |
| 927 // Process the remaining rows corresponding to last row-block. |
| 928 if (process_func != NULL) process_func(dec, row); |
| 929 |
| 930 End: |
| 931 if (br->error_ || !ok || (br->eos_ && src < src_end)) { |
| 932 ok = 0; |
| 933 dec->status_ = br->eos_ ? VP8_STATUS_SUSPENDED |
| 934 : VP8_STATUS_BITSTREAM_ERROR; |
| 935 } else { |
| 936 dec->last_pixel_ = (int)(src - data); |
| 937 if (src == src_end) dec->state_ = READ_DATA; |
| 938 } |
| 939 return ok; |
| 940 } |
| 941 |
| 942 // ----------------------------------------------------------------------------- |
| 943 // VP8LTransform |
| 944 |
| 945 static void ClearTransform(VP8LTransform* const transform) { |
| 946 WebPSafeFree(transform->data_); |
| 947 transform->data_ = NULL; |
| 948 } |
| 949 |
| 950 // For security reasons, we need to remap the color map to span |
| 951 // all possible bundled values, and not just num_colors. |
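|      // When bits_ > 0, (1 << bits_) palette indices are bundled into each |
|      // decoded green sample, each index using (8 >> bits_) bits, so the map |
|      // must cover 1 << (8 >> bits_) entries. Map entries are delta-coded in |
|      // the bitstream, hence the byte-wise accumulation below. |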
| 952 static int ExpandColorMap(int num_colors, VP8LTransform* const transform) { |
| 953 int i; |
| 954 const int final_num_colors = 1 << (8 >> transform->bits_); |
| 955 uint32_t* const new_color_map = |
| 956 (uint32_t*)WebPSafeMalloc((uint64_t)final_num_colors, |
| 957 sizeof(*new_color_map)); |
| 958 if (new_color_map == NULL) { |
| 959 return 0; |
| 960 } else { |
| 961 uint8_t* const data = (uint8_t*)transform->data_; |
| 962 uint8_t* const new_data = (uint8_t*)new_color_map; |
| 963 new_color_map[0] = transform->data_[0]; |
| 964 for (i = 4; i < 4 * num_colors; ++i) { |
| 965 // Equivalent to AddPixelEq(), on a byte-basis. |
| 966 new_data[i] = (data[i] + new_data[i - 4]) & 0xff; |
| 967 } |
| 968 for (; i < 4 * final_num_colors; ++i) |
| 969 new_data[i] = 0; // black tail. |
| 970 WebPSafeFree(transform->data_); |
| 971 transform->data_ = new_color_map; |
| 972 } |
| 973 return 1; |
| 974 } |
| 975 |
| 976 static int ReadTransform(int* const xsize, int const* ysize, |
| 977 VP8LDecoder* const dec) { |
| 978 int ok = 1; |
| 979 VP8LBitReader* const br = &dec->br_; |
| 980 VP8LTransform* transform = &dec->transforms_[dec->next_transform_]; |
| 981 const VP8LImageTransformType type = |
| 982 (VP8LImageTransformType)VP8LReadBits(br, 2); |
| 983 |
| 984 // Each transform type can only be present once in the stream. |
| 985 if (dec->transforms_seen_ & (1U << type)) { |
| 986 return 0; // Already there, let's not accept the second same transform. |
| 987 } |
| 988 dec->transforms_seen_ |= (1U << type); |
| 989 |
| 990 transform->type_ = type; |
| 991 transform->xsize_ = *xsize; |
| 992 transform->ysize_ = *ysize; |
| 993 transform->data_ = NULL; |
| 994 ++dec->next_transform_; |
| 995 assert(dec->next_transform_ <= NUM_TRANSFORMS); |
| 996 |
| 997 switch (type) { |
| 998 case PREDICTOR_TRANSFORM: |
| 999 case CROSS_COLOR_TRANSFORM: |
| 1000 transform->bits_ = VP8LReadBits(br, 3) + 2; |
| 1001 ok = DecodeImageStream(VP8LSubSampleSize(transform->xsize_, |
| 1002 transform->bits_), |
| 1003 VP8LSubSampleSize(transform->ysize_, |
| 1004 transform->bits_), |
| 1005 0, dec, &transform->data_); |
| 1006 break; |
| 1007 case COLOR_INDEXING_TRANSFORM: { |
| 1008 const int num_colors = VP8LReadBits(br, 8) + 1; |
| 1009 const int bits = (num_colors > 16) ? 0 |
| 1010 : (num_colors > 4) ? 1 |
| 1011 : (num_colors > 2) ? 2 |
| 1012 : 3; |
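|        // Small palettes let several pixel indices be packed into one green |
|        // sample, so the coded image width shrinks accordingly. |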
| 1013 *xsize = VP8LSubSampleSize(transform->xsize_, bits); |
| 1014 transform->bits_ = bits; |
| 1015 ok = DecodeImageStream(num_colors, 1, 0, dec, &transform->data_); |
| 1016 ok = ok && ExpandColorMap(num_colors, transform); |
| 1017 break; |
| 1018 } |
| 1019 case SUBTRACT_GREEN: |
| 1020 break; |
| 1021 default: |
| 1022 assert(0); // can't happen |
| 1023 break; |
| 1024 } |
| 1025 |
| 1026 return ok; |
| 1027 } |
| 1028 |
| 1029 // ----------------------------------------------------------------------------- |
| 1030 // VP8LMetadata |
| 1031 |
| 1032 static void InitMetadata(VP8LMetadata* const hdr) { |
| 1033 assert(hdr); |
| 1034 memset(hdr, 0, sizeof(*hdr)); |
| 1035 } |
| 1036 |
| 1037 static void ClearMetadata(VP8LMetadata* const hdr) { |
| 1038 assert(hdr); |
| 1039 |
| 1040 WebPSafeFree(hdr->huffman_image_); |
| 1041 VP8LHtreeGroupsFree(hdr->htree_groups_, hdr->num_htree_groups_); |
| 1042 VP8LColorCacheClear(&hdr->color_cache_); |
| 1043 InitMetadata(hdr); |
| 1044 } |
| 1045 |
| 1046 // ----------------------------------------------------------------------------- |
| 1047 // VP8LDecoder |
| 1048 |
| 1049 VP8LDecoder* VP8LNew(void) { |
| 1050 VP8LDecoder* const dec = (VP8LDecoder*)WebPSafeCalloc(1ULL, sizeof(*dec)); |
| 1051 if (dec == NULL) return NULL; |
| 1052 dec->status_ = VP8_STATUS_OK; |
| 1053 dec->action_ = READ_DIM; |
| 1054 dec->state_ = READ_DIM; |
| 1055 |
| 1056 VP8LDspInit(); // Init critical function pointers. |
| 1057 |
| 1058 return dec; |
| 1059 } |
| 1060 |
| 1061 void VP8LClear(VP8LDecoder* const dec) { |
| 1062 int i; |
| 1063 if (dec == NULL) return; |
| 1064 ClearMetadata(&dec->hdr_); |
| 1065 |
| 1066 WebPSafeFree(dec->pixels_); |
| 1067 dec->pixels_ = NULL; |
| 1068 for (i = 0; i < dec->next_transform_; ++i) { |
| 1069 ClearTransform(&dec->transforms_[i]); |
| 1070 } |
| 1071 dec->next_transform_ = 0; |
| 1072 dec->transforms_seen_ = 0; |
| 1073 |
| 1074 WebPSafeFree(dec->rescaler_memory); |
| 1075 dec->rescaler_memory = NULL; |
| 1076 |
| 1077 dec->output_ = NULL; // leave no trace behind |
| 1078 } |
| 1079 |
| 1080 void VP8LDelete(VP8LDecoder* const dec) { |
| 1081 if (dec != NULL) { |
| 1082 VP8LClear(dec); |
| 1083 WebPSafeFree(dec); |
| 1084 } |
| 1085 } |
| 1086 |
| 1087 static void UpdateDecoder(VP8LDecoder* const dec, int width, int height) { |
| 1088 VP8LMetadata* const hdr = &dec->hdr_; |
| 1089 const int num_bits = hdr->huffman_subsample_bits_; |
| 1090 dec->width_ = width; |
| 1091 dec->height_ = height; |
| 1092 |
| 1093 hdr->huffman_xsize_ = VP8LSubSampleSize(width, num_bits); |
| 1094 hdr->huffman_mask_ = (num_bits == 0) ? ~0 : (1 << num_bits) - 1; |
| 1095 } |
| 1096 |
| 1097 static int DecodeImageStream(int xsize, int ysize, |
| 1098 int is_level0, |
| 1099 VP8LDecoder* const dec, |
| 1100 uint32_t** const decoded_data) { |
| 1101 int ok = 1; |
| 1102 int transform_xsize = xsize; |
| 1103 int transform_ysize = ysize; |
| 1104 VP8LBitReader* const br = &dec->br_; |
| 1105 VP8LMetadata* const hdr = &dec->hdr_; |
| 1106 uint32_t* data = NULL; |
| 1107 int color_cache_bits = 0; |
| 1108 |
| 1109 // Read the transforms (may recurse). |
| 1110 if (is_level0) { |
| 1111 while (ok && VP8LReadBits(br, 1)) { |
| 1112 ok = ReadTransform(&transform_xsize, &transform_ysize, dec); |
| 1113 } |
| 1114 } |
| 1115 |
| 1116 // Color cache |
| 1117 if (ok && VP8LReadBits(br, 1)) { |
| 1118 color_cache_bits = VP8LReadBits(br, 4); |
| 1119 ok = (color_cache_bits >= 1 && color_cache_bits <= MAX_CACHE_BITS); |
| 1120 if (!ok) { |
| 1121 dec->status_ = VP8_STATUS_BITSTREAM_ERROR; |
| 1122 goto End; |
| 1123 } |
| 1124 } |
| 1125 |
| 1126 // Read the Huffman codes (may recurse). |
| 1127 ok = ok && ReadHuffmanCodes(dec, transform_xsize, transform_ysize, |
| 1128 color_cache_bits, is_level0); |
| 1129 if (!ok) { |
| 1130 dec->status_ = VP8_STATUS_BITSTREAM_ERROR; |
| 1131 goto End; |
| 1132 } |
| 1133 |
| 1134 // Finish setting up the color-cache |
| 1135 if (color_cache_bits > 0) { |
| 1136 hdr->color_cache_size_ = 1 << color_cache_bits; |
| 1137 if (!VP8LColorCacheInit(&hdr->color_cache_, color_cache_bits)) { |
| 1138 dec->status_ = VP8_STATUS_OUT_OF_MEMORY; |
| 1139 ok = 0; |
| 1140 goto End; |
| 1141 } |
| 1142 } else { |
| 1143 hdr->color_cache_size_ = 0; |
| 1144 } |
| 1145 UpdateDecoder(dec, transform_xsize, transform_ysize); |
| 1146 |
| 1147 if (is_level0) { // level 0 complete |
| 1148 dec->state_ = READ_HDR; |
| 1149 goto End; |
| 1150 } |
| 1151 |
| 1152 { |
| 1153 const uint64_t total_size = (uint64_t)transform_xsize * transform_ysize; |
| 1154 data = (uint32_t*)WebPSafeMalloc(total_size, sizeof(*data)); |
| 1155 if (data == NULL) { |
| 1156 dec->status_ = VP8_STATUS_OUT_OF_MEMORY; |
| 1157 ok = 0; |
| 1158 goto End; |
| 1159 } |
| 1160 } |
| 1161 |
| 1162 // Use the Huffman trees to decode the LZ77 encoded data. |
| 1163 ok = DecodeImageData(dec, data, transform_xsize, transform_ysize, |
| 1164 transform_ysize, NULL); |
| 1165 ok = ok && !br->error_; |
| 1166 |
| 1167 End: |
| 1168 |
| 1169 if (!ok) { |
| 1170 WebPSafeFree(data); |
| 1171 ClearMetadata(hdr); |
| 1172 // If running out of data (br.eos_) resulted in BITSTREAM_ERROR, update |
| 1173 // the status to SUSPENDED. |
| 1174 if (dec->status_ == VP8_STATUS_BITSTREAM_ERROR && dec->br_.eos_) { |
| 1175 dec->status_ = VP8_STATUS_SUSPENDED; |
| 1176 } |
| 1177 } else { |
| 1178 if (decoded_data != NULL) { |
| 1179 *decoded_data = data; |
| 1180 } else { |
| 1181 // We allocate image data in this function only for transforms. At level 0 |
| 1182 // (that is: not the transforms), we shouldn't have allocated anything. |
| 1183 assert(data == NULL); |
| 1184 assert(is_level0); |
| 1185 } |
| 1186 dec->last_pixel_ = 0; // Reset for future DECODE_DATA_FUNC() calls. |
| 1187 if (!is_level0) ClearMetadata(hdr); // Clean up temporary data behind. |
| 1188 } |
| 1189 return ok; |
| 1190 } |
| 1191 |
| 1192 //------------------------------------------------------------------------------ |
| 1193 // Allocate internal buffers dec->pixels_ and dec->argb_cache_. |
| 1194 static int AllocateInternalBuffers32b(VP8LDecoder* const dec, int final_width) { |
| 1195 const uint64_t num_pixels = (uint64_t)dec->width_ * dec->height_; |
| 1196 // Scratch buffer corresponding to top-prediction row for transforming the |
| 1197 // first row in the row-blocks. Not needed for paletted alpha. |
| 1198 const uint64_t cache_top_pixels = (uint16_t)final_width; |
| 1199 // Scratch buffer for temporary BGRA storage. Not needed for paletted alpha. |
| 1200 const uint64_t cache_pixels = (uint64_t)final_width * NUM_ARGB_CACHE_ROWS; |
| 1201 const uint64_t total_num_pixels = |
| 1202 num_pixels + cache_top_pixels + cache_pixels; |
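|      // i.e. the decoded ARGB image, then one top-prediction scratch row, |
|      // then the NUM_ARGB_CACHE_ROWS-row transform cache that argb_cache_ |
|      // will point to. |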
| 1203 |
| 1204 assert(dec->width_ <= final_width); |
| 1205 dec->pixels_ = (uint32_t*)WebPSafeMalloc(total_num_pixels, sizeof(uint32_t)); |
| 1206 if (dec->pixels_ == NULL) { |
| 1207 dec->argb_cache_ = NULL; // for sanity check |
| 1208 dec->status_ = VP8_STATUS_OUT_OF_MEMORY; |
| 1209 return 0; |
| 1210 } |
| 1211 dec->argb_cache_ = dec->pixels_ + num_pixels + cache_top_pixels; |
| 1212 return 1; |
| 1213 } |
| 1214 |
| 1215 static int AllocateInternalBuffers8b(VP8LDecoder* const dec) { |
| 1216 const uint64_t total_num_pixels = (uint64_t)dec->width_ * dec->height_; |
| 1217 dec->argb_cache_ = NULL; // for sanity check |
| 1218 dec->pixels_ = (uint32_t*)WebPSafeMalloc(total_num_pixels, sizeof(uint8_t)); |
| 1219 if (dec->pixels_ == NULL) { |
| 1220 dec->status_ = VP8_STATUS_OUT_OF_MEMORY; |
| 1221 return 0; |
| 1222 } |
| 1223 return 1; |
| 1224 } |
| 1225 |
| 1226 //------------------------------------------------------------------------------ |
| 1227 |
| 1228 // Special row-processing that only stores the alpha data. |
| 1229 static void ExtractAlphaRows(VP8LDecoder* const dec, int row) { |
| 1230 const int num_rows = row - dec->last_row_; |
| 1231 const uint32_t* const in = dec->pixels_ + dec->width_ * dec->last_row_; |
| 1232 |
| 1233 if (num_rows <= 0) return; // Nothing to be done. |
| 1234 ApplyInverseTransforms(dec, num_rows, in); |
| 1235 |
| 1236 // Extract alpha (which is stored in the green plane). |
| 1237 { |
| 1238 const int width = dec->io_->width; // the final width (!= dec->width_) |
| 1239 const int cache_pixs = width * num_rows; |
| 1240 uint8_t* const dst = (uint8_t*)dec->io_->opaque + width * dec->last_row_; |
| 1241 const uint32_t* const src = dec->argb_cache_; |
| 1242 int i; |
| 1243 for (i = 0; i < cache_pixs; ++i) dst[i] = (src[i] >> 8) & 0xff; |
| 1244 } |
| 1245 dec->last_row_ = dec->last_out_row_ = row; |
| 1246 } |
| 1247 |
| 1248 int VP8LDecodeAlphaHeader(ALPHDecoder* const alph_dec, |
| 1249 const uint8_t* const data, size_t data_size, |
| 1250 uint8_t* const output) { |
| 1251 int ok = 0; |
| 1252 VP8LDecoder* dec; |
| 1253 VP8Io* io; |
| 1254 assert(alph_dec != NULL); |
| 1255 alph_dec->vp8l_dec_ = VP8LNew(); |
| 1256 if (alph_dec->vp8l_dec_ == NULL) return 0; |
| 1257 dec = alph_dec->vp8l_dec_; |
| 1258 |
| 1259 dec->width_ = alph_dec->width_; |
| 1260 dec->height_ = alph_dec->height_; |
| 1261 dec->io_ = &alph_dec->io_; |
| 1262 io = dec->io_; |
| 1263 |
| 1264 VP8InitIo(io); |
| 1265 WebPInitCustomIo(NULL, io); // Just a sanity Init. io won't be used. |
| 1266 io->opaque = output; |
| 1267 io->width = alph_dec->width_; |
| 1268 io->height = alph_dec->height_; |
| 1269 |
| 1270 dec->status_ = VP8_STATUS_OK; |
| 1271 VP8LInitBitReader(&dec->br_, data, data_size); |
| 1272 |
| 1273 dec->action_ = READ_HDR; |
| 1274 if (!DecodeImageStream(alph_dec->width_, alph_dec->height_, 1, dec, NULL)) { |
| 1275 goto Err; |
| 1276 } |
| 1277 |
| 1278 // Special case: if the alpha data uses only the color indexing transform |
| 1279 // and no color cache (a frequent case), we use the DecodeAlphaData() |
| 1280 // method, which only needs an allocation of 1 byte per pixel (alpha channel). |
| 1281 if (dec->next_transform_ == 1 && |
| 1282 dec->transforms_[0].type_ == COLOR_INDEXING_TRANSFORM && |
| 1283 Is8bOptimizable(&dec->hdr_)) { |
| 1284 alph_dec->use_8b_decode = 1; |
| 1285 ok = AllocateInternalBuffers8b(dec); |
| 1286 } else { |
| 1287 // Allocate internal buffers (note that dec->width_ may have changed here). |
| 1288 alph_dec->use_8b_decode = 0; |
| 1289 ok = AllocateInternalBuffers32b(dec, alph_dec->width_); |
| 1290 } |
| 1291 |
| 1292 if (!ok) goto Err; |
| 1293 |
| 1294 dec->action_ = READ_DATA; |
| 1295 return 1; |
| 1296 |
| 1297 Err: |
| 1298 VP8LDelete(alph_dec->vp8l_dec_); |
| 1299 alph_dec->vp8l_dec_ = NULL; |
| 1300 return 0; |
| 1301 } |
| 1302 |
| 1303 int VP8LDecodeAlphaImageStream(ALPHDecoder* const alph_dec, int last_row) { |
| 1304 VP8LDecoder* const dec = alph_dec->vp8l_dec_; |
| 1305 assert(dec != NULL); |
| 1306 assert(dec->action_ == READ_DATA); |
| 1307 assert(last_row <= dec->height_); |
| 1308 |
| 1309 if (dec->last_pixel_ == dec->width_ * dec->height_) { |
| 1310 return 1; // done |
| 1311 } |
| 1312 |
| 1313 // Decode (with special row processing). |
| 1314 return alph_dec->use_8b_decode ? |
| 1315 DecodeAlphaData(dec, (uint8_t*)dec->pixels_, dec->width_, dec->height_, |
| 1316 last_row) : |
| 1317 DecodeImageData(dec, dec->pixels_, dec->width_, dec->height_, |
| 1318 last_row, ExtractAlphaRows); |
| 1319 } |
| 1320 |
| 1321 //------------------------------------------------------------------------------ |
| 1322 |
| 1323 int VP8LDecodeHeader(VP8LDecoder* const dec, VP8Io* const io) { |
| 1324 int width, height, has_alpha; |
| 1325 |
| 1326 if (dec == NULL) return 0; |
| 1327 if (io == NULL) { |
| 1328 dec->status_ = VP8_STATUS_INVALID_PARAM; |
| 1329 return 0; |
| 1330 } |
| 1331 |
| 1332 dec->io_ = io; |
| 1333 dec->status_ = VP8_STATUS_OK; |
| 1334 VP8LInitBitReader(&dec->br_, io->data, io->data_size); |
| 1335 if (!ReadImageInfo(&dec->br_, &width, &height, &has_alpha)) { |
| 1336 dec->status_ = VP8_STATUS_BITSTREAM_ERROR; |
| 1337 goto Error; |
| 1338 } |
| 1339 dec->state_ = READ_DIM; |
| 1340 io->width = width; |
| 1341 io->height = height; |
| 1342 |
| 1343 dec->action_ = READ_HDR; |
| 1344 if (!DecodeImageStream(width, height, 1, dec, NULL)) goto Error; |
| 1345 return 1; |
| 1346 |
| 1347 Error: |
| 1348 VP8LClear(dec); |
| 1349 assert(dec->status_ != VP8_STATUS_OK); |
| 1350 return 0; |
| 1351 } |
| 1352 |
| 1353 int VP8LDecodeImage(VP8LDecoder* const dec) { |
| 1354 VP8Io* io = NULL; |
| 1355 WebPDecParams* params = NULL; |
| 1356 |
| 1357 // Sanity checks. |
| 1358 if (dec == NULL) return 0; |
| 1359 |
| 1360 dec->status_ = VP8_STATUS_BITSTREAM_ERROR; |
| 1361 assert(dec->hdr_.htree_groups_ != NULL); |
| 1362 assert(dec->hdr_.num_htree_groups_ > 0); |
| 1363 |
| 1364 io = dec->io_; |
| 1365 assert(io != NULL); |
| 1366 params = (WebPDecParams*)io->opaque; |
| 1367 assert(params != NULL); |
| 1368 dec->output_ = params->output; |
| 1369 assert(dec->output_ != NULL); |
| 1370 |
| 1371 // Initialization. |
| 1372 if (!WebPIoInitFromOptions(params->options, io, MODE_BGRA)) { |
| 1373 dec->status_ = VP8_STATUS_INVALID_PARAM; |
| 1374 goto Err; |
| 1375 } |
| 1376 |
| 1377 if (!AllocateInternalBuffers32b(dec, io->width)) goto Err; |
| 1378 |
| 1379 if (io->use_scaling && !AllocateAndInitRescaler(dec, io)) goto Err; |
| 1380 |
| 1381 if (io->use_scaling || WebPIsPremultipliedMode(dec->output_->colorspace)) { |
| 1382 // need the alpha-multiply functions for premultiplied output or rescaling |
| 1383 WebPInitAlphaProcessing(); |
| 1384 } |
| 1385 |
| 1386 // Decode. |
| 1387 dec->action_ = READ_DATA; |
| 1388 if (!DecodeImageData(dec, dec->pixels_, dec->width_, dec->height_, |
| 1389 dec->height_, ProcessRows)) { |
| 1390 goto Err; |
| 1391 } |
| 1392 |
| 1393 // Cleanup. |
| 1394 params->last_y = dec->last_out_row_; |
| 1395 VP8LClear(dec); |
| 1396 return 1; |
| 1397 |
| 1398 Err: |
| 1399 VP8LClear(dec); |
| 1400 assert(dec->status_ != VP8_STATUS_OK); |
| 1401 return 0; |
| 1402 } |
| 1403 |
| 1404 //------------------------------------------------------------------------------ |