| OLD | NEW |
| 1 // Copyright 2012 Google Inc. All Rights Reserved. | 1 // Copyright 2012 Google Inc. All Rights Reserved. |
| 2 // | 2 // |
| 3 // Use of this source code is governed by a BSD-style license | 3 // Use of this source code is governed by a BSD-style license |
| 4 // that can be found in the COPYING file in the root of the source | 4 // that can be found in the COPYING file in the root of the source |
| 5 // tree. An additional intellectual property rights grant can be found | 5 // tree. An additional intellectual property rights grant can be found |
| 6 // in the file PATENTS. All contributing project authors may | 6 // in the file PATENTS. All contributing project authors may |
| 7 // be found in the AUTHORS file in the root of the source tree. | 7 // be found in the AUTHORS file in the root of the source tree. |
| 8 // ----------------------------------------------------------------------------- | 8 // ----------------------------------------------------------------------------- |
| 9 // | 9 // |
| 10 // Author: Jyrki Alakuijala (jyrki@google.com) | 10 // Author: Jyrki Alakuijala (jyrki@google.com) |
| 11 // | 11 // |
| 12 #ifdef HAVE_CONFIG_H | 12 #ifdef HAVE_CONFIG_H |
| 13 #include "../webp/config.h" | 13 #include "../webp/config.h" |
| 14 #endif | 14 #endif |
| 15 | 15 |
| 16 #include <math.h> | 16 #include <math.h> |
| 17 | 17 |
| 18 #include "./backward_references.h" | 18 #include "./backward_references_enc.h" |
| 19 #include "./histogram.h" | 19 #include "./histogram_enc.h" |
| 20 #include "../dsp/lossless.h" | 20 #include "../dsp/lossless.h" |
| 21 #include "../dsp/lossless_common.h" |
| 21 #include "../utils/utils.h" | 22 #include "../utils/utils.h" |
| 22 | 23 |
| 23 #define MAX_COST 1.e38 | 24 #define MAX_COST 1.e38 |
| 24 | 25 |
| 25 // Number of partitions for the three dominant (literal, red and blue) symbol | 26 // Number of partitions for the three dominant (literal, red and blue) symbol |
| 26 // costs. | 27 // costs. |
| 27 #define NUM_PARTITIONS 4 | 28 #define NUM_PARTITIONS 4 |
| 28 // The size of the bin-hash corresponding to the three dominant costs. | 29 // The size of the bin-hash corresponding to the three dominant costs. |
| 29 #define BIN_SIZE (NUM_PARTITIONS * NUM_PARTITIONS * NUM_PARTITIONS) | 30 #define BIN_SIZE (NUM_PARTITIONS * NUM_PARTITIONS * NUM_PARTITIONS) |
| 30 // Maximum number of histograms allowed in greedy combining algorithm. | 31 // Maximum number of histograms allowed in greedy combining algorithm. |
| (...skipping 175 matching lines...) |
| 206 static double InitialHuffmanCost(void) { | 207 static double InitialHuffmanCost(void) { |
| 207 // Small bias because Huffman code length is typically not stored in | 208 // Small bias because Huffman code length is typically not stored in |
| 208 // full length. | 209 // full length. |
| 209 static const int kHuffmanCodeOfHuffmanCodeSize = CODE_LENGTH_CODES * 3; | 210 static const int kHuffmanCodeOfHuffmanCodeSize = CODE_LENGTH_CODES * 3; |
| 210 static const double kSmallBias = 9.1; | 211 static const double kSmallBias = 9.1; |
| 211 return kHuffmanCodeOfHuffmanCodeSize - kSmallBias; | 212 return kHuffmanCodeOfHuffmanCodeSize - kSmallBias; |
| 212 } | 213 } |
| 213 | 214 |
| 214 // Finalize the Huffman cost based on streak numbers and length type (<3 or >=3) | 215 // Finalize the Huffman cost based on streak numbers and length type (<3 or >=3) |
| 215 static double FinalHuffmanCost(const VP8LStreaks* const stats) { | 216 static double FinalHuffmanCost(const VP8LStreaks* const stats) { |
| 217 // The constants in this function are experimental; they were rounded from |
| 218 // their original values, expressed in 1/8 units, when switching to 1/1024. |
| 216 double retval = InitialHuffmanCost(); | 219 double retval = InitialHuffmanCost(); |
| 220 // Second coefficient: Many zeros in the histogram are covered efficiently |
| 221 // by a run-length encode. Originally 2/8. |
| 217 retval += stats->counts[0] * 1.5625 + 0.234375 * stats->streaks[0][1]; | 222 retval += stats->counts[0] * 1.5625 + 0.234375 * stats->streaks[0][1]; |
| 223 // Second coefficient: Constant values are encoded less efficiently, but still |
| 224 // RLE'ed. Originally 6/8. |
| 218 retval += stats->counts[1] * 2.578125 + 0.703125 * stats->streaks[1][1]; | 225 retval += stats->counts[1] * 2.578125 + 0.703125 * stats->streaks[1][1]; |
| 226 // 0s are usually encoded more efficiently than non-0s. |
| 227 // Originally 15/8. |
| 219 retval += 1.796875 * stats->streaks[0][0]; | 228 retval += 1.796875 * stats->streaks[0][0]; |
| 229 // Originally 26/8. |
| 220 retval += 3.28125 * stats->streaks[1][0]; | 230 retval += 3.28125 * stats->streaks[1][0]; |
| 221 return retval; | 231 return retval; |
| 222 } | 232 } |
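Note: the constants above are fixed-point values in 1/1024 units; the "Originally x/8" comments give the coefficients they replaced. Worked out from the code as shown: 0.234375 = 240/1024 (was 2/8 = 256/1024), 0.703125 = 720/1024 (was 6/8 = 768/1024), 1.796875 = 1840/1024 (was 15/8 = 1920/1024) and 3.28125 = 3360/1024 (was 26/8 = 3328/1024).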
| 223 | 233 |
| 224 // Get the symbol entropy for the distribution 'population'. | 234 // Get the symbol entropy for the distribution 'population'. |
| 225 // Set 'trivial_sym', if there's only one symbol present in the distribution. | 235 // Set 'trivial_sym', if there's only one symbol present in the distribution. |
| 226 static double PopulationCost(const uint32_t* const population, int length, | 236 static double PopulationCost(const uint32_t* const population, int length, |
| 227 uint32_t* const trivial_sym) { | 237 uint32_t* const trivial_sym) { |
| 228 VP8LBitEntropy bit_entropy; | 238 VP8LBitEntropy bit_entropy; |
| 229 VP8LStreaks stats; | 239 VP8LStreaks stats; |
| 230 VP8LGetEntropyUnrefined(population, length, &bit_entropy, &stats); | 240 VP8LGetEntropyUnrefined(population, length, &bit_entropy, &stats); |
| 231 if (trivial_sym != NULL) { | 241 if (trivial_sym != NULL) { |
| 232 *trivial_sym = (bit_entropy.nonzeros == 1) ? bit_entropy.nonzero_code | 242 *trivial_sym = (bit_entropy.nonzeros == 1) ? bit_entropy.nonzero_code |
| 233 : VP8L_NON_TRIVIAL_SYM; | 243 : VP8L_NON_TRIVIAL_SYM; |
| 234 } | 244 } |
| 235 | 245 |
| 236 return BitsEntropyRefine(&bit_entropy) + FinalHuffmanCost(&stats); | 246 return BitsEntropyRefine(&bit_entropy) + FinalHuffmanCost(&stats); |
| 237 } | 247 } |
| 238 | 248 |
| 249 // trivial_at_end is 1 if the two histograms each have a single non-zero |
| 250 // element: either the zero-th one in both, or the last one in both. |
| 239 static WEBP_INLINE double GetCombinedEntropy(const uint32_t* const X, | 251 static WEBP_INLINE double GetCombinedEntropy(const uint32_t* const X, |
| 240 const uint32_t* const Y, | 252 const uint32_t* const Y, |
| 241 int length) { | 253 int length, int trivial_at_end) { |
| 242 VP8LBitEntropy bit_entropy; | |
| 243 VP8LStreaks stats; | 254 VP8LStreaks stats; |
| 244 VP8LGetCombinedEntropyUnrefined(X, Y, length, &bit_entropy, &stats); | 255 if (trivial_at_end) { |
| 256 // This configuration is due to palettization that transforms an indexed |
| 257 // pixel into 0xff000000 | (pixel << 8) in VP8LBundleColorMap. |
| 258 // BitsEntropyRefine is 0 for histograms with only one non-zero value. |
| 259 // Only FinalHuffmanCost needs to be evaluated. |
| 260 memset(&stats, 0, sizeof(stats)); |
| 261 // Deal with the non-zero value at index 0 or length-1. |
| 262 stats.streaks[1][0] += 1; |
| 263 // Deal with the following/previous zero streak. |
| 264 stats.counts[0] += 1; |
| 265 stats.streaks[0][1] += length - 1; |
| 266 return FinalHuffmanCost(&stats); |
| 267 } else { |
| 268 VP8LBitEntropy bit_entropy; |
| 269 VP8LGetCombinedEntropyUnrefined(X, Y, length, &bit_entropy, &stats); |
| 245 | 270 |
| 246 return BitsEntropyRefine(&bit_entropy) + FinalHuffmanCost(&stats); | 271 return BitsEntropyRefine(&bit_entropy) + FinalHuffmanCost(&stats); |
| 272 } |
| 247 } | 273 } |
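To make the palettization remark above concrete, here is a minimal, self-contained sketch; it is an illustration only (not part of the patch) and simply applies the 0xff000000 | (pixel << 8) packing quoted in the comment:

#include <stdint.h>
#include <stdio.h>

int main(void) {
  const uint8_t index = 17;   /* a palette index produced by the color-map transform */
  const uint32_t argb = 0xff000000u | ((uint32_t)index << 8);
  printf("a=%u r=%u g=%u b=%u\n",
         (unsigned)((argb >> 24) & 0xff),   /* always 0xff: alpha hits one bucket */
         (unsigned)((argb >> 16) & 0xff),   /* always 0: red hits bucket 0 only   */
         (unsigned)((argb >>  8) & 0xff),   /* the index: only green varies       */
         (unsigned)((argb >>  0) & 0xff));  /* always 0: blue hits bucket 0 only  */
  /* For such images the red, blue and alpha histograms each have a single
   * non-zero entry (at index 0 or 0xff), which is exactly the trivial_at_end
   * shortcut taken by GetCombinedEntropy(). */
  return 0;
}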
| 248 | 274 |
| 249 // Estimates the Entropy + Huffman + other block overhead size cost. | 275 // Estimates the Entropy + Huffman + other block overhead size cost. |
| 250 double VP8LHistogramEstimateBits(const VP8LHistogram* const p) { | 276 double VP8LHistogramEstimateBits(const VP8LHistogram* const p) { |
| 251 return | 277 return |
| 252 PopulationCost( | 278 PopulationCost( |
| 253 p->literal_, VP8LHistogramNumCodes(p->palette_code_bits_), NULL) | 279 p->literal_, VP8LHistogramNumCodes(p->palette_code_bits_), NULL) |
| 254 + PopulationCost(p->red_, NUM_LITERAL_CODES, NULL) | 280 + PopulationCost(p->red_, NUM_LITERAL_CODES, NULL) |
| 255 + PopulationCost(p->blue_, NUM_LITERAL_CODES, NULL) | 281 + PopulationCost(p->blue_, NUM_LITERAL_CODES, NULL) |
| 256 + PopulationCost(p->alpha_, NUM_LITERAL_CODES, NULL) | 282 + PopulationCost(p->alpha_, NUM_LITERAL_CODES, NULL) |
| 257 + PopulationCost(p->distance_, NUM_DISTANCE_CODES, NULL) | 283 + PopulationCost(p->distance_, NUM_DISTANCE_CODES, NULL) |
| 258 + VP8LExtraCost(p->literal_ + NUM_LITERAL_CODES, NUM_LENGTH_CODES) | 284 + VP8LExtraCost(p->literal_ + NUM_LITERAL_CODES, NUM_LENGTH_CODES) |
| 259 + VP8LExtraCost(p->distance_, NUM_DISTANCE_CODES); | 285 + VP8LExtraCost(p->distance_, NUM_DISTANCE_CODES); |
| 260 } | 286 } |
| 261 | 287 |
| 262 // ----------------------------------------------------------------------------- | 288 // ----------------------------------------------------------------------------- |
| 263 // Various histogram combine/cost-eval functions | 289 // Various histogram combine/cost-eval functions |
| 264 | 290 |
| 265 static int GetCombinedHistogramEntropy(const VP8LHistogram* const a, | 291 static int GetCombinedHistogramEntropy(const VP8LHistogram* const a, |
| 266 const VP8LHistogram* const b, | 292 const VP8LHistogram* const b, |
| 267 double cost_threshold, | 293 double cost_threshold, |
| 268 double* cost) { | 294 double* cost) { |
| 269 const int palette_code_bits = a->palette_code_bits_; | 295 const int palette_code_bits = a->palette_code_bits_; |
| 296 int trivial_at_end = 0; |
| 270 assert(a->palette_code_bits_ == b->palette_code_bits_); | 297 assert(a->palette_code_bits_ == b->palette_code_bits_); |
| 271 *cost += GetCombinedEntropy(a->literal_, b->literal_, | 298 *cost += GetCombinedEntropy(a->literal_, b->literal_, |
| 272 VP8LHistogramNumCodes(palette_code_bits)); | 299 VP8LHistogramNumCodes(palette_code_bits), 0); |
| 273 *cost += VP8LExtraCostCombined(a->literal_ + NUM_LITERAL_CODES, | 300 *cost += VP8LExtraCostCombined(a->literal_ + NUM_LITERAL_CODES, |
| 274 b->literal_ + NUM_LITERAL_CODES, | 301 b->literal_ + NUM_LITERAL_CODES, |
| 275 NUM_LENGTH_CODES); | 302 NUM_LENGTH_CODES); |
| 276 if (*cost > cost_threshold) return 0; | 303 if (*cost > cost_threshold) return 0; |
| 277 | 304 |
| 278 *cost += GetCombinedEntropy(a->red_, b->red_, NUM_LITERAL_CODES); | 305 if (a->trivial_symbol_ != VP8L_NON_TRIVIAL_SYM && |
| 306 a->trivial_symbol_ == b->trivial_symbol_) { |
| 307 // A, R and B are all 0 or 0xff. |
| 308 const uint32_t color_a = (a->trivial_symbol_ >> 24) & 0xff; |
| 309 const uint32_t color_r = (a->trivial_symbol_ >> 16) & 0xff; |
| 310 const uint32_t color_b = (a->trivial_symbol_ >> 0) & 0xff; |
| 311 if ((color_a == 0 || color_a == 0xff) && |
| 312 (color_r == 0 || color_r == 0xff) && |
| 313 (color_b == 0 || color_b == 0xff)) { |
| 314 trivial_at_end = 1; |
| 315 } |
| 316 } |
| 317 |
| 318 *cost += |
| 319 GetCombinedEntropy(a->red_, b->red_, NUM_LITERAL_CODES, trivial_at_end); |
| 279 if (*cost > cost_threshold) return 0; | 320 if (*cost > cost_threshold) return 0; |
| 280 | 321 |
| 281 *cost += GetCombinedEntropy(a->blue_, b->blue_, NUM_LITERAL_CODES); | 322 *cost += |
| 323 GetCombinedEntropy(a->blue_, b->blue_, NUM_LITERAL_CODES, trivial_at_end); |
| 282 if (*cost > cost_threshold) return 0; | 324 if (*cost > cost_threshold) return 0; |
| 283 | 325 |
| 284 *cost += GetCombinedEntropy(a->alpha_, b->alpha_, NUM_LITERAL_CODES); | 326 *cost += GetCombinedEntropy(a->alpha_, b->alpha_, NUM_LITERAL_CODES, |
| 327 trivial_at_end); |
| 285 if (*cost > cost_threshold) return 0; | 328 if (*cost > cost_threshold) return 0; |
| 286 | 329 |
| 287 *cost += GetCombinedEntropy(a->distance_, b->distance_, NUM_DISTANCE_CODES); | 330 *cost += |
| 331 GetCombinedEntropy(a->distance_, b->distance_, NUM_DISTANCE_CODES, 0); |
| 288 *cost += | 332 *cost += |
| 289 VP8LExtraCostCombined(a->distance_, b->distance_, NUM_DISTANCE_CODES); | 333 VP8LExtraCostCombined(a->distance_, b->distance_, NUM_DISTANCE_CODES); |
| 290 if (*cost > cost_threshold) return 0; | 334 if (*cost > cost_threshold) return 0; |
| 291 | 335 |
| 292 return 1; | 336 return 1; |
| 293 } | 337 } |
| 294 | 338 |
| 339 static WEBP_INLINE void HistogramAdd(const VP8LHistogram* const a, |
| 340 const VP8LHistogram* const b, |
| 341 VP8LHistogram* const out) { |
| 342 VP8LHistogramAdd(a, b, out); |
| 343 out->trivial_symbol_ = (a->trivial_symbol_ == b->trivial_symbol_) |
| 344 ? a->trivial_symbol_ |
| 345 : VP8L_NON_TRIVIAL_SYM; |
| 346 } |
| 347 |
| 295 // Performs out = a + b, computing the cost C(a+b) - C(a) - C(b) while comparing | 348 // Performs out = a + b, computing the cost C(a+b) - C(a) - C(b) while comparing |
| 296 // to the threshold value 'cost_threshold'. The score returned is | 349 // to the threshold value 'cost_threshold'. The score returned is |
| 297 // Score = C(a+b) - C(a) - C(b), where C(a) + C(b) is known and fixed. | 350 // Score = C(a+b) - C(a) - C(b), where C(a) + C(b) is known and fixed. |
| 298 // Since the previous score passed is 'cost_threshold', we only need to compare | 351 // Since the previous score passed is 'cost_threshold', we only need to compare |
| 299 // the partial cost against 'cost_threshold + C(a) + C(b)' to possibly bail-out | 352 // the partial cost against 'cost_threshold + C(a) + C(b)' to possibly bail-out |
| 300 // early. | 353 // early. |
| 301 static double HistogramAddEval(const VP8LHistogram* const a, | 354 static double HistogramAddEval(const VP8LHistogram* const a, |
| 302 const VP8LHistogram* const b, | 355 const VP8LHistogram* const b, |
| 303 VP8LHistogram* const out, | 356 VP8LHistogram* const out, |
| 304 double cost_threshold) { | 357 double cost_threshold) { |
| 305 double cost = 0; | 358 double cost = 0; |
| 306 const double sum_cost = a->bit_cost_ + b->bit_cost_; | 359 const double sum_cost = a->bit_cost_ + b->bit_cost_; |
| 307 cost_threshold += sum_cost; | 360 cost_threshold += sum_cost; |
| 308 | 361 |
| 309 if (GetCombinedHistogramEntropy(a, b, cost_threshold, &cost)) { | 362 if (GetCombinedHistogramEntropy(a, b, cost_threshold, &cost)) { |
| 310 VP8LHistogramAdd(a, b, out); | 363 HistogramAdd(a, b, out); |
| 311 out->bit_cost_ = cost; | 364 out->bit_cost_ = cost; |
| 312 out->palette_code_bits_ = a->palette_code_bits_; | 365 out->palette_code_bits_ = a->palette_code_bits_; |
| 313 out->trivial_symbol_ = (a->trivial_symbol_ == b->trivial_symbol_) ? | |
| 314 a->trivial_symbol_ : VP8L_NON_TRIVIAL_SYM; | |
| 315 } | 366 } |
| 316 | 367 |
| 317 return cost - sum_cost; | 368 return cost - sum_cost; |
| 318 } | 369 } |
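A small worked example of the bail-out above (numbers invented for illustration): with C(a) = 100 bits, C(b) = 80 bits and an incoming cost_threshold of 0, the call compares the partially accumulated C(a+b) against 0 + 100 + 80 = 180 and gives up as soon as that bound is exceeded; the returned value C(a+b) - 180 is negative exactly when merging the two histograms saves bits.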
| 319 | 370 |
| 320 // Same as HistogramAddEval(), except that the resulting histogram | 371 // Same as HistogramAddEval(), except that the resulting histogram |
| 321 // is not stored. Only the cost C(a+b) - C(a) is evaluated. We omit | 372 // is not stored. Only the cost C(a+b) - C(a) is evaluated. We omit |
| 322 // the term C(b) which is constant over all the evaluations. | 373 // the term C(b) which is constant over all the evaluations. |
| 323 static double HistogramAddThresh(const VP8LHistogram* const a, | 374 static double HistogramAddThresh(const VP8LHistogram* const a, |
| 324 const VP8LHistogram* const b, | 375 const VP8LHistogram* const b, |
| (...skipping 118 matching lines...) |
| 443 VP8LHistogram* const histo = orig_histograms[i]; | 494 VP8LHistogram* const histo = orig_histograms[i]; |
| 444 UpdateHistogramCost(histo); | 495 UpdateHistogramCost(histo); |
| 445 // Copy histograms from orig_histo[] to image_histo[]. | 496 // Copy histograms from orig_histo[] to image_histo[]. |
| 446 HistogramCopy(histo, histograms[i]); | 497 HistogramCopy(histo, histograms[i]); |
| 447 } | 498 } |
| 448 } | 499 } |
| 449 | 500 |
| 450 // Partition histograms to different entropy bins for three dominant (literal, | 501 // Partition histograms to different entropy bins for three dominant (literal, |
| 451 // red and blue) symbol costs and compute the histogram aggregate bit_cost. | 502 // red and blue) symbol costs and compute the histogram aggregate bit_cost. |
| 452 static void HistogramAnalyzeEntropyBin(VP8LHistogramSet* const image_histo, | 503 static void HistogramAnalyzeEntropyBin(VP8LHistogramSet* const image_histo, |
| 453 int16_t* const bin_map, int low_effort) { | 504 uint16_t* const bin_map, |
| 505 int low_effort) { |
| 454 int i; | 506 int i; |
| 455 VP8LHistogram** const histograms = image_histo->histograms; | 507 VP8LHistogram** const histograms = image_histo->histograms; |
| 456 const int histo_size = image_histo->size; | 508 const int histo_size = image_histo->size; |
| 457 const int bin_depth = histo_size + 1; | |
| 458 DominantCostRange cost_range; | 509 DominantCostRange cost_range; |
| 459 DominantCostRangeInit(&cost_range); | 510 DominantCostRangeInit(&cost_range); |
| 460 | 511 |
| 461 // Analyze the dominant (literal, red and blue) entropy costs. | 512 // Analyze the dominant (literal, red and blue) entropy costs. |
| 462 for (i = 0; i < histo_size; ++i) { | 513 for (i = 0; i < histo_size; ++i) { |
| 463 VP8LHistogram* const histo = histograms[i]; | 514 UpdateDominantCostRange(histograms[i], &cost_range); |
| 464 UpdateDominantCostRange(histo, &cost_range); | |
| 465 } | 515 } |
| 466 | 516 |
| 467 // bin-hash histograms on three of the dominant (literal, red and blue) | 517 // bin-hash histograms on three of the dominant (literal, red and blue) |
| 468 // symbol costs. | 518 // symbol costs and store the resulting bin_id for each histogram. |
| 469 for (i = 0; i < histo_size; ++i) { | 519 for (i = 0; i < histo_size; ++i) { |
| 470 const VP8LHistogram* const histo = histograms[i]; | 520 bin_map[i] = GetHistoBinIndex(histograms[i], &cost_range, low_effort); |
| 471 const int bin_id = GetHistoBinIndex(histo, &cost_range, low_effort); | |
| 472 const int bin_offset = bin_id * bin_depth; | |
| 473 // bin_map[n][0] for every bin 'n' maintains the counter for the number of | |
| 474 // histograms in that bin. | |
| 475 // Get and increment the num_histos in that bin. | |
| 476 const int num_histos = ++bin_map[bin_offset]; | |
| 477 assert(bin_offset + num_histos < bin_depth * BIN_SIZE); | |
| 478 // Add histogram i'th index at num_histos (last) position in the bin_map. | |
| 479 bin_map[bin_offset + num_histos] = i; | |
| 480 } | 521 } |
| 481 } | 522 } |
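With NUM_PARTITIONS = 4, the three quantized dominant costs select one of BIN_SIZE = 4 * 4 * 4 = 64 bins, and the new code simply records that bin id per histogram in bin_map[i]. GetHistoBinIndex itself lies outside this hunk; the following is only a hypothetical sketch of the quantize-and-pack idea, with the helper names and the exact quantization invented for illustration:

#define NUM_PARTITIONS 4

/* Map a cost into one of NUM_PARTITIONS slots over its observed range. */
static int QuantizeCost(double cost, double min_cost, double max_cost) {
  const double range = max_cost - min_cost;
  int slot = (range <= 0.) ? 0
                           : (int)(NUM_PARTITIONS * (cost - min_cost) / range);
  if (slot >= NUM_PARTITIONS) slot = NUM_PARTITIONS - 1;  /* clamp cost == max */
  return slot;
}

/* Pack the literal/red/blue slots into a single id in [0, BIN_SIZE). */
static int PackBinId(int literal_slot, int red_slot, int blue_slot) {
  return (literal_slot * NUM_PARTITIONS + red_slot) * NUM_PARTITIONS + blue_slot;
}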
| 482 | 523 |
| 483 // Compact the histogram set by removing unused entries. | 524 // Compact image_histo[] by merging histograms with the same bin_id together, |
| 484 static void HistogramCompactBins(VP8LHistogramSet* const image_histo) { | 525 // if it is advantageous. |
| 485 VP8LHistogram** const histograms = image_histo->histograms; | |
| 486 int i, j; | |
| 487 | |
| 488 for (i = 0, j = 0; i < image_histo->size; ++i) { | |
| 489 if (histograms[i] != NULL && histograms[i]->bit_cost_ != 0.) { | |
| 490 if (j < i) { | |
| 491 histograms[j] = histograms[i]; | |
| 492 histograms[i] = NULL; | |
| 493 } | |
| 494 ++j; | |
| 495 } | |
| 496 } | |
| 497 image_histo->size = j; | |
| 498 } | |
| 499 | |
| 500 static VP8LHistogram* HistogramCombineEntropyBin( | 526 static VP8LHistogram* HistogramCombineEntropyBin( |
| 501 VP8LHistogramSet* const image_histo, | 527 VP8LHistogramSet* const image_histo, |
| 502 VP8LHistogram* cur_combo, | 528 VP8LHistogram* cur_combo, |
| 503 int16_t* const bin_map, int bin_depth, int num_bins, | 529 const uint16_t* const bin_map, int bin_map_size, int num_bins, |
| 504 double combine_cost_factor, int low_effort) { | 530 double combine_cost_factor, int low_effort) { |
| 505 int bin_id; | |
| 506 VP8LHistogram** const histograms = image_histo->histograms; | 531 VP8LHistogram** const histograms = image_histo->histograms; |
| 532 int idx; |
| 533 // Work in-place: processed histograms are put at the beginning of |
| 534 // image_histo[]. At the end, we just have to truncate the array. |
| 535 int size = 0; |
| 536 struct { |
| 537 int16_t first; // position of the histogram that accumulates all |
| 538 // histograms with the same bin_id |
| 539 uint16_t num_combine_failures; // number of combine failures per bin_id |
| 540 } bin_info[BIN_SIZE]; |
| 507 | 541 |
| 508 for (bin_id = 0; bin_id < num_bins; ++bin_id) { | 542 assert(num_bins <= BIN_SIZE); |
| 509 const int bin_offset = bin_id * bin_depth; | 543 for (idx = 0; idx < num_bins; ++idx) { |
| 510 const int num_histos = bin_map[bin_offset]; | 544 bin_info[idx].first = -1; |
| 511 const int idx1 = bin_map[bin_offset + 1]; | 545 bin_info[idx].num_combine_failures = 0; |
| 512 int num_combine_failures = 0; | 546 } |
| 513 int n; | 547 |
| 514 for (n = 2; n <= num_histos; ++n) { | 548 for (idx = 0; idx < bin_map_size; ++idx) { |
| 515 const int idx2 = bin_map[bin_offset + n]; | 549 const int bin_id = bin_map[idx]; |
| 516 if (low_effort) { | 550 const int first = bin_info[bin_id].first; |
| 517 // Merge all histograms with the same bin index, irrespective of cost of | 551 assert(size <= idx); |
| 518 // the merged histograms. | 552 if (first == -1) { |
| 519 VP8LHistogramAdd(histograms[idx1], histograms[idx2], histograms[idx1]); | 553 // just move histogram #idx to its final position |
| 520 histograms[idx2]->bit_cost_ = 0.; | 554 histograms[size] = histograms[idx]; |
| 555 bin_info[bin_id].first = size++; |
| 556 } else if (low_effort) { |
| 557 HistogramAdd(histograms[idx], histograms[first], histograms[first]); |
| 558 } else { |
| 559 // try to merge #idx into #first (both share the same bin_id) |
| 560 const double bit_cost = histograms[idx]->bit_cost_; |
| 561 const double bit_cost_thresh = -bit_cost * combine_cost_factor; |
| 562 const double curr_cost_diff = |
| 563 HistogramAddEval(histograms[first], histograms[idx], |
| 564 cur_combo, bit_cost_thresh); |
| 565 if (curr_cost_diff < bit_cost_thresh) { |
| 566 // Try to merge two histograms only if the combo is a trivial one or |
| 567 // the two candidate histograms are already non-trivial. |
| 568 // For some images, 'try_combine' turns out to be false for a lot of |
| 569 // histogram pairs. In that case, we fall back to combining |
| 570 // histograms as usual to avoid increasing the header size. |
| 571 const int try_combine = |
| 572 (cur_combo->trivial_symbol_ != VP8L_NON_TRIVIAL_SYM) || |
| 573 ((histograms[idx]->trivial_symbol_ == VP8L_NON_TRIVIAL_SYM) && |
| 574 (histograms[first]->trivial_symbol_ == VP8L_NON_TRIVIAL_SYM)); |
| 575 const int max_combine_failures = 32; |
| 576 if (try_combine || |
| 577 bin_info[bin_id].num_combine_failures >= max_combine_failures) { |
| 578 // move the (better) merged histogram to its final slot |
| 579 HistogramSwap(&cur_combo, &histograms[first]); |
| 580 } else { |
| 581 histograms[size++] = histograms[idx]; |
| 582 ++bin_info[bin_id].num_combine_failures; |
| 583 } |
| 521 } else { | 584 } else { |
| 522 const double bit_cost_idx2 = histograms[idx2]->bit_cost_; | 585 histograms[size++] = histograms[idx]; |
| 523 if (bit_cost_idx2 > 0.) { | |
| 524 const double bit_cost_thresh = -bit_cost_idx2 * combine_cost_factor; | |
| 525 const double curr_cost_diff = | |
| 526 HistogramAddEval(histograms[idx1], histograms[idx2], | |
| 527 cur_combo, bit_cost_thresh); | |
| 528 if (curr_cost_diff < bit_cost_thresh) { | |
| 529 // Try to merge two histograms only if the combo is a trivial one or | |
| 530 // the two candidate histograms are already non-trivial. | |
| 531 // For some images, 'try_combine' turns out to be false for a lot of | |
| 532 // histogram pairs. In that case, we fallback to combining | |
| 533 // histograms as usual to avoid increasing the header size. | |
| 534 const int try_combine = | |
| 535 (cur_combo->trivial_symbol_ != VP8L_NON_TRIVIAL_SYM) || | |
| 536 ((histograms[idx1]->trivial_symbol_ == VP8L_NON_TRIVIAL_SYM) && | |
| 537 (histograms[idx2]->trivial_symbol_ == VP8L_NON_TRIVIAL_SYM)); | |
| 538 const int max_combine_failures = 32; | |
| 539 if (try_combine || (num_combine_failures >= max_combine_failures)) { | |
| 540 HistogramSwap(&cur_combo, &histograms[idx1]); | |
| 541 histograms[idx2]->bit_cost_ = 0.; | |
| 542 } else { | |
| 543 ++num_combine_failures; | |
| 544 } | |
| 545 } | |
| 546 } | |
| 547 } | 586 } |
| 548 } | 587 } |
| 549 if (low_effort) { | 588 } |
| 550 // Update the bit_cost for the merged histograms (per bin index). | 589 image_histo->size = size; |
| 551 UpdateHistogramCost(histograms[idx1]); | 590 if (low_effort) { |
| 591 // for low_effort case, update the final cost when everything is merged |
| 592 for (idx = 0; idx < size; ++idx) { |
| 593 UpdateHistogramCost(histograms[idx]); |
| 552 } | 594 } |
| 553 } | 595 } |
| 554 HistogramCompactBins(image_histo); | |
| 555 return cur_combo; | 596 return cur_combo; |
| 556 } | 597 } |
| 557 | 598 |
| 558 static uint32_t MyRand(uint32_t *seed) { | 599 static uint32_t MyRand(uint32_t* const seed) { |
| 559 *seed *= 16807U; | 600 *seed = (*seed * 16807ull) & 0xffffffffu; |
| 560 if (*seed == 0) { | 601 if (*seed == 0) { |
| 561 *seed = 1; | 602 *seed = 1; |
| 562 } | 603 } |
| 563 return *seed; | 604 return *seed; |
| 564 } | 605 } |
| 565 | 606 |
| 566 // ----------------------------------------------------------------------------- | 607 // ----------------------------------------------------------------------------- |
| 567 // Histogram pairs priority queue | 608 // Histogram pairs priority queue |
| 568 | 609 |
| 569 // Pair of histograms. Negative idx1 value means that pair is out-of-date. | 610 // Pair of histograms. Negative idx1 value means that pair is out-of-date. |
| (...skipping 105 matching lines...) |
| 675 // Initialize positions array. | 716 // Initialize positions array. |
| 676 PreparePair(histograms, i, j, &histo_queue.queue[histo_queue.size]); | 717 PreparePair(histograms, i, j, &histo_queue.queue[histo_queue.size]); |
| 677 UpdateQueueFront(&histo_queue); | 718 UpdateQueueFront(&histo_queue); |
| 678 } | 719 } |
| 679 } | 720 } |
| 680 | 721 |
| 681 while (image_histo_size > 1 && histo_queue.size > 0) { | 722 while (image_histo_size > 1 && histo_queue.size > 0) { |
| 682 HistogramPair* copy_to; | 723 HistogramPair* copy_to; |
| 683 const int idx1 = histo_queue.queue[0].idx1; | 724 const int idx1 = histo_queue.queue[0].idx1; |
| 684 const int idx2 = histo_queue.queue[0].idx2; | 725 const int idx2 = histo_queue.queue[0].idx2; |
| 685 VP8LHistogramAdd(histograms[idx2], histograms[idx1], histograms[idx1]); | 726 HistogramAdd(histograms[idx2], histograms[idx1], histograms[idx1]); |
| 686 histograms[idx1]->bit_cost_ = histo_queue.queue[0].cost_combo; | 727 histograms[idx1]->bit_cost_ = histo_queue.queue[0].cost_combo; |
| 687 // Remove merged histogram. | 728 // Remove merged histogram. |
| 688 for (i = 0; i + 1 < image_histo_size; ++i) { | 729 for (i = 0; i + 1 < image_histo_size; ++i) { |
| 689 if (clusters[i] >= idx2) { | 730 if (clusters[i] >= idx2) { |
| 690 clusters[i] = clusters[i + 1]; | 731 clusters[i] = clusters[i + 1]; |
| 691 } | 732 } |
| 692 } | 733 } |
| 693 --image_histo_size; | 734 --image_histo_size; |
| 694 | 735 |
| 695 // Remove pairs intersecting the just combined best pair. This will | 736 // Remove pairs intersecting the just combined best pair. This will |
| (...skipping 45 matching lines...) |
| 741 VP8LHistogram* best_combo, | 782 VP8LHistogram* best_combo, |
| 742 int quality, int min_cluster_size) { | 783 int quality, int min_cluster_size) { |
| 743 int iter; | 784 int iter; |
| 744 uint32_t seed = 0; | 785 uint32_t seed = 0; |
| 745 int tries_with_no_success = 0; | 786 int tries_with_no_success = 0; |
| 746 int image_histo_size = image_histo->size; | 787 int image_histo_size = image_histo->size; |
| 747 const int iter_mult = (quality < 25) ? 2 : 2 + (quality - 25) / 8; | 788 const int iter_mult = (quality < 25) ? 2 : 2 + (quality - 25) / 8; |
| 748 const int outer_iters = image_histo_size * iter_mult; | 789 const int outer_iters = image_histo_size * iter_mult; |
| 749 const int num_pairs = image_histo_size / 2; | 790 const int num_pairs = image_histo_size / 2; |
| 750 const int num_tries_no_success = outer_iters / 2; | 791 const int num_tries_no_success = outer_iters / 2; |
| 792 int idx2_max = image_histo_size - 1; |
| 793 int do_brute_force = 0; |
| 751 VP8LHistogram** const histograms = image_histo->histograms; | 794 VP8LHistogram** const histograms = image_histo->histograms; |
| 752 | 795 |
| 753 // Collapse similar histograms in 'image_histo'. | 796 // Collapse similar histograms in 'image_histo'. |
| 754 ++min_cluster_size; | 797 ++min_cluster_size; |
| 755 for (iter = 0; | 798 for (iter = 0; |
| 756 iter < outer_iters && image_histo_size >= min_cluster_size; | 799 iter < outer_iters && image_histo_size >= min_cluster_size; |
| 757 ++iter) { | 800 ++iter) { |
| 758 double best_cost_diff = 0.; | 801 double best_cost_diff = 0.; |
| 759 int best_idx1 = -1, best_idx2 = 1; | 802 int best_idx1 = -1, best_idx2 = 1; |
| 760 int j; | 803 int j; |
| 761 const int num_tries = | 804 int num_tries = |
| 762 (num_pairs < image_histo_size) ? num_pairs : image_histo_size; | 805 (num_pairs < image_histo_size) ? num_pairs : image_histo_size; |
| 806 // Use a brute force approach if: |
| 807 // - stochastic has not worked for a while and |
| 808 // - the number of brute-force iterations is smaller than the number of |
| 809 // stochastic iterations that would remain if no match were ever found |
| 810 // again (i.e. num_tries times the number of remaining outer iterations). |
| 811 do_brute_force = |
| 812 (tries_with_no_success > 10) && |
| 813 (idx2_max * (idx2_max + 1) < 2 * num_tries * (outer_iters - iter)); |
| 814 if (do_brute_force) num_tries = idx2_max; |
| 815 |
| 763 seed += iter; | 816 seed += iter; |
| 764 for (j = 0; j < num_tries; ++j) { | 817 for (j = 0; j < num_tries; ++j) { |
| 765 double curr_cost_diff; | 818 double curr_cost_diff; |
| 766 // Choose two histograms at random and try to combine them. | 819 // Choose two histograms and try to combine them. |
| 767 const uint32_t idx1 = MyRand(&seed) % image_histo_size; | 820 uint32_t idx1, idx2; |
| 768 const uint32_t tmp = (j & 7) + 1; | 821 if (do_brute_force) { |
| 769 const uint32_t diff = | 822 // Use a brute force approach. |
| 770 (tmp < 3) ? tmp : MyRand(&seed) % (image_histo_size - 1); | 823 idx1 = (uint32_t)j; |
| 771 const uint32_t idx2 = (idx1 + diff + 1) % image_histo_size; | 824 idx2 = (uint32_t)idx2_max; |
| 772 if (idx1 == idx2) { | 825 } else { |
| 773 continue; | 826 const uint32_t tmp = (j & 7) + 1; |
| 827 const uint32_t diff = |
| 828 (tmp < 3) ? tmp : MyRand(&seed) % (image_histo_size - 1); |
| 829 idx1 = MyRand(&seed) % image_histo_size; |
| 830 idx2 = (idx1 + diff + 1) % image_histo_size; |
| 831 if (idx1 == idx2) { |
| 832 continue; |
| 833 } |
| 774 } | 834 } |
| 775 | 835 |
| 776 // Calculate cost reduction on combining. | 836 // Calculate cost reduction on combining. |
| 777 curr_cost_diff = HistogramAddEval(histograms[idx1], histograms[idx2], | 837 curr_cost_diff = HistogramAddEval(histograms[idx1], histograms[idx2], |
| 778 tmp_histo, best_cost_diff); | 838 tmp_histo, best_cost_diff); |
| 779 if (curr_cost_diff < best_cost_diff) { // found a better pair? | 839 if (curr_cost_diff < best_cost_diff) { // found a better pair? |
| 780 HistogramSwap(&best_combo, &tmp_histo); | 840 HistogramSwap(&best_combo, &tmp_histo); |
| 781 best_cost_diff = curr_cost_diff; | 841 best_cost_diff = curr_cost_diff; |
| 782 best_idx1 = idx1; | 842 best_idx1 = idx1; |
| 783 best_idx2 = idx2; | 843 best_idx2 = idx2; |
| 784 } | 844 } |
| 785 } | 845 } |
| 846 if (do_brute_force) --idx2_max; |
| 786 | 847 |
| 787 if (best_idx1 >= 0) { | 848 if (best_idx1 >= 0) { |
| 788 HistogramSwap(&best_combo, &histograms[best_idx1]); | 849 HistogramSwap(&best_combo, &histograms[best_idx1]); |
| 789 // swap best_idx2 slot with last one (which is now unused) | 850 // swap best_idx2 slot with last one (which is now unused) |
| 790 --image_histo_size; | 851 --image_histo_size; |
| 852 if (idx2_max >= image_histo_size) idx2_max = image_histo_size - 1; |
| 791 if (best_idx2 != image_histo_size) { | 853 if (best_idx2 != image_histo_size) { |
| 792 HistogramSwap(&histograms[image_histo_size], &histograms[best_idx2]); | 854 HistogramSwap(&histograms[image_histo_size], &histograms[best_idx2]); |
| 793 histograms[image_histo_size] = NULL; | 855 histograms[image_histo_size] = NULL; |
| 794 } | 856 } |
| 795 tries_with_no_success = 0; | 857 tries_with_no_success = 0; |
| 796 } | 858 } |
| 797 if (++tries_with_no_success >= num_tries_no_success) { | 859 if (++tries_with_no_success >= num_tries_no_success || idx2_max == 0) { |
| 798 break; | 860 break; |
| 799 } | 861 } |
| 800 } | 862 } |
| 801 image_histo->size = image_histo_size; | 863 image_histo->size = image_histo_size; |
| 802 } | 864 } |
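The switch-to-brute-force test above can be read as an inequality: with idx2_max candidates left, exhausting every remaining pair costs about idx2_max * (idx2_max + 1) / 2 evaluations, while staying stochastic costs up to num_tries evaluations for each of the (outer_iters - iter) remaining outer iterations. The code therefore flips to brute force once idx2_max * (idx2_max + 1) < 2 * num_tries * (outer_iters - iter), and only after more than 10 consecutive fruitless tries.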
| 803 | 865 |
| 804 // ----------------------------------------------------------------------------- | 866 // ----------------------------------------------------------------------------- |
| 805 // Histogram refinement | 867 // Histogram refinement |
| 806 | 868 |
| 807 // Find the best 'out' histogram for each of the 'in' histograms. | 869 // Find the best 'out' histogram for each of the 'in' histograms. |
| (...skipping 28 matching lines...) |
| 836 } | 898 } |
| 837 } | 899 } |
| 838 | 900 |
| 839 // Recompute each out based on raw and symbols. | 901 // Recompute each out based on raw and symbols. |
| 840 for (i = 0; i < out_size; ++i) { | 902 for (i = 0; i < out_size; ++i) { |
| 841 HistogramClear(out_histo[i]); | 903 HistogramClear(out_histo[i]); |
| 842 } | 904 } |
| 843 | 905 |
| 844 for (i = 0; i < in_size; ++i) { | 906 for (i = 0; i < in_size; ++i) { |
| 845 const int idx = symbols[i]; | 907 const int idx = symbols[i]; |
| 846 VP8LHistogramAdd(in_histo[i], out_histo[idx], out_histo[idx]); | 908 HistogramAdd(in_histo[i], out_histo[idx], out_histo[idx]); |
| 847 } | 909 } |
| 848 } | 910 } |
| 849 | 911 |
| 850 static double GetCombineCostFactor(int histo_size, int quality) { | 912 static double GetCombineCostFactor(int histo_size, int quality) { |
| 851 double combine_cost_factor = 0.16; | 913 double combine_cost_factor = 0.16; |
| 852 if (quality < 90) { | 914 if (quality < 90) { |
| 853 if (histo_size > 256) combine_cost_factor /= 2.; | 915 if (histo_size > 256) combine_cost_factor /= 2.; |
| 854 if (histo_size > 512) combine_cost_factor /= 2.; | 916 if (histo_size > 512) combine_cost_factor /= 2.; |
| 855 if (histo_size > 1024) combine_cost_factor /= 2.; | 917 if (histo_size > 1024) combine_cost_factor /= 2.; |
| 856 if (quality <= 50) combine_cost_factor /= 2.; | 918 if (quality <= 50) combine_cost_factor /= 2.; |
| 857 } | 919 } |
| 858 return combine_cost_factor; | 920 return combine_cost_factor; |
| 859 } | 921 } |
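For example, a histogram set of size 1200 at quality 40 gets combine_cost_factor = 0.16 / 2 (size > 256) / 2 (size > 512) / 2 (size > 1024) / 2 (quality <= 50) = 0.01, whereas at quality 90 or above the factor stays at 0.16 regardless of size.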
| 860 | 922 |
| 861 int VP8LGetHistoImageSymbols(int xsize, int ysize, | 923 int VP8LGetHistoImageSymbols(int xsize, int ysize, |
| 862 const VP8LBackwardRefs* const refs, | 924 const VP8LBackwardRefs* const refs, |
| 863 int quality, int low_effort, | 925 int quality, int low_effort, |
| 864 int histo_bits, int cache_bits, | 926 int histo_bits, int cache_bits, |
| 865 VP8LHistogramSet* const image_histo, | 927 VP8LHistogramSet* const image_histo, |
| 866 VP8LHistogramSet* const tmp_histos, | 928 VP8LHistogramSet* const tmp_histos, |
| 867 uint16_t* const histogram_symbols) { | 929 uint16_t* const histogram_symbols) { |
| 868 int ok = 0; | 930 int ok = 0; |
| 869 const int histo_xsize = histo_bits ? VP8LSubSampleSize(xsize, histo_bits) : 1; | 931 const int histo_xsize = histo_bits ? VP8LSubSampleSize(xsize, histo_bits) : 1; |
| 870 const int histo_ysize = histo_bits ? VP8LSubSampleSize(ysize, histo_bits) : 1; | 932 const int histo_ysize = histo_bits ? VP8LSubSampleSize(ysize, histo_bits) : 1; |
| 871 const int image_histo_raw_size = histo_xsize * histo_ysize; | 933 const int image_histo_raw_size = histo_xsize * histo_ysize; |
| 872 const int entropy_combine_num_bins = low_effort ? NUM_PARTITIONS : BIN_SIZE; | |
| 873 | |
| 874 // The bin_map for every bin follows following semantics: | |
| 875 // bin_map[n][0] = num_histo; // The number of histograms in that bin. | |
| 876 // bin_map[n][1] = index of first histogram in that bin; | |
| 877 // bin_map[n][num_histo] = index of last histogram in that bin; | |
| 878 // bin_map[n][num_histo + 1] ... bin_map[n][bin_depth - 1] = unused indices. | |
| 879 const int bin_depth = image_histo_raw_size + 1; | |
| 880 int16_t* bin_map = NULL; | |
| 881 VP8LHistogramSet* const orig_histo = | 934 VP8LHistogramSet* const orig_histo = |
| 882 VP8LAllocateHistogramSet(image_histo_raw_size, cache_bits); | 935 VP8LAllocateHistogramSet(image_histo_raw_size, cache_bits); |
| 883 VP8LHistogram* cur_combo; | 936 VP8LHistogram* cur_combo; |
| 937 // Don't attempt linear bin-partition heuristic for |
| 938 // histograms of small sizes (as bin_map will be very sparse) and |
| 939 // maximum quality q==100 (to preserve the compression gains at that level). |
| 940 const int entropy_combine_num_bins = low_effort ? NUM_PARTITIONS : BIN_SIZE; |
| 884 const int entropy_combine = | 941 const int entropy_combine = |
| 885 (orig_histo->size > entropy_combine_num_bins * 2) && (quality < 100); | 942 (orig_histo->size > entropy_combine_num_bins * 2) && (quality < 100); |
| 886 | 943 |
| 887 if (orig_histo == NULL) goto Error; | 944 if (orig_histo == NULL) goto Error; |
| 888 | 945 |
| 889 // Don't attempt linear bin-partition heuristic for: | |
| 890 // histograms of small sizes, as bin_map will be very sparse and; | |
| 891 // Maximum quality (q==100), to preserve the compression gains at that level. | |
| 892 if (entropy_combine) { | |
| 893 const int bin_map_size = bin_depth * entropy_combine_num_bins; | |
| 894 bin_map = (int16_t*)WebPSafeCalloc(bin_map_size, sizeof(*bin_map)); | |
| 895 if (bin_map == NULL) goto Error; | |
| 896 } | |
| 897 | |
| 898 // Construct the histograms from backward references. | 946 // Construct the histograms from backward references. |
| 899 HistogramBuild(xsize, histo_bits, refs, orig_histo); | 947 HistogramBuild(xsize, histo_bits, refs, orig_histo); |
| 900 // Copies the histograms and computes its bit_cost. | 948 // Copies the histograms and computes its bit_cost. |
| 901 HistogramCopyAndAnalyze(orig_histo, image_histo); | 949 HistogramCopyAndAnalyze(orig_histo, image_histo); |
| 902 | 950 |
| 903 cur_combo = tmp_histos->histograms[1]; // pick up working slot | 951 cur_combo = tmp_histos->histograms[1]; // pick up working slot |
| 904 if (entropy_combine) { | 952 if (entropy_combine) { |
| 953 const int bin_map_size = orig_histo->size; |
| 954 // Reuse histogram_symbols storage. By definition, it's guaranteed to be ok. |
| 955 uint16_t* const bin_map = histogram_symbols; |
| 905 const double combine_cost_factor = | 956 const double combine_cost_factor = |
| 906 GetCombineCostFactor(image_histo_raw_size, quality); | 957 GetCombineCostFactor(image_histo_raw_size, quality); |
| 958 |
| 907 HistogramAnalyzeEntropyBin(orig_histo, bin_map, low_effort); | 959 HistogramAnalyzeEntropyBin(orig_histo, bin_map, low_effort); |
| 908 // Collapse histograms with similar entropy. | 960 // Collapse histograms with similar entropy. |
| 909 cur_combo = HistogramCombineEntropyBin(image_histo, cur_combo, bin_map, | 961 cur_combo = HistogramCombineEntropyBin(image_histo, cur_combo, |
| 910 bin_depth, entropy_combine_num_bins, | 962 bin_map, bin_map_size, |
| 963 entropy_combine_num_bins, |
| 911 combine_cost_factor, low_effort); | 964 combine_cost_factor, low_effort); |
| 912 } | 965 } |
| 913 | 966 |
| 914 // Don't combine the histograms using stochastic and greedy heuristics for | 967 // Don't combine the histograms using stochastic and greedy heuristics for |
| 915 // low-effort compression mode. | 968 // low-effort compression mode. |
| 916 if (!low_effort || !entropy_combine) { | 969 if (!low_effort || !entropy_combine) { |
| 917 const float x = quality / 100.f; | 970 const float x = quality / 100.f; |
| 918 // cubic ramp between 1 and MAX_HISTO_GREEDY: | 971 // cubic ramp between 1 and MAX_HISTO_GREEDY: |
| 919 const int threshold_size = (int)(1 + (x * x * x) * (MAX_HISTO_GREEDY - 1)); | 972 const int threshold_size = (int)(1 + (x * x * x) * (MAX_HISTO_GREEDY - 1)); |
| 920 HistogramCombineStochastic(image_histo, tmp_histos->histograms[0], | 973 HistogramCombineStochastic(image_histo, tmp_histos->histograms[0], |
| 921 cur_combo, quality, threshold_size); | 974 cur_combo, quality, threshold_size); |
| 922 if ((image_histo->size <= threshold_size) && | 975 if ((image_histo->size <= threshold_size) && |
| 923 !HistogramCombineGreedy(image_histo)) { | 976 !HistogramCombineGreedy(image_histo)) { |
| 924 goto Error; | 977 goto Error; |
| 925 } | 978 } |
| 926 } | 979 } |
| 927 | 980 |
| 928 // TODO(vikasa): Optimize HistogramRemap for low-effort compression mode also. | 981 // TODO(vikasa): Optimize HistogramRemap for low-effort compression mode also. |
| 929 // Find the optimal map from original histograms to the final ones. | 982 // Find the optimal map from original histograms to the final ones. |
| 930 HistogramRemap(orig_histo, image_histo, histogram_symbols); | 983 HistogramRemap(orig_histo, image_histo, histogram_symbols); |
| 931 | 984 |
| 932 ok = 1; | 985 ok = 1; |
| 933 | 986 |
| 934 Error: | 987 Error: |
| 935 WebPSafeFree(bin_map); | |
| 936 VP8LFreeHistogramSet(orig_histo); | 988 VP8LFreeHistogramSet(orig_histo); |
| 937 return ok; | 989 return ok; |
| 938 } | 990 } |