| OLD | NEW |
| (Empty) |
| 1 // Copyright 2012 Google Inc. All Rights Reserved. | |
| 2 // | |
| 3 // Use of this source code is governed by a BSD-style license | |
| 4 // that can be found in the COPYING file in the root of the source | |
| 5 // tree. An additional intellectual property rights grant can be found | |
| 6 // in the file PATENTS. All contributing project authors may | |
| 7 // be found in the AUTHORS file in the root of the source tree. | |
| 8 // ----------------------------------------------------------------------------- | |
| 9 // | |
| 10 // Author: Jyrki Alakuijala (jyrki@google.com) | |
| 11 // | |
| 12 #ifdef HAVE_CONFIG_H | |
| 13 #include "../webp/config.h" | |
| 14 #endif | |
| 15 | |
| 16 #include <math.h> | |
| 17 | |
| 18 #include "./backward_references.h" | |
| 19 #include "./histogram.h" | |
| 20 #include "../dsp/lossless.h" | |
| 21 #include "../utils/utils.h" | |
| 22 | |
| 23 #define MAX_COST 1.e38 | |
| 24 | |
| 25 // Number of partitions for the three dominant (literal, red and blue) symbol | |
| 26 // costs. | |
| 27 #define NUM_PARTITIONS 4 | |
| 28 // The size of the bin-hash corresponding to the three dominant costs. | |
| 29 #define BIN_SIZE (NUM_PARTITIONS * NUM_PARTITIONS * NUM_PARTITIONS) | |
| 30 // Maximum number of histograms allowed in greedy combining algorithm. | |
| 31 #define MAX_HISTO_GREEDY 100 | |
| 32 | |
| 33 static void HistogramClear(VP8LHistogram* const p) { | |
| 34 uint32_t* const literal = p->literal_; | |
| 35 const int cache_bits = p->palette_code_bits_; | |
| 36 const int histo_size = VP8LGetHistogramSize(cache_bits); | |
| 37 memset(p, 0, histo_size); | |
| 38 p->palette_code_bits_ = cache_bits; | |
| 39 p->literal_ = literal; | |
| 40 } | |
| 41 | |
| 42 // Swap two histogram pointers. | |
| 43 static void HistogramSwap(VP8LHistogram** const A, VP8LHistogram** const B) { | |
| 44 VP8LHistogram* const tmp = *A; | |
| 45 *A = *B; | |
| 46 *B = tmp; | |
| 47 } | |
| 48 | |
| 49 static void HistogramCopy(const VP8LHistogram* const src, | |
| 50 VP8LHistogram* const dst) { | |
| 51 uint32_t* const dst_literal = dst->literal_; | |
| 52 const int dst_cache_bits = dst->palette_code_bits_; | |
| 53 const int histo_size = VP8LGetHistogramSize(dst_cache_bits); | |
| 54 assert(src->palette_code_bits_ == dst_cache_bits); | |
| 55 memcpy(dst, src, histo_size); | |
| 56 dst->literal_ = dst_literal; | |
| 57 } | |
| 58 | |
| 59 int VP8LGetHistogramSize(int cache_bits) { | |
| 60 const int literal_size = VP8LHistogramNumCodes(cache_bits); | |
| 61 const size_t total_size = sizeof(VP8LHistogram) + sizeof(int) * literal_size; | |
| 62 assert(total_size <= (size_t)0x7fffffff); | |
| 63 return (int)total_size; | |
| 64 } | |
| 65 | |
// Releases a histogram allocated by VP8LAllocateHistogram() (NULL is ok).
void VP8LFreeHistogram(VP8LHistogram* const histo) {
  WebPSafeFree(histo);
}
| 69 | |
// Releases a set allocated by VP8LAllocateHistogramSet(); the histograms it
// contains live in the same single allocation, so one free suffices.
void VP8LFreeHistogramSet(VP8LHistogramSet* const histo) {
  WebPSafeFree(histo);
}
| 73 | |
| 74 void VP8LHistogramStoreRefs(const VP8LBackwardRefs* const refs, | |
| 75 VP8LHistogram* const histo) { | |
| 76 VP8LRefsCursor c = VP8LRefsCursorInit(refs); | |
| 77 while (VP8LRefsCursorOk(&c)) { | |
| 78 VP8LHistogramAddSinglePixOrCopy(histo, c.cur_pos); | |
| 79 VP8LRefsCursorNext(&c); | |
| 80 } | |
| 81 } | |
| 82 | |
// Builds histogram 'p' from the backward references 'refs'.
// A negative 'palette_code_bits' keeps p's current palette bit setting.
void VP8LHistogramCreate(VP8LHistogram* const p,
                         const VP8LBackwardRefs* const refs,
                         int palette_code_bits) {
  if (palette_code_bits >= 0) {
    p->palette_code_bits_ = palette_code_bits;
  }
  HistogramClear(p);
  VP8LHistogramStoreRefs(refs, p);
}
| 92 | |
// Sets p's palette bit count and zeroes all its counters.
// p->literal_ must already point at a valid buffer (HistogramClear keeps it).
void VP8LHistogramInit(VP8LHistogram* const p, int palette_code_bits) {
  p->palette_code_bits_ = palette_code_bits;
  HistogramClear(p);
}
| 97 | |
| 98 VP8LHistogram* VP8LAllocateHistogram(int cache_bits) { | |
| 99 VP8LHistogram* histo = NULL; | |
| 100 const int total_size = VP8LGetHistogramSize(cache_bits); | |
| 101 uint8_t* const memory = (uint8_t*)WebPSafeMalloc(total_size, sizeof(*memory)); | |
| 102 if (memory == NULL) return NULL; | |
| 103 histo = (VP8LHistogram*)memory; | |
| 104 // literal_ won't necessary be aligned. | |
| 105 histo->literal_ = (uint32_t*)(memory + sizeof(VP8LHistogram)); | |
| 106 VP8LHistogramInit(histo, cache_bits); | |
| 107 return histo; | |
| 108 } | |
| 109 | |
// Allocates a set of 'size' histograms in one block: the set struct, the
// pointer array, then each (aligned) histogram with its literal_ array.
// Returns NULL on allocation failure.
VP8LHistogramSet* VP8LAllocateHistogramSet(int size, int cache_bits) {
  int i;
  VP8LHistogramSet* set;
  const int histo_size = VP8LGetHistogramSize(cache_bits);
  // WEBP_ALIGN_CST extra bytes per histogram leave room for re-alignment.
  const size_t total_size =
      sizeof(*set) + size * (sizeof(*set->histograms) +
                             histo_size + WEBP_ALIGN_CST);
  uint8_t* memory = (uint8_t*)WebPSafeMalloc(total_size, sizeof(*memory));
  if (memory == NULL) return NULL;

  set = (VP8LHistogramSet*)memory;
  memory += sizeof(*set);
  set->histograms = (VP8LHistogram**)memory;
  memory += size * sizeof(*set->histograms);
  set->max_size = size;
  set->size = size;
  for (i = 0; i < size; ++i) {
    memory = (uint8_t*)WEBP_ALIGN(memory);
    set->histograms[i] = (VP8LHistogram*)memory;
    // literal_ won't necessarily be aligned.
    set->histograms[i]->literal_ = (uint32_t*)(memory + sizeof(VP8LHistogram));
    VP8LHistogramInit(set->histograms[i], cache_bits);
    memory += histo_size;
  }
  return set;
}
| 136 | |
| 137 // ----------------------------------------------------------------------------- | |
| 138 | |
// Accumulates one backward-reference token 'v' into 'histo'.
void VP8LHistogramAddSinglePixOrCopy(VP8LHistogram* const histo,
                                     const PixOrCopy* const v) {
  if (PixOrCopyIsLiteral(v)) {
    // One count per ARGB channel; channel 1 goes into literal_.
    ++histo->alpha_[PixOrCopyLiteral(v, 3)];
    ++histo->red_[PixOrCopyLiteral(v, 2)];
    ++histo->literal_[PixOrCopyLiteral(v, 1)];
    ++histo->blue_[PixOrCopyLiteral(v, 0)];
  } else if (PixOrCopyIsCacheIdx(v)) {
    // Color-cache indices are stored after the literal and length codes.
    const int literal_ix =
        NUM_LITERAL_CODES + NUM_LENGTH_CODES + PixOrCopyCacheIdx(v);
    ++histo->literal_[literal_ix];
  } else {
    // Backward copy: bucket the length and distance via prefix coding.
    // extra_bits is computed but not tallied here.
    int code, extra_bits;
    VP8LPrefixEncodeBits(PixOrCopyLength(v), &code, &extra_bits);
    ++histo->literal_[NUM_LITERAL_CODES + code];
    VP8LPrefixEncodeBits(PixOrCopyDistance(v), &code, &extra_bits);
    ++histo->distance_[code];
  }
}
| 158 | |
| 159 // ----------------------------------------------------------------------------- | |
| 160 // Entropy-related functions. | |
| 161 | |
// Heuristic refinement of a raw entropy estimate: for sparse distributions
// (fewer than 5 used symbols) Huffman coding cannot beat a known lower
// bound, so the theoretical entropy is mixed with that bound. The mixing
// constants are empirically tuned.
static WEBP_INLINE double BitsEntropyRefine(const VP8LBitEntropy* entropy) {
  double mix;
  if (entropy->nonzeros < 5) {
    if (entropy->nonzeros <= 1) {
      // Zero or one symbol: nothing to encode.
      return 0;
    }
    // Two symbols, they will be 0 and 1 in a Huffman code.
    // Let's mix in a bit of entropy to favor good clustering when
    // distributions of these are combined.
    if (entropy->nonzeros == 2) {
      return 0.99 * entropy->sum + 0.01 * entropy->entropy;
    }
    // No matter what the entropy says, we cannot be better than min_limit
    // with Huffman coding. I am mixing a bit of entropy into the
    // min_limit since it produces much better (~0.5 %) compression results
    // perhaps because of better entropy clustering.
    if (entropy->nonzeros == 3) {
      mix = 0.95;
    } else {
      mix = 0.7;  // nonzeros == 4.
    }
  } else {
    mix = 0.627;
  }

  {
    // Lower bound for the Huffman-coded size, then blend with the entropy.
    double min_limit = 2 * entropy->sum - entropy->max_val;
    min_limit = mix * min_limit + (1.0 - mix) * entropy->entropy;
    return (entropy->entropy < min_limit) ? min_limit : entropy->entropy;
  }
}
| 193 | |
| 194 double VP8LBitsEntropy(const uint32_t* const array, int n, | |
| 195 uint32_t* const trivial_symbol) { | |
| 196 VP8LBitEntropy entropy; | |
| 197 VP8LBitsEntropyUnrefined(array, n, &entropy); | |
| 198 if (trivial_symbol != NULL) { | |
| 199 *trivial_symbol = | |
| 200 (entropy.nonzeros == 1) ? entropy.nonzero_code : VP8L_NON_TRIVIAL_SYM; | |
| 201 } | |
| 202 | |
| 203 return BitsEntropyRefine(&entropy); | |
| 204 } | |
| 205 | |
// Fixed baseline cost of transmitting the Huffman code itself.
static double InitialHuffmanCost(void) {
  // Small bias because Huffman code length is typically not stored in
  // full length.
  static const int kHuffmanCodeOfHuffmanCodeSize = CODE_LENGTH_CODES * 3;
  static const double kSmallBias = 9.1;
  return kHuffmanCodeOfHuffmanCodeSize - kSmallBias;
}
| 213 | |
| 214 // Finalize the Huffman cost based on streak numbers and length type (<3 or >=3) | |
| 215 static double FinalHuffmanCost(const VP8LStreaks* const stats) { | |
| 216 double retval = InitialHuffmanCost(); | |
| 217 retval += stats->counts[0] * 1.5625 + 0.234375 * stats->streaks[0][1]; | |
| 218 retval += stats->counts[1] * 2.578125 + 0.703125 * stats->streaks[1][1]; | |
| 219 retval += 1.796875 * stats->streaks[0][0]; | |
| 220 retval += 3.28125 * stats->streaks[1][0]; | |
| 221 return retval; | |
| 222 } | |
| 223 | |
| 224 // Get the symbol entropy for the distribution 'population'. | |
| 225 // Set 'trivial_sym', if there's only one symbol present in the distribution. | |
| 226 static double PopulationCost(const uint32_t* const population, int length, | |
| 227 uint32_t* const trivial_sym) { | |
| 228 VP8LBitEntropy bit_entropy; | |
| 229 VP8LStreaks stats; | |
| 230 VP8LGetEntropyUnrefined(population, length, &bit_entropy, &stats); | |
| 231 if (trivial_sym != NULL) { | |
| 232 *trivial_sym = (bit_entropy.nonzeros == 1) ? bit_entropy.nonzero_code | |
| 233 : VP8L_NON_TRIVIAL_SYM; | |
| 234 } | |
| 235 | |
| 236 return BitsEntropyRefine(&bit_entropy) + FinalHuffmanCost(&stats); | |
| 237 } | |
| 238 | |
| 239 static WEBP_INLINE double GetCombinedEntropy(const uint32_t* const X, | |
| 240 const uint32_t* const Y, | |
| 241 int length) { | |
| 242 VP8LBitEntropy bit_entropy; | |
| 243 VP8LStreaks stats; | |
| 244 VP8LGetCombinedEntropyUnrefined(X, Y, length, &bit_entropy, &stats); | |
| 245 | |
| 246 return BitsEntropyRefine(&bit_entropy) + FinalHuffmanCost(&stats); | |
| 247 } | |
| 248 | |
| 249 // Estimates the Entropy + Huffman + other block overhead size cost. | |
| 250 double VP8LHistogramEstimateBits(const VP8LHistogram* const p) { | |
| 251 return | |
| 252 PopulationCost( | |
| 253 p->literal_, VP8LHistogramNumCodes(p->palette_code_bits_), NULL) | |
| 254 + PopulationCost(p->red_, NUM_LITERAL_CODES, NULL) | |
| 255 + PopulationCost(p->blue_, NUM_LITERAL_CODES, NULL) | |
| 256 + PopulationCost(p->alpha_, NUM_LITERAL_CODES, NULL) | |
| 257 + PopulationCost(p->distance_, NUM_DISTANCE_CODES, NULL) | |
| 258 + VP8LExtraCost(p->literal_ + NUM_LITERAL_CODES, NUM_LENGTH_CODES) | |
| 259 + VP8LExtraCost(p->distance_, NUM_DISTANCE_CODES); | |
| 260 } | |
| 261 | |
| 262 // ----------------------------------------------------------------------------- | |
| 263 // Various histogram combine/cost-eval functions | |
| 264 | |
// Accumulates into '*cost' the bit cost of coding 'a' and 'b' with shared
// (combined) Huffman codes. Returns 0 as soon as the running total exceeds
// 'cost_threshold' (early out; '*cost' then only covers the channels
// evaluated so far), 1 otherwise.
static int GetCombinedHistogramEntropy(const VP8LHistogram* const a,
                                       const VP8LHistogram* const b,
                                       double cost_threshold,
                                       double* cost) {
  const int palette_code_bits = a->palette_code_bits_;
  assert(a->palette_code_bits_ == b->palette_code_bits_);
  // Literal channel first (includes length and color-cache codes).
  *cost += GetCombinedEntropy(a->literal_, b->literal_,
                              VP8LHistogramNumCodes(palette_code_bits));
  *cost += VP8LExtraCostCombined(a->literal_ + NUM_LITERAL_CODES,
                                 b->literal_ + NUM_LITERAL_CODES,
                                 NUM_LENGTH_CODES);
  if (*cost > cost_threshold) return 0;

  *cost += GetCombinedEntropy(a->red_, b->red_, NUM_LITERAL_CODES);
  if (*cost > cost_threshold) return 0;

  *cost += GetCombinedEntropy(a->blue_, b->blue_, NUM_LITERAL_CODES);
  if (*cost > cost_threshold) return 0;

  *cost += GetCombinedEntropy(a->alpha_, b->alpha_, NUM_LITERAL_CODES);
  if (*cost > cost_threshold) return 0;

  *cost += GetCombinedEntropy(a->distance_, b->distance_, NUM_DISTANCE_CODES);
  *cost +=
      VP8LExtraCostCombined(a->distance_, b->distance_, NUM_DISTANCE_CODES);
  if (*cost > cost_threshold) return 0;

  return 1;
}
| 294 | |
// Performs out = a + b, computing the cost C(a+b) - C(a) - C(b) while comparing
// to the threshold value 'cost_threshold'. The score returned is
// Score = C(a+b) - C(a) - C(b), where C(a) + C(b) is known and fixed.
// Since the previous score passed is 'cost_threshold', we only need to compare
// the partial cost against 'cost_threshold + C(a) + C(b)' to possibly bail-out
// early.
// Note: 'out' is only written when the combined cost beats the threshold.
static double HistogramAddEval(const VP8LHistogram* const a,
                               const VP8LHistogram* const b,
                               VP8LHistogram* const out,
                               double cost_threshold) {
  double cost = 0;
  const double sum_cost = a->bit_cost_ + b->bit_cost_;
  cost_threshold += sum_cost;

  if (GetCombinedHistogramEntropy(a, b, cost_threshold, &cost)) {
    VP8LHistogramAdd(a, b, out);
    out->bit_cost_ = cost;
    out->palette_code_bits_ = a->palette_code_bits_;
    // The merge stays trivial only if both inputs agree on the symbol.
    out->trivial_symbol_ = (a->trivial_symbol_ == b->trivial_symbol_) ?
        a->trivial_symbol_ : VP8L_NON_TRIVIAL_SYM;
  }

  return cost - sum_cost;
}
| 319 | |
| 320 // Same as HistogramAddEval(), except that the resulting histogram | |
| 321 // is not stored. Only the cost C(a+b) - C(a) is evaluated. We omit | |
| 322 // the term C(b) which is constant over all the evaluations. | |
| 323 static double HistogramAddThresh(const VP8LHistogram* const a, | |
| 324 const VP8LHistogram* const b, | |
| 325 double cost_threshold) { | |
| 326 double cost = -a->bit_cost_; | |
| 327 GetCombinedHistogramEntropy(a, b, cost_threshold, &cost); | |
| 328 return cost; | |
| 329 } | |
| 330 | |
| 331 // ----------------------------------------------------------------------------- | |
| 332 | |
// The structure to keep track of cost range for the three dominant entropy
// symbols.
// TODO(skal): Evaluate if float can be used here instead of double for
// representing the entropy costs.
typedef struct {
  double literal_max_;  // largest literal_cost_ observed so far
  double literal_min_;  // smallest literal_cost_ observed so far
  double red_max_;      // largest red_cost_ observed so far
  double red_min_;      // smallest red_cost_ observed so far
  double blue_max_;     // largest blue_cost_ observed so far
  double blue_min_;     // smallest blue_cost_ observed so far
} DominantCostRange;
| 345 | |
| 346 static void DominantCostRangeInit(DominantCostRange* const c) { | |
| 347 c->literal_max_ = 0.; | |
| 348 c->literal_min_ = MAX_COST; | |
| 349 c->red_max_ = 0.; | |
| 350 c->red_min_ = MAX_COST; | |
| 351 c->blue_max_ = 0.; | |
| 352 c->blue_min_ = MAX_COST; | |
| 353 } | |
| 354 | |
| 355 static void UpdateDominantCostRange( | |
| 356 const VP8LHistogram* const h, DominantCostRange* const c) { | |
| 357 if (c->literal_max_ < h->literal_cost_) c->literal_max_ = h->literal_cost_; | |
| 358 if (c->literal_min_ > h->literal_cost_) c->literal_min_ = h->literal_cost_; | |
| 359 if (c->red_max_ < h->red_cost_) c->red_max_ = h->red_cost_; | |
| 360 if (c->red_min_ > h->red_cost_) c->red_min_ = h->red_cost_; | |
| 361 if (c->blue_max_ < h->blue_cost_) c->blue_max_ = h->blue_cost_; | |
| 362 if (c->blue_min_ > h->blue_cost_) c->blue_min_ = h->blue_cost_; | |
| 363 } | |
| 364 | |
// Recomputes all cached cost fields of 'h' (literal/red/blue costs and the
// total bit_cost_) and its trivial_symbol_.
static void UpdateHistogramCost(VP8LHistogram* const h) {
  uint32_t alpha_sym, red_sym, blue_sym;
  const double alpha_cost =
      PopulationCost(h->alpha_, NUM_LITERAL_CODES, &alpha_sym);
  const double distance_cost =
      PopulationCost(h->distance_, NUM_DISTANCE_CODES, NULL) +
      VP8LExtraCost(h->distance_, NUM_DISTANCE_CODES);
  const int num_codes = VP8LHistogramNumCodes(h->palette_code_bits_);
  h->literal_cost_ = PopulationCost(h->literal_, num_codes, NULL) +
                     VP8LExtraCost(h->literal_ + NUM_LITERAL_CODES,
                                   NUM_LENGTH_CODES);
  h->red_cost_ = PopulationCost(h->red_, NUM_LITERAL_CODES, &red_sym);
  h->blue_cost_ = PopulationCost(h->blue_, NUM_LITERAL_CODES, &blue_sym);
  h->bit_cost_ = h->literal_cost_ + h->red_cost_ + h->blue_cost_ +
                 alpha_cost + distance_cost;
  if ((alpha_sym | red_sym | blue_sym) == VP8L_NON_TRIVIAL_SYM) {
    // At least one channel uses several symbols.
    h->trivial_symbol_ = VP8L_NON_TRIVIAL_SYM;
  } else {
    // Each channel has a single symbol: pack them into one 32-bit value
    // (bits 8..15 are left at zero).
    h->trivial_symbol_ =
        ((uint32_t)alpha_sym << 24) | (red_sym << 16) | (blue_sym << 0);
  }
}
| 387 | |
| 388 static int GetBinIdForEntropy(double min, double max, double val) { | |
| 389 const double range = max - min; | |
| 390 if (range > 0.) { | |
| 391 const double delta = val - min; | |
| 392 return (int)((NUM_PARTITIONS - 1e-6) * delta / range); | |
| 393 } else { | |
| 394 return 0; | |
| 395 } | |
| 396 } | |
| 397 | |
| 398 static int GetHistoBinIndex(const VP8LHistogram* const h, | |
| 399 const DominantCostRange* const c, int low_effort) { | |
| 400 int bin_id = GetBinIdForEntropy(c->literal_min_, c->literal_max_, | |
| 401 h->literal_cost_); | |
| 402 assert(bin_id < NUM_PARTITIONS); | |
| 403 if (!low_effort) { | |
| 404 bin_id = bin_id * NUM_PARTITIONS | |
| 405 + GetBinIdForEntropy(c->red_min_, c->red_max_, h->red_cost_); | |
| 406 bin_id = bin_id * NUM_PARTITIONS | |
| 407 + GetBinIdForEntropy(c->blue_min_, c->blue_max_, h->blue_cost_); | |
| 408 assert(bin_id < BIN_SIZE); | |
| 409 } | |
| 410 return bin_id; | |
| 411 } | |
| 412 | |
// Construct the histograms from backward references.
// Each token is credited to the tile (of size 2^histo_bits) where it starts.
static void HistogramBuild(
    int xsize, int histo_bits, const VP8LBackwardRefs* const backward_refs,
    VP8LHistogramSet* const image_histo) {
  int x = 0, y = 0;
  const int histo_xsize = VP8LSubSampleSize(xsize, histo_bits);
  VP8LHistogram** const histograms = image_histo->histograms;
  VP8LRefsCursor c = VP8LRefsCursorInit(backward_refs);
  assert(histo_bits > 0);
  while (VP8LRefsCursorOk(&c)) {
    const PixOrCopy* const v = c.cur_pos;
    // Tile index for the current (x, y) position.
    const int ix = (y >> histo_bits) * histo_xsize + (x >> histo_bits);
    VP8LHistogramAddSinglePixOrCopy(histograms[ix], v);
    // Advance the pixel cursor; a copy may wrap over several rows.
    x += PixOrCopyLength(v);
    while (x >= xsize) {
      x -= xsize;
      ++y;
    }
    VP8LRefsCursorNext(&c);
  }
}
| 434 | |
| 435 // Copies the histograms and computes its bit_cost. | |
| 436 static void HistogramCopyAndAnalyze( | |
| 437 VP8LHistogramSet* const orig_histo, VP8LHistogramSet* const image_histo) { | |
| 438 int i; | |
| 439 const int histo_size = orig_histo->size; | |
| 440 VP8LHistogram** const orig_histograms = orig_histo->histograms; | |
| 441 VP8LHistogram** const histograms = image_histo->histograms; | |
| 442 for (i = 0; i < histo_size; ++i) { | |
| 443 VP8LHistogram* const histo = orig_histograms[i]; | |
| 444 UpdateHistogramCost(histo); | |
| 445 // Copy histograms from orig_histo[] to image_histo[]. | |
| 446 HistogramCopy(histo, histograms[i]); | |
| 447 } | |
| 448 } | |
| 449 | |
// Partition histograms to different entropy bins for three dominant (literal,
// red and blue) symbol costs and compute the histogram aggregate bit_cost.
// 'bin_map' layout: for each bin, slot 0 holds the count of histograms in
// the bin, followed by their indices.
static void HistogramAnalyzeEntropyBin(VP8LHistogramSet* const image_histo,
                                       int16_t* const bin_map, int low_effort) {
  int i;
  VP8LHistogram** const histograms = image_histo->histograms;
  const int histo_size = image_histo->size;
  const int bin_depth = histo_size + 1;
  DominantCostRange cost_range;
  DominantCostRangeInit(&cost_range);

  // Analyze the dominant (literal, red and blue) entropy costs.
  for (i = 0; i < histo_size; ++i) {
    VP8LHistogram* const histo = histograms[i];
    UpdateDominantCostRange(histo, &cost_range);
  }

  // bin-hash histograms on three of the dominant (literal, red and blue)
  // symbol costs.
  for (i = 0; i < histo_size; ++i) {
    const VP8LHistogram* const histo = histograms[i];
    const int bin_id = GetHistoBinIndex(histo, &cost_range, low_effort);
    const int bin_offset = bin_id * bin_depth;
    // bin_map[n][0] for every bin 'n' maintains the counter for the number of
    // histograms in that bin.
    // Get and increment the num_histos in that bin.
    const int num_histos = ++bin_map[bin_offset];
    assert(bin_offset + num_histos < bin_depth * BIN_SIZE);
    // Add histogram i'th index at num_histos (last) position in the bin_map.
    bin_map[bin_offset + num_histos] = i;
  }
}
| 482 | |
| 483 // Compact the histogram set by removing unused entries. | |
| 484 static void HistogramCompactBins(VP8LHistogramSet* const image_histo) { | |
| 485 VP8LHistogram** const histograms = image_histo->histograms; | |
| 486 int i, j; | |
| 487 | |
| 488 for (i = 0, j = 0; i < image_histo->size; ++i) { | |
| 489 if (histograms[i] != NULL && histograms[i]->bit_cost_ != 0.) { | |
| 490 if (j < i) { | |
| 491 histograms[j] = histograms[i]; | |
| 492 histograms[i] = NULL; | |
| 493 } | |
| 494 ++j; | |
| 495 } | |
| 496 } | |
| 497 image_histo->size = j; | |
| 498 } | |
| 499 | |
// Merges histograms that landed in the same entropy bin. In low-effort mode
// every bin is merged unconditionally into its first histogram; otherwise a
// merge happens only when it lowers the cost enough (see bit_cost_thresh).
// Merged-away histograms get bit_cost_ = 0 and are removed by
// HistogramCompactBins(). Returns the (possibly swapped) scratch histogram
// so the caller can keep reusing it.
static VP8LHistogram* HistogramCombineEntropyBin(
    VP8LHistogramSet* const image_histo,
    VP8LHistogram* cur_combo,
    int16_t* const bin_map, int bin_depth, int num_bins,
    double combine_cost_factor, int low_effort) {
  int bin_id;
  VP8LHistogram** const histograms = image_histo->histograms;

  for (bin_id = 0; bin_id < num_bins; ++bin_id) {
    const int bin_offset = bin_id * bin_depth;
    const int num_histos = bin_map[bin_offset];
    // First histogram of the bin: the merge target.
    const int idx1 = bin_map[bin_offset + 1];
    int num_combine_failures = 0;
    int n;
    for (n = 2; n <= num_histos; ++n) {
      const int idx2 = bin_map[bin_offset + n];
      if (low_effort) {
        // Merge all histograms with the same bin index, irrespective of cost of
        // the merged histograms.
        VP8LHistogramAdd(histograms[idx1], histograms[idx2], histograms[idx1]);
        histograms[idx2]->bit_cost_ = 0.;
      } else {
        const double bit_cost_idx2 = histograms[idx2]->bit_cost_;
        if (bit_cost_idx2 > 0.) {
          const double bit_cost_thresh = -bit_cost_idx2 * combine_cost_factor;
          const double curr_cost_diff =
              HistogramAddEval(histograms[idx1], histograms[idx2],
                               cur_combo, bit_cost_thresh);
          if (curr_cost_diff < bit_cost_thresh) {
            // Try to merge two histograms only if the combo is a trivial one or
            // the two candidate histograms are already non-trivial.
            // For some images, 'try_combine' turns out to be false for a lot of
            // histogram pairs. In that case, we fallback to combining
            // histograms as usual to avoid increasing the header size.
            const int try_combine =
                (cur_combo->trivial_symbol_ != VP8L_NON_TRIVIAL_SYM) ||
                ((histograms[idx1]->trivial_symbol_ == VP8L_NON_TRIVIAL_SYM) &&
                 (histograms[idx2]->trivial_symbol_ == VP8L_NON_TRIVIAL_SYM));
            const int max_combine_failures = 32;
            if (try_combine || (num_combine_failures >= max_combine_failures)) {
              // Adopt the merged histogram; the old idx1 becomes the scratch.
              HistogramSwap(&cur_combo, &histograms[idx1]);
              histograms[idx2]->bit_cost_ = 0.;
            } else {
              ++num_combine_failures;
            }
          }
        }
      }
    }
    if (low_effort) {
      // Update the bit_cost for the merged histograms (per bin index).
      UpdateHistogramCost(histograms[idx1]);
    }
  }
  HistogramCompactBins(image_histo);
  return cur_combo;
}
| 557 | |
// Deterministic pseudo-random step: multiply the seed by 16807 (mod 2^32 via
// unsigned wrap-around) and avoid the absorbing state 0. Returns the new seed.
static uint32_t MyRand(uint32_t* seed) {
  const uint32_t next = *seed * 16807U;
  *seed = (next == 0) ? 1u : next;
  return *seed;
}
| 565 | |
| 566 // ----------------------------------------------------------------------------- | |
| 567 // Histogram pairs priority queue | |
| 568 | |
// Pair of histograms. Negative idx1 value means that pair is out-of-date.
typedef struct {
  int idx1;           // smaller histogram index of the pair (see PreparePair)
  int idx2;           // larger histogram index of the pair
  double cost_diff;   // C(a+b) - C(a) - C(b); negative favors merging
  double cost_combo;  // bit cost of the merged histogram, C(a+b)
} HistogramPair;

// Flat-array priority queue of pairs; index 0 holds the pair with the lowest
// cost_diff (see UpdateQueueFront).
typedef struct {
  HistogramPair* queue;  // max_size + 1 entries; the extra slot is scratch
  int size;              // number of valid pairs currently stored
  int max_size;          // capacity, excluding the scratch slot
} HistoQueue;
| 582 | |
// Initializes a queue able to hold every pair among 'max_index' histograms.
// Returns 1 on success, 0 on allocation failure (caller must still call
// HistoQueueClear()).
static int HistoQueueInit(HistoQueue* const histo_queue, const int max_index) {
  histo_queue->size = 0;
  // max_index^2 for the queue size is safe. If you look at
  // HistogramCombineGreedy, and imagine that UpdateQueueFront always pushes
  // data to the queue, you insert at most:
  // - max_index*(max_index-1)/2 (the first two for loops)
  // - max_index - 1 in the last for loop at the first iteration of the while
  //   loop, max_index - 2 at the second iteration ... therefore
  //   max_index*(max_index-1)/2 overall too
  histo_queue->max_size = max_index * max_index;
  // We allocate max_size + 1 because the last element at index "size" is
  // used as temporary data (and it could be up to max_size).
  histo_queue->queue = (HistogramPair*)WebPSafeMalloc(
      histo_queue->max_size + 1, sizeof(*histo_queue->queue));
  return histo_queue->queue != NULL;
}
| 599 | |
// Releases the queue storage (safe to call after a failed HistoQueueInit).
static void HistoQueueClear(HistoQueue* const histo_queue) {
  assert(histo_queue != NULL);
  WebPSafeFree(histo_queue->queue);
}
| 604 | |
| 605 static void SwapHistogramPairs(HistogramPair *p1, | |
| 606 HistogramPair *p2) { | |
| 607 const HistogramPair tmp = *p1; | |
| 608 *p1 = *p2; | |
| 609 *p2 = tmp; | |
| 610 } | |
| 611 | |
// Given a valid priority queue in range [0, queue_size) this function checks
// whether histo_queue[queue_size] should be accepted and swaps it with the
// front if it is smaller. Otherwise, it leaves it as is.
static void UpdateQueueFront(HistoQueue* const histo_queue) {
  // A non-negative cost_diff means merging would not help: reject the
  // candidate (it stays in the scratch slot and is overwritten next time).
  if (histo_queue->queue[histo_queue->size].cost_diff >= 0) return;

  if (histo_queue->queue[histo_queue->size].cost_diff <
      histo_queue->queue[0].cost_diff) {
    SwapHistogramPairs(histo_queue->queue,
                       histo_queue->queue + histo_queue->size);
  }
  ++histo_queue->size;

  // We cannot add more elements than the capacity.
  // The allocation adds an extra element to the official capacity so that
  // histo_queue->queue[histo_queue->max_size] is read/written within bound.
  assert(histo_queue->size <= histo_queue->max_size);
}
| 630 | |
| 631 // ----------------------------------------------------------------------------- | |
| 632 | |
| 633 static void PreparePair(VP8LHistogram** histograms, int idx1, int idx2, | |
| 634 HistogramPair* const pair) { | |
| 635 VP8LHistogram* h1; | |
| 636 VP8LHistogram* h2; | |
| 637 double sum_cost; | |
| 638 | |
| 639 if (idx1 > idx2) { | |
| 640 const int tmp = idx2; | |
| 641 idx2 = idx1; | |
| 642 idx1 = tmp; | |
| 643 } | |
| 644 pair->idx1 = idx1; | |
| 645 pair->idx2 = idx2; | |
| 646 h1 = histograms[idx1]; | |
| 647 h2 = histograms[idx2]; | |
| 648 sum_cost = h1->bit_cost_ + h2->bit_cost_; | |
| 649 pair->cost_combo = 0.; | |
| 650 GetCombinedHistogramEntropy(h1, h2, sum_cost, &pair->cost_combo); | |
| 651 pair->cost_diff = pair->cost_combo - sum_cost; | |
| 652 } | |
| 653 | |
// Combines histograms by continuously choosing the one with the highest cost
// reduction.
// Returns 1 on success, 0 on allocation failure.
static int HistogramCombineGreedy(VP8LHistogramSet* const image_histo) {
  int ok = 0;
  int image_histo_size = image_histo->size;
  int i, j;
  VP8LHistogram** const histograms = image_histo->histograms;
  // Indexes of remaining histograms.
  int* const clusters =
      (int*)WebPSafeMalloc(image_histo_size, sizeof(*clusters));
  // Priority queue of histogram pairs.
  HistoQueue histo_queue;

  if (!HistoQueueInit(&histo_queue, image_histo_size) || clusters == NULL) {
    goto End;
  }

  // Seed the queue with every candidate pair.
  for (i = 0; i < image_histo_size; ++i) {
    // Initialize clusters indexes.
    clusters[i] = i;
    for (j = i + 1; j < image_histo_size; ++j) {
      // Initialize positions array.
      PreparePair(histograms, i, j, &histo_queue.queue[histo_queue.size]);
      UpdateQueueFront(&histo_queue);
    }
  }

  while (image_histo_size > 1 && histo_queue.size > 0) {
    HistogramPair* copy_to;
    // The front of the queue is the best (most negative cost_diff) pair.
    const int idx1 = histo_queue.queue[0].idx1;
    const int idx2 = histo_queue.queue[0].idx2;
    VP8LHistogramAdd(histograms[idx2], histograms[idx1], histograms[idx1]);
    histograms[idx1]->bit_cost_ = histo_queue.queue[0].cost_combo;
    // Remove merged histogram.
    for (i = 0; i + 1 < image_histo_size; ++i) {
      if (clusters[i] >= idx2) {
        clusters[i] = clusters[i + 1];
      }
    }
    --image_histo_size;

    // Remove pairs intersecting the just combined best pair. This will
    // therefore pop the head of the queue.
    copy_to = histo_queue.queue;
    for (i = 0; i < histo_queue.size; ++i) {
      HistogramPair* const p = histo_queue.queue + i;
      if (p->idx1 == idx1 || p->idx2 == idx1 ||
          p->idx1 == idx2 || p->idx2 == idx2) {
        // Do not copy the invalid pair.
        continue;
      }
      if (p->cost_diff < histo_queue.queue[0].cost_diff) {
        // Replace the top of the queue if we found better.
        SwapHistogramPairs(histo_queue.queue, p);
      }
      SwapHistogramPairs(copy_to, p);
      ++copy_to;
    }
    histo_queue.size = (int)(copy_to - histo_queue.queue);

    // Push new pairs formed with combined histogram to the queue.
    for (i = 0; i < image_histo_size; ++i) {
      if (clusters[i] != idx1) {
        PreparePair(histograms, idx1, clusters[i],
                    &histo_queue.queue[histo_queue.size]);
        UpdateQueueFront(&histo_queue);
      }
    }
  }
  // Move remaining histograms to the beginning of the array.
  for (i = 0; i < image_histo_size; ++i) {
    if (i != clusters[i]) {  // swap the two histograms
      HistogramSwap(&histograms[i], &histograms[clusters[i]]);
    }
  }

  image_histo->size = image_histo_size;
  ok = 1;

 End:
  WebPSafeFree(clusters);
  HistoQueueClear(&histo_queue);
  return ok;
}
| 738 | |
| 739 static void HistogramCombineStochastic(VP8LHistogramSet* const image_histo, | |
| 740 VP8LHistogram* tmp_histo, | |
| 741 VP8LHistogram* best_combo, | |
| 742 int quality, int min_cluster_size) { | |
| 743 int iter; | |
| 744 uint32_t seed = 0; | |
| 745 int tries_with_no_success = 0; | |
| 746 int image_histo_size = image_histo->size; | |
| 747 const int iter_mult = (quality < 25) ? 2 : 2 + (quality - 25) / 8; | |
| 748 const int outer_iters = image_histo_size * iter_mult; | |
| 749 const int num_pairs = image_histo_size / 2; | |
| 750 const int num_tries_no_success = outer_iters / 2; | |
| 751 VP8LHistogram** const histograms = image_histo->histograms; | |
| 752 | |
| 753 // Collapse similar histograms in 'image_histo'. | |
| 754 ++min_cluster_size; | |
| 755 for (iter = 0; | |
| 756 iter < outer_iters && image_histo_size >= min_cluster_size; | |
| 757 ++iter) { | |
| 758 double best_cost_diff = 0.; | |
| 759 int best_idx1 = -1, best_idx2 = 1; | |
| 760 int j; | |
| 761 const int num_tries = | |
| 762 (num_pairs < image_histo_size) ? num_pairs : image_histo_size; | |
| 763 seed += iter; | |
| 764 for (j = 0; j < num_tries; ++j) { | |
| 765 double curr_cost_diff; | |
| 766 // Choose two histograms at random and try to combine them. | |
| 767 const uint32_t idx1 = MyRand(&seed) % image_histo_size; | |
| 768 const uint32_t tmp = (j & 7) + 1; | |
| 769 const uint32_t diff = | |
| 770 (tmp < 3) ? tmp : MyRand(&seed) % (image_histo_size - 1); | |
| 771 const uint32_t idx2 = (idx1 + diff + 1) % image_histo_size; | |
| 772 if (idx1 == idx2) { | |
| 773 continue; | |
| 774 } | |
| 775 | |
| 776 // Calculate cost reduction on combining. | |
| 777 curr_cost_diff = HistogramAddEval(histograms[idx1], histograms[idx2], | |
| 778 tmp_histo, best_cost_diff); | |
| 779 if (curr_cost_diff < best_cost_diff) { // found a better pair? | |
| 780 HistogramSwap(&best_combo, &tmp_histo); | |
| 781 best_cost_diff = curr_cost_diff; | |
| 782 best_idx1 = idx1; | |
| 783 best_idx2 = idx2; | |
| 784 } | |
| 785 } | |
| 786 | |
| 787 if (best_idx1 >= 0) { | |
| 788 HistogramSwap(&best_combo, &histograms[best_idx1]); | |
| 789 // swap best_idx2 slot with last one (which is now unused) | |
| 790 --image_histo_size; | |
| 791 if (best_idx2 != image_histo_size) { | |
| 792 HistogramSwap(&histograms[image_histo_size], &histograms[best_idx2]); | |
| 793 histograms[image_histo_size] = NULL; | |
| 794 } | |
| 795 tries_with_no_success = 0; | |
| 796 } | |
| 797 if (++tries_with_no_success >= num_tries_no_success) { | |
| 798 break; | |
| 799 } | |
| 800 } | |
| 801 image_histo->size = image_histo_size; | |
| 802 } | |
| 803 | |
| 804 // ----------------------------------------------------------------------------- | |
| 805 // Histogram refinement | |
| 806 | |
| 807 // Find the best 'out' histogram for each of the 'in' histograms. | |
| 808 // Note: we assume that out[]->bit_cost_ is already up-to-date. | |
| 809 static void HistogramRemap(const VP8LHistogramSet* const in, | |
| 810 const VP8LHistogramSet* const out, | |
| 811 uint16_t* const symbols) { | |
| 812 int i; | |
| 813 VP8LHistogram** const in_histo = in->histograms; | |
| 814 VP8LHistogram** const out_histo = out->histograms; | |
| 815 const int in_size = in->size; | |
| 816 const int out_size = out->size; | |
| 817 if (out_size > 1) { | |
| 818 for (i = 0; i < in_size; ++i) { | |
| 819 int best_out = 0; | |
| 820 double best_bits = MAX_COST; | |
| 821 int k; | |
| 822 for (k = 0; k < out_size; ++k) { | |
| 823 const double cur_bits = | |
| 824 HistogramAddThresh(out_histo[k], in_histo[i], best_bits); | |
| 825 if (k == 0 || cur_bits < best_bits) { | |
| 826 best_bits = cur_bits; | |
| 827 best_out = k; | |
| 828 } | |
| 829 } | |
| 830 symbols[i] = best_out; | |
| 831 } | |
| 832 } else { | |
| 833 assert(out_size == 1); | |
| 834 for (i = 0; i < in_size; ++i) { | |
| 835 symbols[i] = 0; | |
| 836 } | |
| 837 } | |
| 838 | |
| 839 // Recompute each out based on raw and symbols. | |
| 840 for (i = 0; i < out_size; ++i) { | |
| 841 HistogramClear(out_histo[i]); | |
| 842 } | |
| 843 | |
| 844 for (i = 0; i < in_size; ++i) { | |
| 845 const int idx = symbols[i]; | |
| 846 VP8LHistogramAdd(in_histo[i], out_histo[idx], out_histo[idx]); | |
| 847 } | |
| 848 } | |
| 849 | |
// Returns the cost factor used when deciding whether two histograms should be
// combined. Below quality 90, the factor is halved for each histogram-count
// threshold crossed (and once more at quality <= 50), making merges
// progressively more aggressive for large images / low quality.
static double GetCombineCostFactor(int histo_size, int quality) {
  double factor = 0.16;
  if (quality >= 90) return factor;   // keep full precision at high quality
  if (histo_size > 256) factor *= 0.5;
  if (histo_size > 512) factor *= 0.5;
  if (histo_size > 1024) factor *= 0.5;
  if (quality <= 50) factor *= 0.5;
  return factor;
}
| 860 | |
| 861 int VP8LGetHistoImageSymbols(int xsize, int ysize, | |
| 862 const VP8LBackwardRefs* const refs, | |
| 863 int quality, int low_effort, | |
| 864 int histo_bits, int cache_bits, | |
| 865 VP8LHistogramSet* const image_histo, | |
| 866 VP8LHistogramSet* const tmp_histos, | |
| 867 uint16_t* const histogram_symbols) { | |
| 868 int ok = 0; | |
| 869 const int histo_xsize = histo_bits ? VP8LSubSampleSize(xsize, histo_bits) : 1; | |
| 870 const int histo_ysize = histo_bits ? VP8LSubSampleSize(ysize, histo_bits) : 1; | |
| 871 const int image_histo_raw_size = histo_xsize * histo_ysize; | |
| 872 const int entropy_combine_num_bins = low_effort ? NUM_PARTITIONS : BIN_SIZE; | |
| 873 | |
| 874 // The bin_map for every bin follows following semantics: | |
| 875 // bin_map[n][0] = num_histo; // The number of histograms in that bin. | |
| 876 // bin_map[n][1] = index of first histogram in that bin; | |
| 877 // bin_map[n][num_histo] = index of last histogram in that bin; | |
| 878 // bin_map[n][num_histo + 1] ... bin_map[n][bin_depth - 1] = unused indices. | |
| 879 const int bin_depth = image_histo_raw_size + 1; | |
| 880 int16_t* bin_map = NULL; | |
| 881 VP8LHistogramSet* const orig_histo = | |
| 882 VP8LAllocateHistogramSet(image_histo_raw_size, cache_bits); | |
| 883 VP8LHistogram* cur_combo; | |
| 884 const int entropy_combine = | |
| 885 (orig_histo->size > entropy_combine_num_bins * 2) && (quality < 100); | |
| 886 | |
| 887 if (orig_histo == NULL) goto Error; | |
| 888 | |
| 889 // Don't attempt linear bin-partition heuristic for: | |
| 890 // histograms of small sizes, as bin_map will be very sparse and; | |
| 891 // Maximum quality (q==100), to preserve the compression gains at that level. | |
| 892 if (entropy_combine) { | |
| 893 const int bin_map_size = bin_depth * entropy_combine_num_bins; | |
| 894 bin_map = (int16_t*)WebPSafeCalloc(bin_map_size, sizeof(*bin_map)); | |
| 895 if (bin_map == NULL) goto Error; | |
| 896 } | |
| 897 | |
| 898 // Construct the histograms from backward references. | |
| 899 HistogramBuild(xsize, histo_bits, refs, orig_histo); | |
| 900 // Copies the histograms and computes its bit_cost. | |
| 901 HistogramCopyAndAnalyze(orig_histo, image_histo); | |
| 902 | |
| 903 cur_combo = tmp_histos->histograms[1]; // pick up working slot | |
| 904 if (entropy_combine) { | |
| 905 const double combine_cost_factor = | |
| 906 GetCombineCostFactor(image_histo_raw_size, quality); | |
| 907 HistogramAnalyzeEntropyBin(orig_histo, bin_map, low_effort); | |
| 908 // Collapse histograms with similar entropy. | |
| 909 cur_combo = HistogramCombineEntropyBin(image_histo, cur_combo, bin_map, | |
| 910 bin_depth, entropy_combine_num_bins, | |
| 911 combine_cost_factor, low_effort); | |
| 912 } | |
| 913 | |
| 914 // Don't combine the histograms using stochastic and greedy heuristics for | |
| 915 // low-effort compression mode. | |
| 916 if (!low_effort || !entropy_combine) { | |
| 917 const float x = quality / 100.f; | |
| 918 // cubic ramp between 1 and MAX_HISTO_GREEDY: | |
| 919 const int threshold_size = (int)(1 + (x * x * x) * (MAX_HISTO_GREEDY - 1)); | |
| 920 HistogramCombineStochastic(image_histo, tmp_histos->histograms[0], | |
| 921 cur_combo, quality, threshold_size); | |
| 922 if ((image_histo->size <= threshold_size) && | |
| 923 !HistogramCombineGreedy(image_histo)) { | |
| 924 goto Error; | |
| 925 } | |
| 926 } | |
| 927 | |
| 928 // TODO(vikasa): Optimize HistogramRemap for low-effort compression mode also. | |
| 929 // Find the optimal map from original histograms to the final ones. | |
| 930 HistogramRemap(orig_histo, image_histo, histogram_symbols); | |
| 931 | |
| 932 ok = 1; | |
| 933 | |
| 934 Error: | |
| 935 WebPSafeFree(bin_map); | |
| 936 VP8LFreeHistogramSet(orig_histo); | |
| 937 return ok; | |
| 938 } | |
| OLD | NEW |