/* Copyright 2015 Google Inc. All Rights Reserved.

   Distributed under MIT license.
   See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/

// Function for fast encoding of an input fragment, independently from the input
// history. This function uses one-pass processing: when we find a backward
// match, we immediately emit the corresponding command and literal codes to
// the bit stream.
//
// Adapted from the CompressFragment() function in
// https://github.com/google/snappy/blob/master/snappy.cc

#include "./compress_fragment.h"

#include <algorithm>
#include <cassert>
#include <cstring>

#include "./brotli_bit_stream.h"
#include "./entropy_encode.h"
#include "./fast_log.h"
#include "./find_match_length.h"
#include "./port.h"
#include "./types.h"
#include "./write_bits.h"

namespace brotli {

// kHashMul32 multiplier has these properties:
// * The multiplier must be odd. Otherwise we may lose the highest bit.
// * No long streaks of 1s or 0s.
// * There is no effort to ensure that it is a prime, the oddity is enough
//   for this use.
// * The number has been tuned heuristically against compression benchmarks.
static const uint32_t kHashMul32 = 0x1e35a7bd;

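// Computes a hash of the five bytes at "p": the left shift by 24 drops the
// top three bytes of the little-endian 64-bit load, so only p[0..4] affect
// the product -- exactly the five bytes that IsMatch() below compares.
// "shift" is 64 - log2(table size); e.g. with a 2^15-entry table, shift is
// 49 and the hash is the top 15 bits of the 64-bit product.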
static inline uint32_t Hash(const uint8_t* p, size_t shift) {
  const uint64_t h = (BROTLI_UNALIGNED_LOAD64(p) << 24) * kHashMul32;
  return static_cast<uint32_t>(h >> shift);
}

static inline uint32_t HashBytesAtOffset(uint64_t v, int offset, size_t shift) {
  assert(offset >= 0);
  assert(offset <= 3);
  const uint64_t h = ((v >> (8 * offset)) << 24) * kHashMul32;
  return static_cast<uint32_t>(h >> shift);
}

static inline int IsMatch(const uint8_t* p1, const uint8_t* p2) {
  return (BROTLI_UNALIGNED_LOAD32(p1) == BROTLI_UNALIGNED_LOAD32(p2) &&
          p1[4] == p2[4]);
}

// Builds a literal prefix code into "depths" and "bits" based on the statistics
// of the "input" string and stores it into the bit stream.
// Note that the prefix code here is built from the pre-LZ77 input, therefore
// we can only approximate the statistics of the actual literal stream.
// Moreover, for long inputs we build a histogram from a sample of the input
// and thus have to assign a non-zero depth for each literal.
static void BuildAndStoreLiteralPrefixCode(const uint8_t* input,
                                           const size_t input_size,
                                           uint8_t depths[256],
                                           uint16_t bits[256],
                                           size_t* storage_ix,
                                           uint8_t* storage) {
  uint32_t histogram[256] = { 0 };
  size_t histogram_total;
  if (input_size < (1 << 15)) {
    for (size_t i = 0; i < input_size; ++i) {
      ++histogram[input[i]];
    }
    histogram_total = input_size;
    for (size_t i = 0; i < 256; ++i) {
      // We weigh the first 11 samples with weight 3 to account for the
      // balancing effect of the LZ77 phase on the histogram.
      const uint32_t adjust = 2 * std::min(histogram[i], 11u);
      histogram[i] += adjust;
      histogram_total += adjust;
    }
  } else {
    static const size_t kSampleRate = 29;
    for (size_t i = 0; i < input_size; i += kSampleRate) {
      ++histogram[input[i]];
    }
    histogram_total = (input_size + kSampleRate - 1) / kSampleRate;
    for (size_t i = 0; i < 256; ++i) {
      // We add 1 to each population count to avoid 0 bit depths (since this is
      // only a sample and we don't know if the symbol appears or not), and we
      // weigh the first 11 samples with weight 3 to account for the balancing
      // effect of the LZ77 phase on the histogram (more frequent symbols are
      // more likely to be in backward references instead as literals).
      const uint32_t adjust = 1 + 2 * std::min(histogram[i], 11u);
      histogram[i] += adjust;
      histogram_total += adjust;
    }
  }
  BuildAndStoreHuffmanTreeFast(histogram, histogram_total,
                               /* max_bits = */ 8,
                               depths, bits, storage_ix, storage);
}

// Builds a command and distance prefix code (each 64 symbols) into "depth" and
// "bits" based on "histogram" and stores it into the bit stream.
static void BuildAndStoreCommandPrefixCode(const uint32_t histogram[128],
                                           uint8_t depth[128],
                                           uint16_t bits[128],
                                           size_t* storage_ix,
                                           uint8_t* storage) {
  // Tree size for building a tree over 64 symbols is 2 * 64 + 1.
  static const size_t kTreeSize = 129;
  HuffmanTree tree[kTreeSize];
  CreateHuffmanTree(histogram, 64, 15, tree, depth);
  CreateHuffmanTree(&histogram[64], 64, 14, tree, &depth[64]);
  // We have to jump through a few hoops here in order to compute
  // the command bits because the symbols are in a different order than in
  // the full alphabet. This looks complicated, but having the symbols
  // in this order in the command bits saves a few branches in the Emit*
  // functions.
  uint8_t cmd_depth[64];
  uint16_t cmd_bits[64];
  memcpy(cmd_depth, depth, 24);
  memcpy(cmd_depth + 24, depth + 40, 8);
  memcpy(cmd_depth + 32, depth + 24, 8);
  memcpy(cmd_depth + 40, depth + 48, 8);
  memcpy(cmd_depth + 48, depth + 32, 8);
  memcpy(cmd_depth + 56, depth + 56, 8);
  ConvertBitDepthsToSymbols(cmd_depth, 64, cmd_bits);
  memcpy(bits, cmd_bits, 48);
  memcpy(bits + 24, cmd_bits + 32, 16);
  memcpy(bits + 32, cmd_bits + 48, 16);
  memcpy(bits + 40, cmd_bits + 24, 16);
  memcpy(bits + 48, cmd_bits + 40, 16);
  memcpy(bits + 56, cmd_bits + 56, 16);
  ConvertBitDepthsToSymbols(&depth[64], 64, &bits[64]);
  {
    // Create the bit length array for the full command alphabet.
    uint8_t cmd_depth[704] = { 0 };
    memcpy(cmd_depth, depth, 8);
    memcpy(cmd_depth + 64, depth + 8, 8);
    memcpy(cmd_depth + 128, depth + 16, 8);
    memcpy(cmd_depth + 192, depth + 24, 8);
    memcpy(cmd_depth + 384, depth + 32, 8);
    for (size_t i = 0; i < 8; ++i) {
      cmd_depth[128 + 8 * i] = depth[40 + i];
      cmd_depth[256 + 8 * i] = depth[48 + i];
      cmd_depth[448 + 8 * i] = depth[56 + i];
    }
    StoreHuffmanTree(cmd_depth, 704, tree, storage_ix, storage);
  }
  StoreHuffmanTree(&depth[64], 64, tree, storage_ix, storage);
}

// REQUIRES: insertlen < 6210
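// For example, insertlen == 1000 takes the third branch: 1000 - 66 == 934,
// nbits == 9, code == 59, and the 9 extra bits hold 934 - 512 == 422.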
inline void EmitInsertLen(size_t insertlen,
                          const uint8_t depth[128],
                          const uint16_t bits[128],
                          uint32_t histo[128],
                          size_t* storage_ix,
                          uint8_t* storage) {
  if (insertlen < 6) {
    const size_t code = insertlen + 40;
    WriteBits(depth[code], bits[code], storage_ix, storage);
    ++histo[code];
  } else if (insertlen < 130) {
    insertlen -= 2;
    const uint32_t nbits = Log2FloorNonZero(insertlen) - 1u;
    const size_t prefix = insertlen >> nbits;
    const size_t inscode = (nbits << 1) + prefix + 42;
    WriteBits(depth[inscode], bits[inscode], storage_ix, storage);
    WriteBits(nbits, insertlen - (prefix << nbits), storage_ix, storage);
    ++histo[inscode];
  } else if (insertlen < 2114) {
    insertlen -= 66;
    const uint32_t nbits = Log2FloorNonZero(insertlen);
    const size_t code = nbits + 50;
    WriteBits(depth[code], bits[code], storage_ix, storage);
    WriteBits(nbits, insertlen - (1 << nbits), storage_ix, storage);
    ++histo[code];
  } else {
    WriteBits(depth[61], bits[61], storage_ix, storage);
    WriteBits(12, insertlen - 2114, storage_ix, storage);
    ++histo[61];
  }
}

inline void EmitLongInsertLen(size_t insertlen,
                              const uint8_t depth[128],
                              const uint16_t bits[128],
                              uint32_t histo[128],
                              size_t* storage_ix,
                              uint8_t* storage) {
  if (insertlen < 22594) {
    WriteBits(depth[62], bits[62], storage_ix, storage);
    WriteBits(14, insertlen - 6210, storage_ix, storage);
    ++histo[62];
  } else {
    WriteBits(depth[63], bits[63], storage_ix, storage);
    WriteBits(24, insertlen - 22594, storage_ix, storage);
    ++histo[63];
  }
}

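// For example, copylen == 12 takes the second branch: 12 - 6 == 6,
// nbits == 1, prefix == 3, code == 25, and the single extra bit holds
// 6 - (3 << 1) == 0.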
inline void EmitCopyLen(size_t copylen,
                        const uint8_t depth[128],
                        const uint16_t bits[128],
                        uint32_t histo[128],
                        size_t* storage_ix,
                        uint8_t* storage) {
  if (copylen < 10) {
    WriteBits(depth[copylen + 14], bits[copylen + 14], storage_ix, storage);
    ++histo[copylen + 14];
  } else if (copylen < 134) {
    copylen -= 6;
    const uint32_t nbits = Log2FloorNonZero(copylen) - 1u;
    const size_t prefix = copylen >> nbits;
    const size_t code = (nbits << 1) + prefix + 20;
    WriteBits(depth[code], bits[code], storage_ix, storage);
    WriteBits(nbits, copylen - (prefix << nbits), storage_ix, storage);
    ++histo[code];
  } else if (copylen < 2118) {
    copylen -= 70;
    const uint32_t nbits = Log2FloorNonZero(copylen);
    const size_t code = nbits + 28;
    WriteBits(depth[code], bits[code], storage_ix, storage);
    WriteBits(nbits, copylen - (1 << nbits), storage_ix, storage);
    ++histo[code];
  } else {
    WriteBits(depth[39], bits[39], storage_ix, storage);
    WriteBits(24, copylen - 2118, storage_ix, storage);
    ++histo[39];
  }
}

inline void EmitCopyLenLastDistance(size_t copylen,
                                    const uint8_t depth[128],
                                    const uint16_t bits[128],
                                    uint32_t histo[128],
                                    size_t* storage_ix,
                                    uint8_t* storage) {
  if (copylen < 12) {
    WriteBits(depth[copylen - 4], bits[copylen - 4], storage_ix, storage);
    ++histo[copylen - 4];
  } else if (copylen < 72) {
    copylen -= 8;
    const uint32_t nbits = Log2FloorNonZero(copylen) - 1;
    const size_t prefix = copylen >> nbits;
    const size_t code = (nbits << 1) + prefix + 4;
    WriteBits(depth[code], bits[code], storage_ix, storage);
    WriteBits(nbits, copylen - (prefix << nbits), storage_ix, storage);
    ++histo[code];
  } else if (copylen < 136) {
    copylen -= 8;
    const size_t code = (copylen >> 5) + 30;
    WriteBits(depth[code], bits[code], storage_ix, storage);
    WriteBits(5, copylen & 31, storage_ix, storage);
    WriteBits(depth[64], bits[64], storage_ix, storage);
    ++histo[code];
    ++histo[64];
  } else if (copylen < 2120) {
    copylen -= 72;
    const uint32_t nbits = Log2FloorNonZero(copylen);
    const size_t code = nbits + 28;
    WriteBits(depth[code], bits[code], storage_ix, storage);
    WriteBits(nbits, copylen - (1 << nbits), storage_ix, storage);
    WriteBits(depth[64], bits[64], storage_ix, storage);
    ++histo[code];
    ++histo[64];
  } else {
    WriteBits(depth[39], bits[39], storage_ix, storage);
    WriteBits(24, copylen - 2120, storage_ix, storage);
    WriteBits(depth[64], bits[64], storage_ix, storage);
    ++histo[39];
    ++histo[64];
  }
}

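// For example, distance == 100 gives distance + 3 == 103, nbits == 5,
// prefix == 1, distcode == 89, and the 5 extra bits hold 103 - 96 == 7.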
inline void EmitDistance(size_t distance,
                         const uint8_t depth[128],
                         const uint16_t bits[128],
                         uint32_t histo[128],
                         size_t* storage_ix, uint8_t* storage) {
  distance += 3;
  const uint32_t nbits = Log2FloorNonZero(distance) - 1u;
  const size_t prefix = (distance >> nbits) & 1;
  const size_t offset = (2 + prefix) << nbits;
  const size_t distcode = 2 * (nbits - 1) + prefix + 80;
  WriteBits(depth[distcode], bits[distcode], storage_ix, storage);
  WriteBits(nbits, distance - offset, storage_ix, storage);
  ++histo[distcode];
}

inline void EmitLiterals(const uint8_t* input, const size_t len,
                         const uint8_t depth[256], const uint16_t bits[256],
                         size_t* storage_ix, uint8_t* storage) {
  for (size_t j = 0; j < len; j++) {
    const uint8_t lit = input[j];
    WriteBits(depth[lit], bits[lit], storage_ix, storage);
  }
}

// REQUIRES: len <= 1 << 20.
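// For example, len == 65536 still fits in four nibbles (MLEN - 1 == 0xffff),
// while len == 65537 needs the five-nibble form.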
static void StoreMetaBlockHeader(
    size_t len, bool is_uncompressed, size_t* storage_ix, uint8_t* storage) {
  // ISLAST
  WriteBits(1, 0, storage_ix, storage);
  if (len <= (1U << 16)) {
    // MNIBBLES is 4
    WriteBits(2, 0, storage_ix, storage);
    WriteBits(16, len - 1, storage_ix, storage);
  } else {
    // MNIBBLES is 5
    WriteBits(2, 1, storage_ix, storage);
    WriteBits(20, len - 1, storage_ix, storage);
  }
  // ISUNCOMPRESSED
  WriteBits(1, is_uncompressed, storage_ix, storage);
}

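// Overwrites n_bits of already-written output at bit position "pos"
// (least-significant bit first within each byte) without disturbing the
// surrounding bits; used below to patch the MLEN field of a meta-block
// header in place. For example, with pos == 11 and n_bits == 20 the loop
// runs three times, rewriting 5, 8 and 7 bits respectively.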
static void UpdateBits(size_t n_bits,
                       uint32_t bits,
                       size_t pos,
                       uint8_t *array) {
  while (n_bits > 0) {
    size_t byte_pos = pos >> 3;
    size_t n_unchanged_bits = pos & 7;
    size_t n_changed_bits = std::min(n_bits, 8 - n_unchanged_bits);
    size_t total_bits = n_unchanged_bits + n_changed_bits;
    uint32_t mask = (~((1 << total_bits) - 1)) | ((1 << n_unchanged_bits) - 1);
    uint32_t unchanged_bits = array[byte_pos] & mask;
    uint32_t changed_bits = bits & ((1 << n_changed_bits) - 1);
    array[byte_pos] =
        static_cast<uint8_t>((changed_bits << n_unchanged_bits) |
                             unchanged_bits);
    n_bits -= n_changed_bits;
    bits >>= n_changed_bits;
    pos += n_changed_bits;
  }
}

static void RewindBitPosition(const size_t new_storage_ix,
                              size_t* storage_ix, uint8_t* storage) {
  const size_t bitpos = new_storage_ix & 7;
  const size_t mask = (1u << bitpos) - 1;
  storage[new_storage_ix >> 3] &= static_cast<uint8_t>(mask);
  *storage_ix = new_storage_ix;
}

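// Returns true if it is cheaper to extend the current meta-block than to
// start a new one: in effect, r >= 0 when, on a 1-in-43 sample of "data",
// the cost of coding the sample with the existing literal code "depths"
// exceeds the sample's empirical entropy by no more than roughly half a bit
// per sample plus a ~200-bit allowance for the header that merging saves.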
static bool ShouldMergeBlock(const uint8_t* data, size_t len,
                             const uint8_t* depths) {
  size_t histo[256] = { 0 };
  static const size_t kSampleRate = 43;
  for (size_t i = 0; i < len; i += kSampleRate) {
    ++histo[data[i]];
  }
  const size_t total = (len + kSampleRate - 1) / kSampleRate;
  double r = (FastLog2(total) + 0.5) * static_cast<double>(total) + 200;
  for (size_t i = 0; i < 256; ++i) {
    r -= static_cast<double>(histo[i]) * (depths[i] + FastLog2(histo[i]));
  }
  return r >= 0.0;
}

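// "sum" below approximates the expected literal code length in units of
// 1/2^15 bits: if the assigned depths were ideal (symbol probability 2^-n
// for depth n), sum would be 2^15 times the entropy of the literal
// distribution. Uncompressed mode is chosen when this estimate exceeds
// roughly 7.84 bits per byte, i.e. the data is essentially incompressible.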
inline bool ShouldUseUncompressedMode(const uint8_t* metablock_start,
                                      const uint8_t* next_emit,
                                      const size_t insertlen,
                                      const uint8_t literal_depths[256]) {
  const size_t compressed = static_cast<size_t>(next_emit - metablock_start);
  if (compressed * 50 > insertlen) {
    return false;
  }
  static const double kAcceptableLossForUncompressibleSpeedup = 0.02;
  static const double kMinEntropy =
      8 * (1.0 - kAcceptableLossForUncompressibleSpeedup);
  uint32_t sum = 0;
  for (int i = 0; i < 256; ++i) {
    const uint32_t n = literal_depths[i];
    sum += n << (15 - n);
  }
  return sum > static_cast<uint32_t>((1 << 15) * kMinEntropy);
}

static void EmitUncompressedMetaBlock(const uint8_t* begin, const uint8_t* end,
                                      const size_t storage_ix_start,
                                      size_t* storage_ix, uint8_t* storage) {
  const size_t len = static_cast<size_t>(end - begin);
  RewindBitPosition(storage_ix_start, storage_ix, storage);
  StoreMetaBlockHeader(len, 1, storage_ix, storage);
  *storage_ix = (*storage_ix + 7u) & ~7u;
  memcpy(&storage[*storage_ix >> 3], begin, len);
  *storage_ix += len << 3;
  storage[*storage_ix >> 3] = 0;
}

void BrotliCompressFragmentFast(const uint8_t* input, size_t input_size,
                                bool is_last,
                                int* table, size_t table_size,
                                uint8_t cmd_depth[128], uint16_t cmd_bits[128],
                                size_t* cmd_code_numbits, uint8_t* cmd_code,
                                size_t* storage_ix, uint8_t* storage) {
  if (input_size == 0) {
    assert(is_last);
    WriteBits(1, 1, storage_ix, storage);  // islast
    WriteBits(1, 1, storage_ix, storage);  // isempty
    *storage_ix = (*storage_ix + 7u) & ~7u;
    return;
  }

  // "next_emit" is a pointer to the first byte that is not covered by a
  // previous copy. Bytes between "next_emit" and the start of the next copy or
  // the end of the input will be emitted as literal bytes.
  const uint8_t* next_emit = input;
  // Save the start of the first block for position and distance computations.
  const uint8_t* base_ip = input;

  static const size_t kFirstBlockSize = 3 << 15;
  static const size_t kMergeBlockSize = 1 << 16;
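  // The first meta-block covers at most 96KiB of input; if merging keeps
  // paying off, it is then extended in chunks of at most 64KiB, up to the
  // 1MiB limit that five length nibbles can describe.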

  const uint8_t* metablock_start = input;
  size_t block_size = std::min(input_size, kFirstBlockSize);
  size_t total_block_size = block_size;
  // Save the bit position of the MLEN field of the meta-block header, so that
  // we can update it later if we decide to extend this meta-block.
  size_t mlen_storage_ix = *storage_ix + 3;
  StoreMetaBlockHeader(block_size, 0, storage_ix, storage);
  // No block splits, no contexts.
  WriteBits(13, 0, storage_ix, storage);

  uint8_t lit_depth[256] = { 0 };
  uint16_t lit_bits[256] = { 0 };
  BuildAndStoreLiteralPrefixCode(input, block_size, lit_depth, lit_bits,
                                 storage_ix, storage);

  // Store the pre-compressed command and distance prefix codes.
  for (size_t i = 0; i + 7 < *cmd_code_numbits; i += 8) {
    WriteBits(8, cmd_code[i >> 3], storage_ix, storage);
  }
  WriteBits(*cmd_code_numbits & 7, cmd_code[*cmd_code_numbits >> 3],
            storage_ix, storage);

 emit_commands:
  // Initialize the command and distance histograms. We will gather
  // statistics of command and distance codes during the processing
  // of this block and use it to update the command and distance
  // prefix codes for the next block.
  uint32_t cmd_histo[128] = {
    0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 0, 0, 0, 0,
  };

  // "ip" is the input pointer.
  const uint8_t* ip = input;
  assert(table_size);
  assert(table_size <= (1u << 31));
  assert((table_size & (table_size - 1)) == 0);  // table must be power of two
  const size_t shift = 64u - Log2FloorNonZero(table_size);
  assert(table_size - 1 == static_cast<size_t>(
      MAKE_UINT64_T(0xFFFFFFFF, 0xFFFFFF) >> shift));
  const uint8_t* ip_end = input + block_size;

  int last_distance = -1;
  const size_t kInputMarginBytes = 16;
  const size_t kMinMatchLen = 5;
  if (PREDICT_TRUE(block_size >= kInputMarginBytes)) {
    // For the last block, we need to keep a 16 bytes margin so that we can be
    // sure that all distances are at most window size - 16.
    // For all other blocks, we only need to keep a margin of 5 bytes so that
    // we don't go over the block size with a copy.
    const size_t len_limit = std::min(block_size - kMinMatchLen,
                                      input_size - kInputMarginBytes);
    const uint8_t* ip_limit = input + len_limit;

    for (uint32_t next_hash = Hash(++ip, shift); ; ) {
      assert(next_emit < ip);
      // Step 1: Scan forward in the input looking for a 5-byte-long match.
      // If we get close to exhausting the input then goto emit_remainder.
      //
      // Heuristic match skipping: If 32 bytes are scanned with no matches
      // found, start looking only at every other byte. If 32 more bytes are
      // scanned, look at every third byte, etc.. When a match is found,
      // immediately go back to looking at every byte. This is a small loss
      // (~5% performance, ~0.1% density) for compressible data due to more
      // bookkeeping, but for non-compressible data (such as JPEG) it's a huge
      // win since the compressor quickly "realizes" the data is incompressible
      // and doesn't bother looking for matches everywhere.
      //
      // The "skip" variable keeps track of how many bytes there are since the
      // last match; dividing it by 32 (ie. right-shifting by five) gives the
      // number of bytes to move ahead for each iteration.
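      // For example, the first 32 probes advance one byte at a time, the
      // next 32 advance two bytes at a time, and so on.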
      uint32_t skip = 32;

      const uint8_t* next_ip = ip;
      const uint8_t* candidate;
      do {
        ip = next_ip;
        uint32_t hash = next_hash;
        assert(hash == Hash(ip, shift));
        uint32_t bytes_between_hash_lookups = skip++ >> 5;
        next_ip = ip + bytes_between_hash_lookups;
        if (PREDICT_FALSE(next_ip > ip_limit)) {
          goto emit_remainder;
        }
        next_hash = Hash(next_ip, shift);
        candidate = ip - last_distance;
        if (IsMatch(ip, candidate)) {
          if (PREDICT_TRUE(candidate < ip)) {
            table[hash] = static_cast<int>(ip - base_ip);
            break;
          }
        }
        candidate = base_ip + table[hash];
        assert(candidate >= base_ip);
        assert(candidate < ip);

        table[hash] = static_cast<int>(ip - base_ip);
      } while (PREDICT_TRUE(!IsMatch(ip, candidate)));

      // Step 2: Emit the found match together with the literal bytes from
| 523 // "next_emit" to the bit stream, and then see if we can find a next macth |
      // immediately afterwards. Repeat until we find no match for the input
      // without emitting some literal bytes.
      uint64_t input_bytes;

      {
        // We have a 5-byte match at ip, and we need to emit bytes in
        // [next_emit, ip).
        const uint8_t* base = ip;
        size_t matched = 5 + FindMatchLengthWithLimit(
            candidate + 5, ip + 5, static_cast<size_t>(ip_end - ip) - 5);
        ip += matched;
        int distance = static_cast<int>(base - candidate);  /* > 0 */
        size_t insert = static_cast<size_t>(base - next_emit);
        assert(0 == memcmp(base, candidate, matched));
        if (PREDICT_TRUE(insert < 6210)) {
          EmitInsertLen(insert, cmd_depth, cmd_bits, cmd_histo,
                        storage_ix, storage);
        } else if (ShouldUseUncompressedMode(metablock_start, next_emit, insert,
                                             lit_depth)) {
          EmitUncompressedMetaBlock(metablock_start, base, mlen_storage_ix - 3,
                                    storage_ix, storage);
          input_size -= static_cast<size_t>(base - input);
          input = base;
          next_emit = input;
          goto next_block;
        } else {
          EmitLongInsertLen(insert, cmd_depth, cmd_bits, cmd_histo,
                            storage_ix, storage);
        }
        EmitLiterals(next_emit, insert, lit_depth, lit_bits,
                     storage_ix, storage);
        if (distance == last_distance) {
          WriteBits(cmd_depth[64], cmd_bits[64], storage_ix, storage);
          ++cmd_histo[64];
        } else {
          EmitDistance(static_cast<size_t>(distance), cmd_depth, cmd_bits,
                       cmd_histo, storage_ix, storage);
          last_distance = distance;
        }
        EmitCopyLenLastDistance(matched, cmd_depth, cmd_bits, cmd_histo,
                                storage_ix, storage);

        next_emit = ip;
        if (PREDICT_FALSE(ip >= ip_limit)) {
          goto emit_remainder;
        }
        // We could immediately start working at ip now, but to improve
        // compression we first update "table" with the hashes of some positions
        // within the last copy.
        input_bytes = BROTLI_UNALIGNED_LOAD64(ip - 3);
        uint32_t prev_hash = HashBytesAtOffset(input_bytes, 0, shift);
        table[prev_hash] = static_cast<int>(ip - base_ip - 3);
        prev_hash = HashBytesAtOffset(input_bytes, 1, shift);
        table[prev_hash] = static_cast<int>(ip - base_ip - 2);
        prev_hash = HashBytesAtOffset(input_bytes, 2, shift);
        table[prev_hash] = static_cast<int>(ip - base_ip - 1);

        uint32_t cur_hash = HashBytesAtOffset(input_bytes, 3, shift);
        candidate = base_ip + table[cur_hash];
        table[cur_hash] = static_cast<int>(ip - base_ip);
      }

      while (IsMatch(ip, candidate)) {
        // We have a 5-byte match at ip, and no need to emit any literal bytes
        // prior to ip.
        const uint8_t* base = ip;
        size_t matched = 5 + FindMatchLengthWithLimit(
            candidate + 5, ip + 5, static_cast<size_t>(ip_end - ip) - 5);
        ip += matched;
        last_distance = static_cast<int>(base - candidate);  /* > 0 */
        assert(0 == memcmp(base, candidate, matched));
        EmitCopyLen(matched, cmd_depth, cmd_bits, cmd_histo,
                    storage_ix, storage);
        EmitDistance(static_cast<size_t>(last_distance), cmd_depth, cmd_bits,
                     cmd_histo, storage_ix, storage);

        next_emit = ip;
        if (PREDICT_FALSE(ip >= ip_limit)) {
          goto emit_remainder;
        }
        // We could immediately start working at ip now, but to improve
        // compression we first update "table" with the hashes of some positions
        // within the last copy.
        input_bytes = BROTLI_UNALIGNED_LOAD64(ip - 3);
        uint32_t prev_hash = HashBytesAtOffset(input_bytes, 0, shift);
        table[prev_hash] = static_cast<int>(ip - base_ip - 3);
        prev_hash = HashBytesAtOffset(input_bytes, 1, shift);
        table[prev_hash] = static_cast<int>(ip - base_ip - 2);
        prev_hash = HashBytesAtOffset(input_bytes, 2, shift);
        table[prev_hash] = static_cast<int>(ip - base_ip - 1);

        uint32_t cur_hash = HashBytesAtOffset(input_bytes, 3, shift);
        candidate = base_ip + table[cur_hash];
        table[cur_hash] = static_cast<int>(ip - base_ip);
      }

      next_hash = Hash(++ip, shift);
    }
  }

 emit_remainder:
  assert(next_emit <= ip_end);
  input += block_size;
  input_size -= block_size;
  block_size = std::min(input_size, kMergeBlockSize);

  // Decide if we want to continue this meta-block instead of emitting the
  // last insert-only command.
  if (input_size > 0 &&
      total_block_size + block_size <= (1 << 20) &&
      ShouldMergeBlock(input, block_size, lit_depth)) {
    assert(total_block_size > (1 << 16));
    // Update the size of the current meta-block and continue emitting commands.
    // We can do this because the current size and the new size both have 5
    // nibbles.
    total_block_size += block_size;
    UpdateBits(20, static_cast<uint32_t>(total_block_size - 1),
               mlen_storage_ix, storage);
    goto emit_commands;
  }

  // Emit the remaining bytes as literals.
  if (next_emit < ip_end) {
    const size_t insert = static_cast<size_t>(ip_end - next_emit);
    if (PREDICT_TRUE(insert < 6210)) {
      EmitInsertLen(insert, cmd_depth, cmd_bits, cmd_histo,
                    storage_ix, storage);
      EmitLiterals(next_emit, insert, lit_depth, lit_bits, storage_ix, storage);
    } else if (ShouldUseUncompressedMode(metablock_start, next_emit, insert,
                                         lit_depth)) {
      EmitUncompressedMetaBlock(metablock_start, ip_end, mlen_storage_ix - 3,
                                storage_ix, storage);
    } else {
      EmitLongInsertLen(insert, cmd_depth, cmd_bits, cmd_histo,
                        storage_ix, storage);
      EmitLiterals(next_emit, insert, lit_depth, lit_bits,
                   storage_ix, storage);
    }
  }
  next_emit = ip_end;

 next_block:
  // If we have more data, write a new meta-block header and prefix codes and
  // then continue emitting commands.
  if (input_size > 0) {
    metablock_start = input;
    block_size = std::min(input_size, kFirstBlockSize);
    total_block_size = block_size;
    // Save the bit position of the MLEN field of the meta-block header, so that
    // we can update it later if we decide to extend this meta-block.
    mlen_storage_ix = *storage_ix + 3;
    StoreMetaBlockHeader(block_size, 0, storage_ix, storage);
    // No block splits, no contexts.
    WriteBits(13, 0, storage_ix, storage);
    memset(lit_depth, 0, sizeof(lit_depth));
    memset(lit_bits, 0, sizeof(lit_bits));
    BuildAndStoreLiteralPrefixCode(input, block_size, lit_depth, lit_bits,
                                   storage_ix, storage);
    BuildAndStoreCommandPrefixCode(cmd_histo, cmd_depth, cmd_bits,
                                   storage_ix, storage);
    goto emit_commands;
  }

  if (is_last) {
    WriteBits(1, 1, storage_ix, storage);  // islast
    WriteBits(1, 1, storage_ix, storage);  // isempty
    *storage_ix = (*storage_ix + 7u) & ~7u;
  } else {
    // If this is not the last block, update the command and distance prefix
    // codes for the next block and store the compressed forms.
    cmd_code[0] = 0;
    *cmd_code_numbits = 0;
    BuildAndStoreCommandPrefixCode(cmd_histo, cmd_depth, cmd_bits,
                                   cmd_code_numbits, cmd_code);
  }
}

}  // namespace brotli