/* Copyright 2015 Google Inc. All Rights Reserved.

   Distributed under MIT license.
   See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/

// Function for fast encoding of an input fragment, independently of the input
// history. This function uses two-pass processing: in the first pass we save
// the found backward matches and literal bytes into a buffer, and in the
// second pass we emit them into the bit stream using prefix codes built from
// the actual command and literal byte histograms.

#include "./compress_fragment_two_pass.h"

#include <algorithm>
#include <cassert>
#include <cstring>

#include "./brotli_bit_stream.h"
#include "./bit_cost.h"
#include "./entropy_encode.h"
#include "./fast_log.h"
#include "./find_match_length.h"
#include "./port.h"
#include "./types.h"
#include "./write_bits.h"

namespace brotli {

// kHashMul32 multiplier has these properties:
// * The multiplier must be odd. Otherwise we may lose the highest bit.
// * No long streaks of 1s or 0s.
// * There is no effort to ensure that it is a prime; being odd is enough
//   for this use.
// * The number has been tuned heuristically against compression benchmarks.
static const uint32_t kHashMul32 = 0x1e35a7bd;

static inline uint32_t Hash(const uint8_t* p, size_t shift) {
  const uint64_t h = (BROTLI_UNALIGNED_LOAD64(p) << 16) * kHashMul32;
  return static_cast<uint32_t>(h >> shift);
}

static inline uint32_t HashBytesAtOffset(uint64_t v, int offset, size_t shift) {
  assert(offset >= 0);
  assert(offset <= 2);
  const uint64_t h = ((v >> (8 * offset)) << 16) * kHashMul32;
  return static_cast<uint32_t>(h >> shift);
}

static inline int IsMatch(const uint8_t* p1, const uint8_t* p2) {
  return (BROTLI_UNALIGNED_LOAD32(p1) == BROTLI_UNALIGNED_LOAD32(p2) &&
          p1[4] == p2[4] &&
          p1[5] == p2[5]);
}
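
// Note on the "<< 16" above: BROTLI_UNALIGNED_LOAD64 reads eight bytes, and
// shifting left by 16 discards the two most significant bytes of the load, so
// on a little-endian target the hash depends on exactly the six bytes
// p[0..5] that IsMatch() compares. HashBytesAtOffset() applies the same trick
// to bytes [offset, offset + 6) of an already loaded word.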

// Builds a command and distance prefix code (each 64 symbols) into "depth" and
// "bits" based on "histogram" and stores it into the bit stream.
static void BuildAndStoreCommandPrefixCode(
    const uint32_t histogram[128],
    uint8_t depth[128], uint16_t bits[128],
    size_t* storage_ix, uint8_t* storage) {
  // Tree size for building a tree over 64 symbols is 2 * 64 + 1.
  static const size_t kTreeSize = 129;
  HuffmanTree tree[kTreeSize];
  CreateHuffmanTree(histogram, 64, 15, tree, depth);
  CreateHuffmanTree(&histogram[64], 64, 14, tree, &depth[64]);
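  // The same scratch "tree" is reused for both codes; note that the command
  // code is depth-limited to 15 bits while the distance code is limited to 14.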
  // We have to jump through a few hoops here in order to compute
  // the command bits because the symbols are in a different order than in
  // the full alphabet. This looks complicated, but having the symbols
  // in this order in the command bits saves a few branches in the Emit*
  // functions.
  uint8_t cmd_depth[64];
  uint16_t cmd_bits[64];
  memcpy(cmd_depth, depth + 24, 24);
  memcpy(cmd_depth + 24, depth, 8);
  memcpy(cmd_depth + 32, depth + 48, 8);
  memcpy(cmd_depth + 40, depth + 8, 8);
  memcpy(cmd_depth + 48, depth + 56, 8);
  memcpy(cmd_depth + 56, depth + 16, 8);
  ConvertBitDepthsToSymbols(cmd_depth, 64, cmd_bits);
  memcpy(bits, cmd_bits + 24, 16);
  memcpy(bits + 8, cmd_bits + 40, 16);
  memcpy(bits + 16, cmd_bits + 56, 16);
  memcpy(bits + 24, cmd_bits, 48);
  memcpy(bits + 48, cmd_bits + 32, 16);
  memcpy(bits + 56, cmd_bits + 48, 16);
  ConvertBitDepthsToSymbols(&depth[64], 64, &bits[64]);
  {
    // Create the bit length array for the full command alphabet.
    uint8_t cmd_depth[704] = { 0 };
    memcpy(cmd_depth, depth + 24, 8);
    memcpy(cmd_depth + 64, depth + 32, 8);
    memcpy(cmd_depth + 128, depth + 40, 8);
    memcpy(cmd_depth + 192, depth + 48, 8);
    memcpy(cmd_depth + 384, depth + 56, 8);
    for (size_t i = 0; i < 8; ++i) {
      cmd_depth[128 + 8 * i] = depth[i];
      cmd_depth[256 + 8 * i] = depth[8 + i];
      cmd_depth[448 + 8 * i] = depth[16 + i];
    }
    StoreHuffmanTree(cmd_depth, 704, tree, storage_ix, storage);
  }
  StoreHuffmanTree(&depth[64], 64, tree, storage_ix, storage);
}

inline void EmitInsertLen(uint32_t insertlen, uint32_t** commands) {
  if (insertlen < 6) {
    **commands = insertlen;
  } else if (insertlen < 130) {
    insertlen -= 2;
    const uint32_t nbits = Log2FloorNonZero(insertlen) - 1u;
    const uint32_t prefix = insertlen >> nbits;
    const uint32_t inscode = (nbits << 1) + prefix + 2;
    const uint32_t extra = insertlen - (prefix << nbits);
    **commands = inscode | (extra << 8);
  } else if (insertlen < 2114) {
    insertlen -= 66;
    const uint32_t nbits = Log2FloorNonZero(insertlen);
    const uint32_t code = nbits + 10;
    const uint32_t extra = insertlen - (1 << nbits);
    **commands = code | (extra << 8);
  } else if (insertlen < 6210) {
    const uint32_t extra = insertlen - 2114;
    **commands = 21 | (extra << 8);
  } else if (insertlen < 22594) {
    const uint32_t extra = insertlen - 6210;
    **commands = 22 | (extra << 8);
  } else {
    const uint32_t extra = insertlen - 22594;
    **commands = 23 | (extra << 8);
  }
  ++(*commands);
}
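
// A worked example for EmitInsertLen() above (illustrative, hand-computed):
// insertlen = 1000 takes the "< 2114" branch, so 1000 - 66 = 934,
// nbits = Log2FloorNonZero(934) = 9, code = 9 + 10 = 19, and
// extra = 934 - (1 << 9) = 422; the stored word is 19 | (422 << 8). The
// second pass recovers the length as kInsertOffset[19] + extra =
// 578 + 422 = 1000 (see StoreCommands below).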

inline void EmitCopyLen(size_t copylen, uint32_t** commands) {
  if (copylen < 10) {
    **commands = static_cast<uint32_t>(copylen + 38);
  } else if (copylen < 134) {
    copylen -= 6;
    const size_t nbits = Log2FloorNonZero(copylen) - 1;
    const size_t prefix = copylen >> nbits;
    const size_t code = (nbits << 1) + prefix + 44;
    const size_t extra = copylen - (prefix << nbits);
    **commands = static_cast<uint32_t>(code | (extra << 8));
  } else if (copylen < 2118) {
    copylen -= 70;
    const size_t nbits = Log2FloorNonZero(copylen);
    const size_t code = nbits + 52;
    const size_t extra = copylen - (1 << nbits);
    **commands = static_cast<uint32_t>(code | (extra << 8));
  } else {
    const size_t extra = copylen - 2118;
    **commands = static_cast<uint32_t>(63 | (extra << 8));
  }
  ++(*commands);
}
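
// Similarly for EmitCopyLen() (illustrative): copylen = 1000 takes the
// "< 2118" branch, so 1000 - 70 = 930, nbits = Log2FloorNonZero(930) = 9,
// code = 9 + 52 = 61, extra = 930 - (1 << 9) = 418, and the stored word is
// 61 | (418 << 8).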

inline void EmitCopyLenLastDistance(size_t copylen, uint32_t** commands) {
  if (copylen < 12) {
    **commands = static_cast<uint32_t>(copylen + 20);
    ++(*commands);
  } else if (copylen < 72) {
    copylen -= 8;
    const size_t nbits = Log2FloorNonZero(copylen) - 1;
    const size_t prefix = copylen >> nbits;
    const size_t code = (nbits << 1) + prefix + 28;
    const size_t extra = copylen - (prefix << nbits);
    **commands = static_cast<uint32_t>(code | (extra << 8));
    ++(*commands);
  } else if (copylen < 136) {
    copylen -= 8;
    const size_t code = (copylen >> 5) + 54;
    const size_t extra = copylen & 31;
    **commands = static_cast<uint32_t>(code | (extra << 8));
    ++(*commands);
    **commands = 64;
    ++(*commands);
  } else if (copylen < 2120) {
    copylen -= 72;
    const size_t nbits = Log2FloorNonZero(copylen);
    const size_t code = nbits + 52;
    const size_t extra = copylen - (1 << nbits);
    **commands = static_cast<uint32_t>(code | (extra << 8));
    ++(*commands);
    **commands = 64;
    ++(*commands);
  } else {
    const size_t extra = copylen - 2120;
    **commands = static_cast<uint32_t>(63 | (extra << 8));
    ++(*commands);
    **commands = 64;
    ++(*commands);
  }
}
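
// A worked example for EmitCopyLenLastDistance() (illustrative):
// copylen = 100 takes the "< 136" branch, so 100 - 8 = 92,
// code = (92 >> 5) + 54 = 56 and extra = 92 & 31 = 28; the words
// 56 | (28 << 8) and 64 are stored. Code 64 is the "reuse the last distance"
// code (cf. CreateCommands below), which costs no extra bits
// (kNumExtraBits[64] == 0 in StoreCommands).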

inline void EmitDistance(uint32_t distance, uint32_t** commands) {
  distance += 3;
  uint32_t nbits = Log2FloorNonZero(distance) - 1;
  const uint32_t prefix = (distance >> nbits) & 1;
  const uint32_t offset = (2 + prefix) << nbits;
  const uint32_t distcode = 2 * (nbits - 1) + prefix + 80;
  uint32_t extra = distance - offset;
  **commands = distcode | (extra << 8);
  ++(*commands);
}
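
// A worked example for EmitDistance() (illustrative): distance = 100 gives
// 100 + 3 = 103, nbits = Log2FloorNonZero(103) - 1 = 5,
// prefix = (103 >> 5) & 1 = 1, offset = (2 + 1) << 5 = 96,
// distcode = 2 * (5 - 1) + 1 + 80 = 89 and extra = 103 - 96 = 7, so the
// stored word is 89 | (7 << 8); kNumExtraBits[89] is indeed 5 = nbits.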

// REQUIRES: len <= 1 << 20.
static void StoreMetaBlockHeader(
    size_t len, bool is_uncompressed, size_t* storage_ix, uint8_t* storage) {
  // ISLAST
  WriteBits(1, 0, storage_ix, storage);
  if (len <= (1U << 16)) {
    // MNIBBLES is 4
    WriteBits(2, 0, storage_ix, storage);
    WriteBits(16, len - 1, storage_ix, storage);
  } else {
    // MNIBBLES is 5
    WriteBits(2, 1, storage_ix, storage);
    WriteBits(20, len - 1, storage_ix, storage);
  }
  // ISUNCOMPRESSED
  WriteBits(1, is_uncompressed, storage_ix, storage);
}
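
// For illustration: len = 65536 still fits the 4-nibble branch (the 16-bit
// value 65535 is written), while len = 65537 needs the 5-nibble branch (the
// 20-bit value 65536). Including ISLAST and ISUNCOMPRESSED the header costs
// 20 or 24 bits.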

static void CreateCommands(const uint8_t* input, size_t block_size,
                           size_t input_size, const uint8_t* base_ip,
                           int* table, size_t table_size,
                           uint8_t** literals, uint32_t** commands) {
  // "ip" is the input pointer.
  const uint8_t* ip = input;
  assert(table_size);
  assert(table_size <= (1u << 31));
  assert((table_size & (table_size - 1)) == 0);  // table must be power of two
  const size_t shift = 64u - Log2FloorNonZero(table_size);
  assert(table_size - 1 == static_cast<size_t>(
      MAKE_UINT64_T(0xFFFFFFFF, 0xFFFFFF) >> shift));
  const uint8_t* ip_end = input + block_size;
  // "next_emit" is a pointer to the first byte that is not covered by a
  // previous copy. Bytes between "next_emit" and the start of the next copy or
  // the end of the input will be emitted as literal bytes.
  const uint8_t* next_emit = input;

  int last_distance = -1;
  const size_t kInputMarginBytes = 16;
  const size_t kMinMatchLen = 6;
  if (PREDICT_TRUE(block_size >= kInputMarginBytes)) {
    // For the last block, we need to keep a 16-byte margin so that we can be
    // sure that all distances are at most window size - 16.
    // For all other blocks, we only need to keep a margin of kMinMatchLen
    // bytes so that we don't go over the block size with a copy.
    const size_t len_limit = std::min(block_size - kMinMatchLen,
                                      input_size - kInputMarginBytes);
    const uint8_t* ip_limit = input + len_limit;

    for (uint32_t next_hash = Hash(++ip, shift); ; ) {
      assert(next_emit < ip);
      // Step 1: Scan forward in the input looking for a 6-byte-long match.
      // If we get close to exhausting the input then goto emit_remainder.
      //
      // Heuristic match skipping: If 32 bytes are scanned with no matches
      // found, start looking only at every other byte. If 32 more bytes are
      // scanned, look at every third byte, etc. When a match is found,
      // immediately go back to looking at every byte. This is a small loss
      // (~5% performance, ~0.1% density) for compressible data due to more
      // bookkeeping, but for non-compressible data (such as JPEG) it's a huge
      // win since the compressor quickly "realizes" the data is incompressible
      // and doesn't bother looking for matches everywhere.
      //
      // The "skip" variable keeps track of how many bytes there are since the
      // last match; dividing it by 32 (i.e. right-shifting by five) gives the
      // number of bytes to move ahead for each iteration.
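      //
      // For example (illustrative): probes 0..31 advance one byte each
      // (skip >> 5 == 1), probes 32..63 advance two bytes each, and so on;
      // the step grows by one every 32 probes, so the scan accelerates
      // quadratically through unmatchable data.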
      uint32_t skip = 32;

      const uint8_t* next_ip = ip;
      const uint8_t* candidate;
      do {
        ip = next_ip;
        uint32_t hash = next_hash;
        assert(hash == Hash(ip, shift));
        uint32_t bytes_between_hash_lookups = skip++ >> 5;
        next_ip = ip + bytes_between_hash_lookups;
        if (PREDICT_FALSE(next_ip > ip_limit)) {
          goto emit_remainder;
        }
        next_hash = Hash(next_ip, shift);
        candidate = ip - last_distance;
        if (IsMatch(ip, candidate)) {
          if (PREDICT_TRUE(candidate < ip)) {
            table[hash] = static_cast<int>(ip - base_ip);
            break;
          }
        }
        candidate = base_ip + table[hash];
        assert(candidate >= base_ip);
        assert(candidate < ip);

        table[hash] = static_cast<int>(ip - base_ip);
      } while (PREDICT_TRUE(!IsMatch(ip, candidate)));

      // Step 2: Emit the found match together with the literal bytes from
      // "next_emit", and then see if we can find a next match immediately
      // afterwards. Repeat until we find no match for the input
      // without emitting some literal bytes.
      uint64_t input_bytes;

      {
        // We have a 6-byte match at ip, and we need to emit bytes in
        // [next_emit, ip).
        const uint8_t* base = ip;
        size_t matched = 6 + FindMatchLengthWithLimit(
            candidate + 6, ip + 6, static_cast<size_t>(ip_end - ip) - 6);
        ip += matched;
        int distance = static_cast<int>(base - candidate);  /* > 0 */
        int insert = static_cast<int>(base - next_emit);
        assert(0 == memcmp(base, candidate, matched));
        EmitInsertLen(static_cast<uint32_t>(insert), commands);
        memcpy(*literals, next_emit, static_cast<size_t>(insert));
        *literals += insert;
        if (distance == last_distance) {
          **commands = 64;
          ++(*commands);
        } else {
          EmitDistance(static_cast<uint32_t>(distance), commands);
          last_distance = distance;
        }
        EmitCopyLenLastDistance(matched, commands);

        next_emit = ip;
        if (PREDICT_FALSE(ip >= ip_limit)) {
          goto emit_remainder;
        }
        // We could immediately start working at ip now, but to improve
        // compression we first update "table" with the hashes of some
        // positions within the last copy.
        input_bytes = BROTLI_UNALIGNED_LOAD64(ip - 5);
        uint32_t prev_hash = HashBytesAtOffset(input_bytes, 0, shift);
        table[prev_hash] = static_cast<int>(ip - base_ip - 5);
        prev_hash = HashBytesAtOffset(input_bytes, 1, shift);
        table[prev_hash] = static_cast<int>(ip - base_ip - 4);
        prev_hash = HashBytesAtOffset(input_bytes, 2, shift);
        table[prev_hash] = static_cast<int>(ip - base_ip - 3);
        input_bytes = BROTLI_UNALIGNED_LOAD64(ip - 2);
        prev_hash = HashBytesAtOffset(input_bytes, 0, shift);
        table[prev_hash] = static_cast<int>(ip - base_ip - 2);
        prev_hash = HashBytesAtOffset(input_bytes, 1, shift);
        table[prev_hash] = static_cast<int>(ip - base_ip - 1);

        uint32_t cur_hash = HashBytesAtOffset(input_bytes, 2, shift);
        candidate = base_ip + table[cur_hash];
        table[cur_hash] = static_cast<int>(ip - base_ip);
      }

      while (IsMatch(ip, candidate)) {
        // We have a 6-byte match at ip, and no need to emit any
        // literal bytes prior to ip.
        const uint8_t* base = ip;
        size_t matched = 6 + FindMatchLengthWithLimit(
            candidate + 6, ip + 6, static_cast<size_t>(ip_end - ip) - 6);
        ip += matched;
        last_distance = static_cast<int>(base - candidate);  /* > 0 */
        assert(0 == memcmp(base, candidate, matched));
        EmitCopyLen(matched, commands);
        EmitDistance(static_cast<uint32_t>(last_distance), commands);

        next_emit = ip;
        if (PREDICT_FALSE(ip >= ip_limit)) {
          goto emit_remainder;
        }
        // We could immediately start working at ip now, but to improve
        // compression we first update "table" with the hashes of some
        // positions within the last copy.
        input_bytes = BROTLI_UNALIGNED_LOAD64(ip - 5);
        uint32_t prev_hash = HashBytesAtOffset(input_bytes, 0, shift);
        table[prev_hash] = static_cast<int>(ip - base_ip - 5);
        prev_hash = HashBytesAtOffset(input_bytes, 1, shift);
        table[prev_hash] = static_cast<int>(ip - base_ip - 4);
        prev_hash = HashBytesAtOffset(input_bytes, 2, shift);
        table[prev_hash] = static_cast<int>(ip - base_ip - 3);
        input_bytes = BROTLI_UNALIGNED_LOAD64(ip - 2);
        prev_hash = HashBytesAtOffset(input_bytes, 0, shift);
        table[prev_hash] = static_cast<int>(ip - base_ip - 2);
        prev_hash = HashBytesAtOffset(input_bytes, 1, shift);
        table[prev_hash] = static_cast<int>(ip - base_ip - 1);

        uint32_t cur_hash = HashBytesAtOffset(input_bytes, 2, shift);
        candidate = base_ip + table[cur_hash];
        table[cur_hash] = static_cast<int>(ip - base_ip);
      }

      next_hash = Hash(++ip, shift);
    }
  }

 emit_remainder:
  assert(next_emit <= ip_end);
  // Emit the remaining bytes as literals.
  if (next_emit < ip_end) {
    const uint32_t insert = static_cast<uint32_t>(ip_end - next_emit);
    EmitInsertLen(insert, commands);
    memcpy(*literals, next_emit, insert);
    *literals += insert;
  }
}

static void StoreCommands(const uint8_t* literals, const size_t num_literals,
                          const uint32_t* commands, const size_t num_commands,
                          size_t* storage_ix, uint8_t* storage) {
  uint8_t lit_depths[256] = { 0 };
  uint16_t lit_bits[256] = { 0 };
  uint32_t lit_histo[256] = { 0 };
  for (size_t i = 0; i < num_literals; ++i) {
    ++lit_histo[literals[i]];
  }
  BuildAndStoreHuffmanTreeFast(lit_histo, num_literals,
                               /* max_bits = */ 8,
                               lit_depths, lit_bits,
                               storage_ix, storage);

  uint8_t cmd_depths[128] = { 0 };
  uint16_t cmd_bits[128] = { 0 };
  uint32_t cmd_histo[128] = { 0 };
  for (size_t i = 0; i < num_commands; ++i) {
    ++cmd_histo[commands[i] & 0xff];
  }
  cmd_histo[1] += 1;
  cmd_histo[2] += 1;
  cmd_histo[64] += 1;
  cmd_histo[84] += 1;
  BuildAndStoreCommandPrefixCode(cmd_histo, cmd_depths, cmd_bits,
                                 storage_ix, storage);

  static const uint32_t kNumExtraBits[128] = {
    0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7, 8, 9, 10, 12, 14, 24,
    0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4,
    0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7, 8, 9, 10, 24,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
    9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15, 16, 16,
    17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24, 24,
  };
  static const uint32_t kInsertOffset[24] = {
    0, 1, 2, 3, 4, 5, 6, 8, 10, 14, 18, 26, 34, 50, 66, 98, 130, 194, 322,
    578, 1090, 2114, 6210, 22594,
  };
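
  // Each buffered command word holds a code in its low 8 bits and that code's
  // extra bits in the high 24 bits. The widest case, code 23 (insert length
  // >= 22594), uses kNumExtraBits[23] == 24 extra bits, which still fits the
  // 24-bit field.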

  for (size_t i = 0; i < num_commands; ++i) {
    const uint32_t cmd = commands[i];
    const uint32_t code = cmd & 0xff;
    const uint32_t extra = cmd >> 8;
    WriteBits(cmd_depths[code], cmd_bits[code], storage_ix, storage);
    WriteBits(kNumExtraBits[code], extra, storage_ix, storage);
    if (code < 24) {
      const uint32_t insert = kInsertOffset[code] + extra;
      for (uint32_t j = 0; j < insert; ++j) {
        const uint8_t lit = *literals;
        WriteBits(lit_depths[lit], lit_bits[lit], storage_ix, storage);
        ++literals;
      }
    }
  }
}

static bool ShouldCompress(const uint8_t* input, size_t input_size,
                           size_t num_literals) {
  static const double kAcceptableLossForUncompressibleSpeedup = 0.02;
  static const double kMaxRatioOfLiterals =
      1.0 - kAcceptableLossForUncompressibleSpeedup;
  if (num_literals < kMaxRatioOfLiterals * static_cast<double>(input_size)) {
    return true;
  }
  uint32_t literal_histo[256] = { 0 };
  static const uint32_t kSampleRate = 43;
  static const double kMaxEntropy =
      8 * (1.0 - kAcceptableLossForUncompressibleSpeedup);
  const double max_total_bit_cost =
      static_cast<double>(input_size) * kMaxEntropy / kSampleRate;
  for (size_t i = 0; i < input_size; i += kSampleRate) {
    ++literal_histo[input[i]];
  }
  return BitsEntropy(literal_histo, 256) < max_total_bit_cost;
}
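
// A worked example for ShouldCompress() (illustrative): for
// input_size = 65536 the fast path accepts if fewer than
// 0.98 * 65536 ~= 64225 literals were emitted. Otherwise every 43rd byte
// (~1525 samples) is histogrammed, and the block is compressed only if the
// sampled entropy estimate stays below 65536 * 7.84 / 43 ~= 11949 bits,
// i.e. about 7.84 bits per sampled byte.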

void BrotliCompressFragmentTwoPass(const uint8_t* input, size_t input_size,
                                   bool is_last,
                                   uint32_t* command_buf, uint8_t* literal_buf,
                                   int* table, size_t table_size,
                                   size_t* storage_ix, uint8_t* storage) {
  // Save the start of the first block for position and distance computations.
  const uint8_t* base_ip = input;

  while (input_size > 0) {
    size_t block_size = std::min(input_size, kCompressFragmentTwoPassBlockSize);
    uint32_t* commands = command_buf;
    uint8_t* literals = literal_buf;
    CreateCommands(input, block_size, input_size, base_ip, table, table_size,
                   &literals, &commands);
    const size_t num_literals = static_cast<size_t>(literals - literal_buf);
    const size_t num_commands = static_cast<size_t>(commands - command_buf);
    if (ShouldCompress(input, block_size, num_literals)) {
      StoreMetaBlockHeader(block_size, 0, storage_ix, storage);
      // No block splits, no contexts.
      WriteBits(13, 0, storage_ix, storage);
      StoreCommands(literal_buf, num_literals, command_buf, num_commands,
                    storage_ix, storage);
    } else {
      // Since we did not find many backward references and the entropy of
      // the data is close to 8 bits, we can simply emit an uncompressed block.
      // This makes compression speed of incompressible data about 3x faster.
      StoreMetaBlockHeader(block_size, 1, storage_ix, storage);
      *storage_ix = (*storage_ix + 7u) & ~7u;
      memcpy(&storage[*storage_ix >> 3], input, block_size);
      *storage_ix += block_size << 3;
      storage[*storage_ix >> 3] = 0;
    }
    input += block_size;
    input_size -= block_size;
  }

  if (is_last) {
    WriteBits(1, 1, storage_ix, storage);  // ISLAST
    WriteBits(1, 1, storage_ix, storage);  // ISEMPTY
    *storage_ix = (*storage_ix + 7u) & ~7u;
  }
}
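
// A sketch of a call site (hypothetical; the exact buffer contract lives in
// compress_fragment_two_pass.h, not here): the caller supplies a
// zero-initialized "table" whose size is a power of two, a "command_buf" /
// "literal_buf" pair sized for one kCompressFragmentTwoPassBlockSize block,
// and a "storage" buffer with enough slack for the uncompressed fallback
// path, then calls BrotliCompressFragmentTwoPass() once per fragment, with
// is_last set on the final one.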

}  // namespace brotli