Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(11)

Side by Side Diff: third_party/brotli/enc/compress_fragment_two_pass.c

Issue 2537133002: Update brotli to v1.0.0-snapshot. (Closed)
Patch Set: Created 4 years ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
(Empty)
1 /* Copyright 2015 Google Inc. All Rights Reserved.
2
3 Distributed under MIT license.
4 See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
5 */
6
7 /* Function for fast encoding of an input fragment, independently from the input
8 history. This function uses two-pass processing: in the first pass we save
9 the found backward matches and literal bytes into a buffer, and in the
10 second pass we emit them into the bit stream using prefix codes built based
11 on the actual command and literal byte histograms. */
12
13 #include "./compress_fragment_two_pass.h"
14
15 #include <string.h> /* memcmp, memcpy, memset */
16
17 #include "../common/constants.h"
18 #include <brotli/types.h>
19 #include "./bit_cost.h"
20 #include "./brotli_bit_stream.h"
21 #include "./entropy_encode.h"
22 #include "./fast_log.h"
23 #include "./find_match_length.h"
24 #include "./memory.h"
25 #include "./port.h"
26 #include "./write_bits.h"
27
28
29 #if defined(__cplusplus) || defined(c_plusplus)
30 extern "C" {
31 #endif
32
/* Same as MaxBackwardLimit(18): the largest usable backward-reference
   distance for an 18-bit window, minus the window gap. */
#define MAX_DISTANCE ((1 << 18) - BROTLI_WINDOW_GAP)

/* kHashMul32 multiplier has these properties:
   * The multiplier must be odd. Otherwise we may lose the highest bit.
   * No long streaks of ones or zeros.
   * There is no effort to ensure that it is a prime, the oddity is enough
     for this use.
   * The number has been tuned heuristically against compression benchmarks. */
static const uint32_t kHashMul32 = 0x1e35a7bd;
43
44 static BROTLI_INLINE uint32_t Hash(const uint8_t* p, size_t shift) {
45 const uint64_t h = (BROTLI_UNALIGNED_LOAD64(p) << 16) * kHashMul32;
46 return (uint32_t)(h >> shift);
47 }
48
49 static BROTLI_INLINE uint32_t HashBytesAtOffset(
50 uint64_t v, int offset, size_t shift) {
51 assert(offset >= 0);
52 assert(offset <= 2);
53 {
54 const uint64_t h = ((v >> (8 * offset)) << 16) * kHashMul32;
55 return (uint32_t)(h >> shift);
56 }
57 }
58
59 static BROTLI_INLINE BROTLI_BOOL IsMatch(const uint8_t* p1, const uint8_t* p2) {
60 return TO_BROTLI_BOOL(
61 BROTLI_UNALIGNED_LOAD32(p1) == BROTLI_UNALIGNED_LOAD32(p2) &&
62 p1[4] == p2[4] &&
63 p1[5] == p2[5]);
64 }
65
/* Builds a command and distance prefix code (each 64 symbols) into "depth" and
   "bits" based on "histogram" and stores it into the bit stream.
   depth/bits[0..63] hold the command code, depth/bits[64..127] the distance
   code. */
static void BuildAndStoreCommandPrefixCode(
    const uint32_t histogram[128],
    uint8_t depth[128], uint16_t bits[128],
    size_t* storage_ix, uint8_t* storage) {
  /* Tree size for building a tree over 64 symbols is 2 * 64 + 1. */
  HuffmanTree tree[129];
  uint8_t cmd_depth[BROTLI_NUM_COMMAND_SYMBOLS] = { 0 };
  uint16_t cmd_bits[64];
  /* Separate trees for the 64 command symbols and the 64 distance symbols. */
  BrotliCreateHuffmanTree(histogram, 64, 15, tree, depth);
  BrotliCreateHuffmanTree(&histogram[64], 64, 14, tree, &depth[64]);
  /* We have to jump through a few hoops here in order to compute
     the command bits because the symbols are in a different order than in
     the full alphabet. This looks complicated, but having the symbols
     in this order in the command bits saves a few branches in the Emit*
     functions. */
  memcpy(cmd_depth, depth + 24, 24);
  memcpy(cmd_depth + 24, depth, 8);
  memcpy(cmd_depth + 32, depth + 48, 8);
  memcpy(cmd_depth + 40, depth + 8, 8);
  memcpy(cmd_depth + 48, depth + 56, 8);
  memcpy(cmd_depth + 56, depth + 16, 8);
  BrotliConvertBitDepthsToSymbols(cmd_depth, 64, cmd_bits);
  /* Scatter the computed codes back into the Emit* symbol order.
     Note: the 16/48-byte copies move 8/24 uint16_t entries. */
  memcpy(bits, cmd_bits + 24, 16);
  memcpy(bits + 8, cmd_bits + 40, 16);
  memcpy(bits + 16, cmd_bits + 56, 16);
  memcpy(bits + 24, cmd_bits, 48);
  memcpy(bits + 48, cmd_bits + 32, 16);
  memcpy(bits + 56, cmd_bits + 48, 16);
  BrotliConvertBitDepthsToSymbols(&depth[64], 64, &bits[64]);
  {
    /* Create the bit length array for the full command alphabet. */
    size_t i;
    memset(cmd_depth, 0, 64);  /* only 64 first values were used */
    memcpy(cmd_depth, depth + 24, 8);
    memcpy(cmd_depth + 64, depth + 32, 8);
    memcpy(cmd_depth + 128, depth + 40, 8);
    memcpy(cmd_depth + 192, depth + 48, 8);
    memcpy(cmd_depth + 384, depth + 56, 8);
    for (i = 0; i < 8; ++i) {
      cmd_depth[128 + 8 * i] = depth[i];
      cmd_depth[256 + 8 * i] = depth[8 + i];
      cmd_depth[448 + 8 * i] = depth[16 + i];
    }
    BrotliStoreHuffmanTree(
        cmd_depth, BROTLI_NUM_COMMAND_SYMBOLS, tree, storage_ix, storage);
  }
  BrotliStoreHuffmanTree(&depth[64], 64, tree, storage_ix, storage);
}
116
117 static BROTLI_INLINE void EmitInsertLen(
118 uint32_t insertlen, uint32_t** commands) {
119 if (insertlen < 6) {
120 **commands = insertlen;
121 } else if (insertlen < 130) {
122 const uint32_t tail = insertlen - 2;
123 const uint32_t nbits = Log2FloorNonZero(tail) - 1u;
124 const uint32_t prefix = tail >> nbits;
125 const uint32_t inscode = (nbits << 1) + prefix + 2;
126 const uint32_t extra = tail - (prefix << nbits);
127 **commands = inscode | (extra << 8);
128 } else if (insertlen < 2114) {
129 const uint32_t tail = insertlen - 66;
130 const uint32_t nbits = Log2FloorNonZero(tail);
131 const uint32_t code = nbits + 10;
132 const uint32_t extra = tail - (1u << nbits);
133 **commands = code | (extra << 8);
134 } else if (insertlen < 6210) {
135 const uint32_t extra = insertlen - 2114;
136 **commands = 21 | (extra << 8);
137 } else if (insertlen < 22594) {
138 const uint32_t extra = insertlen - 6210;
139 **commands = 22 | (extra << 8);
140 } else {
141 const uint32_t extra = insertlen - 22594;
142 **commands = 23 | (extra << 8);
143 }
144 ++(*commands);
145 }
146
147 static BROTLI_INLINE void EmitCopyLen(size_t copylen, uint32_t** commands) {
148 if (copylen < 10) {
149 **commands = (uint32_t)(copylen + 38);
150 } else if (copylen < 134) {
151 const size_t tail = copylen - 6;
152 const size_t nbits = Log2FloorNonZero(tail) - 1;
153 const size_t prefix = tail >> nbits;
154 const size_t code = (nbits << 1) + prefix + 44;
155 const size_t extra = tail - (prefix << nbits);
156 **commands = (uint32_t)(code | (extra << 8));
157 } else if (copylen < 2118) {
158 const size_t tail = copylen - 70;
159 const size_t nbits = Log2FloorNonZero(tail);
160 const size_t code = nbits + 52;
161 const size_t extra = tail - ((size_t)1 << nbits);
162 **commands = (uint32_t)(code | (extra << 8));
163 } else {
164 const size_t extra = copylen - 2118;
165 **commands = (uint32_t)(63 | (extra << 8));
166 }
167 ++(*commands);
168 }
169
/* Emits the copy-length command for a copy that reuses the previous
   distance. Short lengths use combined codes (20..35, 54.....); longer
   lengths fall back to a plain copy code followed by the explicit
   "last distance" symbol 64. */
static BROTLI_INLINE void EmitCopyLenLastDistance(
    size_t copylen, uint32_t** commands) {
  if (copylen < 12) {
    /* Combined code; no separate distance symbol needed. */
    **commands = (uint32_t)(copylen + 20);
    ++(*commands);
  } else if (copylen < 72) {
    /* Combined code with extra bits. */
    const size_t tail = copylen - 8;
    const size_t nbits = Log2FloorNonZero(tail) - 1;
    const size_t prefix = tail >> nbits;
    const size_t code = (nbits << 1) + prefix + 28;
    const size_t extra = tail - (prefix << nbits);
    **commands = (uint32_t)(code | (extra << 8));
    ++(*commands);
  } else if (copylen < 136) {
    const size_t tail = copylen - 8;
    const size_t code = (tail >> 5) + 54;
    const size_t extra = tail & 31;
    **commands = (uint32_t)(code | (extra << 8));
    ++(*commands);
    /* Symbol 64 re-emits the last used distance. */
    **commands = 64;
    ++(*commands);
  } else if (copylen < 2120) {
    const size_t tail = copylen - 72;
    const size_t nbits = Log2FloorNonZero(tail);
    const size_t code = nbits + 52;
    const size_t extra = tail - ((size_t)1 << nbits);
    **commands = (uint32_t)(code | (extra << 8));
    ++(*commands);
    **commands = 64;
    ++(*commands);
  } else {
    const size_t extra = copylen - 2120;
    **commands = (uint32_t)(63 | (extra << 8));
    ++(*commands);
    **commands = 64;
    ++(*commands);
  }
}
208
209 static BROTLI_INLINE void EmitDistance(uint32_t distance, uint32_t** commands) {
210 uint32_t d = distance + 3;
211 uint32_t nbits = Log2FloorNonZero(d) - 1;
212 const uint32_t prefix = (d >> nbits) & 1;
213 const uint32_t offset = (2 + prefix) << nbits;
214 const uint32_t distcode = 2 * (nbits - 1) + prefix + 80;
215 uint32_t extra = d - offset;
216 **commands = distcode | (extra << 8);
217 ++(*commands);
218 }
219
220 /* REQUIRES: len <= 1 << 20. */
221 static void BrotliStoreMetaBlockHeader(
222 size_t len, BROTLI_BOOL is_uncompressed, size_t* storage_ix,
223 uint8_t* storage) {
224 /* ISLAST */
225 BrotliWriteBits(1, 0, storage_ix, storage);
226 if (len <= (1U << 16)) {
227 /* MNIBBLES is 4 */
228 BrotliWriteBits(2, 0, storage_ix, storage);
229 BrotliWriteBits(16, len - 1, storage_ix, storage);
230 } else {
231 /* MNIBBLES is 5 */
232 BrotliWriteBits(2, 1, storage_ix, storage);
233 BrotliWriteBits(20, len - 1, storage_ix, storage);
234 }
235 /* ISUNCOMPRESSED */
236 BrotliWriteBits(1, (uint64_t)is_uncompressed, storage_ix, storage);
237 }
238
/* Pass 1: scans "block_size" bytes of "input" for backward matches (hashing
   6-byte windows into "table") and appends the literal bytes to *literals
   and the encoded insert/copy/distance commands to *commands, advancing both
   output pointers. "base_ip" anchors the positions stored in the hash table;
   "input_size" is the total remaining input, used to size the end margin. */
static BROTLI_INLINE void CreateCommands(const uint8_t* input,
    size_t block_size, size_t input_size, const uint8_t* base_ip, int* table,
    size_t table_bits, uint8_t** literals, uint32_t** commands) {
  /* "ip" is the input pointer. */
  const uint8_t* ip = input;
  const size_t shift = 64u - table_bits;
  const uint8_t* ip_end = input + block_size;
  /* "next_emit" is a pointer to the first byte that is not covered by a
     previous copy. Bytes between "next_emit" and the start of the next copy or
     the end of the input will be emitted as literal bytes. */
  const uint8_t* next_emit = input;

  int last_distance = -1;
  const size_t kInputMarginBytes = BROTLI_WINDOW_GAP;
  const size_t kMinMatchLen = 6;

  if (BROTLI_PREDICT_TRUE(block_size >= kInputMarginBytes)) {
    /* For the last block, we need to keep a 16 bytes margin so that we can be
       sure that all distances are at most window size - 16.
       For all other blocks, we only need to keep a margin of 5 bytes so that
       we don't go over the block size with a copy. */
    const size_t len_limit = BROTLI_MIN(size_t, block_size - kMinMatchLen,
                                        input_size - kInputMarginBytes);
    const uint8_t* ip_limit = input + len_limit;

    uint32_t next_hash;
    for (next_hash = Hash(++ip, shift); ; ) {
      /* Step 1: Scan forward in the input looking for a 6-byte-long match.
         If we get close to exhausting the input then goto emit_remainder.

         Heuristic match skipping: If 32 bytes are scanned with no matches
         found, start looking only at every other byte. If 32 more bytes are
         scanned, look at every third byte, etc.. When a match is found,
         immediately go back to looking at every byte. This is a small loss
         (~5% performance, ~0.1% density) for compressible data due to more
         bookkeeping, but for non-compressible data (such as JPEG) it's a huge
         win since the compressor quickly "realizes" the data is incompressible
         and doesn't bother looking for matches everywhere.

         The "skip" variable keeps track of how many bytes there are since the
         last match; dividing it by 32 (ie. right-shifting by five) gives the
         number of bytes to move ahead for each iteration. */
      uint32_t skip = 32;

      const uint8_t* next_ip = ip;
      const uint8_t* candidate;

      assert(next_emit < ip);
trawl:
      do {
        uint32_t hash = next_hash;
        uint32_t bytes_between_hash_lookups = skip++ >> 5;
        ip = next_ip;
        assert(hash == Hash(ip, shift));
        next_ip = ip + bytes_between_hash_lookups;
        if (BROTLI_PREDICT_FALSE(next_ip > ip_limit)) {
          goto emit_remainder;
        }
        next_hash = Hash(next_ip, shift);
        /* Try the previous distance first: matching there lets us emit the
           cheap "last distance" symbol 64 below. */
        candidate = ip - last_distance;
        if (IsMatch(ip, candidate)) {
          /* candidate < ip also filters out the initial last_distance == -1
             case, where "candidate" would point ahead of "ip". */
          if (BROTLI_PREDICT_TRUE(candidate < ip)) {
            table[hash] = (int)(ip - base_ip);
            break;
          }
        }
        candidate = base_ip + table[hash];
        assert(candidate >= base_ip);
        assert(candidate < ip);

        table[hash] = (int)(ip - base_ip);
      } while (BROTLI_PREDICT_TRUE(!IsMatch(ip, candidate)));

      /* Check copy distance. If candidate is not feasible, continue search.
         Checking is done outside of hot loop to reduce overhead. */
      if (ip - candidate > MAX_DISTANCE) goto trawl;

      /* Step 2: Emit the found match together with the literal bytes from
         "next_emit", and then see if we can find a next match immediately
         afterwards. Repeat until we find no match for the input
         without emitting some literal bytes. */

      {
        /* We have a 6-byte match at ip, and we need to emit bytes in
           [next_emit, ip). */
        const uint8_t* base = ip;
        size_t matched = 6 + FindMatchLengthWithLimit(
            candidate + 6, ip + 6, (size_t)(ip_end - ip) - 6);
        int distance = (int)(base - candidate);  /* > 0 */
        int insert = (int)(base - next_emit);
        ip += matched;
        assert(0 == memcmp(base, candidate, matched));
        EmitInsertLen((uint32_t)insert, commands);
        memcpy(*literals, next_emit, (size_t)insert);
        *literals += insert;
        if (distance == last_distance) {
          /* Symbol 64 means "reuse the previous distance". */
          **commands = 64;
          ++(*commands);
        } else {
          EmitDistance((uint32_t)distance, commands);
          last_distance = distance;
        }
        EmitCopyLenLastDistance(matched, commands);

        next_emit = ip;
        if (BROTLI_PREDICT_FALSE(ip >= ip_limit)) {
          goto emit_remainder;
        }
        {
          /* We could immediately start working at ip now, but to improve
             compression we first update "table" with the hashes of some
             positions within the last copy. */
          uint64_t input_bytes = BROTLI_UNALIGNED_LOAD64(ip - 5);
          uint32_t prev_hash = HashBytesAtOffset(input_bytes, 0, shift);
          uint32_t cur_hash;
          table[prev_hash] = (int)(ip - base_ip - 5);
          prev_hash = HashBytesAtOffset(input_bytes, 1, shift);
          table[prev_hash] = (int)(ip - base_ip - 4);
          prev_hash = HashBytesAtOffset(input_bytes, 2, shift);
          table[prev_hash] = (int)(ip - base_ip - 3);
          input_bytes = BROTLI_UNALIGNED_LOAD64(ip - 2);
          cur_hash = HashBytesAtOffset(input_bytes, 2, shift);
          prev_hash = HashBytesAtOffset(input_bytes, 0, shift);
          table[prev_hash] = (int)(ip - base_ip - 2);
          prev_hash = HashBytesAtOffset(input_bytes, 1, shift);
          table[prev_hash] = (int)(ip - base_ip - 1);

          candidate = base_ip + table[cur_hash];
          table[cur_hash] = (int)(ip - base_ip);
        }
      }

      /* Chain further matches that start exactly at ip (no literals). */
      while (ip - candidate <= MAX_DISTANCE && IsMatch(ip, candidate)) {
        /* We have a 6-byte match at ip, and no need to emit any
           literal bytes prior to ip. */
        const uint8_t* base = ip;
        size_t matched = 6 + FindMatchLengthWithLimit(
            candidate + 6, ip + 6, (size_t)(ip_end - ip) - 6);
        ip += matched;
        last_distance = (int)(base - candidate);  /* > 0 */
        assert(0 == memcmp(base, candidate, matched));
        EmitCopyLen(matched, commands);
        EmitDistance((uint32_t)last_distance, commands);

        next_emit = ip;
        if (BROTLI_PREDICT_FALSE(ip >= ip_limit)) {
          goto emit_remainder;
        }
        {
          /* We could immediately start working at ip now, but to improve
             compression we first update "table" with the hashes of some
             positions within the last copy. */
          uint64_t input_bytes = BROTLI_UNALIGNED_LOAD64(ip - 5);
          uint32_t prev_hash = HashBytesAtOffset(input_bytes, 0, shift);
          uint32_t cur_hash;
          table[prev_hash] = (int)(ip - base_ip - 5);
          prev_hash = HashBytesAtOffset(input_bytes, 1, shift);
          table[prev_hash] = (int)(ip - base_ip - 4);
          prev_hash = HashBytesAtOffset(input_bytes, 2, shift);
          table[prev_hash] = (int)(ip - base_ip - 3);
          input_bytes = BROTLI_UNALIGNED_LOAD64(ip - 2);
          cur_hash = HashBytesAtOffset(input_bytes, 2, shift);
          prev_hash = HashBytesAtOffset(input_bytes, 0, shift);
          table[prev_hash] = (int)(ip - base_ip - 2);
          prev_hash = HashBytesAtOffset(input_bytes, 1, shift);
          table[prev_hash] = (int)(ip - base_ip - 1);

          candidate = base_ip + table[cur_hash];
          table[cur_hash] = (int)(ip - base_ip);
        }
      }

      next_hash = Hash(++ip, shift);
    }
  }

emit_remainder:
  assert(next_emit <= ip_end);
  /* Emit the remaining bytes as literals. */
  if (next_emit < ip_end) {
    const uint32_t insert = (uint32_t)(ip_end - next_emit);
    EmitInsertLen(insert, commands);
    memcpy(*literals, next_emit, insert);
    *literals += insert;
  }
}
425
/* Pass 2: builds Huffman codes from the literal and command histograms,
   stores the code tables, then writes every buffered command into the bit
   stream, interleaving the literals consumed by each insert code. */
static void StoreCommands(MemoryManager* m,
    const uint8_t* literals, const size_t num_literals,
    const uint32_t* commands, const size_t num_commands,
    size_t* storage_ix, uint8_t* storage) {
  /* Number of extra bits for each of the 128 command/distance symbols
     (a command packs the symbol in the low byte, extra bits above it). */
  static const uint32_t kNumExtraBits[128] = {
    0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7, 8, 9, 10, 12, 14, 24,
    0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4,
    0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7, 8, 9, 10, 24,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
    9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15, 16, 16,
    17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24, 24,
  };
  /* Base insert length for each of the 24 insert symbols; adding the extra
     bits recovers the literal count (matches EmitInsertLen's encoding). */
  static const uint32_t kInsertOffset[24] = {
    0, 1, 2, 3, 4, 5, 6, 8, 10, 14, 18, 26, 34, 50, 66, 98, 130, 194, 322, 578,
    1090, 2114, 6210, 22594,
  };

  uint8_t lit_depths[256];
  uint16_t lit_bits[256];
  uint32_t lit_histo[256] = { 0 };
  uint8_t cmd_depths[128] = { 0 };
  uint16_t cmd_bits[128] = { 0 };
  uint32_t cmd_histo[128] = { 0 };
  size_t i;
  /* Build and store the literal prefix code. */
  for (i = 0; i < num_literals; ++i) {
    ++lit_histo[literals[i]];
  }
  BrotliBuildAndStoreHuffmanTreeFast(m, lit_histo, num_literals,
                                     /* max_bits = */ 8,
                                     lit_depths, lit_bits,
                                     storage_ix, storage);
  if (BROTLI_IS_OOM(m)) return;

  /* Build and store the command/distance prefix code. */
  for (i = 0; i < num_commands; ++i) {
    ++cmd_histo[commands[i] & 0xff];
  }
  /* NOTE(review): seeds counts for symbols 1, 2, 64 and 84 — presumably so
     these symbols always receive a prefix code; confirm against upstream. */
  cmd_histo[1] += 1;
  cmd_histo[2] += 1;
  cmd_histo[64] += 1;
  cmd_histo[84] += 1;
  BuildAndStoreCommandPrefixCode(cmd_histo, cmd_depths, cmd_bits,
                                 storage_ix, storage);

  /* Emit each command followed by the literals its insert symbol covers. */
  for (i = 0; i < num_commands; ++i) {
    const uint32_t cmd = commands[i];
    const uint32_t code = cmd & 0xff;
    const uint32_t extra = cmd >> 8;
    BrotliWriteBits(cmd_depths[code], cmd_bits[code], storage_ix, storage);
    BrotliWriteBits(kNumExtraBits[code], extra, storage_ix, storage);
    if (code < 24) {
      /* Insert symbols are 0..23; write the inserted literal bytes inline. */
      const uint32_t insert = kInsertOffset[code] + extra;
      uint32_t j;
      for (j = 0; j < insert; ++j) {
        const uint8_t lit = *literals;
        BrotliWriteBits(lit_depths[lit], lit_bits[lit], storage_ix, storage);
        ++literals;
      }
    }
  }
}
487
488 /* Acceptable loss for uncompressible speedup is 2% */
489 #define MIN_RATIO 0.98
490 #define SAMPLE_RATE 43
491
492 static BROTLI_BOOL ShouldCompress(
493 const uint8_t* input, size_t input_size, size_t num_literals) {
494 double corpus_size = (double)input_size;
495 if (num_literals < MIN_RATIO * corpus_size) {
496 return BROTLI_TRUE;
497 } else {
498 uint32_t literal_histo[256] = { 0 };
499 const double max_total_bit_cost = corpus_size * 8 * MIN_RATIO / SAMPLE_RATE;
500 size_t i;
501 for (i = 0; i < input_size; i += SAMPLE_RATE) {
502 ++literal_histo[input[i]];
503 }
504 return TO_BROTLI_BOOL(BitsEntropy(literal_histo, 256) < max_total_bit_cost);
505 }
506 }
507
508 static BROTLI_INLINE void BrotliCompressFragmentTwoPassImpl(
509 MemoryManager* m, const uint8_t* input, size_t input_size,
510 BROTLI_BOOL is_last, uint32_t* command_buf, uint8_t* literal_buf,
511 int* table, size_t table_bits, size_t* storage_ix, uint8_t* storage) {
512 /* Save the start of the first block for position and distance computations.
513 */
514 const uint8_t* base_ip = input;
515
516 while (input_size > 0) {
517 size_t block_size =
518 BROTLI_MIN(size_t, input_size, kCompressFragmentTwoPassBlockSize);
519 uint32_t* commands = command_buf;
520 uint8_t* literals = literal_buf;
521 size_t num_literals;
522 CreateCommands(input, block_size, input_size, base_ip, table, table_bits,
523 &literals, &commands);
524 num_literals = (size_t)(literals - literal_buf);
525 if (ShouldCompress(input, block_size, num_literals)) {
526 const size_t num_commands = (size_t)(commands - command_buf);
527 BrotliStoreMetaBlockHeader(block_size, 0, storage_ix, storage);
528 /* No block splits, no contexts. */
529 BrotliWriteBits(13, 0, storage_ix, storage);
530 StoreCommands(m, literal_buf, num_literals, command_buf, num_commands,
531 storage_ix, storage);
532 if (BROTLI_IS_OOM(m)) return;
533 } else {
534 /* Since we did not find many backward references and the entropy of
535 the data is close to 8 bits, we can simply emit an uncompressed block.
536 This makes compression speed of uncompressible data about 3x faster. */
537 BrotliStoreMetaBlockHeader(block_size, 1, storage_ix, storage);
538 *storage_ix = (*storage_ix + 7u) & ~7u;
539 memcpy(&storage[*storage_ix >> 3], input, block_size);
540 *storage_ix += block_size << 3;
541 storage[*storage_ix >> 3] = 0;
542 }
543 input += block_size;
544 input_size -= block_size;
545 }
546
547 if (is_last) {
548 BrotliWriteBits(1, 1, storage_ix, storage); /* islast */
549 BrotliWriteBits(1, 1, storage_ix, storage); /* isempty */
550 *storage_ix = (*storage_ix + 7u) & ~7u;
551 }
552 }
553
/* Expands X once for each supported hash-table size in bits (8..17). */
#define FOR_TABLE_BITS_(X) \
  X(8) X(9) X(10) X(11) X(12) X(13) X(14) X(15) X(16) X(17)

/* Instantiates a non-inlined specialization of the compressor with the
   table_bits parameter baked in as the compile-time constant B. */
#define BAKE_METHOD_PARAM_(B) \
static BROTLI_NOINLINE void BrotliCompressFragmentTwoPassImpl ## B( \
    MemoryManager* m, const uint8_t* input, size_t input_size, \
    BROTLI_BOOL is_last, uint32_t* command_buf, uint8_t* literal_buf, \
    int* table, size_t* storage_ix, uint8_t* storage) { \
  BrotliCompressFragmentTwoPassImpl(m, input, input_size, is_last, command_buf,\
      literal_buf, table, B, storage_ix, storage); \
}
FOR_TABLE_BITS_(BAKE_METHOD_PARAM_)
#undef BAKE_METHOD_PARAM_
567
/* Public entry point: dispatches to the specialization whose baked-in
   table_bits equals Log2FloorNonZero(table_size). table_size is presumably a
   power of two (TODO confirm); a size whose floor-log2 falls outside 8..17
   (see FOR_TABLE_BITS_) hits the assert in the default case. */
void BrotliCompressFragmentTwoPass(
    MemoryManager* m, const uint8_t* input, size_t input_size,
    BROTLI_BOOL is_last, uint32_t* command_buf, uint8_t* literal_buf,
    int* table, size_t table_size, size_t* storage_ix, uint8_t* storage) {
  const size_t table_bits = Log2FloorNonZero(table_size);
  switch (table_bits) {
#define CASE_(B)                                      \
    case B:                                           \
      BrotliCompressFragmentTwoPassImpl ## B(         \
          m, input, input_size, is_last, command_buf, \
          literal_buf, table, storage_ix, storage);   \
      break;
    FOR_TABLE_BITS_(CASE_)
#undef CASE_
    default: assert(0); break;
  }
}
585
586 #undef FOR_TABLE_BITS_
587
588 #if defined(__cplusplus) || defined(c_plusplus)
589 } /* extern "C" */
590 #endif
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698