Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(232)

Side by Side Diff: third_party/brotli/enc/encode.cc

Issue 1956893002: Added brotli enc/ and tools/ directories. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Updated to most recent build tools Created 4 years, 7 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « third_party/brotli/enc/encode.h ('k') | third_party/brotli/enc/encode_parallel.h » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
(Empty)
1 /* Copyright 2013 Google Inc. All Rights Reserved.
2
3 Distributed under MIT license.
4 See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
5 */
6
7 // Implementation of Brotli compressor.
8
9 #include "./encode.h"
10
11 #include <algorithm>
12 #include <cstdlib> /* free, malloc */
13 #include <cstring> /* memset */
14 #include <limits>
15
16 #include "./backward_references.h"
17 #include "./bit_cost.h"
18 #include "./block_splitter.h"
19 #include "./brotli_bit_stream.h"
20 #include "./cluster.h"
21 #include "./context.h"
22 #include "./metablock.h"
23 #include "./transform.h"
24 #include "./compress_fragment.h"
25 #include "./compress_fragment_two_pass.h"
26 #include "./entropy_encode.h"
27 #include "./fast_log.h"
28 #include "./hash.h"
29 #include "./histogram.h"
30 #include "./prefix.h"
31 #include "./utf8_util.h"
32 #include "./write_bits.h"
33
namespace brotli {

// Minimum quality at which block splitting is performed (see
// WriteMetaBlockInternal and the constructor's lgblock selection).
static const int kMinQualityForBlockSplit = 4;
// Minimum quality at which literal context modeling is considered
// (see DecideOverLiteralContextModeling).
static const int kMinQualityForContextModeling = 5;
// Minimum quality at which histograms are optimized before storing.
static const int kMinQualityForOptimizeHistograms = 4;
// For quality 2 there is no block splitting, so we buffer at most this much
// literals and commands.
static const size_t kMaxNumDelayedSymbols = 0x2fff;

// Copies a whole fixed-size array; sizeof(src) must be the full array size,
// so src must be a true array, not a pointer.
#define COPY_ARRAY(dst, src) memcpy(dst, src, sizeof(src));
44
45 static void RecomputeDistancePrefixes(Command* cmds,
46 size_t num_commands,
47 uint32_t num_direct_distance_codes,
48 uint32_t distance_postfix_bits) {
49 if (num_direct_distance_codes == 0 && distance_postfix_bits == 0) {
50 return;
51 }
52 for (size_t i = 0; i < num_commands; ++i) {
53 Command* cmd = &cmds[i];
54 if (cmd->copy_len() && cmd->cmd_prefix_ >= 128) {
55 PrefixEncodeCopyDistance(cmd->DistanceCode(),
56 num_direct_distance_codes,
57 distance_postfix_bits,
58 &cmd->dist_prefix_,
59 &cmd->dist_extra_);
60 }
61 }
62 }
63
/* Wraps a 64-bit input position to a 32-bit ringbuffer position while
   preserving the "not-a-first-lap" feature: once the position has passed
   2^30, bit 30 stays set so the result never looks like an early position
   from the first pass over the buffer. */
static uint32_t WrapPosition(uint64_t position) {
  const uint32_t truncated = static_cast<uint32_t>(position);
  if (position <= (1u << 30)) {
    return truncated;
  }
  return (truncated & ((1u << 30) - 1)) | (1u << 30);
}
73
74 uint8_t* BrotliCompressor::GetBrotliStorage(size_t size) {
75 if (storage_size_ < size) {
76 delete[] storage_;
77 storage_ = new uint8_t[size];
78 storage_size_ = size;
79 }
80 return storage_;
81 }
82
// Upper bound for the fast-path hash table size: quality 0 keeps a small
// 32K-entry table for speed, every other quality allows 128K entries.
static size_t MaxHashTableSize(int quality) {
  if (quality == 0) {
    return size_t(1) << 15;
  }
  return size_t(1) << 17;
}
86
// Picks the smallest power-of-two table size (at least 256) that covers the
// input, without reaching max_table_size. Starting at 256 the size doubles
// while it is still below both limits.
static size_t HashTableSize(size_t max_table_size, size_t input_size) {
  size_t htsize;
  for (htsize = 256;
       htsize < max_table_size && htsize < input_size;
       htsize <<= 1) {
    // Keep doubling.
  }
  return htsize;
}
94
95 int* BrotliCompressor::GetHashTable(int quality,
96 size_t input_size,
97 size_t* table_size) {
98 // Use smaller hash table when input.size() is smaller, since we
99 // fill the table, incurring O(hash table size) overhead for
100 // compression, and if the input is short, we won't need that
101 // many hash table entries anyway.
102 const size_t max_table_size = MaxHashTableSize(quality);
103 assert(max_table_size >= 256);
104 size_t htsize = HashTableSize(max_table_size, input_size);
105
106 int* table;
107 if (htsize <= sizeof(small_table_) / sizeof(small_table_[0])) {
108 table = small_table_;
109 } else {
110 if (large_table_ == NULL) {
111 large_table_ = new int[max_table_size];
112 }
113 table = large_table_;
114 }
115
116 *table_size = htsize;
117 memset(table, 0, htsize * sizeof(*table));
118 return table;
119 }
120
// Encodes the sliding-window size (lgwin) into the stream header, writing
// the header byte and its bit count into *last_byte / *last_byte_bits.
// lgwin is assumed to be within [kMinWindowBits, kMaxWindowBits] — the
// constructor clamps it before calling this.
static void EncodeWindowBits(int lgwin, uint8_t* last_byte,
                             uint8_t* last_byte_bits) {
  if (lgwin == 16) {
    // Single 0 bit: the shortest header.
    *last_byte = 0;
    *last_byte_bits = 1;
    return;
  }
  if (lgwin == 17) {
    *last_byte = 1;
    *last_byte_bits = 7;
    return;
  }
  if (lgwin > 17) {
    *last_byte = static_cast<uint8_t>(((lgwin - 17) << 1) | 1);
    *last_byte_bits = 4;
    return;
  }
  // Remaining case: lgwin < 16.
  *last_byte = static_cast<uint8_t>(((lgwin - 8) << 4) | 1);
  *last_byte_bits = 7;
}
137
// Initializes the command and distance prefix codes for the first block
// of the quality-0 fast path.
//
// cmd_depths/cmd_bits receive a default Huffman code (depths and bit
// patterns) for the 128 command symbols; cmd_code receives the
// pre-serialized bit-stream form of that code, with its length in bits
// stored in *cmd_code_numbits, so the header can be emitted without
// re-encoding the code each time.
static void InitCommandPrefixCodes(uint8_t cmd_depths[128],
                                   uint16_t cmd_bits[128],
                                   uint8_t cmd_code[512],
                                   size_t* cmd_code_numbits) {
  // Default code lengths (depths) tuned for typical command statistics.
  static const uint8_t kDefaultCommandDepths[128] = {
    0, 4, 4, 5, 6, 6, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8,
    0, 0, 0, 4, 4, 4, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7,
    7, 7, 10, 10, 10, 10, 10, 10, 0, 4, 4, 5, 5, 5, 6, 6,
    7, 8, 8, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
    5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    6, 6, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4,
    4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 7, 7, 7, 8, 10,
    12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
  };
  // Canonical bit patterns matching kDefaultCommandDepths.
  static const uint16_t kDefaultCommandBits[128] = {
    0, 0, 8, 9, 3, 35, 7, 71,
    39, 103, 23, 47, 175, 111, 239, 31,
    0, 0, 0, 4, 12, 2, 10, 6,
    13, 29, 11, 43, 27, 59, 87, 55,
    15, 79, 319, 831, 191, 703, 447, 959,
    0, 14, 1, 25, 5, 21, 19, 51,
    119, 159, 95, 223, 479, 991, 63, 575,
    127, 639, 383, 895, 255, 767, 511, 1023,
    14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    27, 59, 7, 39, 23, 55, 30, 1, 17, 9, 25, 5, 0, 8, 4, 12,
    2, 10, 6, 21, 13, 29, 3, 19, 11, 15, 47, 31, 95, 63, 127, 255,
    767, 2815, 1791, 3839, 511, 2559, 1535, 3583, 1023, 3071, 2047, 4095,
  };
  COPY_ARRAY(cmd_depths, kDefaultCommandDepths);
  COPY_ARRAY(cmd_bits, kDefaultCommandBits);

  // Initialize the pre-compressed form of the command and distance prefix
  // codes.
  static const uint8_t kDefaultCommandCode[] = {
    0xff, 0x77, 0xd5, 0xbf, 0xe7, 0xde, 0xea, 0x9e, 0x51, 0x5d, 0xde, 0xc6,
    0x70, 0x57, 0xbc, 0x58, 0x58, 0x58, 0xd8, 0xd8, 0x58, 0xd5, 0xcb, 0x8c,
    0xea, 0xe0, 0xc3, 0x87, 0x1f, 0x83, 0xc1, 0x60, 0x1c, 0x67, 0xb2, 0xaa,
    0x06, 0x83, 0xc1, 0x60, 0x30, 0x18, 0xcc, 0xa1, 0xce, 0x88, 0x54, 0x94,
    0x46, 0xe1, 0xb0, 0xd0, 0x4e, 0xb2, 0xf7, 0x04, 0x00,
  };
  // Bit-length of kDefaultCommandCode (448 bits = 56 bytes of payload).
  static const int kDefaultCommandCodeNumBits = 448;
  COPY_ARRAY(cmd_code, kDefaultCommandCode);
  *cmd_code_numbits = kDefaultCommandCodeNumBits;
}
183
// Decides the literal context map based on how well the UTF8 prefix class
// of the previous byte predicts the next byte. The predictive power is
// measured as Shannon entropy. Shannon entropy is used here instead of
// 'BitsEntropy' since the prefix will be encoded with the remaining 6 bits
// of the following byte, and BitsEntropy would assume that symbol to be
// stored alone using Huffman coding.
//
// bigram_histo is a 3x3 histogram (previous prefix class x current prefix
// class); on return *num_literal_contexts is 1, 2 or 3 and, for 2 or 3,
// *literal_context_map points at the matching static context map.
static void ChooseContextMap(int quality,
                             uint32_t* bigram_histo,
                             size_t* num_literal_contexts,
                             const uint32_t** literal_context_map) {
  uint32_t monogram_histo[3] = { 0 };
  uint32_t two_prefix_histo[6] = { 0 };
  size_t total = 0;
  // Marginalize the 3x3 bigram histogram into a context-free view
  // (monogram_histo) and a two-context view (two_prefix_histo).
  for (size_t i = 0; i < 9; ++i) {
    total += bigram_histo[i];
    monogram_histo[i % 3] += bigram_histo[i];
    size_t j = i;
    if (j >= 6) {
      j -= 6;
    }
    two_prefix_histo[j] += bigram_histo[i];
  }
  size_t dummy;
  // entropy1: one context; entropy2: two contexts; entropy3: all three
  // prefix-class contexts.
  double entropy1 = ShannonEntropy(monogram_histo, 3, &dummy);
  double entropy2 = (ShannonEntropy(two_prefix_histo, 3, &dummy) +
                     ShannonEntropy(two_prefix_histo + 3, 3, &dummy));
  double entropy3 = 0;
  for (size_t k = 0; k < 3; ++k) {
    entropy3 += ShannonEntropy(bigram_histo + 3 * k, 3, &dummy);
  }

  assert(total != 0);
  // Normalize entropies to bits per symbol.
  double scale = 1.0 / static_cast<double>(total);
  entropy1 *= scale;
  entropy2 *= scale;
  entropy3 *= scale;

  // Context map for the 3-context model (prefix classes 1 and 2 mapped to
  // distinct contexts).
  static const uint32_t kStaticContextMapContinuation[64] = {
    1, 1, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  };
  // Context map for the 2-context model.
  static const uint32_t kStaticContextMapSimpleUTF8[64] = {
    0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  };
  if (quality < 7) {
    // 3 context models is a bit slower, don't use it at lower qualities.
    // Inflating entropy3 guarantees the 3-context branch below loses.
    entropy3 = entropy1 * 10;
  }
  // If expected savings by symbol are less than 0.2 bits, skip the
  // context modeling -- in exchange for faster decoding speed.
  if (entropy1 - entropy2 < 0.2 &&
      entropy1 - entropy3 < 0.2) {
    *num_literal_contexts = 1;
  } else if (entropy2 - entropy3 < 0.02) {
    // 3 contexts barely beat 2: prefer the smaller model.
    *num_literal_contexts = 2;
    *literal_context_map = kStaticContextMapSimpleUTF8;
  } else {
    *num_literal_contexts = 3;
    *literal_context_map = kStaticContextMapContinuation;
  }
}
251
252 static void DecideOverLiteralContextModeling(
253 const uint8_t* input,
254 size_t start_pos,
255 size_t length,
256 size_t mask,
257 int quality,
258 ContextType* literal_context_mode,
259 size_t* num_literal_contexts,
260 const uint32_t** literal_context_map) {
261 if (quality < kMinQualityForContextModeling || length < 64) {
262 return;
263 }
264 // Gather bigram data of the UTF8 byte prefixes. To make the analysis of
265 // UTF8 data faster we only examine 64 byte long strides at every 4kB
266 // intervals.
267 const size_t end_pos = start_pos + length;
268 uint32_t bigram_prefix_histo[9] = { 0 };
269 for (; start_pos + 64 <= end_pos; start_pos += 4096) {
270 static const int lut[4] = { 0, 0, 1, 2 };
271 const size_t stride_end_pos = start_pos + 64;
272 int prev = lut[input[start_pos & mask] >> 6] * 3;
273 for (size_t pos = start_pos + 1; pos < stride_end_pos; ++pos) {
274 const uint8_t literal = input[pos & mask];
275 ++bigram_prefix_histo[prev + lut[literal >> 6]];
276 prev = lut[literal >> 6] * 3;
277 }
278 }
279 *literal_context_mode = CONTEXT_UTF8;
280 ChooseContextMap(quality, &bigram_prefix_histo[0], num_literal_contexts,
281 literal_context_map);
282 }
283
284 static bool ShouldCompress(const uint8_t* data,
285 const size_t mask,
286 const uint64_t last_flush_pos,
287 const size_t bytes,
288 const size_t num_literals,
289 const size_t num_commands) {
290 if (num_commands < (bytes >> 8) + 2) {
291 if (num_literals > 0.99 * static_cast<double>(bytes)) {
292 uint32_t literal_histo[256] = { 0 };
293 static const uint32_t kSampleRate = 13;
294 static const double kMinEntropy = 7.92;
295 const double bit_cost_threshold =
296 static_cast<double>(bytes) * kMinEntropy / kSampleRate;
297 size_t t = (bytes + kSampleRate - 1) / kSampleRate;
298 uint32_t pos = static_cast<uint32_t>(last_flush_pos);
299 for (size_t i = 0; i < t; i++) {
300 ++literal_histo[data[pos & mask]];
301 pos += kSampleRate;
302 }
303 if (BitsEntropy(literal_histo, 256) > bit_cost_threshold) {
304 return false;
305 }
306 }
307 }
308 return true;
309 }
310
// Writes one meta-block covering `bytes` input bytes starting at
// last_flush_pos into `storage` at bit position *storage_ix.
//
// Chooses among: an empty ISLAST+ISEMPTY block (bytes == 0), an
// uncompressed block (when ShouldCompress says no, or the compressed form
// ends up larger than the input), the fast/trivial encodings (low
// qualities), or the full block-split encoding. When falling back to an
// uncompressed block, the distance cache is rolled back from
// saved_dist_cache because the compressed commands are discarded.
static void WriteMetaBlockInternal(const uint8_t* data,
                                   const size_t mask,
                                   const uint64_t last_flush_pos,
                                   const size_t bytes,
                                   const bool is_last,
                                   const int quality,
                                   const bool font_mode,
                                   const uint8_t prev_byte,
                                   const uint8_t prev_byte2,
                                   const size_t num_literals,
                                   const size_t num_commands,
                                   Command* commands,
                                   const int* saved_dist_cache,
                                   int* dist_cache,
                                   size_t* storage_ix,
                                   uint8_t* storage) {
  if (bytes == 0) {
    // Write the ISLAST and ISEMPTY bits, then pad to a byte boundary.
    WriteBits(2, 3, storage_ix, storage);
    *storage_ix = (*storage_ix + 7u) & ~7u;
    return;
  }

  if (!ShouldCompress(data, mask, last_flush_pos, bytes,
                      num_literals, num_commands)) {
    // Restore the distance cache, as its last update by
    // CreateBackwardReferences is now unused.
    memcpy(dist_cache, saved_dist_cache, 4 * sizeof(dist_cache[0]));
    StoreUncompressedMetaBlock(is_last, data,
                               WrapPosition(last_flush_pos), mask, bytes,
                               storage_ix, storage);
    return;
  }

  // Remember the output state so the size check at the end can roll back
  // and re-emit the block uncompressed.
  const uint8_t last_byte = storage[0];
  const uint8_t last_byte_bits = static_cast<uint8_t>(*storage_ix & 0xff);
  uint32_t num_direct_distance_codes = 0;
  uint32_t distance_postfix_bits = 0;
  if (quality > 9 && font_mode) {
    // Font mode at top qualities uses a non-default distance alphabet, so
    // the command distance prefixes must be recomputed.
    num_direct_distance_codes = 12;
    distance_postfix_bits = 1;
    RecomputeDistancePrefixes(commands,
                              num_commands,
                              num_direct_distance_codes,
                              distance_postfix_bits);
  }
  if (quality == 2) {
    StoreMetaBlockFast(data, WrapPosition(last_flush_pos),
                       bytes, mask, is_last,
                       commands, num_commands,
                       storage_ix, storage);
  } else if (quality < kMinQualityForBlockSplit) {
    StoreMetaBlockTrivial(data, WrapPosition(last_flush_pos),
                          bytes, mask, is_last,
                          commands, num_commands,
                          storage_ix, storage);
  } else {
    MetaBlockSplit mb;
    ContextType literal_context_mode = CONTEXT_UTF8;
    if (quality <= 9) {
      // Greedy block building, with sampled context-map selection.
      size_t num_literal_contexts = 1;
      const uint32_t* literal_context_map = NULL;
      DecideOverLiteralContextModeling(data, WrapPosition(last_flush_pos),
                                       bytes, mask,
                                       quality,
                                       &literal_context_mode,
                                       &num_literal_contexts,
                                       &literal_context_map);
      if (literal_context_map == NULL) {
        BuildMetaBlockGreedy(data, WrapPosition(last_flush_pos), mask,
                             commands, num_commands, &mb);
      } else {
        BuildMetaBlockGreedyWithContexts(data, WrapPosition(last_flush_pos),
                                         mask,
                                         prev_byte, prev_byte2,
                                         literal_context_mode,
                                         num_literal_contexts,
                                         literal_context_map,
                                         commands, num_commands,
                                         &mb);
      }
    } else {
      // Full block splitting; pick the context mode from the data itself.
      if (!IsMostlyUTF8(data, WrapPosition(last_flush_pos), mask, bytes,
                        kMinUTF8Ratio)) {
        literal_context_mode = CONTEXT_SIGNED;
      }
      BuildMetaBlock(data, WrapPosition(last_flush_pos), mask,
                     prev_byte, prev_byte2,
                     commands, num_commands,
                     literal_context_mode,
                     &mb);
    }
    if (quality >= kMinQualityForOptimizeHistograms) {
      OptimizeHistograms(num_direct_distance_codes,
                         distance_postfix_bits,
                         &mb);
    }
    StoreMetaBlock(data, WrapPosition(last_flush_pos), bytes, mask,
                   prev_byte, prev_byte2,
                   is_last,
                   num_direct_distance_codes,
                   distance_postfix_bits,
                   literal_context_mode,
                   commands, num_commands,
                   mb,
                   storage_ix, storage);
  }
  if (bytes + 4 < (*storage_ix >> 3)) {
    // Compressed output exceeded the input size (plus slack):
    // restore the distance cache and last byte, and emit uncompressed.
    memcpy(dist_cache, saved_dist_cache, 4 * sizeof(dist_cache[0]));
    storage[0] = last_byte;
    *storage_ix = last_byte_bits;
    StoreUncompressedMetaBlock(is_last, data,
                               WrapPosition(last_flush_pos), mask,
                               bytes, storage_ix, storage);
  }
}
428
// Constructs a compressor: sanitizes the parameters, allocates the ring
// buffer, writes the stream header into last_byte_, seeds the distance
// cache, and sets up the quality-dependent fast-path state and hashers.
BrotliCompressor::BrotliCompressor(BrotliParams params)
    : params_(params),
      hashers_(new Hashers()),
      input_pos_(0),
      num_commands_(0),
      num_literals_(0),
      last_insert_len_(0),
      last_flush_pos_(0),
      last_processed_pos_(0),
      prev_byte_(0),
      prev_byte2_(0),
      storage_size_(0),
      storage_(0),
      large_table_(NULL),
      cmd_code_numbits_(0),
      command_buf_(NULL),
      literal_buf_(NULL) {
  // Sanitize params: clamp quality and window bits, then derive the input
  // block size (lgblock) from the quality if it was not set explicitly.
  params_.quality = std::max(0, params_.quality);
  if (params_.lgwin < kMinWindowBits) {
    params_.lgwin = kMinWindowBits;
  } else if (params_.lgwin > kMaxWindowBits) {
    params_.lgwin = kMaxWindowBits;
  }
  if (params_.quality <= 1) {
    params_.lgblock = params_.lgwin;
  } else if (params_.quality < kMinQualityForBlockSplit) {
    params_.lgblock = 14;
  } else if (params_.lgblock == 0) {
    params_.lgblock = 16;
    if (params_.quality >= 9 && params_.lgwin > params_.lgblock) {
      params_.lgblock = std::min(18, params_.lgwin);
    }
  } else {
    params_.lgblock = std::min(kMaxInputBlockBits,
                               std::max(kMinInputBlockBits, params_.lgblock));
  }

  // Initialize input and literal cost ring buffers.
  // We allocate at least lgwin + 1 bits for the ring buffer so that the newly
  // added block fits there completely and we still get lgwin bits and at least
  // read_block_size_bits + 1 bits because the copy tail length needs to be
  // smaller than ringbuffer size.
  int ringbuffer_bits = std::max(params_.lgwin + 1, params_.lgblock + 1);
  ringbuffer_ = new RingBuffer(ringbuffer_bits, params_.lgblock);

  // Command buffer is grown lazily (with realloc) in WriteBrotliData.
  commands_ = 0;
  cmd_alloc_size_ = 0;

  // Initialize last byte with stream header.
  EncodeWindowBits(params_.lgwin, &last_byte_, &last_byte_bits_);

  // Initialize distance cache.
  dist_cache_[0] = 4;
  dist_cache_[1] = 11;
  dist_cache_[2] = 15;
  dist_cache_[3] = 16;
  // Save the state of the distance cache in case we need to restore it for
  // emitting an uncompressed block.
  memcpy(saved_dist_cache_, dist_cache_, sizeof(dist_cache_));

  // Quality 0 uses the pre-built command prefix codes; quality 1 needs the
  // two-pass fragment compressor's scratch buffers.
  if (params_.quality == 0) {
    InitCommandPrefixCodes(cmd_depths_, cmd_bits_,
                           cmd_code_, &cmd_code_numbits_);
  } else if (params_.quality == 1) {
    command_buf_ = new uint32_t[kCompressFragmentTwoPassBlockSize];
    literal_buf_ = new uint8_t[kCompressFragmentTwoPassBlockSize];
  }

  // Initialize hashers.
  hash_type_ = std::min(10, params_.quality);
  hashers_->Init(hash_type_);
}
502
BrotliCompressor::~BrotliCompressor(void) {
  delete[] storage_;
  // commands_ is allocated/grown with realloc(), so it is released with
  // free(); everything else was allocated with new/new[].
  free(commands_);
  delete ringbuffer_;
  delete hashers_;
  delete[] large_table_;
  delete[] command_buf_;
  delete[] literal_buf_;
}
512
// Appends input_size bytes of input_buffer to the ring buffer and advances
// input_pos_; zero-fills 7 trailing bytes on the first lap so that the
// hasher's 64-bit loads never read uninitialized memory.
void BrotliCompressor::CopyInputToRingBuffer(const size_t input_size,
                                             const uint8_t* input_buffer) {
  ringbuffer_->Write(input_buffer, input_size);
  input_pos_ += input_size;

  // TL;DR: If needed, initialize 7 more bytes in the ring buffer to make the
  // hashing not depend on uninitialized data. This makes compression
  // deterministic and it prevents uninitialized memory warnings in Valgrind.
  // Even without erasing, the output would be valid (but nondeterministic).
  //
  // Background information: The compressor stores short (at most 8 bytes)
  // substrings of the input already read in a hash table, and detects
  // repetitions by looking up such substrings in the hash table. If it
  // can find a substring, it checks whether the substring is really there
  // in the ring buffer (or it's just a hash collision). Should the hash
  // table become corrupt, this check makes sure that the output is
  // still valid, albeit the compression ratio would be bad.
  //
  // The compressor populates the hash table from the ring buffer as it's
  // reading new bytes from the input. However, at the last few indexes of
  // the ring buffer, there are not enough bytes to build full-length
  // substrings from. Since the hash table always contains full-length
  // substrings, we erase with dummy 0s here to make sure that those
  // substrings will contain 0s at the end instead of uninitialized
  // data.
  //
  // Please note that erasing is not necessary (because the
  // memory region is already initialized since the ring buffer
  // has a `tail' that holds a copy of the beginning,) so we
  // skip erasing if we have already gone around at least once in
  // the ring buffer.
  size_t pos = ringbuffer_->position();
  // Only clear during the first round of ringbuffer writes. On
  // subsequent rounds data in the ringbuffer would be affected.
  if (pos <= ringbuffer_->mask()) {
    // This is the first time when the ring buffer is being written.
    // We clear 7 bytes just after the bytes that have been copied from
    // the input buffer.
    //
    // The ringbuffer has a "tail" that holds a copy of the beginning,
    // but only once the ring buffer has been fully written once, i.e.,
    // pos <= mask. For the first time, we need to write values
    // in this tail (where index may be larger than mask), so that
    // we have exactly defined behavior and don't read un-initialized
    // memory. Due to performance reasons, hashing reads data using a
    // LOAD64, which can go 7 bytes beyond the bytes written in the
    // ringbuffer.
    memset(ringbuffer_->start() + pos, 0, 7);
  }
}
563
564 void BrotliCompressor::BrotliSetCustomDictionary(
565 const size_t size, const uint8_t* dict) {
566 CopyInputToRingBuffer(size, dict);
567 last_flush_pos_ = size;
568 last_processed_pos_ = size;
569 if (size > 0) {
570 prev_byte_ = dict[size - 1];
571 }
572 if (size > 1) {
573 prev_byte2_ = dict[size - 2];
574 }
575 hashers_->PrependCustomDictionary(hash_type_, params_.lgwin, size, dict);
576 }
577
// Processes the input buffered since last_processed_pos_: either defers it
// (to be merged with the next block), or emits one meta-block into internal
// storage. Returns false if more than one input block worth of data has
// been buffered. On success, *output/*out_size describe the emitted bytes
// (possibly zero), valid until the next call that reuses the storage.
bool BrotliCompressor::WriteBrotliData(const bool is_last,
                                       const bool force_flush,
                                       size_t* out_size,
                                       uint8_t** output) {
  const uint64_t delta = input_pos_ - last_processed_pos_;
  const uint8_t* data = ringbuffer_->start();
  const uint32_t mask = ringbuffer_->mask();

  if (delta > input_block_size()) {
    return false;
  }
  const uint32_t bytes = static_cast<uint32_t>(delta);

  // Qualities 0 and 1 bypass the command pipeline and use the dedicated
  // one-shot fragment compressors.
  if (params_.quality <= 1) {
    if (delta == 0 && !is_last) {
      // We have no new input data and we don't have to finish the stream, so
      // nothing to do.
      *out_size = 0;
      return true;
    }
    const size_t max_out_size = 2 * bytes + 500;
    uint8_t* storage = GetBrotliStorage(max_out_size);
    storage[0] = last_byte_;
    size_t storage_ix = last_byte_bits_;
    size_t table_size;
    int* table = GetHashTable(params_.quality, bytes, &table_size);
    if (params_.quality == 0) {
      BrotliCompressFragmentFast(
          &data[WrapPosition(last_processed_pos_) & mask],
          bytes, is_last,
          table, table_size,
          cmd_depths_, cmd_bits_,
          &cmd_code_numbits_, cmd_code_,
          &storage_ix, storage);
    } else {
      BrotliCompressFragmentTwoPass(
          &data[WrapPosition(last_processed_pos_) & mask],
          bytes, is_last,
          command_buf_, literal_buf_,
          table, table_size,
          &storage_ix, storage);
    }
    // Carry the trailing partial byte over to the next call.
    last_byte_ = storage[storage_ix >> 3];
    last_byte_bits_ = storage_ix & 7u;
    last_processed_pos_ = input_pos_;
    *output = &storage[0];
    *out_size = storage_ix >> 3;
    return true;
  }

  // Theoretical max number of commands is 1 per 2 bytes.
  size_t newsize = num_commands_ + bytes / 2 + 1;
  if (newsize > cmd_alloc_size_) {
    // Reserve a bit more memory to allow merging with a next block
    // without realloc: that would impact speed.
    newsize += (bytes / 4) + 16;
    cmd_alloc_size_ = newsize;
    commands_ =
        static_cast<Command*>(realloc(commands_, sizeof(Command) * newsize));
  }

  CreateBackwardReferences(bytes, WrapPosition(last_processed_pos_),
                           is_last, data, mask,
                           params_.quality,
                           params_.lgwin,
                           hashers_,
                           hash_type_,
                           dist_cache_,
                           &last_insert_len_,
                           &commands_[num_commands_],
                           &num_commands_,
                           &num_literals_);

  // Defer flushing while the accumulated literals/commands stay within the
  // per-meta-block limits and the next block still fits in the window.
  size_t max_length = std::min<size_t>(mask + 1, 1u << kMaxInputBlockBits);
  const size_t max_literals = max_length / 8;
  const size_t max_commands = max_length / 8;
  if (!is_last && !force_flush &&
      (params_.quality >= kMinQualityForBlockSplit ||
       (num_literals_ + num_commands_ < kMaxNumDelayedSymbols)) &&
      num_literals_ < max_literals &&
      num_commands_ < max_commands &&
      input_pos_ + input_block_size() <= last_flush_pos_ + max_length) {
    // Merge with next input block. Everything will happen later.
    last_processed_pos_ = input_pos_;
    *out_size = 0;
    return true;
  }

  // Create the last insert-only command.
  if (last_insert_len_ > 0) {
    brotli::Command cmd(last_insert_len_);
    commands_[num_commands_++] = cmd;
    num_literals_ += last_insert_len_;
    last_insert_len_ = 0;
  }

  if (!is_last && input_pos_ == last_flush_pos_) {
    // We have no new input data and we don't have to finish the stream, so
    // nothing to do.
    *out_size = 0;
    return true;
  }
  assert(input_pos_ >= last_flush_pos_);
  assert(input_pos_ > last_flush_pos_ || is_last);
  assert(input_pos_ - last_flush_pos_ <= 1u << 24);
  const uint32_t metablock_size =
      static_cast<uint32_t>(input_pos_ - last_flush_pos_);
  const size_t max_out_size = 2 * metablock_size + 500;
  uint8_t* storage = GetBrotliStorage(max_out_size);
  storage[0] = last_byte_;
  size_t storage_ix = last_byte_bits_;
  bool font_mode = params_.mode == BrotliParams::MODE_FONT;
  WriteMetaBlockInternal(
      data, mask, last_flush_pos_, metablock_size, is_last, params_.quality,
      font_mode, prev_byte_, prev_byte2_, num_literals_, num_commands_,
      commands_, saved_dist_cache_, dist_cache_, &storage_ix, storage);
  // Carry the trailing partial byte and refresh the per-meta-block state.
  last_byte_ = storage[storage_ix >> 3];
  last_byte_bits_ = storage_ix & 7u;
  last_flush_pos_ = input_pos_;
  last_processed_pos_ = input_pos_;
  if (last_flush_pos_ > 0) {
    prev_byte_ = data[(static_cast<uint32_t>(last_flush_pos_) - 1) & mask];
  }
  if (last_flush_pos_ > 1) {
    prev_byte2_ = data[(static_cast<uint32_t>(last_flush_pos_) - 2) & mask];
  }
  num_commands_ = 0;
  num_literals_ = 0;
  // Save the state of the distance cache in case we need to restore it for
  // emitting an uncompressed block.
  memcpy(saved_dist_cache_, dist_cache_, sizeof(dist_cache_));
  *output = &storage[0];
  *out_size = storage_ix >> 3;
  return true;
}
713
714 bool BrotliCompressor::WriteMetaBlock(const size_t input_size,
715 const uint8_t* input_buffer,
716 const bool is_last,
717 size_t* encoded_size,
718 uint8_t* encoded_buffer) {
719 CopyInputToRingBuffer(input_size, input_buffer);
720 size_t out_size = 0;
721 uint8_t* output;
722 if (!WriteBrotliData(is_last, /* force_flush = */ true, &out_size, &output) ||
723 out_size > *encoded_size) {
724 return false;
725 }
726 if (out_size > 0) {
727 memcpy(encoded_buffer, output, out_size);
728 }
729 *encoded_size = out_size;
730 return true;
731 }
732
// Emits a metadata meta-block: a small header describing the payload
// length, followed by the raw input bytes (which decoders skip). Fails if
// input_size exceeds the 2^24-byte format limit or the output buffer is
// smaller than input_size + 6 (worst-case header plus closing byte).
bool BrotliCompressor::WriteMetadata(const size_t input_size,
                                     const uint8_t* input_buffer,
                                     const bool is_last,
                                     size_t* encoded_size,
                                     uint8_t* encoded_buffer) {
  if (input_size > (1 << 24) || input_size + 6 > *encoded_size) {
    return false;
  }
  // Scratch for the header bits; uint64_t backing keeps it aligned for
  // WriteBits' word-sized stores.
  uint64_t hdr_buffer_data[2];
  uint8_t* hdr_buffer = reinterpret_cast<uint8_t*>(&hdr_buffer_data[0]);
  size_t storage_ix = last_byte_bits_;
  hdr_buffer[0] = last_byte_;
  // Header bits: ISLAST = 0, then the metadata marker (2 bits of 3),
  // then a reserved 0 bit.
  WriteBits(1, 0, &storage_ix, hdr_buffer);
  WriteBits(2, 3, &storage_ix, hdr_buffer);
  WriteBits(1, 0, &storage_ix, hdr_buffer);
  if (input_size == 0) {
    // Zero length bytes: an empty metadata block, header only.
    WriteBits(2, 0, &storage_ix, hdr_buffer);
    *encoded_size = (storage_ix + 7u) >> 3;
    memcpy(encoded_buffer, hdr_buffer, *encoded_size);
  } else {
    // Number of bytes (1-3) needed to encode (input_size - 1), then the
    // length itself, then the raw payload.
    uint32_t nbits = (input_size == 1) ? 0 : (Log2FloorNonZero(
        static_cast<uint32_t>(input_size) - 1) + 1);
    uint32_t nbytes = (nbits + 7) / 8;
    WriteBits(2, nbytes, &storage_ix, hdr_buffer);
    WriteBits(8 * nbytes, input_size - 1, &storage_ix, hdr_buffer);
    size_t hdr_size = (storage_ix + 7u) >> 3;
    memcpy(encoded_buffer, hdr_buffer, hdr_size);
    memcpy(&encoded_buffer[hdr_size], input_buffer, input_size);
    *encoded_size = hdr_size + input_size;
  }
  if (is_last) {
    // Append an ISLAST+ISEMPTY byte to terminate the stream.
    encoded_buffer[(*encoded_size)++] = 3;
  }
  // Metadata blocks are byte-aligned, so the carried-over byte is empty.
  last_byte_ = 0;
  last_byte_bits_ = 0;
  return true;
}
770
// Terminates the stream by flushing an empty final (ISLAST) meta-block.
bool BrotliCompressor::FinishStream(
    size_t* encoded_size, uint8_t* encoded_buffer) {
  return WriteMetaBlock(0, NULL, true, encoded_size, encoded_buffer);
}
775
776 static int BrotliCompressBufferQuality10(int lgwin,
777 size_t input_size,
778 const uint8_t* input_buffer,
779 size_t* encoded_size,
780 uint8_t* encoded_buffer) {
781 const size_t mask = std::numeric_limits<size_t>::max() >> 1;
782 assert(input_size <= mask + 1);
783 const size_t max_backward_limit = (1 << lgwin) - 16;
784 int dist_cache[4] = { 4, 11, 15, 16 };
785 int saved_dist_cache[4] = { 4, 11, 15, 16 };
786 int ok = 1;
787 const size_t max_out_size = *encoded_size;
788 size_t total_out_size = 0;
789 uint8_t last_byte;
790 uint8_t last_byte_bits;
791 EncodeWindowBits(lgwin, &last_byte, &last_byte_bits);
792
793 Hashers::H10* hasher = new Hashers::H10;
794 const size_t hasher_eff_size = std::min(input_size, max_backward_limit + 16);
795 hasher->Init(lgwin, 0, hasher_eff_size, true);
796
797 const int lgblock = std::min(18, lgwin);
798 const int lgmetablock = std::min(24, lgwin + 1);
799 const size_t max_block_size = static_cast<size_t>(1) << lgblock;
800 const size_t max_metablock_size = static_cast<size_t>(1) << lgmetablock;
801 const size_t max_literals_per_metablock = max_metablock_size / 8;
802 const size_t max_commands_per_metablock = max_metablock_size / 8;
803 size_t metablock_start = 0;
804 uint8_t prev_byte = 0;
805 uint8_t prev_byte2 = 0;
806 while (ok && metablock_start < input_size) {
807 const size_t metablock_end =
808 std::min(input_size, metablock_start + max_metablock_size);
809 const size_t expected_num_commands =
810 (metablock_end - metablock_start) / 12 + 16;
811 Command* commands = 0;
812 size_t num_commands = 0;
813 size_t last_insert_len = 0;
814 size_t num_literals = 0;
815 size_t metablock_size = 0;
816 size_t cmd_alloc_size = 0;
817
818 for (size_t block_start = metablock_start; block_start < metablock_end; ) {
819 size_t block_size = std::min(metablock_end - block_start, max_block_size);
820 ZopfliNode* nodes = new ZopfliNode[block_size + 1];
821 std::vector<uint32_t> path;
822 hasher->StitchToPreviousBlock(block_size, block_start,
823 input_buffer, mask);
824 ZopfliComputeShortestPath(block_size, block_start, input_buffer, mask,
825 max_backward_limit, dist_cache,
826 hasher, nodes, &path);
827 // We allocate a command buffer in the first iteration of this loop that
828 // will be likely big enough for the whole metablock, so that for most
829 // inputs we will not have to reallocate in later iterations. We do the
830 // allocation here and not before the loop, because if the input is small,
831 // this will be allocated after the zopfli cost model is freed, so this
832 // will not increase peak memory usage.
833 // TODO: If the first allocation is too small, increase command
834 // buffer size exponentially.
835 size_t new_cmd_alloc_size = std::max(expected_num_commands,
836 num_commands + path.size() + 1);
837 if (cmd_alloc_size != new_cmd_alloc_size) {
838 cmd_alloc_size = new_cmd_alloc_size;
839 commands = static_cast<Command*>(
840 realloc(commands, cmd_alloc_size * sizeof(Command)));
841 }
842 ZopfliCreateCommands(block_size, block_start, max_backward_limit, path,
843 &nodes[0], dist_cache, &last_insert_len,
844 &commands[num_commands], &num_literals);
845 num_commands += path.size();
846 block_start += block_size;
847 metablock_size += block_size;
848 delete[] nodes;
849 if (num_literals > max_literals_per_metablock ||
850 num_commands > max_commands_per_metablock) {
851 break;
852 }
853 }
854
855 if (last_insert_len > 0) {
856 Command cmd(last_insert_len);
857 commands[num_commands++] = cmd;
858 num_literals += last_insert_len;
859 }
860
861 const bool is_last = (metablock_start + metablock_size == input_size);
862 uint8_t* storage = NULL;
863 size_t storage_ix = last_byte_bits;
864
865 if (metablock_size == 0) {
866 // Write the ISLAST and ISEMPTY bits.
867 storage = new uint8_t[16];
868 storage[0] = last_byte;
869 WriteBits(2, 3, &storage_ix, storage);
870 storage_ix = (storage_ix + 7u) & ~7u;
871 } else if (!ShouldCompress(input_buffer, mask, metablock_start,
872 metablock_size, num_literals, num_commands)) {
873 // Restore the distance cache, as its last update by
874 // CreateBackwardReferences is now unused.
875 memcpy(dist_cache, saved_dist_cache, 4 * sizeof(dist_cache[0]));
876 storage = new uint8_t[metablock_size + 16];
877 storage[0] = last_byte;
878 StoreUncompressedMetaBlock(is_last, input_buffer,
879 metablock_start, mask, metablock_size,
880 &storage_ix, storage);
881 } else {
882 uint32_t num_direct_distance_codes = 0;
883 uint32_t distance_postfix_bits = 0;
884 MetaBlockSplit mb;
885 ContextType literal_context_mode = CONTEXT_UTF8;
886 if (!IsMostlyUTF8(
887 input_buffer, metablock_start, mask, metablock_size,
888 kMinUTF8Ratio)) {
889 literal_context_mode = CONTEXT_SIGNED;
890 }
891 BuildMetaBlock(input_buffer, metablock_start, mask,
892 prev_byte, prev_byte2,
893 commands, num_commands,
894 literal_context_mode,
895 &mb);
896 OptimizeHistograms(num_direct_distance_codes,
897 distance_postfix_bits,
898 &mb);
899 const size_t max_out_metablock_size = 2 * metablock_size + 500;
900 storage = new uint8_t[max_out_metablock_size];
901 storage[0] = last_byte;
902 StoreMetaBlock(input_buffer, metablock_start, metablock_size, mask,
903 prev_byte, prev_byte2,
904 is_last,
905 num_direct_distance_codes,
906 distance_postfix_bits,
907 literal_context_mode,
908 commands, num_commands,
909 mb,
910 &storage_ix, storage);
911 if (metablock_size + 4 < (storage_ix >> 3)) {
912 // Restore the distance cache and last byte.
913 memcpy(dist_cache, saved_dist_cache, 4 * sizeof(dist_cache[0]));
914 storage[0] = last_byte;
915 storage_ix = last_byte_bits;
916 StoreUncompressedMetaBlock(is_last, input_buffer,
917 metablock_start, mask,
918 metablock_size, &storage_ix, storage);
919 }
920 }
921 last_byte = storage[storage_ix >> 3];
922 last_byte_bits = storage_ix & 7u;
923 metablock_start += metablock_size;
924 prev_byte = input_buffer[metablock_start - 1];
925 prev_byte2 = input_buffer[metablock_start - 2];
926 // Save the state of the distance cache in case we need to restore it for
927 // emitting an uncompressed block.
928 memcpy(saved_dist_cache, dist_cache, 4 * sizeof(dist_cache[0]));
929
930 const size_t out_size = storage_ix >> 3;
931 total_out_size += out_size;
932 if (total_out_size <= max_out_size) {
933 memcpy(encoded_buffer, storage, out_size);
934 encoded_buffer += out_size;
935 } else {
936 ok = 0;
937 }
938 delete[] storage;
939 free(commands);
940 }
941
942 *encoded_size = total_out_size;
943 delete hasher;
944 return ok;
945 }
946
947 int BrotliCompressBuffer(BrotliParams params,
948 size_t input_size,
949 const uint8_t* input_buffer,
950 size_t* encoded_size,
951 uint8_t* encoded_buffer) {
952 if (*encoded_size == 0) {
953 // Output buffer needs at least one byte.
954 return 0;
955 }
956 if (input_size == 0) {
957 // Handle the special case of empty input.
958 *encoded_size = 1;
959 *encoded_buffer = 6;
960 return 1;
961 }
962 if (params.quality == 10) {
963 // TODO: Implement this direct path for all quality levels.
964 const int lgwin = std::min(24, std::max(16, params.lgwin));
965 return BrotliCompressBufferQuality10(lgwin, input_size, input_buffer,
966 encoded_size, encoded_buffer);
967 }
968 BrotliMemIn in(input_buffer, input_size);
969 BrotliMemOut out(encoded_buffer, *encoded_size);
970 if (!BrotliCompress(params, &in, &out)) {
971 return 0;
972 }
973 *encoded_size = out.position();
974 return 1;
975 }
976
977 static bool BrotliInIsFinished(BrotliIn* r) {
978 size_t read_bytes;
979 return r->Read(0, &read_bytes) == NULL;
980 }
981
982 static const uint8_t* BrotliInReadAndCheckEnd(const size_t block_size,
983 BrotliIn* r,
984 size_t* bytes_read,
985 bool* is_last) {
986 *bytes_read = 0;
987 const uint8_t* data = reinterpret_cast<const uint8_t*>(
988 r->Read(block_size, bytes_read));
989 assert((data == NULL) == (*bytes_read == 0));
990 *is_last = BrotliInIsFinished(r);
991 return data;
992 }
993
994 static bool CopyOneBlockToRingBuffer(BrotliIn* r,
995 BrotliCompressor* compressor,
996 size_t* bytes_read,
997 bool* is_last) {
998 const size_t block_size = compressor->input_block_size();
999 const uint8_t* data = BrotliInReadAndCheckEnd(block_size, r,
1000 bytes_read, is_last);
1001 if (data == NULL) {
1002 return *is_last;
1003 }
1004 compressor->CopyInputToRingBuffer(*bytes_read, data);
1005
1006 // Read more bytes until block_size is filled or an EOF (data == NULL) is
1007 // received. This is useful to get deterministic compressed output for the
1008 // same input no matter how r->Read splits the input to chunks.
1009 for (size_t remaining = block_size - *bytes_read; remaining > 0; ) {
1010 size_t more_bytes_read = 0;
1011 data = BrotliInReadAndCheckEnd(remaining, r, &more_bytes_read, is_last);
1012 if (data == NULL) {
1013 return *is_last;
1014 }
1015 compressor->CopyInputToRingBuffer(more_bytes_read, data);
1016 *bytes_read += more_bytes_read;
1017 remaining -= more_bytes_read;
1018 }
1019 return true;
1020 }
1021
1022
1023 int BrotliCompress(BrotliParams params, BrotliIn* in, BrotliOut* out) {
1024 return BrotliCompressWithCustomDictionary(0, 0, params, in, out);
1025 }
1026
1027 // Reads the provided input in 'block_size' blocks. Only the last read can be
1028 // smaller than 'block_size'.
1029 class BrotliBlockReader {
1030 public:
1031 explicit BrotliBlockReader(size_t block_size)
1032 : block_size_(block_size), buf_(NULL) {}
1033 ~BrotliBlockReader(void) { delete[] buf_; }
1034
1035 const uint8_t* Read(BrotliIn* in, size_t* bytes_read, bool* is_last) {
1036 *bytes_read = 0;
1037 const uint8_t* data = BrotliInReadAndCheckEnd(block_size_, in,
1038 bytes_read, is_last);
1039 if (data == NULL || *bytes_read == block_size_ || *is_last) {
1040 // If we could get the whole block in one read, or it is the last block,
1041 // we just return the pointer to the data without copying.
1042 return data;
1043 }
1044 // If the data comes in smaller chunks, we need to copy it into an internal
1045 // buffer until we get a whole block or reach the last chunk.
1046 if (buf_ == NULL) {
1047 buf_ = new uint8_t[block_size_];
1048 }
1049 memcpy(buf_, data, *bytes_read);
1050 do {
1051 size_t cur_bytes_read = 0;
1052 data = BrotliInReadAndCheckEnd(block_size_ - *bytes_read, in,
1053 &cur_bytes_read, is_last);
1054 if (data == NULL) {
1055 return *is_last ? buf_ : NULL;
1056 }
1057 memcpy(&buf_[*bytes_read], data, cur_bytes_read);
1058 *bytes_read += cur_bytes_read;
1059 } while (*bytes_read < block_size_ && !*is_last);
1060 return buf_;
1061 }
1062
1063 private:
1064 const size_t block_size_;
1065 uint8_t* buf_;
1066 };
1067
// Compresses the stream |in| to |out|. For quality <= 1, the input is read in
// window-sized blocks and fed directly to the one-pass / two-pass fragment
// compressors; for higher qualities the input is streamed through a
// BrotliCompressor. Returns nonzero/true on success, 0/false on failure.
// NOTE(review): the custom dictionary (|dictsize|, |dict|) is only applied on
// the quality > 1 path below — confirm that ignoring it for quality <= 1 is
// intended.
int BrotliCompressWithCustomDictionary(size_t dictsize, const uint8_t* dict,
                                       BrotliParams params,
                                       BrotliIn* in, BrotliOut* out) {
  if (params.quality <= 1) {
    // Fast path: compress each input block independently with the fragment
    // compressors, stitching the blocks together at the bit level.
    const int quality = std::max(0, params.quality);
    const int lgwin = std::min(kMaxWindowBits,
                               std::max(kMinWindowBits, params.lgwin));
    uint8_t* storage = NULL;
    int* table = NULL;
    uint32_t* command_buf = NULL;
    uint8_t* literal_buf = NULL;
    // Command prefix-code state carried across blocks in quality-0 mode.
    uint8_t cmd_depths[128];
    uint16_t cmd_bits[128];
    uint8_t cmd_code[512];
    size_t cmd_code_numbits;
    if (quality == 0) {
      InitCommandPrefixCodes(cmd_depths, cmd_bits, cmd_code, &cmd_code_numbits);
    }
    // last_byte/last_byte_bits hold the partial final byte of the previous
    // block's output, so the next block continues mid-byte.
    uint8_t last_byte;
    uint8_t last_byte_bits;
    EncodeWindowBits(lgwin, &last_byte, &last_byte_bits);
    BrotliBlockReader r(1u << lgwin);
    int ok = 1;
    bool is_last = false;
    while (ok && !is_last) {
      // Read next block of input.
      size_t bytes;
      const uint8_t* data = r.Read(in, &bytes, &is_last);
      if (data == NULL) {
        if (!is_last) {
          // Read failure before end-of-input.
          ok = 0;
          break;
        }
        // Clean EOF with no data: fall through with bytes == 0 so the
        // fragment compressor still emits the final (empty, ISLAST) block.
        assert(bytes == 0);
      }
      // Set up output storage. The buffers below are allocated once, on the
      // first iteration, and reused: only the last block can be smaller than
      // the first, so the first block's sizes are the maximum needed.
      const size_t max_out_size = 2 * bytes + 500;
      if (storage == NULL) {
        storage = new uint8_t[max_out_size];
      }
      // Seed the output with the previous block's partial byte.
      storage[0] = last_byte;
      size_t storage_ix = last_byte_bits;
      // Set up hash table.
      size_t htsize = HashTableSize(MaxHashTableSize(quality), bytes);
      if (table == NULL) {
        table = new int[htsize];
      }
      memset(table, 0, htsize * sizeof(table[0]));
      // Set up command and literal buffers for two pass mode.
      if (quality == 1 && command_buf == NULL) {
        size_t buf_size = std::min(bytes, kCompressFragmentTwoPassBlockSize);
        command_buf = new uint32_t[buf_size];
        literal_buf = new uint8_t[buf_size];
      }
      // Do the actual compression.
      if (quality == 0) {
        BrotliCompressFragmentFast(data, bytes, is_last, table, htsize,
                                   cmd_depths, cmd_bits,
                                   &cmd_code_numbits, cmd_code,
                                   &storage_ix, storage);
      } else {
        BrotliCompressFragmentTwoPass(data, bytes, is_last,
                                      command_buf, literal_buf,
                                      table, htsize,
                                      &storage_ix, storage);
      }
      // Save last bytes to stitch it together with the next output block.
      last_byte = storage[storage_ix >> 3];
      last_byte_bits = storage_ix & 7u;
      // Write output block (only the complete bytes; the trailing partial
      // byte was saved above and will be re-emitted with the next block).
      size_t out_bytes = storage_ix >> 3;
      if (out_bytes > 0 && !out->Write(storage, out_bytes)) {
        ok = 0;
        break;
      }
    }
    delete[] storage;
    delete[] table;
    delete[] command_buf;
    delete[] literal_buf;
    return ok;
  }

  // General path: stream block-sized chunks through a BrotliCompressor,
  // writing whatever output it produces after each chunk.
  size_t in_bytes = 0;
  size_t out_bytes = 0;
  uint8_t* output;
  bool final_block = false;
  BrotliCompressor compressor(params);
  if (dictsize != 0) compressor.BrotliSetCustomDictionary(dictsize, dict);
  while (!final_block) {
    if (!CopyOneBlockToRingBuffer(in, &compressor, &in_bytes, &final_block)) {
      return false;
    }
    out_bytes = 0;
    if (!compressor.WriteBrotliData(final_block,
                                    /* force_flush = */ false,
                                    &out_bytes, &output)) {
      return false;
    }
    if (out_bytes > 0 && !out->Write(output, out_bytes)) {
      return false;
    }
  }
  return true;
}
1173
1174
1175 } // namespace brotli
OLDNEW
« no previous file with comments | « third_party/brotli/enc/encode.h ('k') | third_party/brotli/enc/encode_parallel.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698