OLD | NEW |
1 /* Copyright 2015 Google Inc. All Rights Reserved. | 1 /* Copyright 2015 Google Inc. All Rights Reserved. |
2 | 2 |
3 Distributed under MIT license. | 3 Distributed under MIT license. |
4 See file LICENSE for detail or copy at https://opensource.org/licenses/MIT | 4 See file LICENSE for detail or copy at https://opensource.org/licenses/MIT |
5 */ | 5 */ |
6 | 6 |
7 // Function for fast encoding of an input fragment, independently from the input | 7 /* Function for fast encoding of an input fragment, independently of the input
8 // history. This function uses two-pass processing: in the first pass we save | 8 history. This function uses two-pass processing: in the first pass we save |
9 // the found backward matches and literal bytes into a buffer, and in the | 9 the found backward matches and literal bytes into a buffer, and in the |
10 // second pass we emit them into the bit stream using prefix codes built based | 10 second pass we emit them into the bit stream using prefix codes built based |
11 // on the actual command and literal byte histograms. | 11 on the actual command and literal byte histograms. */ |
12 | 12 |
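
Schematically, the two passes reduce to the shape below. This is a minimal standalone sketch, not brotli API: TwoPassBuffer and HistogramPass are illustrative names, and the command packing (code | extra << 8) mirrors the Emit* helpers later in this file. Pass one fills the buffers, histograms are taken over them, and only then are prefix codes built and bits written.

    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative scratch state shared between the two passes. */
    typedef struct {
      const uint32_t* commands;  /* pass-1 output: (code | extra << 8) */
      size_t num_commands;
      const uint8_t* literals;   /* pass-1 output: raw literal bytes */
      size_t num_literals;
      uint32_t cmd_histo[128];   /* command codes in this file are < 128 */
      uint32_t lit_histo[256];
    } TwoPassBuffer;

    /* Between the passes: count symbol frequencies so that the prefix
       codes built for pass two match this fragment exactly. */
    static void HistogramPass(TwoPassBuffer* b) {
      size_t i;
      for (i = 0; i < b->num_commands; ++i) {
        ++b->cmd_histo[b->commands[i] & 0x7f];
      }
      for (i = 0; i < b->num_literals; ++i) {
        ++b->lit_histo[b->literals[i]];
      }
    }

Pass two then corresponds to StoreCommands() below: build the codes from these histograms and replay the buffered commands and literals.
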
13 #include "./compress_fragment_two_pass.h" | 13 #include "./compress_fragment_two_pass.h" |
14 | 14 |
15 #include <algorithm> | 15 #include <string.h> /* memcmp, memcpy, memset */ |
16 | 16 |
| 17 #include "../common/constants.h" |
| 18 #include <brotli/types.h> |
| 19 #include "./bit_cost.h" |
17 #include "./brotli_bit_stream.h" | 20 #include "./brotli_bit_stream.h" |
18 #include "./bit_cost.h" | |
19 #include "./entropy_encode.h" | 21 #include "./entropy_encode.h" |
20 #include "./fast_log.h" | 22 #include "./fast_log.h" |
21 #include "./find_match_length.h" | 23 #include "./find_match_length.h" |
| 24 #include "./memory.h" |
22 #include "./port.h" | 25 #include "./port.h" |
23 #include "./types.h" | |
24 #include "./write_bits.h" | 26 #include "./write_bits.h" |
25 | 27 |
26 namespace brotli { | |
27 | 28 |
28 // kHashMul32 multiplier has these properties: | 29 #if defined(__cplusplus) || defined(c_plusplus) |
29 // * The multiplier must be odd. Otherwise we may lose the highest bit. | 30 extern "C" { |
30 // * No long streaks of 1s or 0s. | 31 #endif |
31 // * There is no effort to ensure that it is a prime, the oddity is enough | 32 |
32 // for this use. | 33 /* Same as MaxBackwardLimit(18) */ |
33 // * The number has been tuned heuristically against compression benchmarks. | 34 #define MAX_DISTANCE ((1 << 18) - BROTLI_WINDOW_GAP) |
| 35 |
| 36 /* kHashMul32 multiplier has these properties: |
| 37 * The multiplier must be odd. Otherwise we may lose the highest bit. |
| 38 * No long streaks of ones or zeros. |
| 39 * There is no effort to ensure that it is a prime, the oddity is enough |
| 40 for this use. |
| 41 * The number has been tuned heuristically against compression benchmarks. */ |
34 static const uint32_t kHashMul32 = 0x1e35a7bd; | 42 static const uint32_t kHashMul32 = 0x1e35a7bd; |
35 | 43 |
36 static inline uint32_t Hash(const uint8_t* p, size_t shift) { | 44 static BROTLI_INLINE uint32_t Hash(const uint8_t* p, size_t shift) { |
37 const uint64_t h = (BROTLI_UNALIGNED_LOAD64(p) << 16) * kHashMul32; | 45 const uint64_t h = (BROTLI_UNALIGNED_LOAD64(p) << 16) * kHashMul32; |
38 return static_cast<uint32_t>(h >> shift); | 46 return (uint32_t)(h >> shift); |
39 } | 47 } |
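
A standalone illustration of the hash (self-contained; Load64LE is a portable stand-in for the little-endian BROTLI_UNALIGNED_LOAD64): the << 16 shifts the two highest loaded bytes out of the 64-bit word, so exactly the six bytes that IsMatch() below verifies take part in the hash, and >> shift keeps only the top 64 - shift product bits, where a multiplicative hash mixes best.

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    static const uint32_t kMul = 0x1e35a7bd;  /* same constant as kHashMul32 */

    /* Portable little-endian 64-bit load of p[0..7]. */
    static uint64_t Load64LE(const uint8_t* p) {
      uint64_t v = 0;
      int i;
      for (i = 7; i >= 0; --i) v = (v << 8) | p[i];
      return v;
    }

    static uint32_t Hash6(const uint8_t* p, size_t shift) {
      const uint64_t h = (Load64LE(p) << 16) * kMul;  /* p[6], p[7] drop out */
      return (uint32_t)(h >> shift);
    }

    int main(void) {
      const uint8_t a[8] = { 'b', 'r', 'o', 't', 'l', 'i', 0x00, 0x00 };
      const uint8_t b[8] = { 'b', 'r', 'o', 't', 'l', 'i', 0xAA, 0x55 };
      /* Equal first six bytes => same bucket of a 2^14-entry table. */
      printf("%u == %u\n", Hash6(a, 64 - 14), Hash6(b, 64 - 14));
      return 0;
    }
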
40 | 48 |
41 static inline uint32_t HashBytesAtOffset(uint64_t v, int offset, size_t shift) { | 49 static BROTLI_INLINE uint32_t HashBytesAtOffset( |
| 50 uint64_t v, int offset, size_t shift) { |
42 assert(offset >= 0); | 51 assert(offset >= 0); |
43 assert(offset <= 2); | 52 assert(offset <= 2); |
44 const uint64_t h = ((v >> (8 * offset)) << 16) * kHashMul32; | 53 { |
45 return static_cast<uint32_t>(h >> shift); | 54 const uint64_t h = ((v >> (8 * offset)) << 16) * kHashMul32; |
| 55 return (uint32_t)(h >> shift); |
| 56 } |
46 } | 57 } |
47 | 58 |
48 static inline int IsMatch(const uint8_t* p1, const uint8_t* p2) { | 59 static BROTLI_INLINE BROTLI_BOOL IsMatch(const uint8_t* p1, const uint8_t* p2) { |
49 return (BROTLI_UNALIGNED_LOAD32(p1) == BROTLI_UNALIGNED_LOAD32(p2) && | 60 return TO_BROTLI_BOOL( |
50 p1[4] == p2[4] && | 61 BROTLI_UNALIGNED_LOAD32(p1) == BROTLI_UNALIGNED_LOAD32(p2) && |
51 p1[5] == p2[5]); | 62 p1[4] == p2[4] && |
| 63 p1[5] == p2[5]); |
52 } | 64 } |
53 | 65 |
54 // Builds a command and distance prefix code (each 64 symbols) into "depth" and | 66 /* Builds a command and distance prefix code (each 64 symbols) into "depth" and |
55 // "bits" based on "histogram" and stores it into the bit stream. | 67 "bits" based on "histogram" and stores it into the bit stream. */ |
56 static void BuildAndStoreCommandPrefixCode( | 68 static void BuildAndStoreCommandPrefixCode( |
57 const uint32_t histogram[128], | 69 const uint32_t histogram[128], |
58 uint8_t depth[128], uint16_t bits[128], | 70 uint8_t depth[128], uint16_t bits[128], |
59 size_t* storage_ix, uint8_t* storage) { | 71 size_t* storage_ix, uint8_t* storage) { |
60 // Tree size for building a tree over 64 symbols is 2 * 64 + 1. | 72 /* Tree size for building a tree over 64 symbols is 2 * 64 + 1. */ |
61 static const size_t kTreeSize = 129; | 73 HuffmanTree tree[129]; |
62 HuffmanTree tree[kTreeSize]; | 74 uint8_t cmd_depth[BROTLI_NUM_COMMAND_SYMBOLS] = { 0 }; |
63 CreateHuffmanTree(histogram, 64, 15, tree, depth); | |
64 CreateHuffmanTree(&histogram[64], 64, 14, tree, &depth[64]); | |
65 // We have to jump through a few hoopes here in order to compute | |
66 // the command bits because the symbols are in a different order than in | |
67 // the full alphabet. This looks complicated, but having the symbols | |
68 // in this order in the command bits saves a few branches in the Emit* | |
69 // functions. | |
70 uint8_t cmd_depth[64]; | |
71 uint16_t cmd_bits[64]; | 75 uint16_t cmd_bits[64]; |
| 76 BrotliCreateHuffmanTree(histogram, 64, 15, tree, depth); |
| 77 BrotliCreateHuffmanTree(&histogram[64], 64, 14, tree, &depth[64]); |
| 78 /* We have to jump through a few hoops here in order to compute |
| 79 the command bits because the symbols are in a different order than in |
| 80 the full alphabet. This looks complicated, but having the symbols |
| 81 in this order in the command bits saves a few branches in the Emit* |
| 82 functions. */ |
72 memcpy(cmd_depth, depth + 24, 24); | 83 memcpy(cmd_depth, depth + 24, 24); |
73 memcpy(cmd_depth + 24, depth, 8); | 84 memcpy(cmd_depth + 24, depth, 8); |
74 memcpy(cmd_depth + 32, depth + 48, 8); | 85 memcpy(cmd_depth + 32, depth + 48, 8); |
75 memcpy(cmd_depth + 40, depth + 8, 8); | 86 memcpy(cmd_depth + 40, depth + 8, 8); |
76 memcpy(cmd_depth + 48, depth + 56, 8); | 87 memcpy(cmd_depth + 48, depth + 56, 8); |
77 memcpy(cmd_depth + 56, depth + 16, 8); | 88 memcpy(cmd_depth + 56, depth + 16, 8); |
78 ConvertBitDepthsToSymbols(cmd_depth, 64, cmd_bits); | 89 BrotliConvertBitDepthsToSymbols(cmd_depth, 64, cmd_bits); |
79 memcpy(bits, cmd_bits + 24, 16); | 90 memcpy(bits, cmd_bits + 24, 16); |
80 memcpy(bits + 8, cmd_bits + 40, 16); | 91 memcpy(bits + 8, cmd_bits + 40, 16); |
81 memcpy(bits + 16, cmd_bits + 56, 16); | 92 memcpy(bits + 16, cmd_bits + 56, 16); |
82 memcpy(bits + 24, cmd_bits, 48); | 93 memcpy(bits + 24, cmd_bits, 48); |
83 memcpy(bits + 48, cmd_bits + 32, 16); | 94 memcpy(bits + 48, cmd_bits + 32, 16); |
84 memcpy(bits + 56, cmd_bits + 48, 16); | 95 memcpy(bits + 56, cmd_bits + 48, 16); |
85 ConvertBitDepthsToSymbols(&depth[64], 64, &bits[64]); | 96 BrotliConvertBitDepthsToSymbols(&depth[64], 64, &bits[64]); |
86 { | 97 { |
87 // Create the bit length array for the full command alphabet. | 98 /* Create the bit length array for the full command alphabet. */ |
88 uint8_t cmd_depth[704] = { 0 }; | 99 size_t i; |
 | 100 memset(cmd_depth, 0, 64); /* only the first 64 values were used */
89 memcpy(cmd_depth, depth + 24, 8); | 101 memcpy(cmd_depth, depth + 24, 8); |
90 memcpy(cmd_depth + 64, depth + 32, 8); | 102 memcpy(cmd_depth + 64, depth + 32, 8); |
91 memcpy(cmd_depth + 128, depth + 40, 8); | 103 memcpy(cmd_depth + 128, depth + 40, 8); |
92 memcpy(cmd_depth + 192, depth + 48, 8); | 104 memcpy(cmd_depth + 192, depth + 48, 8); |
93 memcpy(cmd_depth + 384, depth + 56, 8); | 105 memcpy(cmd_depth + 384, depth + 56, 8); |
94 for (size_t i = 0; i < 8; ++i) { | 106 for (i = 0; i < 8; ++i) { |
95 cmd_depth[128 + 8 * i] = depth[i]; | 107 cmd_depth[128 + 8 * i] = depth[i]; |
96 cmd_depth[256 + 8 * i] = depth[8 + i]; | 108 cmd_depth[256 + 8 * i] = depth[8 + i]; |
97 cmd_depth[448 + 8 * i] = depth[16 + i]; | 109 cmd_depth[448 + 8 * i] = depth[16 + i]; |
98 } | 110 } |
99 StoreHuffmanTree(cmd_depth, 704, tree, storage_ix, storage); | 111 BrotliStoreHuffmanTree( |
| 112 cmd_depth, BROTLI_NUM_COMMAND_SYMBOLS, tree, storage_ix, storage); |
100 } | 113 } |
101 StoreHuffmanTree(&depth[64], 64, tree, storage_ix, storage); | 114 BrotliStoreHuffmanTree(&depth[64], 64, tree, storage_ix, storage); |
102 } | 115 } |
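
The shuffling above is easier to follow from the array shapes: depth[] and bits[] keep the 64 in-use command symbols in the branch-friendly order that the Emit* helpers index directly, while the 704-entry cmd_depth scratch array (BROTLI_NUM_COMMAND_SYMBOLS) lays the same code lengths out at those symbols' positions in the full command alphabet, which is the order BrotliStoreHuffmanTree serializes into the stream.
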
103 | 116 |
104 inline void EmitInsertLen(uint32_t insertlen, uint32_t** commands) { | 117 static BROTLI_INLINE void EmitInsertLen( |
| 118 uint32_t insertlen, uint32_t** commands) { |
105 if (insertlen < 6) { | 119 if (insertlen < 6) { |
106 **commands = insertlen; | 120 **commands = insertlen; |
107 } else if (insertlen < 130) { | 121 } else if (insertlen < 130) { |
108 insertlen -= 2; | 122 const uint32_t tail = insertlen - 2; |
109 const uint32_t nbits = Log2FloorNonZero(insertlen) - 1u; | 123 const uint32_t nbits = Log2FloorNonZero(tail) - 1u; |
110 const uint32_t prefix = insertlen >> nbits; | 124 const uint32_t prefix = tail >> nbits; |
111 const uint32_t inscode = (nbits << 1) + prefix + 2; | 125 const uint32_t inscode = (nbits << 1) + prefix + 2; |
112 const uint32_t extra = insertlen - (prefix << nbits); | 126 const uint32_t extra = tail - (prefix << nbits); |
113 **commands = inscode | (extra << 8); | 127 **commands = inscode | (extra << 8); |
114 } else if (insertlen < 2114) { | 128 } else if (insertlen < 2114) { |
115 insertlen -= 66; | 129 const uint32_t tail = insertlen - 66; |
116 const uint32_t nbits = Log2FloorNonZero(insertlen); | 130 const uint32_t nbits = Log2FloorNonZero(tail); |
117 const uint32_t code = nbits + 10; | 131 const uint32_t code = nbits + 10; |
118 const uint32_t extra = insertlen - (1 << nbits); | 132 const uint32_t extra = tail - (1u << nbits); |
119 **commands = code | (extra << 8); | 133 **commands = code | (extra << 8); |
120 } else if (insertlen < 6210) { | 134 } else if (insertlen < 6210) { |
121 const uint32_t extra = insertlen - 2114; | 135 const uint32_t extra = insertlen - 2114; |
122 **commands = 21 | (extra << 8); | 136 **commands = 21 | (extra << 8); |
123 } else if (insertlen < 22594) { | 137 } else if (insertlen < 22594) { |
124 const uint32_t extra = insertlen - 6210; | 138 const uint32_t extra = insertlen - 6210; |
125 **commands = 22 | (extra << 8); | 139 **commands = 22 | (extra << 8); |
126 } else { | 140 } else { |
127 const uint32_t extra = insertlen - 22594; | 141 const uint32_t extra = insertlen - 22594; |
128 **commands = 23 | (extra << 8); | 142 **commands = 23 | (extra << 8); |
129 } | 143 } |
130 ++(*commands); | 144 ++(*commands); |
131 } | 145 } |
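
A worked instance of the 6 <= insertlen < 130 branch as a self-contained check (Log2Floor is an illustrative stand-in for Log2FloorNonZero): insertlen = 100 gives tail = 98, nbits = 5, prefix = 3, inscode = 15 and extra = 2, so the packed command is 15 | (2 << 8) = 0x20F.

    #include <assert.h>
    #include <stdint.h>

    static uint32_t Log2Floor(uint32_t v) {
      uint32_t r = 0;
      while (v >>= 1) ++r;
      return r;
    }

    int main(void) {
      const uint32_t insertlen = 100;
      const uint32_t tail = insertlen - 2;                 /* 98 */
      const uint32_t nbits = Log2Floor(tail) - 1u;         /* 5 */
      const uint32_t prefix = tail >> nbits;               /* 3 */
      const uint32_t inscode = (nbits << 1) + prefix + 2;  /* 15 */
      const uint32_t extra = tail - (prefix << nbits);     /* 2 */
      assert((inscode | (extra << 8)) == 0x20F);
      return 0;
    }
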
132 | 146 |
133 inline void EmitCopyLen(size_t copylen, uint32_t** commands) { | 147 static BROTLI_INLINE void EmitCopyLen(size_t copylen, uint32_t** commands) { |
134 if (copylen < 10) { | 148 if (copylen < 10) { |
135 **commands = static_cast<uint32_t>(copylen + 38); | 149 **commands = (uint32_t)(copylen + 38); |
136 } else if (copylen < 134) { | 150 } else if (copylen < 134) { |
137 copylen -= 6; | 151 const size_t tail = copylen - 6; |
138 const size_t nbits = Log2FloorNonZero(copylen) - 1; | 152 const size_t nbits = Log2FloorNonZero(tail) - 1; |
139 const size_t prefix = copylen >> nbits; | 153 const size_t prefix = tail >> nbits; |
140 const size_t code = (nbits << 1) + prefix + 44; | 154 const size_t code = (nbits << 1) + prefix + 44; |
141 const size_t extra = copylen - (prefix << nbits); | 155 const size_t extra = tail - (prefix << nbits); |
142 **commands = static_cast<uint32_t>(code | (extra << 8)); | 156 **commands = (uint32_t)(code | (extra << 8)); |
143 } else if (copylen < 2118) { | 157 } else if (copylen < 2118) { |
144 copylen -= 70; | 158 const size_t tail = copylen - 70; |
145 const size_t nbits = Log2FloorNonZero(copylen); | 159 const size_t nbits = Log2FloorNonZero(tail); |
146 const size_t code = nbits + 52; | 160 const size_t code = nbits + 52; |
147 const size_t extra = copylen - (1 << nbits); | 161 const size_t extra = tail - ((size_t)1 << nbits); |
148 **commands = static_cast<uint32_t>(code | (extra << 8)); | 162 **commands = (uint32_t)(code | (extra << 8)); |
149 } else { | 163 } else { |
150 const size_t extra = copylen - 2118; | 164 const size_t extra = copylen - 2118; |
151 **commands = static_cast<uint32_t>(63 | (extra << 8)); | 165 **commands = (uint32_t)(63 | (extra << 8)); |
152 } | 166 } |
153 ++(*commands); | 167 ++(*commands); |
154 } | 168 } |
155 | 169 |
156 inline void EmitCopyLenLastDistance(size_t copylen, uint32_t** commands) { | 170 static BROTLI_INLINE void EmitCopyLenLastDistance( |
| 171 size_t copylen, uint32_t** commands) { |
157 if (copylen < 12) { | 172 if (copylen < 12) { |
158 **commands = static_cast<uint32_t>(copylen + 20); | 173 **commands = (uint32_t)(copylen + 20); |
159 ++(*commands); | 174 ++(*commands); |
160 } else if (copylen < 72) { | 175 } else if (copylen < 72) { |
161 copylen -= 8; | 176 const size_t tail = copylen - 8; |
162 const size_t nbits = Log2FloorNonZero(copylen) - 1; | 177 const size_t nbits = Log2FloorNonZero(tail) - 1; |
163 const size_t prefix = copylen >> nbits; | 178 const size_t prefix = tail >> nbits; |
164 const size_t code = (nbits << 1) + prefix + 28; | 179 const size_t code = (nbits << 1) + prefix + 28; |
165 const size_t extra = copylen - (prefix << nbits); | 180 const size_t extra = tail - (prefix << nbits); |
166 **commands = static_cast<uint32_t>(code | (extra << 8)); | 181 **commands = (uint32_t)(code | (extra << 8)); |
167 ++(*commands); | 182 ++(*commands); |
168 } else if (copylen < 136) { | 183 } else if (copylen < 136) { |
169 copylen -= 8; | 184 const size_t tail = copylen - 8; |
170 const size_t code = (copylen >> 5) + 54; | 185 const size_t code = (tail >> 5) + 54; |
171 const size_t extra = copylen & 31; | 186 const size_t extra = tail & 31; |
172 **commands = static_cast<uint32_t>(code | (extra << 8)); | 187 **commands = (uint32_t)(code | (extra << 8)); |
173 ++(*commands); | 188 ++(*commands); |
174 **commands = 64; | 189 **commands = 64; |
175 ++(*commands); | 190 ++(*commands); |
176 } else if (copylen < 2120) { | 191 } else if (copylen < 2120) { |
177 copylen -= 72; | 192 const size_t tail = copylen - 72; |
178 const size_t nbits = Log2FloorNonZero(copylen); | 193 const size_t nbits = Log2FloorNonZero(tail); |
179 const size_t code = nbits + 52; | 194 const size_t code = nbits + 52; |
180 const size_t extra = copylen - (1 << nbits); | 195 const size_t extra = tail - ((size_t)1 << nbits); |
181 **commands = static_cast<uint32_t>(code | (extra << 8)); | 196 **commands = (uint32_t)(code | (extra << 8)); |
182 ++(*commands); | 197 ++(*commands); |
183 **commands = 64; | 198 **commands = 64; |
184 ++(*commands); | 199 ++(*commands); |
185 } else { | 200 } else { |
186 const size_t extra = copylen - 2120; | 201 const size_t extra = copylen - 2120; |
187 **commands = static_cast<uint32_t>(63 | (extra << 8)); | 202 **commands = (uint32_t)(63 | (extra << 8)); |
188 ++(*commands); | 203 ++(*commands); |
189 **commands = 64; | 204 **commands = 64; |
190 ++(*commands); | 205 ++(*commands); |
191 } | 206 } |
192 } | 207 } |
193 | 208 |
194 inline void EmitDistance(uint32_t distance, uint32_t** commands) { | 209 static BROTLI_INLINE void EmitDistance(uint32_t distance, uint32_t** commands) { |
195 distance += 3; | 210 uint32_t d = distance + 3; |
196 uint32_t nbits = Log2FloorNonZero(distance) - 1; | 211 uint32_t nbits = Log2FloorNonZero(d) - 1; |
197 const uint32_t prefix = (distance >> nbits) & 1; | 212 const uint32_t prefix = (d >> nbits) & 1; |
198 const uint32_t offset = (2 + prefix) << nbits; | 213 const uint32_t offset = (2 + prefix) << nbits; |
199 const uint32_t distcode = 2 * (nbits - 1) + prefix + 80; | 214 const uint32_t distcode = 2 * (nbits - 1) + prefix + 80; |
200 uint32_t extra = distance - offset; | 215 uint32_t extra = d - offset; |
201 **commands = distcode | (extra << 8); | 216 **commands = distcode | (extra << 8); |
202 ++(*commands); | 217 ++(*commands); |
203 } | 218 } |
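
Worked instance: distance = 1000 gives d = 1003; Log2FloorNonZero(1003) = 9, so nbits = 8, prefix = (1003 >> 8) & 1 = 1, offset = (2 + 1) << 8 = 768, distcode = 2 * 7 + 1 + 80 = 95, and extra = 1003 - 768 = 235, packed as 95 | (235 << 8).
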
204 | 219 |
205 // REQUIRES: len <= 1 << 20. | 220 /* REQUIRES: len <= 1 << 20. */ |
206 static void StoreMetaBlockHeader( | 221 static void BrotliStoreMetaBlockHeader( |
207 size_t len, bool is_uncompressed, size_t* storage_ix, uint8_t* storage) { | 222 size_t len, BROTLI_BOOL is_uncompressed, size_t* storage_ix, |
208 // ISLAST | 223 uint8_t* storage) { |
209 WriteBits(1, 0, storage_ix, storage); | 224 /* ISLAST */ |
| 225 BrotliWriteBits(1, 0, storage_ix, storage); |
210 if (len <= (1U << 16)) { | 226 if (len <= (1U << 16)) { |
211 // MNIBBLES is 4 | 227 /* MNIBBLES is 4 */ |
212 WriteBits(2, 0, storage_ix, storage); | 228 BrotliWriteBits(2, 0, storage_ix, storage); |
213 WriteBits(16, len - 1, storage_ix, storage); | 229 BrotliWriteBits(16, len - 1, storage_ix, storage); |
214 } else { | 230 } else { |
215 // MNIBBLES is 5 | 231 /* MNIBBLES is 5 */ |
216 WriteBits(2, 1, storage_ix, storage); | 232 BrotliWriteBits(2, 1, storage_ix, storage); |
217 WriteBits(20, len - 1, storage_ix, storage); | 233 BrotliWriteBits(20, len - 1, storage_ix, storage); |
218 } | 234 } |
219 // ISUNCOMPRESSED | 235 /* ISUNCOMPRESSED */ |
220 WriteBits(1, is_uncompressed, storage_ix, storage); | 236 BrotliWriteBits(1, (uint64_t)is_uncompressed, storage_ix, storage); |
221 } | 237 } |
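
For example, a 70000-byte compressed block (len > 1 << 16) is announced with 24 header bits: a 0 bit for ISLAST, the 2-bit value 1 selecting MNIBBLES = 5, the 20-bit value 69999 for MLEN - 1, and a 0 bit for ISUNCOMPRESSED. Blocks of up to 65536 bytes take the 16-bit branch, 20 header bits in total.
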
222 | 238 |
223 static void CreateCommands(const uint8_t* input, size_t block_size, | 239 static BROTLI_INLINE void CreateCommands(const uint8_t* input, |
224 size_t input_size, const uint8_t* base_ip, | 240 size_t block_size, size_t input_size, const uint8_t* base_ip, int* table, |
225 int* table, size_t table_size, | 241 size_t table_bits, uint8_t** literals, uint32_t** commands) { |
226 uint8_t** literals, uint32_t** commands) { | 242 /* "ip" is the input pointer. */ |
227 // "ip" is the input pointer. | |
228 const uint8_t* ip = input; | 243 const uint8_t* ip = input; |
229 assert(table_size); | 244 const size_t shift = 64u - table_bits; |
230 assert(table_size <= (1u << 31)); | |
231 assert((table_size & (table_size - 1)) == 0); // table must be power of two | |
232 const size_t shift = 64u - Log2FloorNonZero(table_size); | |
233 assert(table_size - 1 == static_cast<size_t>( | |
234 MAKE_UINT64_T(0xFFFFFFFF, 0xFFFFFF) >> shift)); | |
235 const uint8_t* ip_end = input + block_size; | 245 const uint8_t* ip_end = input + block_size; |
236 // "next_emit" is a pointer to the first byte that is not covered by a | 246 /* "next_emit" is a pointer to the first byte that is not covered by a |
237 // previous copy. Bytes between "next_emit" and the start of the next copy or | 247 previous copy. Bytes between "next_emit" and the start of the next copy or |
238 // the end of the input will be emitted as literal bytes. | 248 the end of the input will be emitted as literal bytes. */ |
239 const uint8_t* next_emit = input; | 249 const uint8_t* next_emit = input; |
240 | 250 |
241 int last_distance = -1; | 251 int last_distance = -1; |
242 const size_t kInputMarginBytes = 16; | 252 const size_t kInputMarginBytes = BROTLI_WINDOW_GAP; |
243 const size_t kMinMatchLen = 6; | 253 const size_t kMinMatchLen = 6; |
244 if (PREDICT_TRUE(block_size >= kInputMarginBytes)) { | 254 |
245 // For the last block, we need to keep a 16 bytes margin so that we can be | 256 /* For the last block, we need to keep a 16-byte margin so that we can be
246 // sure that all distances are at most window size - 16. | 256 /* For the last block, we need to keep a 16 bytes margin so that we can be |
247 // For all other blocks, we only need to keep a margin of 5 bytes so that | 257 sure that all distances are at most window size - 16. |
248 // we don't go over the block size with a copy. | 258 For all other blocks, we only need to keep a margin of 5 bytes so that |
249 const size_t len_limit = std::min(block_size - kMinMatchLen, | 259 we don't go over the block size with a copy. */ |
250 input_size - kInputMarginBytes); | 260 const size_t len_limit = BROTLI_MIN(size_t, block_size - kMinMatchLen, |
| 261 input_size - kInputMarginBytes); |
251 const uint8_t* ip_limit = input + len_limit; | 262 const uint8_t* ip_limit = input + len_limit; |
252 | 263 |
253 for (uint32_t next_hash = Hash(++ip, shift); ; ) { | 264 uint32_t next_hash; |
254 assert(next_emit < ip); | 265 for (next_hash = Hash(++ip, shift); ; ) { |
255 // Step 1: Scan forward in the input looking for a 6-byte-long match. | 266 /* Step 1: Scan forward in the input looking for a 6-byte-long match. |
256 // If we get close to exhausting the input then goto emit_remainder. | 267 If we get close to exhausting the input then goto emit_remainder. |
257 // | 268 |
258 // Heuristic match skipping: If 32 bytes are scanned with no matches | 269 Heuristic match skipping: If 32 bytes are scanned with no matches |
259 // found, start looking only at every other byte. If 32 more bytes are | 270 found, start looking only at every other byte. If 32 more bytes are |
260 // scanned, look at every third byte, etc.. When a match is found, | 271 scanned, look at every third byte, etc. When a match is found,
261 // immediately go back to looking at every byte. This is a small loss | 272 immediately go back to looking at every byte. This is a small loss |
262 // (~5% performance, ~0.1% density) for compressible data due to more | 273 (~5% performance, ~0.1% density) for compressible data due to more |
263 // bookkeeping, but for non-compressible data (such as JPEG) it's a huge | 274 bookkeeping, but for non-compressible data (such as JPEG) it's a huge |
264 // win since the compressor quickly "realizes" the data is incompressible | 275 win since the compressor quickly "realizes" the data is incompressible |
265 // and doesn't bother looking for matches everywhere. | 276 and doesn't bother looking for matches everywhere. |
266 // | 277 |
267 // The "skip" variable keeps track of how many bytes there are since the | 278 The "skip" variable keeps track of how many bytes there are since the |
268 // last match; dividing it by 32 (ie. right-shifting by five) gives the | 279 last match; dividing it by 32 (i.e. right-shifting by five) gives the
269 // number of bytes to move ahead for each iteration. | 280 number of bytes to move ahead for each iteration. */ |
270 uint32_t skip = 32; | 281 uint32_t skip = 32; |
271 | 282 |
272 const uint8_t* next_ip = ip; | 283 const uint8_t* next_ip = ip; |
273 const uint8_t* candidate; | 284 const uint8_t* candidate; |
| 285 |
| 286 assert(next_emit < ip); |
| 287 trawl: |
274 do { | 288 do { |
| 289 uint32_t hash = next_hash; |
| 290 uint32_t bytes_between_hash_lookups = skip++ >> 5; |
275 ip = next_ip; | 291 ip = next_ip; |
276 uint32_t hash = next_hash; | |
277 assert(hash == Hash(ip, shift)); | 292 assert(hash == Hash(ip, shift)); |
278 uint32_t bytes_between_hash_lookups = skip++ >> 5; | |
279 next_ip = ip + bytes_between_hash_lookups; | 293 next_ip = ip + bytes_between_hash_lookups; |
280 if (PREDICT_FALSE(next_ip > ip_limit)) { | 294 if (BROTLI_PREDICT_FALSE(next_ip > ip_limit)) { |
281 goto emit_remainder; | 295 goto emit_remainder; |
282 } | 296 } |
283 next_hash = Hash(next_ip, shift); | 297 next_hash = Hash(next_ip, shift); |
284 candidate = ip - last_distance; | 298 candidate = ip - last_distance; |
285 if (IsMatch(ip, candidate)) { | 299 if (IsMatch(ip, candidate)) { |
286 if (PREDICT_TRUE(candidate < ip)) { | 300 if (BROTLI_PREDICT_TRUE(candidate < ip)) { |
287 table[hash] = static_cast<int>(ip - base_ip); | 301 table[hash] = (int)(ip - base_ip); |
288 break; | 302 break; |
289 } | 303 } |
290 } | 304 } |
291 candidate = base_ip + table[hash]; | 305 candidate = base_ip + table[hash]; |
292 assert(candidate >= base_ip); | 306 assert(candidate >= base_ip); |
293 assert(candidate < ip); | 307 assert(candidate < ip); |
294 | 308 |
295 table[hash] = static_cast<int>(ip - base_ip); | 309 table[hash] = (int)(ip - base_ip); |
296 } while (PREDICT_TRUE(!IsMatch(ip, candidate))); | 310 } while (BROTLI_PREDICT_TRUE(!IsMatch(ip, candidate))); |
297 | 311 |
298 // Step 2: Emit the found match together with the literal bytes from | 312 /* Check copy distance. If candidate is not feasible, continue search. |
299 // "next_emit", and then see if we can find a next macth immediately | 313 Checking is done outside of hot loop to reduce overhead. */ |
300 // afterwards. Repeat until we find no match for the input | 314 if (ip - candidate > MAX_DISTANCE) goto trawl; |
301 // without emitting some literal bytes. | 315 |
302 uint64_t input_bytes; | 316 /* Step 2: Emit the found match together with the literal bytes from |
| 317 "next_emit", and then see if we can find a next match immediately |
| 318 afterwards. Repeat until we find no match for the input |
| 319 without emitting some literal bytes. */ |
303 | 320 |
304 { | 321 { |
305 // We have a 6-byte match at ip, and we need to emit bytes in | 322 /* We have a 6-byte match at ip, and we need to emit bytes in |
306 // [next_emit, ip). | 323 [next_emit, ip). */ |
307 const uint8_t* base = ip; | 324 const uint8_t* base = ip; |
308 size_t matched = 6 + FindMatchLengthWithLimit( | 325 size_t matched = 6 + FindMatchLengthWithLimit( |
309 candidate + 6, ip + 6, static_cast<size_t>(ip_end - ip) - 6); | 326 candidate + 6, ip + 6, (size_t)(ip_end - ip) - 6); |
| 327 int distance = (int)(base - candidate); /* > 0 */ |
| 328 int insert = (int)(base - next_emit); |
310 ip += matched; | 329 ip += matched; |
311 int distance = static_cast<int>(base - candidate); /* > 0 */ | |
312 int insert = static_cast<int>(base - next_emit); | |
313 assert(0 == memcmp(base, candidate, matched)); | 330 assert(0 == memcmp(base, candidate, matched)); |
314 EmitInsertLen(static_cast<uint32_t>(insert), commands); | 331 EmitInsertLen((uint32_t)insert, commands); |
315 memcpy(*literals, next_emit, static_cast<size_t>(insert)); | 332 memcpy(*literals, next_emit, (size_t)insert); |
316 *literals += insert; | 333 *literals += insert; |
317 if (distance == last_distance) { | 334 if (distance == last_distance) { |
318 **commands = 64; | 335 **commands = 64; |
319 ++(*commands); | 336 ++(*commands); |
320 } else { | 337 } else { |
321 EmitDistance(static_cast<uint32_t>(distance), commands); | 338 EmitDistance((uint32_t)distance, commands); |
322 last_distance = distance; | 339 last_distance = distance; |
323 } | 340 } |
324 EmitCopyLenLastDistance(matched, commands); | 341 EmitCopyLenLastDistance(matched, commands); |
325 | 342 |
326 next_emit = ip; | 343 next_emit = ip; |
327 if (PREDICT_FALSE(ip >= ip_limit)) { | 344 if (BROTLI_PREDICT_FALSE(ip >= ip_limit)) { |
328 goto emit_remainder; | 345 goto emit_remainder; |
329 } | 346 } |
330 // We could immediately start working at ip now, but to improve | 347 { |
331 // compression we first update "table" with the hashes of some positions | 348 /* We could immediately start working at ip now, but to improve |
332 // within the last copy. | 349 compression we first update "table" with the hashes of some |
333 input_bytes = BROTLI_UNALIGNED_LOAD64(ip - 5); | 350 positions within the last copy. */ |
334 uint32_t prev_hash = HashBytesAtOffset(input_bytes, 0, shift); | 351 uint64_t input_bytes = BROTLI_UNALIGNED_LOAD64(ip - 5); |
335 table[prev_hash] = static_cast<int>(ip - base_ip - 5); | 352 uint32_t prev_hash = HashBytesAtOffset(input_bytes, 0, shift); |
336 prev_hash = HashBytesAtOffset(input_bytes, 1, shift); | 353 uint32_t cur_hash; |
337 table[prev_hash] = static_cast<int>(ip - base_ip - 4); | 354 table[prev_hash] = (int)(ip - base_ip - 5); |
338 prev_hash = HashBytesAtOffset(input_bytes, 2, shift); | 355 prev_hash = HashBytesAtOffset(input_bytes, 1, shift); |
339 table[prev_hash] = static_cast<int>(ip - base_ip - 3); | 356 table[prev_hash] = (int)(ip - base_ip - 4); |
340 input_bytes = BROTLI_UNALIGNED_LOAD64(ip - 2); | 357 prev_hash = HashBytesAtOffset(input_bytes, 2, shift); |
341 prev_hash = HashBytesAtOffset(input_bytes, 0, shift); | 358 table[prev_hash] = (int)(ip - base_ip - 3); |
342 table[prev_hash] = static_cast<int>(ip - base_ip - 2); | 359 input_bytes = BROTLI_UNALIGNED_LOAD64(ip - 2); |
343 prev_hash = HashBytesAtOffset(input_bytes, 1, shift); | 360 cur_hash = HashBytesAtOffset(input_bytes, 2, shift); |
344 table[prev_hash] = static_cast<int>(ip - base_ip - 1); | 361 prev_hash = HashBytesAtOffset(input_bytes, 0, shift); |
| 362 table[prev_hash] = (int)(ip - base_ip - 2); |
| 363 prev_hash = HashBytesAtOffset(input_bytes, 1, shift); |
| 364 table[prev_hash] = (int)(ip - base_ip - 1); |
345 | 365 |
346 uint32_t cur_hash = HashBytesAtOffset(input_bytes, 2, shift); | 366 candidate = base_ip + table[cur_hash]; |
347 candidate = base_ip + table[cur_hash]; | 367 table[cur_hash] = (int)(ip - base_ip); |
348 table[cur_hash] = static_cast<int>(ip - base_ip); | 368 } |
349 } | 369 } |
350 | 370 |
351 while (IsMatch(ip, candidate)) { | 371 while (ip - candidate <= MAX_DISTANCE && IsMatch(ip, candidate)) { |
352 // We have a 6-byte match at ip, and no need to emit any | 372 /* We have a 6-byte match at ip, and no need to emit any |
353 // literal bytes prior to ip. | 373 literal bytes prior to ip. */ |
354 const uint8_t* base = ip; | 374 const uint8_t* base = ip; |
355 size_t matched = 6 + FindMatchLengthWithLimit( | 375 size_t matched = 6 + FindMatchLengthWithLimit( |
356 candidate + 6, ip + 6, static_cast<size_t>(ip_end - ip) - 6); | 376 candidate + 6, ip + 6, (size_t)(ip_end - ip) - 6); |
357 ip += matched; | 377 ip += matched; |
358 last_distance = static_cast<int>(base - candidate); /* > 0 */ | 378 last_distance = (int)(base - candidate); /* > 0 */ |
359 assert(0 == memcmp(base, candidate, matched)); | 379 assert(0 == memcmp(base, candidate, matched)); |
360 EmitCopyLen(matched, commands); | 380 EmitCopyLen(matched, commands); |
361 EmitDistance(static_cast<uint32_t>(last_distance), commands); | 381 EmitDistance((uint32_t)last_distance, commands); |
362 | 382 |
363 next_emit = ip; | 383 next_emit = ip; |
364 if (PREDICT_FALSE(ip >= ip_limit)) { | 384 if (BROTLI_PREDICT_FALSE(ip >= ip_limit)) { |
365 goto emit_remainder; | 385 goto emit_remainder; |
366 } | 386 } |
367 // We could immediately start working at ip now, but to improve | 387 { |
368 // compression we first update "table" with the hashes of some positions | 388 /* We could immediately start working at ip now, but to improve |
369 // within the last copy. | 389 compression we first update "table" with the hashes of some |
370 input_bytes = BROTLI_UNALIGNED_LOAD64(ip - 5); | 390 positions within the last copy. */ |
371 uint32_t prev_hash = HashBytesAtOffset(input_bytes, 0, shift); | 391 uint64_t input_bytes = BROTLI_UNALIGNED_LOAD64(ip - 5); |
372 table[prev_hash] = static_cast<int>(ip - base_ip - 5); | 392 uint32_t prev_hash = HashBytesAtOffset(input_bytes, 0, shift); |
373 prev_hash = HashBytesAtOffset(input_bytes, 1, shift); | 393 uint32_t cur_hash; |
374 table[prev_hash] = static_cast<int>(ip - base_ip - 4); | 394 table[prev_hash] = (int)(ip - base_ip - 5); |
375 prev_hash = HashBytesAtOffset(input_bytes, 2, shift); | 395 prev_hash = HashBytesAtOffset(input_bytes, 1, shift); |
376 table[prev_hash] = static_cast<int>(ip - base_ip - 3); | 396 table[prev_hash] = (int)(ip - base_ip - 4); |
377 input_bytes = BROTLI_UNALIGNED_LOAD64(ip - 2); | 397 prev_hash = HashBytesAtOffset(input_bytes, 2, shift); |
378 prev_hash = HashBytesAtOffset(input_bytes, 0, shift); | 398 table[prev_hash] = (int)(ip - base_ip - 3); |
379 table[prev_hash] = static_cast<int>(ip - base_ip - 2); | 399 input_bytes = BROTLI_UNALIGNED_LOAD64(ip - 2); |
380 prev_hash = HashBytesAtOffset(input_bytes, 1, shift); | 400 cur_hash = HashBytesAtOffset(input_bytes, 2, shift); |
381 table[prev_hash] = static_cast<int>(ip - base_ip - 1); | 401 prev_hash = HashBytesAtOffset(input_bytes, 0, shift); |
| 402 table[prev_hash] = (int)(ip - base_ip - 2); |
| 403 prev_hash = HashBytesAtOffset(input_bytes, 1, shift); |
| 404 table[prev_hash] = (int)(ip - base_ip - 1); |
382 | 405 |
383 uint32_t cur_hash = HashBytesAtOffset(input_bytes, 2, shift); | 406 candidate = base_ip + table[cur_hash]; |
384 candidate = base_ip + table[cur_hash]; | 407 table[cur_hash] = (int)(ip - base_ip); |
385 table[cur_hash] = static_cast<int>(ip - base_ip); | 408 } |
386 } | 409 } |
387 | 410 |
388 next_hash = Hash(++ip, shift); | 411 next_hash = Hash(++ip, shift); |
389 } | 412 } |
390 } | 413 } |
391 | 414 |
392 emit_remainder: | 415 emit_remainder: |
393 assert(next_emit <= ip_end); | 416 assert(next_emit <= ip_end); |
394 // Emit the remaining bytes as literals. | 417 /* Emit the remaining bytes as literals. */ |
395 if (next_emit < ip_end) { | 418 if (next_emit < ip_end) { |
396 const uint32_t insert = static_cast<uint32_t>(ip_end - next_emit); | 419 const uint32_t insert = (uint32_t)(ip_end - next_emit); |
397 EmitInsertLen(insert, commands); | 420 EmitInsertLen(insert, commands); |
398 memcpy(*literals, next_emit, insert); | 421 memcpy(*literals, next_emit, insert); |
399 *literals += insert; | 422 *literals += insert; |
400 } | 423 } |
401 } | 424 } |
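
The match-skipping heuristic from Step 1, watched in isolation (a hypothetical standalone loop, not brotli code): the stride is skip++ >> 5, so the scan advances by one byte for the first 32 probes, by two for the next 32, and so on, covering ground increasingly fast on incompressible input.

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
      uint32_t skip = 32;
      unsigned pos = 0;
      int probe;
      for (probe = 1; probe <= 128; ++probe) {
        pos += skip++ >> 5;  /* bytes_between_hash_lookups */
        if ((probe & 31) == 0) {
          printf("after %3d probes: offset %u\n", probe, pos);
        }
      }
      return 0;  /* prints offsets 32, 96, 192, 320 */
    }
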
402 | 425 |
403 static void StoreCommands(const uint8_t* literals, const size_t num_literals, | 426 static void StoreCommands(MemoryManager* m, |
| 427 const uint8_t* literals, const size_t num_literals, |
404 const uint32_t* commands, const size_t num_commands, | 428 const uint32_t* commands, const size_t num_commands, |
405 size_t* storage_ix, uint8_t* storage) { | 429 size_t* storage_ix, uint8_t* storage) { |
406 uint8_t lit_depths[256] = { 0 }; | |
407 uint16_t lit_bits[256] = { 0 }; | |
408 uint32_t lit_histo[256] = { 0 }; | |
409 for (size_t i = 0; i < num_literals; ++i) { | |
410 ++lit_histo[literals[i]]; | |
411 } | |
412 BuildAndStoreHuffmanTreeFast(lit_histo, num_literals, | |
413 /* max_bits = */ 8, | |
414 lit_depths, lit_bits, | |
415 storage_ix, storage); | |
416 | |
417 uint8_t cmd_depths[128] = { 0 }; | |
418 uint16_t cmd_bits[128] = { 0 }; | |
419 uint32_t cmd_histo[128] = { 0 }; | |
420 for (size_t i = 0; i < num_commands; ++i) { | |
421 ++cmd_histo[commands[i] & 0xff]; | |
422 } | |
423 cmd_histo[1] += 1; | |
424 cmd_histo[2] += 1; | |
425 cmd_histo[64] += 1; | |
426 cmd_histo[84] += 1; | |
427 BuildAndStoreCommandPrefixCode(cmd_histo, cmd_depths, cmd_bits, | |
428 storage_ix, storage); | |
429 | |
430 static const uint32_t kNumExtraBits[128] = { | 430 static const uint32_t kNumExtraBits[128] = { |
431 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7, 8, 9, 10, 12, 14, 24, | 431 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7, 8, 9, 10, 12, 14, 24, |
432 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, | 432 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, |
433 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7, 8, 9, 10, 24, | 433 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7, 8, 9, 10, 24, |
434 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, | 434 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, |
435 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, | 435 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, |
436 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15, 16, 16, | 436 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15, 16, 16, |
437 17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24, 24, | 437 17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24, 24, |
438 }; | 438 }; |
439 static const uint32_t kInsertOffset[24] = { | 439 static const uint32_t kInsertOffset[24] = { |
440 0, 1, 2, 3, 4, 5, 6, 8, 10, 14, 18, 26, 34, 50, 66, 98, 130, 194, 322, 578, | 440 0, 1, 2, 3, 4, 5, 6, 8, 10, 14, 18, 26, 34, 50, 66, 98, 130, 194, 322, 578, |
441 1090, 2114, 6210, 22594, | 441 1090, 2114, 6210, 22594, |
442 }; | 442 }; |
443 | 443 |
444 for (size_t i = 0; i < num_commands; ++i) { | 444 uint8_t lit_depths[256]; |
| 445 uint16_t lit_bits[256]; |
| 446 uint32_t lit_histo[256] = { 0 }; |
| 447 uint8_t cmd_depths[128] = { 0 }; |
| 448 uint16_t cmd_bits[128] = { 0 }; |
| 449 uint32_t cmd_histo[128] = { 0 }; |
| 450 size_t i; |
| 451 for (i = 0; i < num_literals; ++i) { |
| 452 ++lit_histo[literals[i]]; |
| 453 } |
| 454 BrotliBuildAndStoreHuffmanTreeFast(m, lit_histo, num_literals, |
| 455 /* max_bits = */ 8, |
| 456 lit_depths, lit_bits, |
| 457 storage_ix, storage); |
| 458 if (BROTLI_IS_OOM(m)) return; |
| 459 |
| 460 for (i = 0; i < num_commands; ++i) { |
| 461 ++cmd_histo[commands[i] & 0xff]; |
| 462 } |
| 463 cmd_histo[1] += 1; |
| 464 cmd_histo[2] += 1; |
| 465 cmd_histo[64] += 1; |
| 466 cmd_histo[84] += 1; |
| 467 BuildAndStoreCommandPrefixCode(cmd_histo, cmd_depths, cmd_bits, |
| 468 storage_ix, storage); |
| 469 |
| 470 for (i = 0; i < num_commands; ++i) { |
445 const uint32_t cmd = commands[i]; | 471 const uint32_t cmd = commands[i]; |
446 const uint32_t code = cmd & 0xff; | 472 const uint32_t code = cmd & 0xff; |
447 const uint32_t extra = cmd >> 8; | 473 const uint32_t extra = cmd >> 8; |
448 WriteBits(cmd_depths[code], cmd_bits[code], storage_ix, storage); | 474 BrotliWriteBits(cmd_depths[code], cmd_bits[code], storage_ix, storage); |
449 WriteBits(kNumExtraBits[code], extra, storage_ix, storage); | 475 BrotliWriteBits(kNumExtraBits[code], extra, storage_ix, storage); |
450 if (code < 24) { | 476 if (code < 24) { |
451 const uint32_t insert = kInsertOffset[code] + extra; | 477 const uint32_t insert = kInsertOffset[code] + extra; |
452 for (uint32_t j = 0; j < insert; ++j) { | 478 uint32_t j; |
| 479 for (j = 0; j < insert; ++j) { |
453 const uint8_t lit = *literals; | 480 const uint8_t lit = *literals; |
454 WriteBits(lit_depths[lit], lit_bits[lit], storage_ix, storage); | 481 BrotliWriteBits(lit_depths[lit], lit_bits[lit], storage_ix, storage); |
455 ++literals; | 482 ++literals; |
456 } | 483 } |
457 } | 484 } |
458 } | 485 } |
459 } | 486 } |
460 | 487 |
461 static bool ShouldCompress(const uint8_t* input, size_t input_size, | 488 /* Acceptable loss for uncompressible speedup is 2% */ |
462 size_t num_literals) { | 489 #define MIN_RATIO 0.98 |
463 static const double kAcceptableLossForUncompressibleSpeedup = 0.02; | 490 #define SAMPLE_RATE 43 |
464 static const double kMaxRatioOfLiterals = | 491 |
465 1.0 - kAcceptableLossForUncompressibleSpeedup; | 492 static BROTLI_BOOL ShouldCompress( |
466 if (num_literals < kMaxRatioOfLiterals * static_cast<double>(input_size)) { | 493 const uint8_t* input, size_t input_size, size_t num_literals) { |
467 return true; | 494 double corpus_size = (double)input_size; |
| 495 if (num_literals < MIN_RATIO * corpus_size) { |
| 496 return BROTLI_TRUE; |
| 497 } else { |
| 498 uint32_t literal_histo[256] = { 0 }; |
| 499 const double max_total_bit_cost = corpus_size * 8 * MIN_RATIO / SAMPLE_RATE; |
| 500 size_t i; |
| 501 for (i = 0; i < input_size; i += SAMPLE_RATE) { |
| 502 ++literal_histo[input[i]]; |
| 503 } |
| 504 return TO_BROTLI_BOOL(BitsEntropy(literal_histo, 256) < max_total_bit_cost); |
468 } | 505 } |
469 uint32_t literal_histo[256] = { 0 }; | |
470 static const uint32_t kSampleRate = 43; | |
471 static const double kMaxEntropy = | |
472 8 * (1.0 - kAcceptableLossForUncompressibleSpeedup); | |
473 const double max_total_bit_cost = | |
474 static_cast<double>(input_size) * kMaxEntropy / kSampleRate; | |
475 for (size_t i = 0; i < input_size; i += kSampleRate) { | |
476 ++literal_histo[input[i]]; | |
477 } | |
478 return BitsEntropy(literal_histo, 256) < max_total_bit_cost; | |
479 } | 506 } |
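
The same decision rule as a self-contained sketch, with plain Shannon entropy standing in for BitsEntropy() (which additionally clamps very skewed histograms) and illustrative names: one byte out of every 43 is sampled, and the block is compressed only when the sampled literal entropy stays below 98% of 8 bits per byte.

    #include <math.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Total Shannon information, in bits, of the sampled population. */
    static double HistoBits(const uint32_t* histo, size_t n) {
      double total = 0.0, bits = 0.0;
      size_t i;
      for (i = 0; i < n; ++i) total += histo[i];
      for (i = 0; i < n; ++i) {
        if (histo[i]) bits -= histo[i] * log2(histo[i] / total);
      }
      return bits;
    }

    static int ShouldCompressSketch(const uint8_t* input, size_t input_size) {
      uint32_t histo[256] = { 0 };
      /* 8 bits/byte * 0.98, scaled down by the 1-in-43 sampling. */
      const double budget = (double)input_size * 8 * 0.98 / 43;
      size_t i;
      for (i = 0; i < input_size; i += 43) ++histo[input[i]];
      return HistoBits(histo, 256) < budget;
    }
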
480 | 507 |
481 void BrotliCompressFragmentTwoPass(const uint8_t* input, size_t input_size, | 508 static BROTLI_INLINE void BrotliCompressFragmentTwoPassImpl( |
482 bool is_last, | 509 MemoryManager* m, const uint8_t* input, size_t input_size, |
483 uint32_t* command_buf, uint8_t* literal_buf, | 510 BROTLI_BOOL is_last, uint32_t* command_buf, uint8_t* literal_buf, |
484 int* table, size_t table_size, | 511 int* table, size_t table_bits, size_t* storage_ix, uint8_t* storage) { |
485 size_t* storage_ix, uint8_t* storage) { | 512 /* Save the start of the first block for position and distance
486 // Save the start of the first block for position and distance computations. | 513 computations. */
487 const uint8_t* base_ip = input; | 514 const uint8_t* base_ip = input; |
488 | 515 |
489 while (input_size > 0) { | 516 while (input_size > 0) { |
490 size_t block_size = std::min(input_size, kCompressFragmentTwoPassBlockSize); | 517 size_t block_size = |
| 518 BROTLI_MIN(size_t, input_size, kCompressFragmentTwoPassBlockSize); |
491 uint32_t* commands = command_buf; | 519 uint32_t* commands = command_buf; |
492 uint8_t* literals = literal_buf; | 520 uint8_t* literals = literal_buf; |
493 CreateCommands(input, block_size, input_size, base_ip, table, table_size, | 521 size_t num_literals; |
| 522 CreateCommands(input, block_size, input_size, base_ip, table, table_bits, |
494 &literals, &commands); | 523 &literals, &commands); |
495 const size_t num_literals = static_cast<size_t>(literals - literal_buf); | 524 num_literals = (size_t)(literals - literal_buf); |
496 const size_t num_commands = static_cast<size_t>(commands - command_buf); | |
497 if (ShouldCompress(input, block_size, num_literals)) { | 525 if (ShouldCompress(input, block_size, num_literals)) { |
498 StoreMetaBlockHeader(block_size, 0, storage_ix, storage); | 526 const size_t num_commands = (size_t)(commands - command_buf); |
499 // No block splits, no contexts. | 527 BrotliStoreMetaBlockHeader(block_size, 0, storage_ix, storage); |
500 WriteBits(13, 0, storage_ix, storage); | 528 /* No block splits, no contexts. */ |
501 StoreCommands(literal_buf, num_literals, command_buf, num_commands, | 529 BrotliWriteBits(13, 0, storage_ix, storage); |
| 530 StoreCommands(m, literal_buf, num_literals, command_buf, num_commands, |
502 storage_ix, storage); | 531 storage_ix, storage); |
| 532 if (BROTLI_IS_OOM(m)) return; |
503 } else { | 533 } else { |
504 // Since we did not find many backward references and the entropy of | 534 /* Since we did not find many backward references and the entropy of |
505 // the data is close to 8 bits, we can simply emit an uncompressed block. | 535 the data is close to 8 bits, we can simply emit an uncompressed block. |
506 // This makes compression speed of uncompressible data about 3x faster. | 536 This makes compression speed of uncompressible data about 3x faster. */ |
507 StoreMetaBlockHeader(block_size, 1, storage_ix, storage); | 537 BrotliStoreMetaBlockHeader(block_size, 1, storage_ix, storage); |
508 *storage_ix = (*storage_ix + 7u) & ~7u; | 538 *storage_ix = (*storage_ix + 7u) & ~7u; |
509 memcpy(&storage[*storage_ix >> 3], input, block_size); | 539 memcpy(&storage[*storage_ix >> 3], input, block_size); |
510 *storage_ix += block_size << 3; | 540 *storage_ix += block_size << 3; |
511 storage[*storage_ix >> 3] = 0; | 541 storage[*storage_ix >> 3] = 0; |
512 } | 542 } |
513 input += block_size; | 543 input += block_size; |
514 input_size -= block_size; | 544 input_size -= block_size; |
515 } | 545 } |
516 | 546 |
517 if (is_last) { | 547 if (is_last) { |
518 WriteBits(1, 1, storage_ix, storage); // islast | 548 BrotliWriteBits(1, 1, storage_ix, storage); /* islast */ |
519 WriteBits(1, 1, storage_ix, storage); // isempty | 549 BrotliWriteBits(1, 1, storage_ix, storage); /* isempty */ |
520 *storage_ix = (*storage_ix + 7u) & ~7u; | 550 *storage_ix = (*storage_ix + 7u) & ~7u; |
521 } | 551 } |
522 } | 552 } |
523 | 553 |
524 } // namespace brotli | 554 #define FOR_TABLE_BITS_(X) \ |
| 555 X(8) X(9) X(10) X(11) X(12) X(13) X(14) X(15) X(16) X(17) |
| 556 |
| 557 #define BAKE_METHOD_PARAM_(B) \ |
| 558 static BROTLI_NOINLINE void BrotliCompressFragmentTwoPassImpl ## B( \ |
| 559 MemoryManager* m, const uint8_t* input, size_t input_size, \ |
| 560 BROTLI_BOOL is_last, uint32_t* command_buf, uint8_t* literal_buf, \ |
| 561 int* table, size_t* storage_ix, uint8_t* storage) { \ |
| 562 BrotliCompressFragmentTwoPassImpl(m, input, input_size, is_last, command_buf,\ |
| 563 literal_buf, table, B, storage_ix, storage); \ |
| 564 } |
| 565 FOR_TABLE_BITS_(BAKE_METHOD_PARAM_) |
| 566 #undef BAKE_METHOD_PARAM_ |
| 567 |
| 568 void BrotliCompressFragmentTwoPass( |
| 569 MemoryManager* m, const uint8_t* input, size_t input_size, |
| 570 BROTLI_BOOL is_last, uint32_t* command_buf, uint8_t* literal_buf, |
| 571 int* table, size_t table_size, size_t* storage_ix, uint8_t* storage) { |
| 572 const size_t table_bits = Log2FloorNonZero(table_size); |
| 573 switch (table_bits) { |
| 574 #define CASE_(B) \ |
| 575 case B: \ |
| 576 BrotliCompressFragmentTwoPassImpl ## B( \ |
| 577 m, input, input_size, is_last, command_buf, \ |
| 578 literal_buf, table, storage_ix, storage); \ |
| 579 break; |
| 580 FOR_TABLE_BITS_(CASE_) |
| 581 #undef CASE_ |
| 582 default: assert(0); break; |
| 583 } |
| 584 } |
| 585 |
| 586 #undef FOR_TABLE_BITS_ |
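
The FOR_TABLE_BITS_ / BAKE_METHOD_PARAM_ pair above is a standard X-macro specialization: each baked copy sees its table-bits parameter as a compile-time constant, so the 64u - table_bits shift in CreateCommands folds into an immediate. A minimal reproduction of the pattern, with illustrative names:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t HashImpl(uint32_t v, unsigned bits) {
      return (v * 2654435761u) >> (32 - bits);  /* constant shift when baked */
    }

    #define FOR_BITS_(X) X(8) X(9) X(10)

    #define BAKE_(B) \
      static uint32_t Hash ## B(uint32_t v) { return HashImpl(v, B); }
    FOR_BITS_(BAKE_)
    #undef BAKE_

    /* Runtime dispatch to the compile-time-specialized copies. */
    static uint32_t Hash(uint32_t v, unsigned bits) {
      switch (bits) {
    #define CASE_(B) case B: return Hash ## B(v);
        FOR_BITS_(CASE_)
    #undef CASE_
        default: return HashImpl(v, bits);
      }
    }

    int main(void) {
      printf("%u\n", Hash(12345u, 10));
      return 0;
    }
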
| 587 |
| 588 #if defined(__cplusplus) || defined(c_plusplus) |
| 589 } /* extern "C" */ |
| 590 #endif |