OLD | NEW |
| 1 /* NOLINT(build/header_guard) */ |
1 /* Copyright 2013 Google Inc. All Rights Reserved. | 2 /* Copyright 2013 Google Inc. All Rights Reserved. |
2 | 3 |
3 Distributed under MIT license. | 4 Distributed under MIT license. |
4 See file LICENSE for detail or copy at https://opensource.org/licenses/MIT | 5 See file LICENSE for detail or copy at https://opensource.org/licenses/MIT |
5 */ | 6 */ |
6 | 7 |
7 // Block split point selection utilities. | 8 /* template parameters: FN, DataType */ |
8 | 9 |
9 #include "./block_splitter.h" | 10 #define HistogramType FN(Histogram) |
10 | 11 |
11 #include <assert.h> | 12 static void FN(InitialEntropyCodes)(const DataType* data, size_t length, |
12 #include <math.h> | 13 size_t stride, |
13 | 14 size_t num_histograms, |
14 #include <algorithm> | 15 HistogramType* histograms) { |
15 #include <cstring> | |
16 #include <vector> | |
17 | |
18 #include "./cluster.h" | |
19 #include "./command.h" | |
20 #include "./fast_log.h" | |
21 #include "./histogram.h" | |
22 | |
23 namespace brotli { | |
24 | |
25 static const size_t kMaxLiteralHistograms = 100; | |
26 static const size_t kMaxCommandHistograms = 50; | |
27 static const double kLiteralBlockSwitchCost = 28.1; | |
28 static const double kCommandBlockSwitchCost = 13.5; | |
29 static const double kDistanceBlockSwitchCost = 14.6; | |
30 static const size_t kLiteralStrideLength = 70; | |
31 static const size_t kCommandStrideLength = 40; | |
32 static const size_t kSymbolsPerLiteralHistogram = 544; | |
33 static const size_t kSymbolsPerCommandHistogram = 530; | |
34 static const size_t kSymbolsPerDistanceHistogram = 544; | |
35 static const size_t kMinLengthForBlockSplitting = 128; | |
36 static const size_t kIterMulForRefining = 2; | |
37 static const size_t kMinItersForRefining = 100; | |
38 | |
39 void CopyLiteralsToByteArray(const Command* cmds, | |
40 const size_t num_commands, | |
41 const uint8_t* data, | |
42 const size_t offset, | |
43 const size_t mask, | |
44 std::vector<uint8_t>* literals) { | |
45 // Count how many we have. | |
46 size_t total_length = 0; | |
47 for (size_t i = 0; i < num_commands; ++i) { | |
48 total_length += cmds[i].insert_len_; | |
49 } | |
50 if (total_length == 0) { | |
51 return; | |
52 } | |
53 | |
54 // Allocate. | |
55 literals->resize(total_length); | |
56 | |
57 // Loop again, and copy this time. | |
58 size_t pos = 0; | |
59 size_t from_pos = offset & mask; | |
60 for (size_t i = 0; i < num_commands && pos < total_length; ++i) { | |
61 size_t insert_len = cmds[i].insert_len_; | |
62 if (from_pos + insert_len > mask) { | |
63 size_t head_size = mask + 1 - from_pos; | |
64 memcpy(&(*literals)[pos], data + from_pos, head_size); | |
65 from_pos = 0; | |
66 pos += head_size; | |
67 insert_len -= head_size; | |
68 } | |
69 if (insert_len > 0) { | |
70 memcpy(&(*literals)[pos], data + from_pos, insert_len); | |
71 pos += insert_len; | |
72 } | |
73 from_pos = (from_pos + insert_len + cmds[i].copy_len()) & mask; | |
74 } | |
75 } | |
76 | |
77 inline static unsigned int MyRand(unsigned int* seed) { | |
78 *seed *= 16807U; | |
79 if (*seed == 0) { | |
80 *seed = 1; | |
81 } | |
82 return *seed; | |
83 } | |
84 | |
85 template<typename HistogramType, typename DataType> | |
86 void InitialEntropyCodes(const DataType* data, size_t length, | |
87 size_t stride, | |
88 size_t num_histograms, | |
89 HistogramType* histograms) { | |
90 for (size_t i = 0; i < num_histograms; ++i) { | |
91 histograms[i].Clear(); | |
92 } | |
93 unsigned int seed = 7; | 16 unsigned int seed = 7; |
94 size_t block_length = length / num_histograms; | 17 size_t block_length = length / num_histograms; |
95 for (size_t i = 0; i < num_histograms; ++i) { | 18 size_t i; |
| 19 FN(ClearHistograms)(histograms, num_histograms); |
| 20 for (i = 0; i < num_histograms; ++i) { |
96 size_t pos = length * i / num_histograms; | 21 size_t pos = length * i / num_histograms; |
97 if (i != 0) { | 22 if (i != 0) { |
98 pos += MyRand(&seed) % block_length; | 23 pos += MyRand(&seed) % block_length; |
99 } | 24 } |
100 if (pos + stride >= length) { | 25 if (pos + stride >= length) { |
101 pos = length - stride - 1; | 26 pos = length - stride - 1; |
102 } | 27 } |
103 histograms[i].Add(data + pos, stride); | 28 FN(HistogramAddVector)(&histograms[i], data + pos, stride); |
104 } | 29 } |
105 } | 30 } |
106 | 31 |
107 template<typename HistogramType, typename DataType> | 32 static void FN(RandomSample)(unsigned int* seed, |
108 void RandomSample(unsigned int* seed, | 33 const DataType* data, |
109 const DataType* data, | 34 size_t length, |
110 size_t length, | 35 size_t stride, |
111 size_t stride, | 36 HistogramType* sample) { |
112 HistogramType* sample) { | |
113 size_t pos = 0; | 37 size_t pos = 0; |
114 if (stride >= length) { | 38 if (stride >= length) { |
115 pos = 0; | 39 pos = 0; |
116 stride = length; | 40 stride = length; |
117 } else { | 41 } else { |
118 pos = MyRand(seed) % (length - stride + 1); | 42 pos = MyRand(seed) % (length - stride + 1); |
119 } | 43 } |
120 sample->Add(data + pos, stride); | 44 FN(HistogramAddVector)(sample, data + pos, stride); |
121 } | 45 } |
122 | 46 |
123 template<typename HistogramType, typename DataType> | 47 static void FN(RefineEntropyCodes)(const DataType* data, size_t length, |
124 void RefineEntropyCodes(const DataType* data, size_t length, | 48 size_t stride, |
125 size_t stride, | 49 size_t num_histograms, |
126 size_t num_histograms, | 50 HistogramType* histograms) { |
127 HistogramType* histograms) { | |
128 size_t iters = | 51 size_t iters = |
129 kIterMulForRefining * length / stride + kMinItersForRefining; | 52 kIterMulForRefining * length / stride + kMinItersForRefining; |
130 unsigned int seed = 7; | 53 unsigned int seed = 7; |
| 54 size_t iter; |
131 iters = ((iters + num_histograms - 1) / num_histograms) * num_histograms; | 55 iters = ((iters + num_histograms - 1) / num_histograms) * num_histograms; |
132 for (size_t iter = 0; iter < iters; ++iter) { | 56 for (iter = 0; iter < iters; ++iter) { |
133 HistogramType sample; | 57 HistogramType sample; |
134 RandomSample(&seed, data, length, stride, &sample); | 58 FN(HistogramClear)(&sample); |
135 size_t ix = iter % num_histograms; | 59 FN(RandomSample)(&seed, data, length, stride, &sample); |
136 histograms[ix].AddHistogram(sample); | 60 FN(HistogramAddHistogram)(&histograms[iter % num_histograms], &sample); |
137 } | 61 } |
138 } | 62 } |
139 | 63 |
140 inline static double BitCost(size_t count) { | 64 /* Assigns a block id from the range [0, num_histograms) to each data element |
141 return count == 0 ? -2.0 : FastLog2(count); | 65 in data[0..length) and fills in block_id[0..length) with the assigned values. |
142 } | 66 Returns the number of blocks, i.e. one plus the number of block switches. */ |
143 | 67 static size_t FN(FindBlocks)(const DataType* data, const size_t length, |
144 // Assigns a block id from the range [0, vec.size()) to each data element | 68 const double block_switch_bitcost, |
145 // in data[0..length) and fills in block_id[0..length) with the assigned values. | 69 const size_t num_histograms, |
146 // Returns the number of blocks, i.e. one plus the number of block switches. | 70 const HistogramType* histograms, |
147 template<typename DataType, int kSize> | 71 double* insert_cost, |
148 size_t FindBlocks(const DataType* data, const size_t length, | 72 double* cost, |
149 const double block_switch_bitcost, | 73 uint8_t* switch_signal, |
150 const size_t num_histograms, | 74 uint8_t *block_id) { |
151 const Histogram<kSize>* histograms, | 75 const size_t data_size = FN(HistogramDataSize)(); |
152 double* insert_cost, | 76 const size_t bitmaplen = (num_histograms + 7) >> 3; |
153 double* cost, | 77 size_t num_blocks = 1; |
154 uint8_t* switch_signal, | 78 size_t i; |
155 uint8_t *block_id) { | 79 size_t j; |
| 80 assert(num_histograms <= 256); |
156 if (num_histograms <= 1) { | 81 if (num_histograms <= 1) { |
157 for (size_t i = 0; i < length; ++i) { | 82 for (i = 0; i < length; ++i) { |
158 block_id[i] = 0; | 83 block_id[i] = 0; |
159 } | 84 } |
160 return 1; | 85 return 1; |
161 } | 86 } |
162 const size_t bitmaplen = (num_histograms + 7) >> 3; | 87 memset(insert_cost, 0, sizeof(insert_cost[0]) * data_size * num_histograms); |
163 assert(num_histograms <= 256); | 88 for (i = 0; i < num_histograms; ++i) { |
164 memset(insert_cost, 0, sizeof(insert_cost[0]) * kSize * num_histograms); | 89 insert_cost[i] = FastLog2((uint32_t)histograms[i].total_count_); |
165 for (size_t j = 0; j < num_histograms; ++j) { | 90 } |
166 insert_cost[j] = FastLog2(static_cast<uint32_t>( | 91 for (i = data_size; i != 0;) { |
167 histograms[j].total_count_)); | |
168 } | |
169 for (size_t i = kSize; i != 0;) { | |
170 --i; | 92 --i; |
171 for (size_t j = 0; j < num_histograms; ++j) { | 93 for (j = 0; j < num_histograms; ++j) { |
172 insert_cost[i * num_histograms + j] = | 94 insert_cost[i * num_histograms + j] = |
173 insert_cost[j] - BitCost(histograms[j].data_[i]); | 95 insert_cost[j] - BitCost(histograms[j].data_[i]); |
174 } | 96 } |
175 } | 97 } |
176 memset(cost, 0, sizeof(cost[0]) * num_histograms); | 98 memset(cost, 0, sizeof(cost[0]) * num_histograms); |
177 memset(switch_signal, 0, sizeof(switch_signal[0]) * length * bitmaplen); | 99 memset(switch_signal, 0, sizeof(switch_signal[0]) * length * bitmaplen); |
178 // After each iteration of this loop, cost[k] will contain the difference | 100 /* After each iteration of this loop, cost[k] will contain the difference |
179 // between the minimum cost of arriving at the current byte position using | 101 between the minimum cost of arriving at the current byte position using |
180 // entropy code k, and the minimum cost of arriving at the current byte | 102 entropy code k, and the minimum cost of arriving at the current byte |
181 // position. This difference is capped at the block switch cost, and if it | 103 position. This difference is capped at the block switch cost, and if it |
182 // reaches block switch cost, it means that when we trace back from the last | 104 reaches block switch cost, it means that when we trace back from the last |
183 // position, we need to switch here. | 105 position, we need to switch here. */ |
184 for (size_t byte_ix = 0; byte_ix < length; ++byte_ix) { | 106 for (i = 0; i < length; ++i) { |
| 107 const size_t byte_ix = i; |
185 size_t ix = byte_ix * bitmaplen; | 108 size_t ix = byte_ix * bitmaplen; |
186 size_t insert_cost_ix = data[byte_ix] * num_histograms; | 109 size_t insert_cost_ix = data[byte_ix] * num_histograms; |
187 double min_cost = 1e99; | 110 double min_cost = 1e99; |
188 for (size_t k = 0; k < num_histograms; ++k) { | 111 double block_switch_cost = block_switch_bitcost; |
189 // We are coding the symbol in data[byte_ix] with entropy code k. | 112 size_t k; |
| 113 for (k = 0; k < num_histograms; ++k) { |
| 114 /* We are coding the symbol in data[byte_ix] with entropy code k. */ |
190 cost[k] += insert_cost[insert_cost_ix + k]; | 115 cost[k] += insert_cost[insert_cost_ix + k]; |
191 if (cost[k] < min_cost) { | 116 if (cost[k] < min_cost) { |
192 min_cost = cost[k]; | 117 min_cost = cost[k]; |
193 block_id[byte_ix] = static_cast<uint8_t>(k); | 118 block_id[byte_ix] = (uint8_t)k; |
194 } | 119 } |
195 } | 120 } |
196 double block_switch_cost = block_switch_bitcost; | 121 /* More blocks for the beginning. */ |
197 // More blocks for the beginning. | |
198 if (byte_ix < 2000) { | 122 if (byte_ix < 2000) { |
199 block_switch_cost *= 0.77 + 0.07 * static_cast<double>(byte_ix) / 2000; | 123 block_switch_cost *= 0.77 + 0.07 * (double)byte_ix / 2000; |
200 } | 124 } |
201 for (size_t k = 0; k < num_histograms; ++k) { | 125 for (k = 0; k < num_histograms; ++k) { |
202 cost[k] -= min_cost; | 126 cost[k] -= min_cost; |
203 if (cost[k] >= block_switch_cost) { | 127 if (cost[k] >= block_switch_cost) { |
| 128 const uint8_t mask = (uint8_t)(1u << (k & 7)); |
204 cost[k] = block_switch_cost; | 129 cost[k] = block_switch_cost; |
205 const uint8_t mask = static_cast<uint8_t>(1u << (k & 7)); | |
206 assert((k >> 3) < bitmaplen); | 130 assert((k >> 3) < bitmaplen); |
207 switch_signal[ix + (k >> 3)] |= mask; | 131 switch_signal[ix + (k >> 3)] |= mask; |
208 } | 132 } |
209 } | 133 } |
210 } | 134 } |
211 // Now trace back from the last position and switch at the marked places. | 135 { /* Trace back from the last position and switch at the marked places. */ |
212 size_t byte_ix = length - 1; | 136 size_t byte_ix = length - 1; |
213 size_t ix = byte_ix * bitmaplen; | 137 size_t ix = byte_ix * bitmaplen; |
214 uint8_t cur_id = block_id[byte_ix]; | 138 uint8_t cur_id = block_id[byte_ix]; |
215 size_t num_blocks = 1; | 139 while (byte_ix > 0) { |
216 while (byte_ix > 0) { | 140 const uint8_t mask = (uint8_t)(1u << (cur_id & 7)); |
217 --byte_ix; | 141 assert(((size_t)cur_id >> 3) < bitmaplen); |
218 ix -= bitmaplen; | 142 --byte_ix; |
219 const uint8_t mask = static_cast<uint8_t>(1u << (cur_id & 7)); | 143 ix -= bitmaplen; |
220 assert((static_cast<size_t>(cur_id) >> 3) < bitmaplen); | 144 if (switch_signal[ix + (cur_id >> 3)] & mask) { |
221 if (switch_signal[ix + (cur_id >> 3)] & mask) { | 145 if (cur_id != block_id[byte_ix]) { |
222 if (cur_id != block_id[byte_ix]) { | 146 cur_id = block_id[byte_ix]; |
223 cur_id = block_id[byte_ix]; | 147 ++num_blocks; |
224 ++num_blocks; | 148 } |
225 } | 149 } |
226 } | 150 block_id[byte_ix] = cur_id; |
227 block_id[byte_ix] = cur_id; | 151 } |
228 } | 152 } |
229 return num_blocks; | 153 return num_blocks; |
230 } | 154 } |
231 | 155 |
232 static size_t RemapBlockIds(uint8_t* block_ids, const size_t length, | 156 static size_t FN(RemapBlockIds)(uint8_t* block_ids, const size_t length, |
233 uint16_t* new_id, const size_t num_histograms) { | 157 uint16_t* new_id, const size_t num_histograms) { |
234 static const uint16_t kInvalidId = 256; | 158 static const uint16_t kInvalidId = 256; |
235 for (size_t i = 0; i < num_histograms; ++i) { | 159 uint16_t next_id = 0; |
| 160 size_t i; |
| 161 for (i = 0; i < num_histograms; ++i) { |
236 new_id[i] = kInvalidId; | 162 new_id[i] = kInvalidId; |
237 } | 163 } |
238 uint16_t next_id = 0; | 164 for (i = 0; i < length; ++i) { |
239 for (size_t i = 0; i < length; ++i) { | |
240 assert(block_ids[i] < num_histograms); | 165 assert(block_ids[i] < num_histograms); |
241 if (new_id[block_ids[i]] == kInvalidId) { | 166 if (new_id[block_ids[i]] == kInvalidId) { |
242 new_id[block_ids[i]] = next_id++; | 167 new_id[block_ids[i]] = next_id++; |
243 } | 168 } |
244 } | 169 } |
245 for (size_t i = 0; i < length; ++i) { | 170 for (i = 0; i < length; ++i) { |
246 block_ids[i] = static_cast<uint8_t>(new_id[block_ids[i]]); | 171 block_ids[i] = (uint8_t)new_id[block_ids[i]]; |
247 assert(block_ids[i] < num_histograms); | 172 assert(block_ids[i] < num_histograms); |
248 } | 173 } |
249 assert(next_id <= num_histograms); | 174 assert(next_id <= num_histograms); |
250 return next_id; | 175 return next_id; |
251 } | 176 } |
252 | 177 |
253 template<typename HistogramType, typename DataType> | 178 static void FN(BuildBlockHistograms)(const DataType* data, const size_t length, |
254 void BuildBlockHistograms(const DataType* data, const size_t length, | 179 const uint8_t* block_ids, |
255 const uint8_t* block_ids, | 180 const size_t num_histograms, |
256 const size_t num_histograms, | 181 HistogramType* histograms) { |
257 HistogramType* histograms) { | 182 size_t i; |
258 for (size_t i = 0; i < num_histograms; ++i) { | 183 FN(ClearHistograms)(histograms, num_histograms); |
259 histograms[i].Clear(); | 184 for (i = 0; i < length; ++i) { |
260 } | 185 FN(HistogramAdd)(&histograms[block_ids[i]], data[i]); |
261 for (size_t i = 0; i < length; ++i) { | 186 } |
262 histograms[block_ids[i]].Add(data[i]); | 187 } |
263 } | 188 |
264 } | 189 static void FN(ClusterBlocks)(MemoryManager* m, |
265 | 190 const DataType* data, const size_t length, |
266 template<typename HistogramType, typename DataType> | 191 const size_t num_blocks, |
267 void ClusterBlocks(const DataType* data, const size_t length, | 192 uint8_t* block_ids, |
268 const size_t num_blocks, | 193 BlockSplit* split) { |
269 uint8_t* block_ids, | 194 uint32_t* histogram_symbols = BROTLI_ALLOC(m, uint32_t, num_blocks); |
270 BlockSplit* split) { | 195 uint32_t* block_lengths = BROTLI_ALLOC(m, uint32_t, num_blocks); |
271 static const size_t kMaxNumberOfBlockTypes = 256; | 196 const size_t expected_num_clusters = CLUSTERS_PER_BATCH * |
272 static const size_t kHistogramsPerBatch = 64; | 197 (num_blocks + HISTOGRAMS_PER_BATCH - 1) / HISTOGRAMS_PER_BATCH; |
273 static const size_t kClustersPerBatch = 16; | 198 size_t all_histograms_size = 0; |
274 std::vector<uint32_t> histogram_symbols(num_blocks); | 199 size_t all_histograms_capacity = expected_num_clusters; |
275 std::vector<uint32_t> block_lengths(num_blocks); | 200 HistogramType* all_histograms = |
276 | 201 BROTLI_ALLOC(m, HistogramType, all_histograms_capacity); |
277 size_t block_idx = 0; | 202 size_t cluster_size_size = 0; |
278 for (size_t i = 0; i < length; ++i) { | 203 size_t cluster_size_capacity = expected_num_clusters; |
279 assert(block_idx < num_blocks); | 204 uint32_t* cluster_size = BROTLI_ALLOC(m, uint32_t, cluster_size_capacity); |
280 ++block_lengths[block_idx]; | |
281 if (i + 1 == length || block_ids[i] != block_ids[i + 1]) { | |
282 ++block_idx; | |
283 } | |
284 } | |
285 assert(block_idx == num_blocks); | |
286 | |
287 const size_t expected_num_clusters = | |
288 kClustersPerBatch * | |
289 (num_blocks + kHistogramsPerBatch - 1) / kHistogramsPerBatch; | |
290 std::vector<HistogramType> all_histograms; | |
291 std::vector<uint32_t> cluster_size; | |
292 all_histograms.reserve(expected_num_clusters); | |
293 cluster_size.reserve(expected_num_clusters); | |
294 size_t num_clusters = 0; | 205 size_t num_clusters = 0; |
295 std::vector<HistogramType> histograms( | 206 HistogramType* histograms = BROTLI_ALLOC(m, HistogramType, |
296 std::min(num_blocks, kHistogramsPerBatch)); | 207 BROTLI_MIN(size_t, num_blocks, HISTOGRAMS_PER_BATCH)); |
297 size_t max_num_pairs = kHistogramsPerBatch * kHistogramsPerBatch / 2; | 208 size_t max_num_pairs = |
298 std::vector<HistogramPair> pairs(max_num_pairs + 1); | 209 HISTOGRAMS_PER_BATCH * HISTOGRAMS_PER_BATCH / 2; |
| 210 size_t pairs_capacity = max_num_pairs + 1; |
| 211 HistogramPair* pairs = BROTLI_ALLOC(m, HistogramPair, pairs_capacity); |
299 size_t pos = 0; | 212 size_t pos = 0; |
300 for (size_t i = 0; i < num_blocks; i += kHistogramsPerBatch) { | 213 uint32_t* clusters; |
301 const size_t num_to_combine = std::min(num_blocks - i, kHistogramsPerBatch); | 214 size_t num_final_clusters; |
302 uint32_t sizes[kHistogramsPerBatch]; | 215 static const uint32_t kInvalidIndex = BROTLI_UINT32_MAX; |
303 uint32_t clusters[kHistogramsPerBatch]; | 216 uint32_t* new_index; |
304 uint32_t symbols[kHistogramsPerBatch]; | 217 uint8_t max_type = 0; |
305 uint32_t remap[kHistogramsPerBatch]; | 218 size_t i; |
306 for (size_t j = 0; j < num_to_combine; ++j) { | 219 uint32_t sizes[HISTOGRAMS_PER_BATCH] = { 0 }; |
307 histograms[j].Clear(); | 220 uint32_t new_clusters[HISTOGRAMS_PER_BATCH] = { 0 }; |
308 for (size_t k = 0; k < block_lengths[i + j]; ++k) { | 221 uint32_t symbols[HISTOGRAMS_PER_BATCH] = { 0 }; |
309 histograms[j].Add(data[pos++]); | 222 uint32_t remap[HISTOGRAMS_PER_BATCH] = { 0 }; |
310 } | 223 |
311 histograms[j].bit_cost_ = PopulationCost(histograms[j]); | 224 if (BROTLI_IS_OOM(m)) return; |
312 symbols[j] = clusters[j] = static_cast<uint32_t>(j); | 225 |
| 226 memset(block_lengths, 0, num_blocks * sizeof(uint32_t)); |
| 227 |
| 228 { |
| 229 size_t block_idx = 0; |
| 230 for (i = 0; i < length; ++i) { |
| 231 assert(block_idx < num_blocks); |
| 232 ++block_lengths[block_idx]; |
| 233 if (i + 1 == length || block_ids[i] != block_ids[i + 1]) { |
| 234 ++block_idx; |
| 235 } |
| 236 } |
| 237 assert(block_idx == num_blocks); |
| 238 } |
| 239 |
| 240 for (i = 0; i < num_blocks; i += HISTOGRAMS_PER_BATCH) { |
| 241 const size_t num_to_combine = |
| 242 BROTLI_MIN(size_t, num_blocks - i, HISTOGRAMS_PER_BATCH); |
| 243 size_t num_new_clusters; |
| 244 size_t j; |
| 245 for (j = 0; j < num_to_combine; ++j) { |
| 246 size_t k; |
| 247 FN(HistogramClear)(&histograms[j]); |
| 248 for (k = 0; k < block_lengths[i + j]; ++k) { |
| 249 FN(HistogramAdd)(&histograms[j], data[pos++]); |
| 250 } |
| 251 histograms[j].bit_cost_ = FN(BrotliPopulationCost)(&histograms[j]); |
| 252 new_clusters[j] = (uint32_t)j; |
| 253 symbols[j] = (uint32_t)j; |
313 sizes[j] = 1; | 254 sizes[j] = 1; |
314 } | 255 } |
315 size_t num_new_clusters = HistogramCombine( | 256 num_new_clusters = FN(BrotliHistogramCombine)( |
316 &histograms[0], sizes, symbols, clusters, &pairs[0], num_to_combine, | 257 histograms, sizes, symbols, new_clusters, pairs, num_to_combine, |
317 num_to_combine, kHistogramsPerBatch, max_num_pairs); | 258 num_to_combine, HISTOGRAMS_PER_BATCH, max_num_pairs); |
318 for (size_t j = 0; j < num_new_clusters; ++j) { | 259 BROTLI_ENSURE_CAPACITY(m, HistogramType, all_histograms, |
319 all_histograms.push_back(histograms[clusters[j]]); | 260 all_histograms_capacity, all_histograms_size + num_new_clusters); |
320 cluster_size.push_back(sizes[clusters[j]]); | 261 BROTLI_ENSURE_CAPACITY(m, uint32_t, cluster_size, |
321 remap[clusters[j]] = static_cast<uint32_t>(j); | 262 cluster_size_capacity, cluster_size_size + num_new_clusters); |
322 } | 263 if (BROTLI_IS_OOM(m)) return; |
323 for (size_t j = 0; j < num_to_combine; ++j) { | 264 for (j = 0; j < num_new_clusters; ++j) { |
324 histogram_symbols[i + j] = | 265 all_histograms[all_histograms_size++] = histograms[new_clusters[j]]; |
325 static_cast<uint32_t>(num_clusters) + remap[symbols[j]]; | 266 cluster_size[cluster_size_size++] = sizes[new_clusters[j]]; |
| 267 remap[new_clusters[j]] = (uint32_t)j; |
| 268 } |
| 269 for (j = 0; j < num_to_combine; ++j) { |
| 270 histogram_symbols[i + j] = (uint32_t)num_clusters + remap[symbols[j]]; |
326 } | 271 } |
327 num_clusters += num_new_clusters; | 272 num_clusters += num_new_clusters; |
328 assert(num_clusters == cluster_size.size()); | 273 assert(num_clusters == cluster_size_size); |
329 assert(num_clusters == all_histograms.size()); | 274 assert(num_clusters == all_histograms_size); |
330 } | 275 } |
| 276 BROTLI_FREE(m, histograms); |
331 | 277 |
332 max_num_pairs = | 278 max_num_pairs = |
333 std::min(64 * num_clusters, (num_clusters / 2) * num_clusters); | 279 BROTLI_MIN(size_t, 64 * num_clusters, (num_clusters / 2) * num_clusters); |
334 pairs.resize(max_num_pairs + 1); | 280 if (pairs_capacity < max_num_pairs + 1) { |
335 | 281 BROTLI_FREE(m, pairs); |
336 std::vector<uint32_t> clusters(num_clusters); | 282 pairs = BROTLI_ALLOC(m, HistogramPair, max_num_pairs + 1); |
337 for (size_t i = 0; i < num_clusters; ++i) { | 283 if (BROTLI_IS_OOM(m)) return; |
338 clusters[i] = static_cast<uint32_t>(i); | 284 } |
339 } | 285 |
340 size_t num_final_clusters = | 286 clusters = BROTLI_ALLOC(m, uint32_t, num_clusters); |
341 HistogramCombine(&all_histograms[0], &cluster_size[0], | 287 if (BROTLI_IS_OOM(m)) return; |
342 &histogram_symbols[0], | 288 for (i = 0; i < num_clusters; ++i) { |
343 &clusters[0], &pairs[0], num_clusters, | 289 clusters[i] = (uint32_t)i; |
344 num_blocks, kMaxNumberOfBlockTypes, max_num_pairs); | 290 } |
345 | 291 num_final_clusters = FN(BrotliHistogramCombine)( |
346 static const uint32_t kInvalidIndex = std::numeric_limits<uint32_t>::max(); | 292 all_histograms, cluster_size, histogram_symbols, clusters, pairs, |
347 std::vector<uint32_t> new_index(num_clusters, kInvalidIndex); | 293 num_clusters, num_blocks, BROTLI_MAX_NUMBER_OF_BLOCK_TYPES, |
348 uint32_t next_index = 0; | 294 max_num_pairs); |
| 295 BROTLI_FREE(m, pairs); |
| 296 BROTLI_FREE(m, cluster_size); |
| 297 |
| 298 new_index = BROTLI_ALLOC(m, uint32_t, num_clusters); |
| 299 if (BROTLI_IS_OOM(m)) return; |
| 300 for (i = 0; i < num_clusters; ++i) new_index[i] = kInvalidIndex; |
349 pos = 0; | 301 pos = 0; |
350 for (size_t i = 0; i < num_blocks; ++i) { | 302 { |
351 HistogramType histo; | 303 uint32_t next_index = 0; |
352 for (size_t j = 0; j < block_lengths[i]; ++j) { | 304 for (i = 0; i < num_blocks; ++i) { |
353 histo.Add(data[pos++]); | 305 HistogramType histo; |
354 } | 306 size_t j; |
355 uint32_t best_out = | 307 uint32_t best_out; |
356 i == 0 ? histogram_symbols[0] : histogram_symbols[i - 1]; | 308 double best_bits; |
357 double best_bits = HistogramBitCostDistance( | 309 FN(HistogramClear)(&histo); |
358 histo, all_histograms[best_out]); | 310 for (j = 0; j < block_lengths[i]; ++j) { |
359 for (size_t j = 0; j < num_final_clusters; ++j) { | 311 FN(HistogramAdd)(&histo, data[pos++]); |
360 const double cur_bits = HistogramBitCostDistance( | 312 } |
361 histo, all_histograms[clusters[j]]); | 313 best_out = (i == 0) ? histogram_symbols[0] : histogram_symbols[i - 1]; |
362 if (cur_bits < best_bits) { | 314 best_bits = |
363 best_bits = cur_bits; | 315 FN(BrotliHistogramBitCostDistance)(&histo, &all_histograms[best_out]); |
364 best_out = clusters[j]; | 316 for (j = 0; j < num_final_clusters; ++j) { |
365 } | 317 const double cur_bits = FN(BrotliHistogramBitCostDistance)( |
366 } | 318 &histo, &all_histograms[clusters[j]]); |
367 histogram_symbols[i] = best_out; | 319 if (cur_bits < best_bits) { |
368 if (new_index[best_out] == kInvalidIndex) { | 320 best_bits = cur_bits; |
369 new_index[best_out] = next_index++; | 321 best_out = clusters[j]; |
370 } | 322 } |
371 } | 323 } |
372 uint8_t max_type = 0; | 324 histogram_symbols[i] = best_out; |
373 uint32_t cur_length = 0; | 325 if (new_index[best_out] == kInvalidIndex) { |
374 block_idx = 0; | 326 new_index[best_out] = next_index++; |
375 split->types.resize(num_blocks); | 327 } |
376 split->lengths.resize(num_blocks); | 328 } |
377 for (size_t i = 0; i < num_blocks; ++i) { | 329 } |
378 cur_length += block_lengths[i]; | 330 BROTLI_FREE(m, clusters); |
379 if (i + 1 == num_blocks || | 331 BROTLI_FREE(m, all_histograms); |
380 histogram_symbols[i] != histogram_symbols[i + 1]) { | 332 BROTLI_ENSURE_CAPACITY( |
381 const uint8_t id = static_cast<uint8_t>(new_index[histogram_symbols[i]]); | 333 m, uint8_t, split->types, split->types_alloc_size, num_blocks); |
382 split->types[block_idx] = id; | 334 BROTLI_ENSURE_CAPACITY( |
383 split->lengths[block_idx] = cur_length; | 335 m, uint32_t, split->lengths, split->lengths_alloc_size, num_blocks); |
384 max_type = std::max(max_type, id); | 336 if (BROTLI_IS_OOM(m)) return; |
385 cur_length = 0; | 337 { |
386 ++block_idx; | 338 uint32_t cur_length = 0; |
387 } | 339 size_t block_idx = 0; |
388 } | 340 for (i = 0; i < num_blocks; ++i) { |
389 split->types.resize(block_idx); | 341 cur_length += block_lengths[i]; |
390 split->lengths.resize(block_idx); | 342 if (i + 1 == num_blocks || |
391 split->num_types = static_cast<size_t>(max_type) + 1; | 343 histogram_symbols[i] != histogram_symbols[i + 1]) { |
392 } | 344 const uint8_t id = (uint8_t)new_index[histogram_symbols[i]]; |
393 | 345 split->types[block_idx] = id; |
394 template<int kSize, typename DataType> | 346 split->lengths[block_idx] = cur_length; |
395 void SplitByteVector(const std::vector<DataType>& data, | 347 max_type = BROTLI_MAX(uint8_t, max_type, id); |
396 const size_t literals_per_histogram, | 348 cur_length = 0; |
397 const size_t max_histograms, | 349 ++block_idx; |
398 const size_t sampling_stride_length, | 350 } |
399 const double block_switch_cost, | 351 } |
400 BlockSplit* split) { | 352 split->num_blocks = block_idx; |
401 if (data.empty()) { | 353 split->num_types = (size_t)max_type + 1; |
| 354 } |
| 355 BROTLI_FREE(m, new_index); |
| 356 BROTLI_FREE(m, block_lengths); |
| 357 BROTLI_FREE(m, histogram_symbols); |
| 358 } |
| 359 |
| 360 static void FN(SplitByteVector)(MemoryManager* m, |
| 361 const DataType* data, const size_t length, |
| 362 const size_t literals_per_histogram, |
| 363 const size_t max_histograms, |
| 364 const size_t sampling_stride_length, |
| 365 const double block_switch_cost, |
| 366 const BrotliEncoderParams* params, |
| 367 BlockSplit* split) { |
| 368 const size_t data_size = FN(HistogramDataSize)(); |
| 369 size_t num_histograms = length / literals_per_histogram + 1; |
| 370 HistogramType* histograms; |
| 371 if (num_histograms > max_histograms) { |
| 372 num_histograms = max_histograms; |
| 373 } |
| 374 if (length == 0) { |
402 split->num_types = 1; | 375 split->num_types = 1; |
403 return; | 376 return; |
404 } else if (data.size() < kMinLengthForBlockSplitting) { | 377 } else if (length < kMinLengthForBlockSplitting) { |
| 378 BROTLI_ENSURE_CAPACITY(m, uint8_t, |
| 379 split->types, split->types_alloc_size, split->num_blocks + 1); |
| 380 BROTLI_ENSURE_CAPACITY(m, uint32_t, |
| 381 split->lengths, split->lengths_alloc_size, split->num_blocks + 1); |
| 382 if (BROTLI_IS_OOM(m)) return; |
405 split->num_types = 1; | 383 split->num_types = 1; |
406 split->types.push_back(0); | 384 split->types[split->num_blocks] = 0; |
407 split->lengths.push_back(static_cast<uint32_t>(data.size())); | 385 split->lengths[split->num_blocks] = (uint32_t)length; |
| 386 split->num_blocks++; |
408 return; | 387 return; |
409 } | 388 } |
410 size_t num_histograms = data.size() / literals_per_histogram + 1; | 389 histograms = BROTLI_ALLOC(m, HistogramType, num_histograms); |
411 if (num_histograms > max_histograms) { | 390 if (BROTLI_IS_OOM(m)) return; |
412 num_histograms = max_histograms; | 391 /* Find good entropy codes. */ |
413 } | 392 FN(InitialEntropyCodes)(data, length, |
414 Histogram<kSize>* histograms = new Histogram<kSize>[num_histograms]; | 393 sampling_stride_length, |
415 // Find good entropy codes. | 394 num_histograms, histograms); |
416 InitialEntropyCodes(&data[0], data.size(), | 395 FN(RefineEntropyCodes)(data, length, |
417 sampling_stride_length, | 396 sampling_stride_length, |
418 num_histograms, histograms); | |
419 RefineEntropyCodes(&data[0], data.size(), | |
420 sampling_stride_length, | |
421 num_histograms, histograms); | |
422 // Find a good path through literals with the good entropy codes. | |
423 std::vector<uint8_t> block_ids(data.size()); | |
424 size_t num_blocks; | |
425 const size_t bitmaplen = (num_histograms + 7) >> 3; | |
426 double* insert_cost = new double[kSize * num_histograms]; | |
427 double *cost = new double[num_histograms]; | |
428 uint8_t* switch_signal = new uint8_t[data.size() * bitmaplen]; | |
429 uint16_t* new_id = new uint16_t[num_histograms]; | |
430 for (size_t i = 0; i < 10; ++i) { | |
431 num_blocks = FindBlocks(&data[0], data.size(), | |
432 block_switch_cost, | |
433 num_histograms, histograms, | |
434 insert_cost, cost, switch_signal, | |
435 &block_ids[0]); | |
436 num_histograms = RemapBlockIds(&block_ids[0], data.size(), | |
437 new_id, num_histograms); | |
438 BuildBlockHistograms(&data[0], data.size(), &block_ids[0], | |
439 num_histograms, histograms); | 397 num_histograms, histograms); |
440 } | |
441 delete[] insert_cost; | |
442 delete[] cost; | |
443 delete[] switch_signal; | |
444 delete[] new_id; | |
445 delete[] histograms; | |
446 ClusterBlocks<Histogram<kSize> >(&data[0], data.size(), num_blocks, | |
447 &block_ids[0], split); | |
448 } | |
449 | |
450 void SplitBlock(const Command* cmds, | |
451 const size_t num_commands, | |
452 const uint8_t* data, | |
453 const size_t pos, | |
454 const size_t mask, | |
455 BlockSplit* literal_split, | |
456 BlockSplit* insert_and_copy_split, | |
457 BlockSplit* dist_split) { | |
458 { | 398 { |
459 // Create a continuous array of literals. | 399 /* Find a good path through literals with the good entropy codes. */ |
460 std::vector<uint8_t> literals; | 400 uint8_t* block_ids = BROTLI_ALLOC(m, uint8_t, length); |
461 CopyLiteralsToByteArray(cmds, num_commands, data, pos, mask, &literals); | 401 size_t num_blocks = 0; |
462 // Create the block split on the array of literals. | 402 const size_t bitmaplen = (num_histograms + 7) >> 3; |
463 // Literal histograms have alphabet size 256. | 403 double* insert_cost = BROTLI_ALLOC(m, double, data_size * num_histograms); |
464 SplitByteVector<256>( | 404 double* cost = BROTLI_ALLOC(m, double, num_histograms); |
465 literals, | 405 uint8_t* switch_signal = BROTLI_ALLOC(m, uint8_t, length * bitmaplen); |
466 kSymbolsPerLiteralHistogram, kMaxLiteralHistograms, | 406 uint16_t* new_id = BROTLI_ALLOC(m, uint16_t, num_histograms); |
467 kLiteralStrideLength, kLiteralBlockSwitchCost, | 407 const size_t iters = params->quality < HQ_ZOPFLIFICATION_QUALITY ? 3 : 10; |
468 literal_split); | 408 size_t i; |
469 } | 409 if (BROTLI_IS_OOM(m)) return; |
470 | 410 for (i = 0; i < iters; ++i) { |
471 { | 411 num_blocks = FN(FindBlocks)(data, length, |
472 // Compute prefix codes for commands. | 412 block_switch_cost, |
473 std::vector<uint16_t> insert_and_copy_codes(num_commands); | 413 num_histograms, histograms, |
474 for (size_t i = 0; i < num_commands; ++i) { | 414 insert_cost, cost, switch_signal, |
475 insert_and_copy_codes[i] = cmds[i].cmd_prefix_; | 415 block_ids); |
476 } | 416 num_histograms = FN(RemapBlockIds)(block_ids, length, |
477 // Create the block split on the array of command prefixes. | 417 new_id, num_histograms); |
478 SplitByteVector<kNumCommandPrefixes>( | 418 FN(BuildBlockHistograms)(data, length, block_ids, |
479 insert_and_copy_codes, | 419 num_histograms, histograms); |
480 kSymbolsPerCommandHistogram, kMaxCommandHistograms, | 420 } |
481 kCommandStrideLength, kCommandBlockSwitchCost, | 421 BROTLI_FREE(m, insert_cost); |
482 insert_and_copy_split); | 422 BROTLI_FREE(m, cost); |
483 } | 423 BROTLI_FREE(m, switch_signal); |
484 | 424 BROTLI_FREE(m, new_id); |
485 { | 425 BROTLI_FREE(m, histograms); |
486 // Create a continuous array of distance prefixes. | 426 FN(ClusterBlocks)(m, data, length, num_blocks, block_ids, split); |
487 std::vector<uint16_t> distance_prefixes(num_commands); | 427 if (BROTLI_IS_OOM(m)) return; |
488 size_t pos = 0; | 428 BROTLI_FREE(m, block_ids); |
489 for (size_t i = 0; i < num_commands; ++i) { | 429 } |
490 const Command& cmd = cmds[i]; | 430 } |
491 if (cmd.copy_len() && cmd.cmd_prefix_ >= 128) { | 431 |
492 distance_prefixes[pos++] = cmd.dist_prefix_; | 432 #undef HistogramType |
493 } | |
494 } | |
495 distance_prefixes.resize(pos); | |
496 // Create the block split on the array of distance prefixes. | |
497 SplitByteVector<kNumDistancePrefixes>( | |
498 distance_prefixes, | |
499 kSymbolsPerDistanceHistogram, kMaxCommandHistograms, | |
500 kCommandStrideLength, kDistanceBlockSwitchCost, | |
501 dist_split); | |
502 } | |
503 } | |
504 | |
505 } // namespace brotli | |
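This diff tracks the move of the encoder block splitter from a C++ template (`SplitByteVector<kSize, DataType>` in the OLD column) to a C "template" header (the NEW column): the compiler's template machinery is replaced by the `FN` and `DataType` macros named in the header comment, and `std::vector` allocations become explicit `MemoryManager` calls with `BROTLI_IS_OOM` checks after every allocation. The NEW file is meant to be textually included once per symbol type by a host translation unit. Below is a minimal sketch of such a host file; the file names, include set, and resulting `SplitByteVector*` entry points are assumptions based on the `FN` include-and-`#undef` pattern used in the brotli repository, not a verbatim copy, while the batch sizes match the OLD column's `kHistogramsPerBatch = 64` and `kClustersPerBatch = 16`.

/* Sketch of a host block_splitter.c; names and includes are assumptions
   following the FN macro pattern, not the exact upstream file. The host
   file would also keep the k* tuning constants from the OLD column
   (kIterMulForRefining, kMinItersForRefining, kMinLengthForBlockSplitting,
   the stride lengths, and the block switch costs). */
#include "./block_splitter.h"

#include <string.h>  /* memcpy, memset */

#include "./bit_cost.h"   /* FN(BrotliPopulationCost) */
#include "./cluster.h"    /* FN(BrotliHistogramCombine), HistogramPair */
#include "./fast_log.h"   /* FastLog2 */
#include "./histogram.h"  /* FN(Histogram), FN(HistogramAdd), ... */
#include "./memory.h"     /* MemoryManager, BROTLI_ALLOC, BROTLI_FREE */

#define HISTOGRAMS_PER_BATCH 64
#define CLUSTERS_PER_BATCH 16

/* Instantiate the literal splitter: FN(X) expands to X##Literal, so this
   include defines SplitByteVectorLiteral() over uint8_t literals. */
#define FN(X) X ## Literal
#define DataType uint8_t
#include "./block_splitter_inc.h"  /* NOLINT(build/include) */
#undef DataType
#undef FN

/* Command splitter over 16-bit insert-and-copy prefix codes. */
#define FN(X) X ## Command
#define DataType uint16_t
#include "./block_splitter_inc.h"  /* NOLINT(build/include) */
#undef FN

/* Distance splitter reuses DataType == uint16_t. */
#define FN(X) X ## Distance
#include "./block_splitter_inc.h"  /* NOLINT(build/include) */
#undef DataType
#undef FN

Each inclusion expands `FN(SplitByteVector)` and its static helpers into a distinct set of functions, which is why the NEW column can drop the `template<...>` declarations and call free functions such as `FN(HistogramAdd)` and `FN(BrotliHistogramCombine)` instead of member functions on `Histogram<kSize>`, and why the header both defines and `#undef`s `HistogramType` so it can be included repeatedly.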