OLD | NEW |
(Empty) | |
| 1 /* NOLINT(build/header_guard) */ |
| 2 /* Copyright 2016 Google Inc. All Rights Reserved. |
| 3 |
| 4 Distributed under MIT license. |
| 5 See file LICENSE for detail or copy at https://opensource.org/licenses/MIT |
| 6 */ |
| 7 |
| 8 /* template parameters: FN, BUCKET_BITS, NUM_BANKS, BANK_BITS, |
| 9 NUM_LAST_DISTANCES_TO_CHECK */ |
| 10 |
| 11 /* A (forgetful) hash table to the data seen by the compressor, to |
| 12 help create backward references to previous data. |
| 13 |
| 14 Hashes are stored in chains which are bucketed to groups. Group of chains |
| 15 share a storage "bank". When more than "bank size" chain nodes are added, |
| 16 oldest nodes are replaced; this way several chains may share a tail. */ |
| 17 |
/* Expands to the concrete hasher type/name supplied by the including file
   via HASHER(). */
#define HashForgetfulChain HASHER()

/* Number of chain-node slots shared by all chains of one bank. */
#define BANK_SIZE (1 << BANK_BITS)

/* Number of hash buckets. */
#define BUCKET_SIZE (1 << BUCKET_BITS)

/* When non-zero, chain traversal stops at a saturated (zero) delta instead of
   relying solely on the max_backward check; disabled in this configuration. */
#define CAPPED_CHAINS 0
| 26 |
| 27 static BROTLI_INLINE size_t FN(HashTypeLength)(void) { return 4; } |
| 28 static BROTLI_INLINE size_t FN(StoreLookahead)(void) { return 4; } |
| 29 |
| 30 /* HashBytes is the function that chooses the bucket to place the address in.*/ |
| 31 static BROTLI_INLINE size_t FN(HashBytes)(const uint8_t *data) { |
| 32 const uint32_t h = BROTLI_UNALIGNED_LOAD32(data) * kHashMul32; |
| 33 /* The higher bits contain more mixture from the multiplication, |
| 34 so we take our results from there. */ |
| 35 return h >> (32 - BUCKET_BITS); |
| 36 } |
| 37 |
/* One chain node: link to the previous occurrence of the same bucket. */
typedef struct FN(Slot) {
  uint16_t delta;  /* Backward distance to the previous node; Store() caps it
                      at 0xFFFF (or 0 when CAPPED_CHAINS is enabled). */
  uint16_t next;   /* Index of the next node's slot within the same bank. */
} FN(Slot);
| 42 |
/* Fixed-size slot storage shared by a group of chains (see file comment). */
typedef struct FN(Bank) {
  FN(Slot) slots[BANK_SIZE];
} FN(Bank);
| 46 |
typedef struct HashForgetfulChain {
  uint32_t addr[BUCKET_SIZE];  /* Most recent position stored per bucket. */
  uint16_t head[BUCKET_SIZE];  /* Bank-slot index of each bucket's chain head. */
  /* Truncated hash used for quick rejection of "distance cache" candidates. */
  uint8_t tiny_hash[65536];
  FN(Bank) banks[NUM_BANKS];   /* Slot storage; bucket -> bank via key mod NUM_BANKS. */
  uint16_t free_slot_idx[NUM_BANKS];  /* Next slot to (re)use in each bank. */
  BROTLI_BOOL is_dirty_;       /* Set by Reset(); cleared after (re)initialization. */
  DictionarySearchStatictics dict_search_stats_;
  size_t max_hops;             /* Chain-walk budget, derived from quality in Init(). */
} HashForgetfulChain;
| 58 |
| 59 static void FN(Reset)(HashForgetfulChain* self) { |
| 60 self->is_dirty_ = BROTLI_TRUE; |
| 61 DictionarySearchStaticticsReset(&self->dict_search_stats_); |
| 62 } |
| 63 |
| 64 static void FN(InitEmpty)(HashForgetfulChain* self) { |
| 65 if (self->is_dirty_) { |
| 66 /* Fill |addr| array with 0xCCCCCCCC value. Because of wrapping, position |
| 67 processed by hasher never reaches 3GB + 64M; this makes all new chains |
| 68 to be terminated after the first node. */ |
| 69 memset(self->addr, 0xCC, sizeof(self->addr)); |
| 70 memset(self->head, 0, sizeof(self->head)); |
| 71 memset(self->tiny_hash, 0, sizeof(self->tiny_hash)); |
| 72 memset(self->free_slot_idx, 0, sizeof(self->free_slot_idx)); |
| 73 self->is_dirty_ = BROTLI_FALSE; |
| 74 } |
| 75 } |
| 76 |
| 77 static void FN(InitForData)(HashForgetfulChain* self, const uint8_t* data, |
| 78 size_t num) { |
| 79 size_t i; |
| 80 for (i = 0; i < num; ++i) { |
| 81 size_t bucket = FN(HashBytes)(&data[i]); |
| 82 /* See InitEmpty comment. */ |
| 83 self->addr[bucket] = 0xCCCCCCCC; |
| 84 self->head[bucket] = 0xCCCC; |
| 85 } |
| 86 memset(self->tiny_hash, 0, sizeof(self->tiny_hash)); |
| 87 memset(self->free_slot_idx, 0, sizeof(self->free_slot_idx)); |
| 88 if (num != 0) { |
| 89 self->is_dirty_ = BROTLI_FALSE; |
| 90 } |
| 91 } |
| 92 |
| 93 static void FN(Init)( |
| 94 MemoryManager* m, HashForgetfulChain* self, const uint8_t* data, |
| 95 const BrotliEncoderParams* params, size_t position, size_t bytes, |
| 96 BROTLI_BOOL is_last) { |
| 97 /* Choose which initialization method is faster. |
| 98 Init() is about 100 times faster than InitForData(). */ |
| 99 const size_t kMaxBytesForPartialHashInit = BUCKET_SIZE >> 6; |
| 100 BROTLI_UNUSED(m); |
| 101 self->max_hops = (params->quality > 6 ? 7u : 8u) << (params->quality - 4); |
| 102 if (position == 0 && is_last && bytes <= kMaxBytesForPartialHashInit) { |
| 103 FN(InitForData)(self, data, bytes); |
| 104 } else { |
| 105 FN(InitEmpty)(self); |
| 106 } |
| 107 } |
| 108 |
/* Look at 4 bytes at &data[ix & mask]. Compute a hash from these, and prepend
   node to corresponding chain; also update tiny_hash for current position.
   NOTE: statement order matters — |delta| and |next| must be read from the
   old chain head before |addr|/|head| are overwritten below. */
static BROTLI_INLINE void FN(Store)(HashForgetfulChain* BROTLI_RESTRICT self,
    const uint8_t* BROTLI_RESTRICT data, const size_t mask, const size_t ix) {
  const size_t key = FN(HashBytes)(&data[ix & mask]);
  /* Chains for bucket |key| share the bank |key mod NUM_BANKS|. */
  const size_t bank = key & (NUM_BANKS - 1);
  /* Claim the next slot; wrapping overwrites the oldest node, which is what
     makes the chains "forgetful". */
  const size_t idx = self->free_slot_idx[bank]++ & (BANK_SIZE - 1);
  /* Distance from |ix| back to the previous occurrence in this bucket. */
  size_t delta = ix - self->addr[key];
  self->tiny_hash[(uint16_t)ix] = (uint8_t)key;
  /* Saturate so the link fits in 16 bits; 0xFFFF-deltas end lookup via the
     max_backward check (or 0 ends it explicitly when CAPPED_CHAINS is on). */
  if (delta > 0xFFFF) delta = CAPPED_CHAINS ? 0 : 0xFFFF;
  self->banks[bank].slots[idx].delta = (uint16_t)delta;
  self->banks[bank].slots[idx].next = self->head[key];
  /* The new node becomes the chain head. */
  self->addr[key] = (uint32_t)ix;
  self->head[key] = (uint16_t)idx;
}
| 124 |
| 125 static BROTLI_INLINE void FN(StoreRange)(HashForgetfulChain* self, |
| 126 const uint8_t *data, const size_t mask, const size_t ix_start, |
| 127 const size_t ix_end) { |
| 128 size_t i; |
| 129 for (i = ix_start; i < ix_end; ++i) { |
| 130 FN(Store)(self, data, mask, i); |
| 131 } |
| 132 } |
| 133 |
| 134 static BROTLI_INLINE void FN(StitchToPreviousBlock)(HashForgetfulChain* self, |
| 135 size_t num_bytes, size_t position, const uint8_t* ringbuffer, |
| 136 size_t ring_buffer_mask) { |
| 137 if (num_bytes >= FN(HashTypeLength)() - 1 && position >= 3) { |
| 138 /* Prepare the hashes for three last bytes of the last write. |
| 139 These could not be calculated before, since they require knowledge |
| 140 of both the previous and the current block. */ |
| 141 FN(Store)(self, ringbuffer, ring_buffer_mask, position - 3); |
| 142 FN(Store)(self, ringbuffer, ring_buffer_mask, position - 2); |
| 143 FN(Store)(self, ringbuffer, ring_buffer_mask, position - 1); |
| 144 } |
| 145 } |
| 146 |
/* Find a longest backward match of &data[cur_ix] up to the length of
   max_length and stores the position cur_ix in the hash table.

   Does not look for matches longer than max_length.
   Does not look for matches further away than max_backward.
   Writes the best match into |out|.
   Returns 1 when match is found, otherwise 0. */
static BROTLI_INLINE BROTLI_BOOL FN(FindLongestMatch)(
    HashForgetfulChain* self, const uint8_t* BROTLI_RESTRICT data,
    const size_t ring_buffer_mask, const int* BROTLI_RESTRICT distance_cache,
    const size_t cur_ix, const size_t max_length, const size_t max_backward,
    HasherSearchResult* BROTLI_RESTRICT out) {
  const size_t cur_ix_masked = cur_ix & ring_buffer_mask;
  BROTLI_BOOL is_match_found = BROTLI_FALSE;
  /* Don't accept a short copy from far away. */
  score_t best_score = out->score;
  size_t best_len = out->len;
  size_t i;
  const size_t key = FN(HashBytes)(&data[cur_ix_masked]);
  const uint8_t tiny_hash = (uint8_t)(key);
  out->len = 0;
  out->len_x_code = 0;
  /* Try last distance first. */
  for (i = 0; i < NUM_LAST_DISTANCES_TO_CHECK; ++i) {
    const size_t idx = kDistanceCacheIndex[i];
    const size_t backward =
        (size_t)(distance_cache[idx] + kDistanceCacheOffset[i]);
    size_t prev_ix = (cur_ix - backward);
    /* For distance code 0 we want to consider 2-byte matches.
       For other codes, the cheap 8-bit tiny_hash filter rejects candidates
       that cannot hash-match the current position. */
    if (i > 0 && self->tiny_hash[(uint16_t)prev_ix] != tiny_hash) continue;
    /* Skip out-of-window candidates (also catches unsigned underflow of
       cur_ix - backward above). */
    if (prev_ix >= cur_ix || backward > max_backward) {
      continue;
    }
    prev_ix &= ring_buffer_mask;
    {
      const size_t len = FindMatchLengthWithLimit(&data[prev_ix],
                                                  &data[cur_ix_masked],
                                                  max_length);
      if (len >= 2) {
        score_t score = BackwardReferenceScoreUsingLastDistance(len, i);
        if (best_score < score) {
          best_score = score;
          best_len = len;
          out->len = best_len;
          out->distance = backward;
          out->score = best_score;
          is_match_found = BROTLI_TRUE;
        }
      }
    }
  }
  /* Walk the (forgetful) chain for this bucket, at most max_hops nodes. */
  {
    const size_t bank = key & (NUM_BANKS - 1);
    size_t backward = 0;
    size_t hops = self->max_hops;
    size_t delta = cur_ix - self->addr[key];
    size_t slot = self->head[key];
    while (hops--) {
      size_t prev_ix;
      size_t last = slot;
      backward += delta;
      if (backward > max_backward || (CAPPED_CHAINS && !delta)) break;
      prev_ix = (cur_ix - backward) & ring_buffer_mask;
      /* Advance |slot|/|delta| before the quick-reject test, so |continue|
         still walks to the next node. */
      slot = self->banks[bank].slots[last].next;
      delta = self->banks[bank].slots[last].delta;
      /* Quick reject: the byte just past the current best match must agree,
         and both reads must stay inside the ring buffer. */
      if (cur_ix_masked + best_len > ring_buffer_mask ||
          prev_ix + best_len > ring_buffer_mask ||
          data[cur_ix_masked + best_len] != data[prev_ix + best_len]) {
        continue;
      }
      {
        const size_t len = FindMatchLengthWithLimit(&data[prev_ix],
                                                    &data[cur_ix_masked],
                                                    max_length);
        if (len >= 4) {
          /* Comparing for >= 3 does not change the semantics, but just saves
             for a few unnecessary binary logarithms in backward reference
             score, since we are not interested in such short matches. */
          score_t score = BackwardReferenceScore(len, backward);
          if (best_score < score) {
            best_score = score;
            best_len = len;
            out->len = best_len;
            out->distance = backward;
            out->score = best_score;
            is_match_found = BROTLI_TRUE;
          }
        }
      }
    }
    /* Register the current position regardless of the search outcome. */
    FN(Store)(self, data, ring_buffer_mask, cur_ix);
  }
  /* Fall back to the static dictionary when nothing was found in the window. */
  if (!is_match_found) {
    is_match_found = SearchInStaticDictionary(&self->dict_search_stats_,
        &data[cur_ix_masked], max_length, max_backward, out, BROTLI_FALSE);
  }
  return is_match_found;
}
| 245 |
/* Clean up helper macros so this template can be instantiated again. */
#undef BANK_SIZE
#undef BUCKET_SIZE
#undef CAPPED_CHAINS

#undef HashForgetfulChain
OLD | NEW |