Chromium Code Reviews

Side by Side Diff: third_party/WebKit/Source/wtf/FlatTable.h

Issue 2396533004: Introduce a FlatMap and FlatSet into WTF (Closed)
Patch Set: Add missing ostream override. Created 4 years, 2 months ago.
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef WTF_FlatTable_h
#define WTF_FlatTable_h

#include <type_traits>
#include <utility>
#include <vector>

#include "base/logging.h"

namespace WTF {

// Iterator customization.
template <typename Delegate>
struct ForwardIteratorDelegate : public Delegate {
  using FlatTableDelegate = Delegate;
  static constexpr bool is_const = false;
  static inline size_t next(size_t index) { return index + 1; }
  static inline size_t prev(size_t index) { return index - 1; }
};

template <typename Delegate>
struct ConstForwardIteratorDelegate : public Delegate {
  using FlatTableDelegate = Delegate;
  static constexpr bool is_const = true;
  static inline size_t next(size_t index) { return index + 1; }
  static inline size_t prev(size_t index) { return index - 1; }
};

template <typename Delegate>
struct ReverseIteratorDelegate : public Delegate {
  using FlatTableDelegate = Delegate;
  static constexpr bool is_const = false;
  static inline size_t next(size_t index) { return index - 1; }
  static inline size_t prev(size_t index) { return index + 1; }
};

template <typename Delegate>
struct ConstReverseIteratorDelegate : public Delegate {
  using FlatTableDelegate = Delegate;
  static constexpr bool is_const = true;
  static inline size_t next(size_t index) { return index - 1; }
  static inline size_t prev(size_t index) { return index + 1; }
};

// A FlatTable contains common code for FlatMap and FlatSet. Typically it
// shouldn't be used directly.
//
// FlatTable is an associative container whose elements are held in a ring
// buffer. This means the worst case for insert / erase moves O(n/2) elements,
// unless the ring buffer needs to grow (infrequent), in which case it's O(n).
template <typename Delegate>
class FlatTable {
 public:
  FlatTable() : ring_buffer_(4), front_(0), back_(0), search_step_size_(1) {
    ComputeModulusMask();
  }

  using key_type = typename Delegate::key_type;
  using value_type = typename Delegate::value_type;

  bool empty() const { return front_ == back_; }

  void clear() {
    ring_buffer_.clear();
    ring_buffer_.resize(4);
    front_ = 0;
    back_ = 0;
    search_step_size_ = 1;
    ComputeModulusMask();
  }

  size_t size() const { return RingDistance(front_, back_); }

  template <typename IteratorDelegate>
  class FlatTableIterator {
   public:
    using ValueType =
        typename std::conditional<IteratorDelegate::is_const,
                                  const typename IteratorDelegate::value_type,
                                  typename IteratorDelegate::value_type>::type;
    using FlatTableDelegate = typename IteratorDelegate::FlatTableDelegate;
    using FlatTableType =
        typename std::conditional<IteratorDelegate::is_const,
                                  const FlatTable<FlatTableDelegate>,
                                  FlatTable<FlatTableDelegate>>::type;

    FlatTableIterator& operator++() {
      pos_ = IteratorDelegate::next(pos_) & flatTable_->modulus_mask_;
      return *this;
    }

    FlatTableIterator& operator--() {
      pos_ = IteratorDelegate::prev(pos_) & flatTable_->modulus_mask_;
      return *this;
    }

    FlatTableIterator operator++(int /*unused*/) {
      FlatTableIterator result(*this);
      pos_ = IteratorDelegate::next(pos_) & flatTable_->modulus_mask_;
      return result;
    }

    FlatTableIterator operator--(int /*unused*/) {
      FlatTableIterator result(*this);
      pos_ = IteratorDelegate::prev(pos_) & flatTable_->modulus_mask_;
      return result;
    }

    ValueType* operator->() const {
      DCHECK(flatTable_->ValidIndex(pos_)) << "pos_ = " << pos_;
#ifndef NDEBUG
      DCHECK_EQ(iterator_generation_, flatTable_->iterator_generation_);
#endif
      return &flatTable_->ring_buffer_[pos_];
    }

    ValueType& operator*() const {
      DCHECK(flatTable_->ValidIndex(pos_)) << "pos_ = " << pos_;
#ifndef NDEBUG
      DCHECK_EQ(iterator_generation_, flatTable_->iterator_generation_);
#endif
      return flatTable_->ring_buffer_[pos_];
    }

    template <typename OtherT>
    bool operator==(const OtherT& other) const {
      return flatTable_ == other.flatTable_ && pos_ == other.pos_;
    }

    template <typename OtherT>
    bool operator!=(const OtherT& other) const {
      return flatTable_ != other.flatTable_ || pos_ != other.pos_;
    }

   private:
    friend class FlatTable<FlatTableDelegate>;

    FlatTableIterator(size_t pos, FlatTableType* flatTable)
        : pos_(pos), flatTable_(flatTable) {
      DCHECK(flatTable_);
#ifndef NDEBUG
      iterator_generation_ = flatTable_->iterator_generation_;
#endif
    }

    size_t pos_;
    FlatTableType* flatTable_;

#ifndef NDEBUG
    size_t iterator_generation_;
#endif
  };

  using iterator = FlatTableIterator<ForwardIteratorDelegate<Delegate>>;
  using const_iterator =
      FlatTableIterator<ConstForwardIteratorDelegate<Delegate>>;
  using reverse_iterator = FlatTableIterator<ReverseIteratorDelegate<Delegate>>;
  using const_reverse_iterator =
      FlatTableIterator<ConstReverseIteratorDelegate<Delegate>>;

  // Inserts |value| into the map, invalidating any iterators.
  // Returns a std::pair containing an iterator pointing to the inserted
  // |value| and a boolean which is true if a new entry was inserted, or false
  // if an existing entry was overwritten.
  // O(log n + n / 2)
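  //
  // For illustration (a hypothetical sketch; |MakeEntry| is not part of WTF):
  //   table.insert(MakeEntry(1, "a"));  // -> {it, true}: new entry.
  //   table.insert(MakeEntry(1, "b"));  // -> {it, false}: "a" overwritten.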
  std::pair<iterator, bool> insert(value_type value) {
    if (empty()) {
      ring_buffer_[back_] = std::move(value);
      back_ = RingNext(back_);
      search_step_size_ = 1;
#ifndef NDEBUG
      InvalidateIterators();
#endif
      return std::make_pair(iterator(front_, this), true);
    }

    size_t index;
    if (BinarySearch(Delegate::ToKey(value), &index)) {
      // Replace existing value.
      ring_buffer_[index] = std::move(value);
      return std::make_pair(iterator(index, this), false);
    }

    return std::make_pair(iterator(InsertUniqueImpl(std::move(value)), this),
                          true);
  }

  // Inserts |value| into the map, whose key must be unique, invalidating any
  // iterators. O(n / 2)
  void insertUnique(value_type value) {
    if (empty()) {
      ring_buffer_[back_] = std::move(value);
      back_ = RingNext(back_);
      search_step_size_ = 1;

#ifndef NDEBUG
      InvalidateIterators();
#endif
      return;
    }
    InsertUniqueImpl(std::move(value));
  }

  // Erases an entry corresponding to |key|, if any, from the map.
  // Invalidates any iterators. O(log n + n / 2)
  size_t erase(const key_type& key) {
    if (empty())
      return 0;

    size_t keyIndex;
    if (!BinarySearch(key, &keyIndex))
      return 0;

    erase(iterator(keyIndex, this));
    return 1u;
  }

  // Erases |it| from the map, invalidating any iterators. O(n / 2)
  template <typename Iterator>
  void erase(const Iterator& it) {
    DCHECK_EQ(it.flatTable_, this);
#ifndef NDEBUG
    DCHECK_EQ(iterator_generation_, it.iterator_generation_);
    key_type key = Delegate::ToKey(ring_buffer_[it.pos_]);
#endif
    if (it.pos_ == back_)
      return;

    DCHECK(ValidIndex(it.pos_)) << "it.pos_ = " << it.pos_;
    size_t eraseFrontCost = RingDistance(front_, it.pos_);
    size_t eraseBackCost = RingDistance(it.pos_, back_);

    if (eraseFrontCost >= eraseBackCost) {
      EraseByMovingBack(it.pos_);
    } else {
      EraseByMovingFront(it.pos_);
    }

    if (search_step_size_ > 1 && size() <= search_step_size_)
      search_step_size_ /= 2;

#ifndef NDEBUG
    DCHECK(find(key) == end());
    InvalidateIterators();
#endif
  }

  // O(log n)
  iterator find(const key_type& key) {
    size_t keyIndex;
    if (!BinarySearch(key, &keyIndex))
      return end();

    return iterator(keyIndex, this);
  }

  // O(log n)
  const_iterator find(const key_type& key) const {
    size_t keyIndex;
    if (!BinarySearch(key, &keyIndex))
      return end();

    return const_iterator(keyIndex, this);
  }

  iterator begin() { return iterator(front_, this); }
  iterator end() { return iterator(back_, this); }
  const_iterator begin() const { return const_iterator(front_, this); }
  const_iterator end() const { return const_iterator(back_, this); }

  reverse_iterator rbegin() { return reverse_iterator(RingPrev(back_), this); }
  const_reverse_iterator rbegin() const {
    return const_reverse_iterator(RingPrev(back_), this);
  }
  reverse_iterator rend() { return reverse_iterator(RingPrev(front_), this); }
  const_reverse_iterator rend() const {
    return const_reverse_iterator(RingPrev(front_), this);
  }

 protected:
  template <typename Iterator>
  size_t IteratorPosition(const Iterator& it) const {
    DCHECK_EQ(this, it.flatTable_);
#ifndef NDEBUG
    DCHECK_EQ(iterator_generation_, it.iterator_generation_);
#endif
    return it.pos_;
  }

  size_t InsertUniqueImpl(value_type value) {
    // Grow the vector if needed.
    if (RingNext(back_) == front_)
      Grow();

    // Shuffle elements to make way for |value|.
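    // We insert into whichever half of the ring needs fewer elements moved:
    // values below the midpoint shift the front half backwards by one slot,
    // otherwise the back half shifts forwards by one slot.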
    size_t mid = (front_ + (size() / 2)) & modulus_mask_;
    DCHECK(ValidIndex(mid)) << "mid = " << mid;
    size_t i;
    if (value < ring_buffer_[mid]) {
      front_ = RingPrev(front_);
      for (i = front_; ring_buffer_[RingNext(i)] < value; i = RingNext(i)) {
        ring_buffer_[i] = std::move(ring_buffer_[RingNext(i)]);
      }
    } else {
      DCHECK_LT(Delegate::ToKey(ring_buffer_[mid]), Delegate::ToKey(value));
      for (i = back_; value < ring_buffer_[RingPrev(i)]; i = RingPrev(i)) {
        ring_buffer_[i] = std::move(ring_buffer_[RingPrev(i)]);
      }
      back_ = RingNext(back_);
    }

    DCHECK(i == front_ || i == RingPrev(back_) || ring_buffer_[i] != value)
        << "expected a unique key " << Delegate::ToKey(value);
    ring_buffer_[i] = std::move(value);

    if ((size() / 2) >= search_step_size_)
      search_step_size_ *= 2;

#ifndef NDEBUG
    InvalidateIterators();
#endif
    return i;
  }

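  // Doubling keeps the capacity a power of two (see ComputeModulusMask) and
  // linearizes the ring: the live elements are copied to the front of the new
  // buffer, so |front_| becomes 0.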
  void Grow() {
    std::vector<value_type> new_buffer(ring_buffer_.size() * 2);
    size_t old_back = back_;
    back_ = 0;
    for (size_t i = front_; i != old_back; i = RingNext(i)) {
      new_buffer[back_++] = std::move(ring_buffer_[i]);
    }
    front_ = 0;
    std::swap(ring_buffer_, new_buffer);
    ComputeModulusMask();
  }

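  // Performs a binary search over the sorted ring buffer using decreasing
  // power-of-two probe steps, starting at |search_step_size_| (the largest
  // power of two <= size()). E.g. with size() == 6 the probe offsets from
  // |front_| are 4, 2, 1; probes that would land past the last element are
  // skipped. Returns true if |key| was found, with its position written to
  // |*index|.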
  bool BinarySearch(const key_type& key, size_t* index) const {
    size_t low = front_;
    for (size_t step_size = search_step_size_; step_size; step_size /= 2) {
      size_t mid = low + step_size;
      if (mid >= ContiguousBack())
        continue;
      if (!(key < Delegate::ToKey(ring_buffer_[mid & modulus_mask_]))) {
        low = mid;
      }
    }
    low &= modulus_mask_;
    *index = low;
    return key == Delegate::ToKey(ring_buffer_[low]);
  }

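  // The two erase strategies below plug the hole at |index_to_erase| by
  // shifting one side of the ring over it; erase(it) picks whichever side
  // moves fewer elements.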
  void EraseByMovingFront(size_t index_to_erase) {
    DCHECK(ValidIndex(index_to_erase)) << "index_to_erase = " << index_to_erase;
    if (index_to_erase == front_) {
      ring_buffer_[index_to_erase] = value_type();
    }
    for (size_t i = index_to_erase; i != front_; i = RingPrev(i)) {
      ring_buffer_[i] = std::move(ring_buffer_[RingPrev(i)]);
    }
    front_ = RingNext(front_);
  }

  void EraseByMovingBack(size_t index_to_erase) {
    DCHECK(ValidIndex(index_to_erase)) << "index_to_erase = " << index_to_erase;
    size_t old_back = back_;
    back_ = RingPrev(back_);
    if (index_to_erase == back_) {
      ring_buffer_[index_to_erase] = value_type();
    }

    for (size_t i = index_to_erase; i != old_back; i = RingNext(i)) {
      ring_buffer_[i] = std::move(ring_buffer_[RingNext(i)]);
    }
  }

  size_t RingPrev(size_t index) const { return (index - 1) & modulus_mask_; }

  size_t RingNext(size_t index) const { return (index + 1) & modulus_mask_; }

  // Returns true if |index| is within the active section of the ring buffer.
  bool ValidIndex(size_t index) const {
    if (back_ > front_)
      return index >= front_ && index < back_;
    return index < back_ || (index >= front_ && index < ring_buffer_.size());
  }

  void ComputeModulusMask() {
    DCHECK_EQ(0u, (ring_buffer_.size() & (ring_buffer_.size() - 1)))
        << "ring_buffer_.size() must be a power of two.";

    modulus_mask_ = ring_buffer_.size() - 1;
  }

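  // The index one past the last element, without wrapping (so it may exceed
  // |modulus_mask_|).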
  size_t ContiguousBack() const { return front_ + size(); }

  // Computes how many positions |later_in_ring| is ahead of |earlier_in_ring|.
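  // The unsigned subtraction wraps correctly around the ring, e.g. with
  // modulus_mask_ == 7: RingDistance(6, 2) == (2 - 6) & 7 == 4.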
  size_t RingDistance(size_t earlier_in_ring, size_t later_in_ring) const {
    return (later_in_ring - earlier_in_ring) & modulus_mask_;
  }

#ifndef NDEBUG
  void InvalidateIterators() { iterator_generation_++; }
#endif

  // The ring buffer is always a power of two in size, because computing
  // modulus for powers of two is super fast.
  std::vector<value_type> ring_buffer_;

  // Since |ring_buffer_| is a power of two in size, we can compute the modulus
  // using a bitwise and with |modulus_mask_|.
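  // E.g. a buffer of size 8 gives modulus_mask_ == 7, and (i & 7) == (i % 8).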
  size_t modulus_mask_;

  // The index of the first element.
  size_t front_;

  // The index of the last element + 1.
  size_t back_;

  // The largest power of two that is <= size(), i.e. 2^floor(log2(size())),
  // or 1 while the table is empty. Used as the initial step by BinarySearch.
  size_t search_step_size_;

#ifndef NDEBUG
  // Used to check for stale iterators.
  size_t iterator_generation_ = 0;
#endif
};

}  // namespace WTF

#endif  // WTF_FlatTable_h