Chromium Code Reviews

Unified Diff: third_party/WebKit/Source/wtf/FlatTable.h

Issue 2396533004: Introduce a FlatMap and FlatSet into WTF (Closed)
Patch Set: Add missing ostream override Created 4 years, 2 months ago
Index: third_party/WebKit/Source/wtf/FlatTable.h
diff --git a/third_party/WebKit/Source/wtf/FlatTable.h b/third_party/WebKit/Source/wtf/FlatTable.h
new file mode 100644
index 0000000000000000000000000000000000000000..985d4aaebb39cb306ecbba2e94eb5d7ed0235a75
--- /dev/null
+++ b/third_party/WebKit/Source/wtf/FlatTable.h
@@ -0,0 +1,432 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef WTF_FlatTable_h
+#define WTF_FlatTable_h
+
+#include "base/logging.h"
+#include <vector>
+
+namespace WTF {
+
+// Iterator customization.
+template <typename Delegate>
+struct ForwardIteratorDelegate : public Delegate {
+ using FlatTableDelegate = Delegate;
+ static constexpr bool is_const = false;
+ static inline size_t next(size_t index) { return index + 1; }
+ static inline size_t prev(size_t index) { return index - 1; }
+};
+
+template <typename Delegate>
+struct ConstForwardIteratorDelegate : public Delegate {
+ using FlatTableDelegate = Delegate;
+ static constexpr bool is_const = true;
+ static inline size_t next(size_t index) { return index + 1; }
+ static inline size_t prev(size_t index) { return index - 1; }
+};
+
+template <typename Delegate>
+struct ReverseIteratorDelegate : public Delegate {
+ using FlatTableDelegate = Delegate;
+ static constexpr bool is_const = false;
+ static inline size_t next(size_t index) { return index - 1; }
+ static inline size_t prev(size_t index) { return index + 1; }
+};
+
+template <typename Delegate>
+struct ConstReverseIteratorDelegate : public Delegate {
+ using FlatTableDelegate = Delegate;
+ static constexpr bool is_const = true;
+ static inline size_t next(size_t index) { return index - 1; }
+ static inline size_t prev(size_t index) { return index + 1; }
+};
+
+// FlatTable contains code common to FlatMap and FlatSet. It typically
+// shouldn't be used directly.
+//
+// FlatTable is an associative container where the elements are held in a ring
+// buffer. This means the worst case for insert / erase moves O(n/2) elements,
+// unless the ring buffer needs to grow (infrequent), in which case it's O(n).
+template <typename Delegate>
+class FlatTable {
+ public:
+ FlatTable() : ring_buffer_(4), front_(0), back_(0), search_step_size_(1) {
+ ComputeModulusMask();
+ }
+
+ using key_type = typename Delegate::key_type;
+ using value_type = typename Delegate::value_type;
+
+ bool empty() const { return front_ == back_; }
+
+ void clear() {
+ ring_buffer_.clear();
+ ring_buffer_.resize(4);
+ front_ = 0;
+ back_ = 0;
+ search_step_size_ = 1;
+ ComputeModulusMask();
+ }
+
+ size_t size() const { return RingDistance(front_, back_); }
+
+ template <typename IteratorDelegate>
+ class FlatTableIterator {
+ public:
+ using ValueType =
+ typename std::conditional<IteratorDelegate::is_const,
+ const typename IteratorDelegate::value_type,
+ typename IteratorDelegate::value_type>::type;
+ using FlatTableDelegate = typename IteratorDelegate::FlatTableDelegate;
+ using FlatTableType =
+ typename std::conditional<IteratorDelegate::is_const,
+ const FlatTable<FlatTableDelegate>,
+ FlatTable<FlatTableDelegate>>::type;
+
+ FlatTableIterator& operator++() {
+ pos_ = IteratorDelegate::next(pos_) & flatTable_->modulus_mask_;
+ return *this;
+ }
+
+ FlatTableIterator& operator--() {
+ pos_ = IteratorDelegate::prev(pos_) & flatTable_->modulus_mask_;
+ return *this;
+ }
+
+ FlatTableIterator operator++(int /*unused*/) {
+ FlatTableIterator result(*this);
+ pos_ = IteratorDelegate::next(pos_) & flatTable_->modulus_mask_;
+ return result;
+ }
+
+ FlatTableIterator operator--(int /*unused*/) {
+ FlatTableIterator result(*this);
+ pos_ = IteratorDelegate::prev(pos_) & flatTable_->modulus_mask_;
+ return result;
+ }
+
+ ValueType* operator->() const {
+ DCHECK(flatTable_->ValidIndex(pos_)) << "pos_ = " << pos_;
+#ifndef NDEBUG
+ DCHECK_EQ(iterator_generation_, flatTable_->iterator_generation_);
+#endif
+ return &flatTable_->ring_buffer_[pos_];
+ }
+
+ ValueType& operator*() const {
+ DCHECK(flatTable_->ValidIndex(pos_)) << "pos_ = " << pos_;
+#ifndef NDEBUG
+ DCHECK_EQ(iterator_generation_, flatTable_->iterator_generation_);
+#endif
+ return flatTable_->ring_buffer_[pos_];
+ }
+
+ template <typename OtherT>
+ bool operator==(const OtherT& other) const {
+ return flatTable_ == other.flatTable_ && pos_ == other.pos_;
+ }
+
+ template <typename OtherT>
+ bool operator!=(const OtherT& other) const {
+ return flatTable_ != other.flatTable_ || pos_ != other.pos_;
+ }
+
+ private:
+ friend class FlatTable<FlatTableDelegate>;
+
+ FlatTableIterator(size_t pos, FlatTableType* flatTable)
+ : pos_(pos), flatTable_(flatTable) {
+ DCHECK(flatTable_);
+#ifndef NDEBUG
+ iterator_generation_ = flatTable_->iterator_generation_;
+#endif
+ }
+
+ size_t pos_;
+ FlatTableType* flatTable_;
+
+#ifndef NDEBUG
+ size_t iterator_generation_;
+#endif
+ };
+
+ using iterator = FlatTableIterator<ForwardIteratorDelegate<Delegate>>;
+ using const_iterator =
+ FlatTableIterator<ConstForwardIteratorDelegate<Delegate>>;
+ using reverse_iterator = FlatTableIterator<ReverseIteratorDelegate<Delegate>>;
+ using const_reverse_iterator =
+ FlatTableIterator<ConstReverseIteratorDelegate<Delegate>>;
+
+ // Inserts |value| into the map, invalidating any iterators.
+ // Returns a std::pair containing an iterator pointing to the inserted |value|
+ // and a boolean which is true if a new entry was inserted or false if an
+ // existing entry was overwritten.
+ // O(log n + n / 2)
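+ //
+ // For example, with a map-like Delegate (illustrative):
+ //   auto result = map.insert(std::make_pair(key, new_value));
+ //   if (!result.second) {
+ //     // |key| was already present; its old value was overwritten.
+ //   }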
+ std::pair<iterator, bool> insert(value_type value) {
+ if (empty()) {
+ ring_buffer_[back_] = std::move(value);
+ back_ = RingNext(back_);
+ search_step_size_ = 1;
+#ifndef NDEBUG
+ InvalidateIterators();
+#endif
+ return std::make_pair(iterator(front_, this), true);
+ }
+
+ size_t index;
+ if (BinarySearch(Delegate::ToKey(value), &index)) {
+ // Replace existing value.
+ ring_buffer_[index] = std::move(value);
+ return std::make_pair(iterator(index, this), false);
+ }
+
+ return std::make_pair(iterator(InsertUniqueImpl(std::move(value)), this),
+ true);
+ }
+
+ // Inserts |value|, whose key must not already be present, into the map,
+ // invalidating any iterators. O(n / 2)
+ void insertUnique(value_type value) {
+ if (empty()) {
+ ring_buffer_[back_] = std::move(value);
+ back_ = RingNext(back_);
+ search_step_size_ = 1;
+
+#ifndef NDEBUG
+ InvalidateIterators();
+#endif
+ return;
+ }
+
+ InsertUniqueImpl(std::move(value));
+ }
+
+ // Erases an entry corresponding to |key|, if any, from the map.
+ // Invalidates any iterators. O(log n + n / 2)
+ size_t erase(const key_type& key) {
+ if (empty())
+ return 0;
+
+ size_t keyIndex;
+ if (!BinarySearch(key, &keyIndex))
+ return 0;
+
+ erase(iterator(keyIndex, this));
+ return 1u;
+ }
+
+ // Erases |it| from the map, invalidating any iterators. O(n / 2)
+ template <typename Iterator>
+ void erase(const Iterator& it) {
+ DCHECK_EQ(it.flatTable_, this);
+#ifndef NDEBUG
+ DCHECK_EQ(iterator_generation_, it.iterator_generation_);
+ key_type key = Delegate::ToKey(ring_buffer_[it.pos_]);
+#endif
+ if (it.pos_ == back_)
+ return;
+
+ DCHECK(ValidIndex(it.pos_)) << "it.pos_ = " << it.pos_;
+ size_t eraseFrontCost = RingDistance(front_, it.pos_);
+ size_t eraseBackCost = RingDistance(it.pos_, back_);
+
+ if (eraseFrontCost >= eraseBackCost) {
+ EraseByMovingBack(it.pos_);
+ } else {
+ EraseByMovingFront(it.pos_);
+ }
+
+ if (search_step_size_ > 1 && size() <= search_step_size_)
+ search_step_size_ /= 2;
+
+#ifndef NDEBUG
+ DCHECK(find(key) == end());
+ InvalidateIterators();
+#endif
+ }
+
+ // O(log n)
+ iterator find(const key_type& key) {
+ size_t keyIndex;
+ if (!BinarySearch(key, &keyIndex))
+ return end();
+
+ return iterator(keyIndex, this);
+ }
+
+ // O(log n)
+ const_iterator find(const key_type& key) const {
+ size_t keyIndex;
+ if (!BinarySearch(key, &keyIndex))
+ return end();
+
+ return const_iterator(keyIndex, this);
+ }
+
+ iterator begin() { return iterator(front_, this); }
+ iterator end() { return iterator(back_, this); }
+ const_iterator begin() const { return const_iterator(front_, this); }
+ const_iterator end() const { return const_iterator(back_, this); }
+
+ reverse_iterator rbegin() { return reverse_iterator(RingPrev(back_), this); }
+ const_reverse_iterator rbegin() const {
+ return const_reverse_iterator(RingPrev(back_), this);
+ }
+ reverse_iterator rend() { return reverse_iterator(RingPrev(front_), this); }
+ const_reverse_iterator rend() const {
+ return const_reverse_iterator(RingPrev(front_), this);
+ }
+
+ protected:
+ template <typename Iterator>
+ size_t IteratorPosition(const Iterator& it) const {
+ DCHECK_EQ(this, it.flatTable_);
+#ifndef NDEBUG
+ DCHECK_EQ(iterator_generation_, it.iterator_generation_);
+#endif
+ return it.pos_;
+ }
+
+ size_t InsertUniqueImpl(value_type value) {
+ // Grow the vector if needed.
+ if (RingNext(back_) == front_)
+ Grow();
+
+ // Shuffle elements to make way for |value|.
+ size_t mid = (front_ + (size() / 2)) & modulus_mask_;
+ DCHECK(ValidIndex(mid)) << "mid = " << mid;
+ size_t i;
+ if (value < ring_buffer_[mid]) {
+ front_ = RingPrev(front_);
+ for (i = front_; ring_buffer_[RingNext(i)] < value; i = RingNext(i)) {
+ ring_buffer_[i] = std::move(ring_buffer_[RingNext(i)]);
+ }
+ } else {
+ DCHECK_LT(Delegate::ToKey(ring_buffer_[mid]), Delegate::ToKey(value));
+ for (i = back_; value < ring_buffer_[RingPrev(i)]; i = RingPrev(i)) {
+ ring_buffer_[i] = std::move(ring_buffer_[RingPrev(i)]);
+ }
+ back_ = RingNext(back_);
+ }
+
+ DCHECK(i == front_ || i == RingPrev(back_) || ring_buffer_[i] != value)
+ << "expected a unique key " << Delegate::ToKey(value);
+ ring_buffer_[i] = std::move(value);
+
+ if ((size() / 2) >= search_step_size_)
+ search_step_size_ *= 2;
+
+#ifndef NDEBUG
+ InvalidateIterators();
+#endif
+ return i;
+ }
+
+ void Grow() {
+ std::vector<value_type> new_buffer(ring_buffer_.size() * 2);
+ size_t old_back = back_;
+ back_ = 0;
+ for (size_t i = front_; i != old_back; i = RingNext(i)) {
+ new_buffer[back_++] = std::move(ring_buffer_[i]);
+ }
+ front_ = 0;
+ std::swap(ring_buffer_, new_buffer);
+ ComputeModulusMask();
+ }
+
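+ // Stepped binary search over the unwrapped index range
+ // [front_, front_ + size()). Starting with |search_step_size_| (roughly
+ // the largest power of two not exceeding size()) and halving each
+ // iteration, |low| is advanced past every probe whose key is <= |key|.
+ // Sets |index| to the final candidate (masked back into the ring) and
+ // returns true iff the element there has the sought key.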
+ bool BinarySearch(const key_type& key, size_t* index) const {
+ size_t low = front_;
+ for (size_t step_size = search_step_size_; step_size; step_size /= 2) {
+ size_t mid = low + step_size;
+ if (mid >= ContiguousBack())
+ continue;
+ if (!(key < Delegate::ToKey(ring_buffer_[mid & modulus_mask_]))) {
+ low = mid;
+ }
+ }
+ low &= modulus_mask_;
+ *index = low;
+ return key == Delegate::ToKey(ring_buffer_[low]);
+ }
+
+ void EraseByMovingFront(size_t index_to_erase) {
+ DCHECK(ValidIndex(index_to_erase)) << "index_to_erase = " << index_to_erase;
+ if (index_to_erase == front_) {
+ ring_buffer_[index_to_erase] = value_type();
+ }
+ for (size_t i = index_to_erase; i != front_; i = RingPrev(i)) {
+ ring_buffer_[i] = std::move(ring_buffer_[RingPrev(i)]);
+ }
+ front_ = RingNext(front_);
+ }
+
+ void EraseByMovingBack(size_t index_to_erase) {
+ DCHECK(ValidIndex(index_to_erase)) << "index_to_erase = " << index_to_erase;
+ size_t old_back = back_;
+ back_ = RingPrev(back_);
+ if (index_to_erase == back_) {
+ ring_buffer_[index_to_erase] = value_type();
+ }
+
+ for (size_t i = index_to_erase; i != old_back; i = RingNext(i)) {
+ ring_buffer_[i] = std::move(ring_buffer_[RingNext(i)]);
+ }
+ }
+
+ size_t RingPrev(size_t index) const { return (index - 1) & modulus_mask_; }
+
+ size_t RingNext(size_t index) const { return (index + 1) & modulus_mask_; }
+
+ // Returns true if |index| is within the active section of the ring buffer.
+ bool ValidIndex(size_t index) const {
+ if (back_ > front_)
+ return index >= front_ && index < back_;
+ return index < back_ || (index >= front_ && index < ring_buffer_.size());
+ }
+
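+ // With a power-of-two buffer size, |index % size| equals
+ // |index & (size - 1)|; e.g. a size of 8 yields the mask 0b111.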
+ void ComputeModulusMask() {
+ DCHECK_EQ(0u, (ring_buffer_.size() & (ring_buffer_.size() - 1)))
+ << "ring_buffer_.size() must be a power of two.";
+
+ modulus_mask_ = ring_buffer_.size() - 1;
+ }
+
+ // |back_| in unwrapped (non-modular) coordinates, i.e. front_ + size().
+ size_t ContiguousBack() const { return front_ + size(); }
+
+ // Computes how many positions |later_in_ring| is ahead of |earlier_in_ring|.
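+ // e.g. with modulus_mask_ == 7, RingDistance(6, 2) == (2 - 6) & 7 == 4,
+ // so wraparound is handled by the mask.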
+ size_t RingDistance(size_t earlier_in_ring, size_t later_in_ring) const {
+ return (later_in_ring - earlier_in_ring) & modulus_mask_;
+ }
+
+#ifndef NDEBUG
+ void InvalidateIterators() { iterator_generation_++; }
+#endif
+
+ // The ring buffer is always a power of two in size, because computing
+ // modulus for powers of two is super fast.
+ std::vector<value_type> ring_buffer_;
+
+ // Since |ring_buffer_| is a power of two in size, we can compute the modulus
+ // using a bitwise and with |modulus_mask_|.
+ size_t modulus_mask_;
+
+ // The index of the first element.
+ size_t front_;
+
+ // The index of the last element + 1.
+ size_t back_;
+
+ // Roughly the largest power of two not exceeding size(); used as the
+ // initial probe distance in BinarySearch.
+ size_t search_step_size_;
+
+#ifndef NDEBUG
+ // Used to check for stale iterators.
+ size_t iterator_generation_ = 0;
+#endif
+};
+
+} // namespace WTF
+
+#endif // WTF_FlatTable_h
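
A minimal sketch (not part of the patch) of how FlatTable can be exercised through a hand-written delegate. The IntSetDelegate and ExerciseFlatTable names below are illustrative assumptions; the FlatMap and FlatSet wrappers introduced in this issue are the intended public entry points.

#include "third_party/WebKit/Source/wtf/FlatTable.h"

namespace {

// Hypothetical delegate describing a sorted set of ints.
struct IntSetDelegate {
  using key_type = int;
  using value_type = int;
  static int ToKey(int value) { return value; }
};

}  // namespace

void ExerciseFlatTable() {
  WTF::FlatTable<IntSetDelegate> table;

  // insert() reports whether a new entry was created; inserting a
  // duplicate key overwrites in place and reports false.
  DCHECK(table.insert(3).second);
  DCHECK(table.insert(1).second);
  DCHECK(!table.insert(3).second);

  // Elements are kept sorted, so the smallest key is at begin().
  DCHECK_EQ(1, *table.begin());

  // erase() returns the number of entries removed (0 or 1).
  DCHECK_EQ(1u, table.erase(1));
  DCHECK(table.find(1) == table.end());
}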