// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_SLOT_SET_H
#define V8_SLOT_SET_H

#include "src/allocation.h"
#include "src/base/bits.h"

namespace v8 {
namespace internal {

// Data structure for maintaining a set of slots in a standard (non-large)
// page. The base address of the page must be set with SetPageStart before any
// operation.
// The data structure assumes that the slots are pointer size aligned and
// splits the valid slot offset range into kBuckets buckets.
// Each bucket is a bitmap with a bit corresponding to a single slot offset.
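//
// A usage sketch (illustrative only; |page_start| and |slot_address| are
// assumed to come from the caller and are not part of this header):
//
//   SlotSet* slots = new SlotSet();
//   slots->SetPageStart(page_start);
//   int slot_offset = static_cast<int>(slot_address - page_start);
//   slots->Insert(slot_offset);
//   if (slots->Lookup(slot_offset)) { /* the slot is recorded */ }
//   slots->Remove(slot_offset);
//   delete slots;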
class SlotSet : public Malloced {
 public:
  enum CallbackResult { KEEP_SLOT, REMOVE_SLOT };

  SlotSet() {
    for (int i = 0; i < kBuckets; i++) {
      bucket[i] = nullptr;
    }
  }

  ~SlotSet() {
    for (int i = 0; i < kBuckets; i++) {
      ReleaseBucket(i);
    }
  }

  void SetPageStart(Address page_start) { page_start_ = page_start; }

  // The slot offset specifies a slot at address page_start_ + slot_offset.
  void Insert(int slot_offset) {
    int bucket_index, cell_index, bit_index;
    SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
    if (bucket[bucket_index] == nullptr) {
      bucket[bucket_index] = AllocateBucket();
    }
    bucket[bucket_index][cell_index] |= 1u << bit_index;
  }

  // The slot offset specifies a slot at address page_start_ + slot_offset.
  void Remove(int slot_offset) {
    int bucket_index, cell_index, bit_index;
    SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
    if (bucket[bucket_index] != nullptr) {
      uint32_t cell = bucket[bucket_index][cell_index];
      if (cell) {
        uint32_t bit_mask = 1u << bit_index;
        if (cell & bit_mask) {
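          // The bit is known to be set here, so XOR clears it.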
          bucket[bucket_index][cell_index] ^= bit_mask;
        }
      }
    }
  }

  // The slot offsets specify a range of slots at addresses:
  // [page_start_ + start_offset ... page_start_ + end_offset).
  void RemoveRange(int start_offset, int end_offset) {
    DCHECK_LE(start_offset, end_offset);
    int start_bucket, start_cell, start_bit;
    SlotToIndices(start_offset, &start_bucket, &start_cell, &start_bit);
    int end_bucket, end_cell, end_bit;
    SlotToIndices(end_offset, &end_bucket, &end_cell, &end_bit);
    uint32_t start_mask = (1u << start_bit) - 1;
    uint32_t end_mask = ~((1u << end_bit) - 1);
    if (start_bucket == end_bucket && start_cell == end_cell) {
      MaskCell(start_bucket, start_cell, start_mask | end_mask);
      return;
    }
    int current_bucket = start_bucket;
    int current_cell = start_cell;
    MaskCell(current_bucket, current_cell, start_mask);
    current_cell++;
    if (current_bucket < end_bucket) {
      // Clear the rest of the start bucket, but keep the bucket itself: it
      // may still hold slots that lie before start_offset.
      if (bucket[current_bucket] != nullptr) {
        while (current_cell < kCellsPerBucket) {
          bucket[current_bucket][current_cell] = 0;
          current_cell++;
        }
      }
      current_bucket++;
      current_cell = 0;
    }
    // Buckets strictly between the start and the end bucket are fully covered
    // by the range and can be released as a whole.
    while (current_bucket < end_bucket) {
      ReleaseBucket(current_bucket);
      current_bucket++;
    }
    if (current_bucket == kBuckets || bucket[current_bucket] == nullptr) {
      return;
    }
    // Clear the cells of the end bucket that are fully covered by the range,
    // then mask out the remaining partial cell.
    while (current_cell < end_cell) {
      bucket[current_bucket][current_cell] = 0;
      current_cell++;
    }
    MaskCell(end_bucket, end_cell, end_mask);
  }

  // The slot offset specifies a slot at address page_start_ + slot_offset.
  bool Lookup(int slot_offset) {
    int bucket_index, cell_index, bit_index;
    SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
    if (bucket[bucket_index] != nullptr) {
      uint32_t cell = bucket[bucket_index][cell_index];
      return (cell & (1u << bit_index)) != 0;
    }
    return false;
  }

  // Iterate over all slots in the set and for each slot invoke the callback.
  // If the callback returns REMOVE_SLOT then the slot is removed from the set.
  //
  // Sample usage:
  // Iterate([](Address slot_address) {
  //   if (good(slot_address)) return KEEP_SLOT;
  //   else return REMOVE_SLOT;
  // });
  template <typename Callback>
  void Iterate(Callback callback) {
    for (int bucket_index = 0; bucket_index < kBuckets; bucket_index++) {
      if (bucket[bucket_index] != nullptr) {
        bool bucket_is_empty = true;
        uint32_t* current_bucket = bucket[bucket_index];
        int cell_offset = bucket_index * kBitsPerBucket;
        for (int i = 0; i < kCellsPerBucket; i++, cell_offset += kBitsPerCell) {
          if (current_bucket[i]) {
            uint32_t cell = current_bucket[i];
            uint32_t old_cell = cell;
            uint32_t new_cell = cell;
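            // Walk the set bits from least to most significant:
            // CountTrailingZeros32 finds the next set bit, which is cleared
            // from the scratch copy |cell| once visited. Bits whose callback
            // returned REMOVE_SLOT are also cleared from |new_cell|, which is
            // written back afterwards.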
            while (cell) {
              int bit_offset = base::bits::CountTrailingZeros32(cell);
              uint32_t bit_mask = 1u << bit_offset;
              uint32_t slot = (cell_offset + bit_offset) << kPointerSizeLog2;
              if (callback(page_start_ + slot) == KEEP_SLOT) {
                bucket_is_empty = false;
              } else {
                new_cell ^= bit_mask;
              }
              cell ^= bit_mask;
            }
            if (old_cell != new_cell) {
              current_bucket[i] = new_cell;
            }
          }
        }
        if (bucket_is_empty) {
          ReleaseBucket(bucket_index);
        }
      }
    }
  }

 private:
  static const int kMaxSlots = (1 << kPageSizeBits) / kPointerSize;
  static const int kCellsPerBucket = 32;
  static const int kCellsPerBucketLog2 = 5;
  static const int kBitsPerCell = 32;
  static const int kBitsPerCellLog2 = 5;
  static const int kBitsPerBucket = kCellsPerBucket * kBitsPerCell;
  static const int kBitsPerBucketLog2 = kCellsPerBucketLog2 + kBitsPerCellLog2;
  static const int kBuckets = kMaxSlots / kCellsPerBucket / kBitsPerCell;
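  // With these values each bucket is a bitmap covering kBitsPerBucket = 1024
  // pointer-aligned slots, and kBuckets such buckets cover all kMaxSlots
  // slot offsets of a page.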

  uint32_t* AllocateBucket() {
    uint32_t* result = NewArray<uint32_t>(kCellsPerBucket);
    for (int i = 0; i < kCellsPerBucket; i++) {
      result[i] = 0;
    }
    return result;
  }

  void ReleaseBucket(int bucket_index) {
    DeleteArray<uint32_t>(bucket[bucket_index]);
    bucket[bucket_index] = nullptr;
  }

  void MaskCell(int bucket_index, int cell_index, uint32_t mask) {
    uint32_t* cells = bucket[bucket_index];
    if (cells != nullptr && cells[cell_index] != 0) {
      cells[cell_index] &= mask;
    }
  }

  // Converts the slot offset into bucket/cell/bit index.
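  // For example, assuming 8-byte pointers (kPointerSizeLog2 == 3), the offset
  // 8392 denotes slot 1049 = 1024 + 25, which maps to bucket 1, cell 0,
  // bit 25.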
  void SlotToIndices(int slot_offset, int* bucket_index, int* cell_index,
                     int* bit_index) {
    DCHECK_EQ(slot_offset % kPointerSize, 0);
    int slot = slot_offset >> kPointerSizeLog2;
    DCHECK(slot >= 0 && slot <= kMaxSlots);
    *bucket_index = slot >> kBitsPerBucketLog2;
    *cell_index = (slot >> kBitsPerCellLog2) & (kCellsPerBucket - 1);
    *bit_index = slot & (kBitsPerCell - 1);
  }

  uint32_t* bucket[kBuckets];
  Address page_start_;
};

}  // namespace internal
}  // namespace v8

#endif  // V8_SLOT_SET_H