Chromium Code Reviews
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_SLOT_SET_H
#define V8_SLOT_SET_H

#include "src/base/bits.h"

namespace v8 {
namespace internal {


// Data structure for maintaining a set of slots in a standard (non-large)
Hannes Payer (out of office), 2016/01/20 19:43:00:
Why don't we also support large pages right away, …

ulan, 2016/01/28 19:07:22:
The large page takes care of that by using an arra…
// page. The base address of the page must be set with SetPageStart before any
// operation.
// The data structure assumes that the slots are pointer size aligned and
// splits the valid slot offset range into kBuckets buckets.
// Each bucket is a bitmap with a bit corresponding to a single slot offset.
class SlotSet {
Michael Lippautz, 2016/01/21 10:07:32:
Make it inherit from public Malloced, so that we g…

ulan, 2016/01/28 19:07:22:
Done.
 public:
  SlotSet() {
    for (int i = 0; i < kBuckets; i++) {
      bucket[i] = nullptr;
    }
  }


  ~SlotSet() {
    for (int i = 0; i < kBuckets; i++) {
      ReleaseBucket(i);
    }
  }


  void SetPageStart(Address page_start) { page_start_ = page_start; }
Hannes Payer (out of office), 2016/01/20 19:43:00:
If we allow arbitrary size slot sets, we would not…

ulan, 2016/01/28 19:07:22:
It is also used in the Iterate callback to convert…


  // The slot offset specifies a slot at address page_start_ + slot_offset.
  void Insert(int slot_offset) {
    int bucket_index, cell_index, bit_index;
    SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
    if (bucket[bucket_index] == nullptr) {
      bucket[bucket_index] = AllocateBucket();
    }
    bucket[bucket_index][cell_index] |= 1u << bit_index;
Michael Lippautz, 2016/01/26 09:12:24:
Maybe we can additionally have a method {InsertSyn…

ulan, 2016/01/28 19:07:22:
I will add this method in a separate CL as this CL…
  }


  // The slot offset specifies a slot at address page_start_ + slot_offset.
  void Remove(int slot_offset) {
    int bucket_index, cell_index, bit_index;
    SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
    if (bucket[bucket_index] != nullptr) {
      uint32_t cell = bucket[bucket_index][cell_index];
      if (cell) {
        uint32_t bit_mask = 1u << bit_index;
        if (cell & bit_mask) {
          bucket[bucket_index][cell_index] ^= bit_mask;
        }
      }
    }
  }


  // The slot offsets specify a range of slots at addresses:
  // [page_start_ + start_offset ... page_start_ + end_offset).
  void RemoveRange(int start_offset, int end_offset) {
Michael Lippautz, 2016/01/21 10:07:32:
Amazing function :)
    DCHECK_LE(start_offset, end_offset);
    int start_bucket, start_cell, start_bit;
    SlotToIndices(start_offset, &start_bucket, &start_cell, &start_bit);
    int end_bucket, end_cell, end_bit;
    SlotToIndices(end_offset, &end_bucket, &end_cell, &end_bit);
    uint32_t start_mask = (1u << start_bit) - 1;
    uint32_t end_mask = (1u << end_bit) - 1;
Hannes Payer (out of office), 2016/01/20 19:43:00:
~(1u << end_bit) - 1) will give you the right end_…

Michael Lippautz, 2016/01/21 10:07:32:
Probably doesn't matter too much, but you could al…

ulan, 2016/01/28 19:07:22:
The masks are used below too.

ulan, 2016/01/28 19:07:22:
Done.
    if (start_bucket == end_bucket && start_cell == end_cell) {
      MaskCell(start_bucket, start_cell, start_mask | ~end_mask);
      return;
    }
    MaskCell(start_bucket, start_cell, start_mask);
    start_cell++;
    if (bucket[start_bucket] != nullptr && start_bucket < end_bucket) {
      while (start_cell < kCellsPerBucket) {
        bucket[start_bucket][start_cell] = 0;
        start_cell++;
      }
    }
    while (start_bucket < end_bucket) {
      delete[] bucket[start_bucket];
      bucket[start_bucket] = nullptr;
      start_bucket++;
    }
    if (bucket[start_bucket] != nullptr) {
      while (start_cell < end_cell) {
        bucket[start_bucket][start_cell] = 0;
        start_cell++;
      }
    }
    MaskCell(end_bucket, end_cell, ~end_mask);
  }
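To make the mask arithmetic discussed above concrete, here is a small worked example (editor's illustration, not part of the CL) for a range that starts and ends in the same cell:

```cpp
// Assume start_bit = 3 and end_bit = 7, same bucket and same cell.
//   start_mask = (1u << 3) - 1 = 0b00000111   // bits below the range
//   end_mask   = (1u << 7) - 1 = 0b01111111   // bits below end_offset
//   start_mask | ~end_mask     keeps bits 0..2 and bits 7..31.
// MaskCell() ANDs the cell with that value, so exactly bits 3..6 are
// cleared, i.e. the slots in [start_offset, end_offset).
```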


  // The slot offset specifies a slot at address page_start_ + slot_offset.
  bool Lookup(int slot_offset) {
    int bucket_index, cell_index, bit_index;
    SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
    if (bucket[bucket_index] != nullptr) {
      uint32_t cell = bucket[bucket_index][cell_index];
      return (cell & (1u << bit_index)) != 0;
    }
    return false;
  }


  enum CallbackResult { KEEP_SLOT, REMOVE_SLOT };
Michael Lippautz, 2016/01/21 10:07:32:
nit: Move to top

ulan, 2016/01/28 19:07:22:
Done.


  // Iterate over all slots in the set and for each slot invoke the callback.
  // If the callback returns REMOVE_SLOT then the slot is removed from the set.
  //
  // Sample usage:
  // Iterate([](Address slot_address) {
  //   if (good(slot_address)) return KEEP_SLOT;
  //   else return REMOVE_SLOT;
  // });
  template <typename Callback>
  void Iterate(Callback callback) {
    for (int bucket_index = 0; bucket_index < kBuckets; bucket_index++) {
      if (bucket[bucket_index] != nullptr) {
        bool bucket_is_empty = true;
        uint32_t* current_bucket = bucket[bucket_index];
        int cell_offset = bucket_index * kBitsPerBucket;
        for (int i = 0; i < kCellsPerBucket; i++, cell_offset += kBitsPerCell) {
          if (current_bucket[i]) {
            uint32_t cell = current_bucket[i];
            uint32_t old_cell = cell;
            uint32_t new_cell = cell;
            while (cell) {
              int bit_offset = base::bits::CountTrailingZeros32(cell);
              uint32_t bit_mask = 1u << bit_offset;
              uint32_t slot = (cell_offset + bit_offset) << kPointerSizeLog2;
              if (callback(page_start_ + slot) == KEEP_SLOT) {
                bucket_is_empty = false;
              } else {
                new_cell ^= bit_mask;
              }
              cell ^= bit_mask;
            }
            if (old_cell != new_cell) {
              current_bucket[i] = new_cell;
            }
          }
        }
        if (bucket_is_empty) {
          ReleaseBucket(bucket_index);
        }
      }
    }
  }
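The inner while loop visits the set bits of a cell from lowest to highest using CountTrailingZeros32. A stand-alone sketch of the same idiom (editor's illustration; __builtin_ctz stands in for base::bits::CountTrailingZeros32 and assumes a GCC/Clang toolchain):

```cpp
#include <cstdint>
#include <cstdio>

// Prints the indices of all set bits in |cell|, lowest first.
void VisitSetBits(uint32_t cell) {
  while (cell) {
    int bit = __builtin_ctz(cell);  // index of the lowest set bit
    std::printf("bit %d is set\n", bit);
    cell ^= 1u << bit;              // clear that bit and continue
  }
}

// VisitSetBits(0x94u /* 0b10010100 */) prints bits 2, 4 and 7.
```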

 private:
  uint32_t* AllocateBucket() {
    uint32_t* result = new uint32_t[kCellsPerBucket];
Hannes Payer (out of office), 2016/01/20 19:43:00:
This is a nice way to save memory, but it may be p…

Michael Lippautz, 2016/01/21 10:07:32:
nit: If you use NewArray<uint32_t>(kCellsPerBucket…

ulan, 2016/01/28 19:07:22:
Done.

ulan, 2016/01/28 19:07:22:
Ack. Let's discuss offline.
    for (int i = 0; i < kCellsPerBucket; i++) {
      result[i] = 0;
    }
    return result;
  }


  void ReleaseBucket(int bucket_index) {
    delete[] bucket[bucket_index];
    bucket[bucket_index] = nullptr;
  }


  void MaskCell(int bucket_index, int cell_index, uint32_t mask) {
    uint32_t* cells = bucket[bucket_index];
    if (cells != nullptr && cells[cell_index] != 0) {
Michael Lippautz, 2016/01/21 10:07:32:
AFAIC this could just be DCHECKs, e.g., DCHECK_N…

ulan, 2016/01/28 19:07:22:
The cells and cells[cell_index] can be 0.
      cells[cell_index] &= mask;
    }
  }


  // Converts the slot offset into bucket/cell/bit index.
  void SlotToIndices(int slot_offset, int* bucket_index, int* cell_index,
                     int* bit_index) {
Hannes Payer (out of office), 2016/01/20 19:43:00:
Can we DCHECK that slot_offset is within the page?

ulan, 2016/01/28 19:07:22:
The equivalent DCHECK is performed below for slot.
    DCHECK(slot_offset % kPointerSize == 0);
Michael Lippautz, 2016/01/21 10:07:32:
nit: DCHECK_EQ

ulan, 2016/01/28 19:07:22:
Done.
    int slot = slot_offset >> kPointerSizeLog2;
    DCHECK(slot >= 0 && slot <= kMaxSlots);
    *bucket_index = slot >> kBitsPerBucketLog2;
    *cell_index = (slot >> kBitsPerCellLog2) & (kCellsPerBucket - 1);
    *bit_index = slot & (kBitsPerCell - 1);
  }

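As a worked example of the decomposition above (editor's note; assumes 8-byte pointers, i.e. kPointerSizeLog2 == 3 and therefore kBitsPerBucketLog2 == 11):

```cpp
// slot_offset = 0x5008
//   slot         = 0x5008 >> 3       = 2561
//   bucket_index = 2561 >> 11        = 1
//   cell_index   = (2561 >> 5) & 63  = 16
//   bit_index    = 2561 & 31         = 1
// The slot at page_start_ + 0x5008 is bit 1 of cell 16 in bucket 1.
```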
  static const int kMaxSlots = (1 << kPageSizeBits) / kPointerSize;
Michael Lippautz, 2016/01/21 10:07:32:
nit: Move right below the "private:" section.

ulan, 2016/01/28 19:07:22:
Done.
  static const int kCellsPerBucket = 64;
  static const int kCellsPerBucketLog2 = 6;
  static const int kBitsPerCell = 32;
  static const int kBitsPerCellLog2 = 5;
  static const int kBitsPerBucket = kCellsPerBucket * kBitsPerCell;
  static const int kBitsPerBucketLog2 = kCellsPerBucketLog2 + kBitsPerCellLog2;
  static const int kBuckets = kMaxSlots / kCellsPerBucket / kBitsPerCell;
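For a sense of scale (editor's note; the actual values depend on kPageSizeBits and the pointer size of the build): with 1 MB pages (kPageSizeBits == 20) and 8-byte pointers, kMaxSlots = 2^20 / 8 = 131072 and kBuckets = 131072 / 64 / 32 = 64. Each allocated bucket holds 64 cells * 4 bytes = 256 bytes, so a fully populated slot set costs at most 64 * 256 bytes = 16 KB of bitmap per page on top of the bucket pointer array.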


  uint32_t* bucket[kBuckets];
  Address page_start_;
};
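To tie the interface together, a minimal usage sketch (editor's illustration, not part of the CL; page_start, slot_address and SlotStillHasPointer are hypothetical stand-ins for whatever the caller actually has):

```cpp
bool SlotStillHasPointer(Address addr);  // hypothetical predicate for this sketch

void ExampleUsage(Address page_start, Address slot_address) {
  SlotSet slots;
  slots.SetPageStart(page_start);
  // Offsets are page-relative and must be pointer-size aligned.
  int offset = static_cast<int>(slot_address - page_start);
  slots.Insert(offset);
  DCHECK(slots.Lookup(offset));
  // Visit every recorded slot; returning REMOVE_SLOT drops it from the set.
  slots.Iterate([](Address addr) {
    return SlotStillHasPointer(addr) ? SlotSet::KEEP_SLOT : SlotSet::REMOVE_SLOT;
  });
}
```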


}  // namespace internal
}  // namespace v8

#endif  // V8_SLOT_SET_H