Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(793)

Side by Side Diff: src/heap/slot-set.h

Issue 2353553003: [heap] Make slot set state and operations atomic. (Closed)
Patch Set: "comments" — Created 4 years, 3 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « src/base/atomic-utils.h ('k') | no next file » | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2016 the V8 project authors. All rights reserved. 1 // Copyright 2016 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #ifndef V8_SLOT_SET_H 5 #ifndef V8_SLOT_SET_H
6 #define V8_SLOT_SET_H 6 #define V8_SLOT_SET_H
7 7
8 #include "src/allocation.h" 8 #include "src/allocation.h"
9 #include "src/base/atomic-utils.h"
9 #include "src/base/bits.h" 10 #include "src/base/bits.h"
10 #include "src/utils.h" 11 #include "src/utils.h"
11 12
12 namespace v8 { 13 namespace v8 {
13 namespace internal { 14 namespace internal {
14 15
15 enum SlotCallbackResult { KEEP_SLOT, REMOVE_SLOT }; 16 enum SlotCallbackResult { KEEP_SLOT, REMOVE_SLOT };
16 17
17 // Data structure for maintaining a set of slots in a standard (non-large) 18 // Data structure for maintaining a set of slots in a standard (non-large)
18 // page. The base address of the page must be set with SetPageStart before any 19 // page. The base address of the page must be set with SetPageStart before any
19 // operation. 20 // operation.
20 // The data structure assumes that the slots are pointer size aligned and 21 // The data structure assumes that the slots are pointer size aligned and
21 // splits the valid slot offset range into kBuckets buckets. 22 // splits the valid slot offset range into kBuckets buckets.
22 // Each bucket is a bitmap with a bit corresponding to a single slot offset. 23 // Each bucket is a bitmap with a bit corresponding to a single slot offset.
23 class SlotSet : public Malloced { 24 class SlotSet : public Malloced {
24 public: 25 public:
25 SlotSet() { 26 SlotSet() {
26 for (int i = 0; i < kBuckets; i++) { 27 for (int i = 0; i < kBuckets; i++) {
27 bucket[i] = nullptr; 28 bucket[i].SetValue(nullptr);
28 } 29 }
29 } 30 }
30 31
31 ~SlotSet() { 32 ~SlotSet() {
32 for (int i = 0; i < kBuckets; i++) { 33 for (int i = 0; i < kBuckets; i++) {
33 ReleaseBucket(i); 34 ReleaseBucket(i);
34 } 35 }
35 } 36 }
36 37
37 void SetPageStart(Address page_start) { page_start_ = page_start; } 38 void SetPageStart(Address page_start) { page_start_ = page_start; }
38 39
39 // The slot offset specifies a slot at address page_start_ + slot_offset. 40 // The slot offset specifies a slot at address page_start_ + slot_offset.
41 // This method should only be called on the main thread because concurrent
42 // allocation of the bucket is not thread-safe.
40 void Insert(int slot_offset) { 43 void Insert(int slot_offset) {
41 int bucket_index, cell_index, bit_index; 44 int bucket_index, cell_index, bit_index;
42 SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index); 45 SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
43 if (bucket[bucket_index] == nullptr) { 46 base::AtomicValue<uint32_t>* current_bucket = bucket[bucket_index].Value();
44 bucket[bucket_index] = AllocateBucket(); 47 if (current_bucket == nullptr) {
48 current_bucket = AllocateBucket();
49 bucket[bucket_index].SetValue(current_bucket);
45 } 50 }
46 bucket[bucket_index][cell_index] |= 1u << bit_index; 51 current_bucket[cell_index].SetBit(bit_index);
47 } 52 }
48 53
49 // The slot offset specifies a slot at address page_start_ + slot_offset. 54 // The slot offset specifies a slot at address page_start_ + slot_offset.
50 void Remove(int slot_offset) { 55 void Remove(int slot_offset) {
51 int bucket_index, cell_index, bit_index; 56 int bucket_index, cell_index, bit_index;
52 SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index); 57 SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
53 if (bucket[bucket_index] != nullptr) { 58 base::AtomicValue<uint32_t>* current_bucket = bucket[bucket_index].Value();
54 uint32_t cell = bucket[bucket_index][cell_index]; 59 if (current_bucket != nullptr) {
60 uint32_t cell = current_bucket[cell_index].Value();
55 if (cell) { 61 if (cell) {
56 uint32_t bit_mask = 1u << bit_index; 62 uint32_t bit_mask = 1u << bit_index;
57 if (cell & bit_mask) { 63 if (cell & bit_mask) {
58 bucket[bucket_index][cell_index] ^= bit_mask; 64 current_bucket[cell_index].ClearBit(bit_index);
59 } 65 }
60 } 66 }
61 } 67 }
62 } 68 }
63 69
64 // The slot offsets specify a range of slots at addresses: 70 // The slot offsets specify a range of slots at addresses:
65 // [page_start_ + start_offset ... page_start_ + end_offset). 71 // [page_start_ + start_offset ... page_start_ + end_offset).
66 void RemoveRange(int start_offset, int end_offset) { 72 void RemoveRange(int start_offset, int end_offset) {
67 CHECK_LE(end_offset, 1 << kPageSizeBits); 73 CHECK_LE(end_offset, 1 << kPageSizeBits);
68 DCHECK_LE(start_offset, end_offset); 74 DCHECK_LE(start_offset, end_offset);
69 int start_bucket, start_cell, start_bit; 75 int start_bucket, start_cell, start_bit;
70 SlotToIndices(start_offset, &start_bucket, &start_cell, &start_bit); 76 SlotToIndices(start_offset, &start_bucket, &start_cell, &start_bit);
71 int end_bucket, end_cell, end_bit; 77 int end_bucket, end_cell, end_bit;
72 SlotToIndices(end_offset, &end_bucket, &end_cell, &end_bit); 78 SlotToIndices(end_offset, &end_bucket, &end_cell, &end_bit);
73 uint32_t start_mask = (1u << start_bit) - 1; 79 uint32_t start_mask = (1u << start_bit) - 1;
74 uint32_t end_mask = ~((1u << end_bit) - 1); 80 uint32_t end_mask = ~((1u << end_bit) - 1);
75 if (start_bucket == end_bucket && start_cell == end_cell) { 81 if (start_bucket == end_bucket && start_cell == end_cell) {
76 MaskCell(start_bucket, start_cell, start_mask | end_mask); 82 ClearCell(start_bucket, start_cell, ~(start_mask | end_mask));
77 return; 83 return;
78 } 84 }
79 int current_bucket = start_bucket; 85 int current_bucket = start_bucket;
80 int current_cell = start_cell; 86 int current_cell = start_cell;
81 MaskCell(current_bucket, current_cell, start_mask); 87 ClearCell(current_bucket, current_cell, ~start_mask);
82 current_cell++; 88 current_cell++;
83 if (current_bucket < end_bucket) { 89 if (current_bucket < end_bucket) {
84 if (bucket[current_bucket] != nullptr) { 90 if (bucket[current_bucket].Value() != nullptr) {
85 while (current_cell < kCellsPerBucket) { 91 while (current_cell < kCellsPerBucket) {
86 bucket[current_bucket][current_cell] = 0; 92 bucket[current_bucket].Value()[current_cell].SetValue(0);
87 current_cell++; 93 current_cell++;
88 } 94 }
89 } 95 }
90 // The rest of the current bucket is cleared. 96 // The rest of the current bucket is cleared.
91 // Move on to the next bucket. 97 // Move on to the next bucket.
92 current_bucket++; 98 current_bucket++;
93 current_cell = 0; 99 current_cell = 0;
94 } 100 }
95 DCHECK(current_bucket == end_bucket || 101 DCHECK(current_bucket == end_bucket ||
96 (current_bucket < end_bucket && current_cell == 0)); 102 (current_bucket < end_bucket && current_cell == 0));
97 while (current_bucket < end_bucket) { 103 while (current_bucket < end_bucket) {
98 ReleaseBucket(current_bucket); 104 ReleaseBucket(current_bucket);
99 current_bucket++; 105 current_bucket++;
100 } 106 }
101 // All buckets between start_bucket and end_bucket are cleared. 107 // All buckets between start_bucket and end_bucket are cleared.
102 DCHECK(current_bucket == end_bucket && current_cell <= end_cell); 108 DCHECK(current_bucket == end_bucket && current_cell <= end_cell);
103 if (current_bucket == kBuckets || bucket[current_bucket] == nullptr) { 109 if (current_bucket == kBuckets ||
110 bucket[current_bucket].Value() == nullptr) {
104 return; 111 return;
105 } 112 }
106 while (current_cell < end_cell) { 113 while (current_cell < end_cell) {
107 bucket[current_bucket][current_cell] = 0; 114 bucket[current_bucket].Value()[current_cell].SetValue(0);
108 current_cell++; 115 current_cell++;
109 } 116 }
110 // All cells between start_cell and end_cell are cleared. 117 // All cells between start_cell and end_cell are cleared.
111 DCHECK(current_bucket == end_bucket && current_cell == end_cell); 118 DCHECK(current_bucket == end_bucket && current_cell == end_cell);
112 MaskCell(end_bucket, end_cell, end_mask); 119 ClearCell(end_bucket, end_cell, ~end_mask);
113 } 120 }
114 121
115 // The slot offset specifies a slot at address page_start_ + slot_offset. 122 // The slot offset specifies a slot at address page_start_ + slot_offset.
116 bool Lookup(int slot_offset) { 123 bool Lookup(int slot_offset) {
117 int bucket_index, cell_index, bit_index; 124 int bucket_index, cell_index, bit_index;
118 SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index); 125 SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
119 if (bucket[bucket_index] != nullptr) { 126 if (bucket[bucket_index].Value() != nullptr) {
120 uint32_t cell = bucket[bucket_index][cell_index]; 127 uint32_t cell = bucket[bucket_index].Value()[cell_index].Value();
121 return (cell & (1u << bit_index)) != 0; 128 return (cell & (1u << bit_index)) != 0;
122 } 129 }
123 return false; 130 return false;
124 } 131 }
125 132
126 // Iterate over all slots in the set and for each slot invoke the callback. 133 // Iterate over all slots in the set and for each slot invoke the callback.
127 // If the callback returns REMOVE_SLOT then the slot is removed from the set. 134 // If the callback returns REMOVE_SLOT then the slot is removed from the set.
128 // Returns the new number of slots. 135 // Returns the new number of slots.
136 // This method should only be called on the main thread.
129 // 137 //
130 // Sample usage: 138 // Sample usage:
131 // Iterate([](Address slot_address) { 139 // Iterate([](Address slot_address) {
132 // if (good(slot_address)) return KEEP_SLOT; 140 // if (good(slot_address)) return KEEP_SLOT;
133 // else return REMOVE_SLOT; 141 // else return REMOVE_SLOT;
134 // }); 142 // });
135 template <typename Callback> 143 template <typename Callback>
136 int Iterate(Callback callback) { 144 int Iterate(Callback callback) {
137 int new_count = 0; 145 int new_count = 0;
138 for (int bucket_index = 0; bucket_index < kBuckets; bucket_index++) { 146 for (int bucket_index = 0; bucket_index < kBuckets; bucket_index++) {
139 if (bucket[bucket_index] != nullptr) { 147 if (bucket[bucket_index].Value() != nullptr) {
140 int in_bucket_count = 0; 148 int in_bucket_count = 0;
141 uint32_t* current_bucket = bucket[bucket_index]; 149 base::AtomicValue<uint32_t>* current_bucket =
150 bucket[bucket_index].Value();
142 int cell_offset = bucket_index * kBitsPerBucket; 151 int cell_offset = bucket_index * kBitsPerBucket;
143 for (int i = 0; i < kCellsPerBucket; i++, cell_offset += kBitsPerCell) { 152 for (int i = 0; i < kCellsPerBucket; i++, cell_offset += kBitsPerCell) {
144 if (current_bucket[i]) { 153 if (current_bucket[i].Value()) {
145 uint32_t cell = current_bucket[i]; 154 uint32_t cell = current_bucket[i].Value();
146 uint32_t old_cell = cell; 155 uint32_t old_cell = cell;
147 uint32_t new_cell = cell; 156 uint32_t new_cell = cell;
148 while (cell) { 157 while (cell) {
149 int bit_offset = base::bits::CountTrailingZeros32(cell); 158 int bit_offset = base::bits::CountTrailingZeros32(cell);
150 uint32_t bit_mask = 1u << bit_offset; 159 uint32_t bit_mask = 1u << bit_offset;
151 uint32_t slot = (cell_offset + bit_offset) << kPointerSizeLog2; 160 uint32_t slot = (cell_offset + bit_offset) << kPointerSizeLog2;
152 if (callback(page_start_ + slot) == KEEP_SLOT) { 161 if (callback(page_start_ + slot) == KEEP_SLOT) {
153 ++in_bucket_count; 162 ++in_bucket_count;
154 } else { 163 } else {
155 new_cell ^= bit_mask; 164 new_cell ^= bit_mask;
156 } 165 }
157 cell ^= bit_mask; 166 cell ^= bit_mask;
158 } 167 }
159 if (old_cell != new_cell) { 168 if (old_cell != new_cell) {
160 current_bucket[i] = new_cell; 169 while (!current_bucket[i].TrySetValue(old_cell, new_cell)) {
170 // If TrySetValue fails, the cell must have changed. We just
171 // have to read the current value of the cell, AND it with the
172 // computed value, and retry. We can do this because this
173 // method will only be called on the main thread and filtering
174 // threads will only remove slots.
175 old_cell = current_bucket[i].Value();
176 new_cell &= old_cell;
177 }
161 } 178 }
162 } 179 }
163 } 180 }
164 if (in_bucket_count == 0) { 181 if (in_bucket_count == 0) {
165 ReleaseBucket(bucket_index); 182 ReleaseBucket(bucket_index);
166 } 183 }
167 new_count += in_bucket_count; 184 new_count += in_bucket_count;
168 } 185 }
169 } 186 }
170 return new_count; 187 return new_count;
171 } 188 }
172 189
173 private: 190 private:
174 static const int kMaxSlots = (1 << kPageSizeBits) / kPointerSize; 191 static const int kMaxSlots = (1 << kPageSizeBits) / kPointerSize;
175 static const int kCellsPerBucket = 32; 192 static const int kCellsPerBucket = 32;
176 static const int kCellsPerBucketLog2 = 5; 193 static const int kCellsPerBucketLog2 = 5;
177 static const int kBitsPerCell = 32; 194 static const int kBitsPerCell = 32;
178 static const int kBitsPerCellLog2 = 5; 195 static const int kBitsPerCellLog2 = 5;
179 static const int kBitsPerBucket = kCellsPerBucket * kBitsPerCell; 196 static const int kBitsPerBucket = kCellsPerBucket * kBitsPerCell;
180 static const int kBitsPerBucketLog2 = kCellsPerBucketLog2 + kBitsPerCellLog2; 197 static const int kBitsPerBucketLog2 = kCellsPerBucketLog2 + kBitsPerCellLog2;
181 static const int kBuckets = kMaxSlots / kCellsPerBucket / kBitsPerCell; 198 static const int kBuckets = kMaxSlots / kCellsPerBucket / kBitsPerCell;
182 199
183 uint32_t* AllocateBucket() { 200 base::AtomicValue<uint32_t>* AllocateBucket() {
184 uint32_t* result = NewArray<uint32_t>(kCellsPerBucket); 201 base::AtomicValue<uint32_t>* result =
202 NewArray<base::AtomicValue<uint32_t>>(kCellsPerBucket);
185 for (int i = 0; i < kCellsPerBucket; i++) { 203 for (int i = 0; i < kCellsPerBucket; i++) {
186 result[i] = 0; 204 result[i].SetValue(0);
187 } 205 }
188 return result; 206 return result;
189 } 207 }
190 208
191 void ReleaseBucket(int bucket_index) { 209 void ReleaseBucket(int bucket_index) {
192 DeleteArray<uint32_t>(bucket[bucket_index]); 210 DeleteArray<base::AtomicValue<uint32_t>>(bucket[bucket_index].Value());
193 bucket[bucket_index] = nullptr; 211 bucket[bucket_index].SetValue(nullptr);
194 } 212 }
195 213
196 void MaskCell(int bucket_index, int cell_index, uint32_t mask) { 214 void ClearCell(int bucket_index, int cell_index, uint32_t mask) {
197 if (bucket_index < kBuckets) { 215 if (bucket_index < kBuckets) {
198 uint32_t* cells = bucket[bucket_index]; 216 base::AtomicValue<uint32_t>* cells = bucket[bucket_index].Value();
199 if (cells != nullptr && cells[cell_index] != 0) { 217 if (cells != nullptr) {
200 cells[cell_index] &= mask; 218 uint32_t cell = cells[cell_index].Value();
219 if (cell) cells[cell_index].SetBits(0, mask);
201 } 220 }
202 } else { 221 } else {
203 // GCC bug 59124: Emits wrong warnings 222 // GCC bug 59124: Emits wrong warnings
204 // "array subscript is above array bounds" 223 // "array subscript is above array bounds"
205 UNREACHABLE(); 224 UNREACHABLE();
206 } 225 }
207 } 226 }
208 227
209 // Converts the slot offset into bucket/cell/bit index. 228 // Converts the slot offset into bucket/cell/bit index.
210 void SlotToIndices(int slot_offset, int* bucket_index, int* cell_index, 229 void SlotToIndices(int slot_offset, int* bucket_index, int* cell_index,
211 int* bit_index) { 230 int* bit_index) {
212 DCHECK_EQ(slot_offset % kPointerSize, 0); 231 DCHECK_EQ(slot_offset % kPointerSize, 0);
213 int slot = slot_offset >> kPointerSizeLog2; 232 int slot = slot_offset >> kPointerSizeLog2;
214 DCHECK(slot >= 0 && slot <= kMaxSlots); 233 DCHECK(slot >= 0 && slot <= kMaxSlots);
215 *bucket_index = slot >> kBitsPerBucketLog2; 234 *bucket_index = slot >> kBitsPerBucketLog2;
216 *cell_index = (slot >> kBitsPerCellLog2) & (kCellsPerBucket - 1); 235 *cell_index = (slot >> kBitsPerCellLog2) & (kCellsPerBucket - 1);
217 *bit_index = slot & (kBitsPerCell - 1); 236 *bit_index = slot & (kBitsPerCell - 1);
218 } 237 }
219 238
220 uint32_t* bucket[kBuckets]; 239 base::AtomicValue<base::AtomicValue<uint32_t>*> bucket[kBuckets];
221 Address page_start_; 240 Address page_start_;
222 }; 241 };
223 242
224 enum SlotType { 243 enum SlotType {
225 EMBEDDED_OBJECT_SLOT, 244 EMBEDDED_OBJECT_SLOT,
226 OBJECT_SLOT, 245 OBJECT_SLOT,
227 CELL_TARGET_SLOT, 246 CELL_TARGET_SLOT,
228 CODE_TARGET_SLOT, 247 CODE_TARGET_SLOT,
229 CODE_ENTRY_SLOT, 248 CODE_ENTRY_SLOT,
230 DEBUG_TARGET_SLOT, 249 DEBUG_TARGET_SLOT,
(...skipping 124 matching lines...) Expand 10 before | Expand all | Expand 10 after
355 }; 374 };
356 375
357 Address page_start_; 376 Address page_start_;
358 Chunk* chunk_; 377 Chunk* chunk_;
359 }; 378 };
360 379
361 } // namespace internal 380 } // namespace internal
362 } // namespace v8 381 } // namespace v8
363 382
364 #endif // V8_SLOT_SET_H 383 #endif // V8_SLOT_SET_H
OLDNEW
« no previous file with comments | « src/base/atomic-utils.h ('k') | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698