Chromium Code Reviews

Side by Side Diff: src/heap/store-buffer.cc

Issue 1608583002: New page local store buffer. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Rebase and fix signed/unsigned conversion. Created 4 years, 10 months ago
1 // Copyright 2011 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "src/heap/store-buffer.h" 5 #include "src/heap/store-buffer.h"
6 6
7 #include <algorithm> 7 #include <algorithm>
8 8
9 #include "src/counters.h" 9 #include "src/counters.h"
10 #include "src/heap/incremental-marking.h" 10 #include "src/heap/incremental-marking.h"
11 #include "src/heap/store-buffer-inl.h" 11 #include "src/heap/store-buffer-inl.h"
12 #include "src/isolate.h" 12 #include "src/isolate.h"
13 #include "src/objects-inl.h" 13 #include "src/objects-inl.h"
14 #include "src/v8.h" 14 #include "src/v8.h"
15 15
16 namespace v8 { 16 namespace v8 {
17 namespace internal { 17 namespace internal {
18 18
19 StoreBuffer::StoreBuffer(Heap* heap) 19 StoreBuffer::StoreBuffer(Heap* heap)
20 : heap_(heap), 20 : heap_(heap), start_(nullptr), limit_(nullptr), virtual_memory_(nullptr) {}
21 start_(NULL),
22 limit_(NULL),
23 old_start_(NULL),
24 old_limit_(NULL),
25 old_top_(NULL),
26 old_reserved_limit_(NULL),
27 old_buffer_is_sorted_(false),
28 old_buffer_is_filtered_(false),
29 during_gc_(false),
30 store_buffer_rebuilding_enabled_(false),
31 callback_(NULL),
32 may_move_store_buffer_entries_(true),
33 virtual_memory_(NULL),
34 hash_set_1_(NULL),
35 hash_set_2_(NULL),
36 hash_sets_are_empty_(true) {}
37
38 21
39 void StoreBuffer::SetUp() { 22 void StoreBuffer::SetUp() {
40 // Allocate 3x the buffer size, so that we can start the new store buffer 23 // Allocate 3x the buffer size, so that we can start the new store buffer
41 // aligned to 2x the size. This lets us use a bit test to detect the end of 24 // aligned to 2x the size. This lets us use a bit test to detect the end of
42 // the area. 25 // the area.
43 virtual_memory_ = new base::VirtualMemory(kStoreBufferSize * 3); 26 virtual_memory_ = new base::VirtualMemory(kStoreBufferSize * 3);
44 uintptr_t start_as_int = 27 uintptr_t start_as_int =
45 reinterpret_cast<uintptr_t>(virtual_memory_->address()); 28 reinterpret_cast<uintptr_t>(virtual_memory_->address());
46 start_ = 29 start_ =
47 reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize * 2)); 30 reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize * 2));
48 limit_ = start_ + (kStoreBufferSize / kPointerSize); 31 limit_ = start_ + (kStoreBufferSize / kPointerSize);
49 32
50 // Reserve space for the larger old buffer.
51 old_virtual_memory_ =
52 new base::VirtualMemory(kOldStoreBufferLength * kPointerSize);
53 old_top_ = old_start_ =
54 reinterpret_cast<Address*>(old_virtual_memory_->address());
55 // Don't know the alignment requirements of the OS, but it is certainly not
56 // less than 0xfff.
57 CHECK((reinterpret_cast<uintptr_t>(old_start_) & 0xfff) == 0);
58 CHECK(kStoreBufferSize >= base::OS::CommitPageSize());
59 // Initial size of the old buffer is as big as the buffer for new pointers.
60 // This means even if we later fail to enlarge the old buffer due to OOM from
61 // the OS, we will still be able to empty the new pointer buffer into the old
62 // buffer.
63 int initial_length = static_cast<int>(kStoreBufferSize / kPointerSize);
64 CHECK(initial_length > 0);
65 CHECK(initial_length <= kOldStoreBufferLength);
66 old_limit_ = old_start_ + initial_length;
67 old_reserved_limit_ = old_start_ + kOldStoreBufferLength;
68
69 if (!old_virtual_memory_->Commit(reinterpret_cast<void*>(old_start_),
70 (old_limit_ - old_start_) * kPointerSize,
71 false)) {
72 V8::FatalProcessOutOfMemory("StoreBuffer::SetUp");
73 }
74
75 DCHECK(reinterpret_cast<Address>(start_) >= virtual_memory_->address()); 33 DCHECK(reinterpret_cast<Address>(start_) >= virtual_memory_->address());
76 DCHECK(reinterpret_cast<Address>(limit_) >= virtual_memory_->address()); 34 DCHECK(reinterpret_cast<Address>(limit_) >= virtual_memory_->address());
77 Address* vm_limit = reinterpret_cast<Address*>( 35 Address* vm_limit = reinterpret_cast<Address*>(
78 reinterpret_cast<char*>(virtual_memory_->address()) + 36 reinterpret_cast<char*>(virtual_memory_->address()) +
79 virtual_memory_->size()); 37 virtual_memory_->size());
80 DCHECK(start_ <= vm_limit); 38 DCHECK(start_ <= vm_limit);
81 DCHECK(limit_ <= vm_limit); 39 DCHECK(limit_ <= vm_limit);
82 USE(vm_limit); 40 USE(vm_limit);
83 DCHECK((reinterpret_cast<uintptr_t>(limit_) & kStoreBufferOverflowBit) != 0); 41 DCHECK((reinterpret_cast<uintptr_t>(limit_) & kStoreBufferOverflowBit) != 0);
84 DCHECK((reinterpret_cast<uintptr_t>(limit_ - 1) & kStoreBufferOverflowBit) == 42 DCHECK((reinterpret_cast<uintptr_t>(limit_ - 1) & kStoreBufferOverflowBit) ==
85 0); 43 0);
86 44
87 if (!virtual_memory_->Commit(reinterpret_cast<Address>(start_), 45 if (!virtual_memory_->Commit(reinterpret_cast<Address>(start_),
88 kStoreBufferSize, 46 kStoreBufferSize,
89 false)) { // Not executable. 47 false)) { // Not executable.
90 V8::FatalProcessOutOfMemory("StoreBuffer::SetUp"); 48 V8::FatalProcessOutOfMemory("StoreBuffer::SetUp");
91 } 49 }
92 heap_->set_store_buffer_top(reinterpret_cast<Smi*>(start_)); 50 heap_->set_store_buffer_top(reinterpret_cast<Smi*>(start_));
93
94 hash_set_1_ = new uintptr_t[kHashSetLength];
95 hash_set_2_ = new uintptr_t[kHashSetLength];
96 hash_sets_are_empty_ = false;
97
98 ClearFilteringHashSets();
99 } 51 }
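
The comment at the top of SetUp() (reserve 3x the buffer size, align the start to 2x the size, detect the end with a bit test) hinges on one arithmetic fact: if the start is a multiple of twice a power-of-two buffer size, every slot address inside the buffer has the "size" bit clear and the one-past-the-end address has it set. A standalone sketch of that arithmetic, with hypothetical constants (kSize and kOverflowBit below are illustrative stand-ins; their relationship is inferred from the DCHECKs in SetUp(), not defined in this file):

#include <cassert>
#include <cstdint>

int main() {
  const uintptr_t kSize = 1 << 15;       // stand-in for kStoreBufferSize (power of two)
  const uintptr_t kOverflowBit = kSize;  // the bit that flips exactly at the end
  uintptr_t reservation = 0x12345678;    // any reservation base address
  // Round the start up to twice the buffer size, as SetUp() does.
  uintptr_t start = (reservation + 2 * kSize - 1) & ~(2 * kSize - 1);
  uintptr_t limit = start + kSize;
  for (uintptr_t p = start; p < limit; p += sizeof(void*)) {
    assert((p & kOverflowBit) == 0);     // every in-buffer slot: bit clear
  }
  assert((limit & kOverflowBit) != 0);   // one past the end: bit set
  return 0;
}

That single mask is presumably what keeps the overflow check on the store buffer top cheap enough to inline in the write barrier.
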
100 52
101 53
102 void StoreBuffer::TearDown() { 54 void StoreBuffer::TearDown() {
103 delete virtual_memory_; 55 delete virtual_memory_;
104 delete old_virtual_memory_;
105 delete[] hash_set_1_;
106 delete[] hash_set_2_;
107 old_start_ = old_top_ = old_limit_ = old_reserved_limit_ = NULL;
108 start_ = limit_ = NULL; 56 start_ = limit_ = NULL;
109 heap_->set_store_buffer_top(reinterpret_cast<Smi*>(start_)); 57 heap_->set_store_buffer_top(reinterpret_cast<Smi*>(start_));
110 } 58 }
111 59
112 60
113 void StoreBuffer::StoreBufferOverflow(Isolate* isolate) { 61 void StoreBuffer::StoreBufferOverflow(Isolate* isolate) {
114 isolate->heap()->store_buffer()->Compact(); 62 isolate->heap()->store_buffer()->InsertEntriesFromBuffer();
115 isolate->counters()->store_buffer_overflows()->Increment(); 63 isolate->counters()->store_buffer_overflows()->Increment();
116 } 64 }
117 65
118 66
119 bool StoreBuffer::SpaceAvailable(intptr_t space_needed) {
120 return old_limit_ - old_top_ >= space_needed;
121 }
122
123
124 void StoreBuffer::EnsureSpace(intptr_t space_needed) {
125 while (old_limit_ - old_top_ < space_needed &&
126 old_limit_ < old_reserved_limit_) {
127 size_t grow = old_limit_ - old_start_; // Double size.
128 if (old_virtual_memory_->Commit(reinterpret_cast<void*>(old_limit_),
129 grow * kPointerSize, false)) {
130 old_limit_ += grow;
131 } else {
132 break;
133 }
134 }
135
136 if (SpaceAvailable(space_needed)) return;
137
138 if (old_buffer_is_filtered_) return;
139 DCHECK(may_move_store_buffer_entries_);
140 Compact();
141
142 old_buffer_is_filtered_ = true;
143 bool page_has_scan_on_scavenge_flag = false;
144
145 PointerChunkIterator it(heap_);
146 MemoryChunk* chunk;
147 while ((chunk = it.next()) != NULL) {
148 if (chunk->scan_on_scavenge()) {
149 page_has_scan_on_scavenge_flag = true;
150 break;
151 }
152 }
153
154 if (page_has_scan_on_scavenge_flag) {
155 Filter(MemoryChunk::SCAN_ON_SCAVENGE);
156 }
157
158 if (SpaceAvailable(space_needed)) return;
159
160 // Sample 1 entry in 97 and filter out the pages where we estimate that more
161 // than 1 in 8 pointers are to new space.
162 static const int kSampleFinenesses = 5;
163 static const struct Samples {
164 int prime_sample_step;
165 int threshold;
166 } samples[kSampleFinenesses] = {
167 {97, ((Page::kPageSize / kPointerSize) / 97) / 8},
168 {23, ((Page::kPageSize / kPointerSize) / 23) / 16},
169 {7, ((Page::kPageSize / kPointerSize) / 7) / 32},
170 {3, ((Page::kPageSize / kPointerSize) / 3) / 256},
171 {1, 0}};
172 for (int i = 0; i < kSampleFinenesses; i++) {
173 ExemptPopularPages(samples[i].prime_sample_step, samples[i].threshold);
174 // As a last resort we mark all pages as being exempt from the store buffer.
175 DCHECK(i != (kSampleFinenesses - 1) || old_top_ == old_start_);
176 if (SpaceAvailable(space_needed)) return;
177 }
178 UNREACHABLE();
179 }
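
The sampling table in the old EnsureSpace() encodes a sequence of progressively blunter filters: each threshold is (slots per page / sample step) / divisor, so a page is exempted once roughly 1/divisor of its slots show up in the buffer. A small illustration that just evaluates the table (page size and pointer size are assumed values for the sake of the numbers, not taken from this patch):

#include <cstdio>

int main() {
  // Assumed sizes, for illustration only; the real constants live in V8's heap headers.
  const int kPageSize = 1 << 20;   // pretend 1 MB pages
  const int kPointerSize = 8;      // pretend 64-bit pointers
  const int slots_per_page = kPageSize / kPointerSize;
  struct Sample { int step; int divisor; };
  const Sample samples[] = {{97, 8}, {23, 16}, {7, 32}, {3, 256}};
  for (const Sample& s : samples) {
    int threshold = (slots_per_page / s.step) / s.divisor;
    printf("sample every %2d entries, exempt a page after %4d hits (~1/%d of its slots)\n",
           s.step, threshold, s.divisor);
  }
  // The final {1, 0} row in the real table is the last resort: every page that
  // still shows up is exempted, so the loop in EnsureSpace() cannot fail.
  return 0;
}
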
180
181
182 // Sample the store buffer to see if some pages are taking up a lot of space
183 // in the store buffer.
184 void StoreBuffer::ExemptPopularPages(int prime_sample_step, int threshold) {
185 HashMap store_buffer_counts(HashMap::PointersMatch, 16);
186 bool created_new_scan_on_scavenge_pages = false;
187 MemoryChunk* previous_chunk = NULL;
188 for (Address* p = old_start_; p < old_top_; p += prime_sample_step) {
189 Address addr = *p;
190 MemoryChunk* containing_chunk = NULL;
191 if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
192 containing_chunk = previous_chunk;
193 } else {
194 containing_chunk = MemoryChunk::FromAnyPointerAddress(heap_, addr);
195 }
196 HashMap::Entry* e = store_buffer_counts.LookupOrInsert(
197 containing_chunk,
198 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(containing_chunk) >>
199 kPageSizeBits));
200 intptr_t old_counter = bit_cast<intptr_t>(e->value);
201 if (old_counter >= threshold) {
202 containing_chunk->set_scan_on_scavenge(true);
203 created_new_scan_on_scavenge_pages = true;
204 }
205 (*bit_cast<intptr_t*>(&e->value))++;
206 previous_chunk = containing_chunk;
207 }
208 if (created_new_scan_on_scavenge_pages) {
209 Filter(MemoryChunk::SCAN_ON_SCAVENGE);
210 heap_->isolate()->CountUsage(
211 v8::Isolate::UseCounterFeature::kStoreBufferOverflow);
212 }
213 old_buffer_is_filtered_ = true;
214 }
215
216
217 void StoreBuffer::Filter(int flag) {
218 Address* new_top = old_start_;
219 MemoryChunk* previous_chunk = NULL;
220 for (Address* p = old_start_; p < old_top_; p++) {
221 Address addr = *p;
222 MemoryChunk* containing_chunk = NULL;
223 if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
224 containing_chunk = previous_chunk;
225 } else {
226 containing_chunk = MemoryChunk::FromAnyPointerAddress(heap_, addr);
227 previous_chunk = containing_chunk;
228 }
229 if (!containing_chunk->IsFlagSet(flag)) {
230 *new_top++ = addr;
231 }
232 }
233 old_top_ = new_top;
234
235 // Filtering hash sets are inconsistent with the store buffer after this
236 // operation.
237 ClearFilteringHashSets();
238 }
239
240
241 bool StoreBuffer::PrepareForIteration() {
242 Compact();
243 PointerChunkIterator it(heap_);
244 MemoryChunk* chunk;
245 bool page_has_scan_on_scavenge_flag = false;
246 while ((chunk = it.next()) != NULL) {
247 if (chunk->scan_on_scavenge()) {
248 page_has_scan_on_scavenge_flag = true;
249 break;
250 }
251 }
252
253 if (page_has_scan_on_scavenge_flag) {
254 Filter(MemoryChunk::SCAN_ON_SCAVENGE);
255 }
256
257 // Filtering hash sets are inconsistent with the store buffer after
258 // iteration.
259 ClearFilteringHashSets();
260
261 return page_has_scan_on_scavenge_flag;
262 }
263
264
265 void StoreBuffer::ClearFilteringHashSets() {
266 if (!hash_sets_are_empty_) {
267 memset(reinterpret_cast<void*>(hash_set_1_), 0,
268 sizeof(uintptr_t) * kHashSetLength);
269 memset(reinterpret_cast<void*>(hash_set_2_), 0,
270 sizeof(uintptr_t) * kHashSetLength);
271 hash_sets_are_empty_ = true;
272 }
273 }
274
275
276 void StoreBuffer::GCPrologue() {
277 ClearFilteringHashSets();
278 during_gc_ = true;
279 }
280
281
282 #ifdef VERIFY_HEAP 67 #ifdef VERIFY_HEAP
283 void StoreBuffer::VerifyPointers(LargeObjectSpace* space) { 68 void StoreBuffer::VerifyPointers(LargeObjectSpace* space) {
284 LargeObjectIterator it(space); 69 LargeObjectIterator it(space);
285 for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) { 70 for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
286 if (object->IsFixedArray()) { 71 if (object->IsFixedArray()) {
287 Address slot_address = object->address(); 72 Address slot_address = object->address();
288 Address end = object->address() + object->Size(); 73 Address end = object->address() + object->Size();
289
290 while (slot_address < end) { 74 while (slot_address < end) {
291 HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address); 75 HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
292 // When we are not in GC the Heap::InNewSpace() predicate 76 // When we are not in GC the Heap::InNewSpace() predicate
293 // checks that pointers which satisfy predicate point into 77 // checks that pointers which satisfy predicate point into
294 // the active semispace. 78 // the active semispace.
295 Object* object = *slot; 79 Object* object = *slot;
296 heap_->InNewSpace(object); 80 heap_->InNewSpace(object);
297 slot_address += kPointerSize; 81 slot_address += kPointerSize;
298 } 82 }
299 } 83 }
300 } 84 }
301 } 85 }
302 #endif 86 #endif
303 87
304 88
305 void StoreBuffer::Verify() { 89 void StoreBuffer::Verify() {
306 #ifdef VERIFY_HEAP 90 #ifdef VERIFY_HEAP
307 VerifyPointers(heap_->lo_space()); 91 VerifyPointers(heap_->lo_space());
308 #endif 92 #endif
309 } 93 }
310 94
311 95 void StoreBuffer::InsertEntriesFromBuffer() {
312 void StoreBuffer::GCEpilogue() { 96 Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
313 during_gc_ = false; 97 if (top == start_) return;
314 #ifdef VERIFY_HEAP 98 // There's no check of the limit in the loop below so we check here for
315 if (FLAG_verify_heap) { 99 // the worst case (compaction doesn't eliminate any pointers).
316 Verify(); 100 DCHECK(top <= limit_);
101 heap_->set_store_buffer_top(reinterpret_cast<Smi*>(start_));
102 Page* last_page = nullptr;
103 SlotSet* last_slot_set = nullptr;
104 for (Address* current = start_; current < top; current++) {
105 DCHECK(!heap_->code_space()->Contains(*current));
106 Address addr = *current;
107 Page* page = Page::FromAddress(addr);
108 SlotSet* slot_set;
109 uint32_t offset;
110 if (page == last_page) {
111 slot_set = last_slot_set;
112 offset = static_cast<uint32_t>(addr - page->address());
113 } else {
114 offset = AddressToSlotSetAndOffset(addr, &slot_set);
115 last_page = page;
116 last_slot_set = slot_set;
117 }
118 slot_set->Insert(offset);
317 } 119 }
318 #endif
319 } 120 }
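
The new InsertEntriesFromBuffer() replaces the old global Compact(): instead of deduplicating into one large array, the flat buffer of slot addresses is drained into a remembered set that lives with the page containing each slot. A simplified stand-in (this is not V8's SlotSet; the types and page size below are assumptions) showing the address-to-(page, offset) split and the per-page insert:

#include <bitset>
#include <cstdint>
#include <unordered_map>
#include <vector>

namespace sketch {

constexpr uintptr_t kPageSize = 1 << 20;         // assumed page size (power of two)
constexpr uintptr_t kPointerSize = sizeof(void*);

// One bit per word of the page, set when that word holds a recorded slot.
struct PageSlots {
  std::bitset<kPageSize / kPointerSize> bits;
  void Insert(uint32_t offset) { bits.set(offset / kPointerSize); }
};

// Page base address -> recorded old-to-new slots on that page.
using RememberedSet = std::unordered_map<uintptr_t, PageSlots>;

// Drains a flat buffer of slot addresses into per-page sets, the way
// InsertEntriesFromBuffer() walks [start_, top) and inserts page offsets.
void Drain(const std::vector<uintptr_t>& buffer, RememberedSet* sets) {
  for (uintptr_t addr : buffer) {
    uintptr_t page = addr & ~(kPageSize - 1);
    uint32_t offset = static_cast<uint32_t>(addr - page);
    (*sets)[page].Insert(offset);
  }
}

}  // namespace sketch

int main() {
  sketch::RememberedSet sets;
  sketch::Drain({0x100008, 0x100010, 0x2f0008}, &sets);
  return sets.size() == 2 ? 0 : 1;  // two distinct pages were touched
}

The last_page/last_slot_set caching in the real function is an optimization over exactly this lookup for runs of slots on the same page.
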
320 121
321 122 static SlotSet::CallbackResult ProcessOldToNewSlot(
322 void StoreBuffer::ProcessOldToNewSlot(Address slot_address, 123 Heap* heap, Address slot_address, ObjectSlotCallback slot_callback) {
323 ObjectSlotCallback slot_callback) {
324 Object** slot = reinterpret_cast<Object**>(slot_address); 124 Object** slot = reinterpret_cast<Object**>(slot_address);
325 Object* object = *slot; 125 Object* object = *slot;
326 126 if (heap->InFromSpace(object)) {
327 // If the object is not in from space, it must be a duplicate store buffer
328 // entry and the slot was already updated.
329 if (heap_->InFromSpace(object)) {
330 HeapObject* heap_object = reinterpret_cast<HeapObject*>(object); 127 HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
331 DCHECK(heap_object->IsHeapObject()); 128 DCHECK(heap_object->IsHeapObject());
332 slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object); 129 slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
333 object = *slot; 130 object = *slot;
334 // If the object was in from space before and is after executing the 131 // If the object was in from space before and is after executing the
335 // callback in to space, the object is still live. 132 // callback in to space, the object is still live.
336 // Unfortunately, we do not know about the slot. It could be in a 133 // Unfortunately, we do not know about the slot. It could be in a
337 // just freed free space object. 134 // just freed free space object.
338 if (heap_->InToSpace(object)) { 135 if (heap->InToSpace(object)) {
339 EnterDirectlyIntoStoreBuffer(reinterpret_cast<Address>(slot)); 136 return SlotSet::KEEP_SLOT;
340 } 137 }
138 } else {
139 DCHECK(!heap->InNewSpace(object));
341 } 140 }
141 return SlotSet::REMOVE_SLOT;
342 } 142 }
343 143
344 144 void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
345 void StoreBuffer::FindPointersToNewSpaceInRegion( 145 Heap* heap = heap_;
346 Address start, Address end, ObjectSlotCallback slot_callback) { 146 Iterate([heap, slot_callback](Address addr) {
347 for (Address slot_address = start; slot_address < end; 147 return ProcessOldToNewSlot(heap, addr, slot_callback);
348 slot_address += kPointerSize) { 148 });
349 ProcessOldToNewSlot(slot_address, slot_callback);
350 }
351 } 149 }
352 150
353 151 template <typename Callback>
354 void StoreBuffer::IteratePointersInStoreBuffer( 152 void StoreBuffer::Iterate(Callback callback) {
355 ObjectSlotCallback slot_callback) { 153 InsertEntriesFromBuffer();
356 Address* limit = old_top_; 154 PointerChunkIterator it(heap_);
357 old_top_ = old_start_; 155 MemoryChunk* chunk;
358 { 156 while ((chunk = it.next()) != nullptr) {
359 DontMoveStoreBufferEntriesScope scope(this); 157 if (chunk->old_to_new_slots() != nullptr) {
360 for (Address* current = old_start_; current < limit; current++) { 158 SlotSet* slots = chunk->old_to_new_slots();
361 #ifdef DEBUG 159 size_t pages = (chunk->size() + Page::kPageSize - 1) / Page::kPageSize;
362 Address* saved_top = old_top_; 160 for (size_t page = 0; page < pages; page++) {
363 #endif 161 slots[page].Iterate(callback);
364 ProcessOldToNewSlot(*current, slot_callback); 162 }
365 DCHECK(old_top_ == saved_top + 1 || old_top_ == saved_top);
366 } 163 }
367 } 164 }
368 } 165 }
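
ProcessOldToNewSlot() and the Iterate() template above form a keep-or-remove protocol: the callback visits each recorded slot and its return value decides whether the slot stays in the page's set (target moved to to-space) or is dropped (duplicate entry or target no longer in new space). A hedged sketch of that protocol with a toy slot set (KEEP_SLOT and REMOVE_SLOT here are local stand-ins for the SlotSet::CallbackResult values used in the diff):

#include <cstddef>
#include <cstdint>
#include <vector>

enum CallbackResult { KEEP_SLOT, REMOVE_SLOT };

class TinySlotSet {
 public:
  void Insert(uintptr_t addr) { slots_.push_back(addr); }

  // Visit every recorded slot; drop the ones the callback rejects.
  template <typename Callback>
  void Iterate(Callback callback) {
    std::vector<uintptr_t> kept;
    for (uintptr_t addr : slots_) {
      if (callback(addr) == KEEP_SLOT) kept.push_back(addr);
    }
    slots_.swap(kept);
  }

  std::size_t size() const { return slots_.size(); }

 private:
  std::vector<uintptr_t> slots_;
};

int main() {
  TinySlotSet set;
  set.Insert(0x1000);
  set.Insert(0x2000);
  // Pretend only targets below 0x1800 survived the scavenge into to-space.
  set.Iterate([](uintptr_t addr) {
    return addr < 0x1800 ? KEEP_SLOT : REMOVE_SLOT;
  });
  return set.size() == 1 ? 0 : 1;
}
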
369 166
370 167
371 void StoreBuffer::ClearInvalidStoreBufferEntries() { 168 void StoreBuffer::ClearInvalidStoreBufferEntries() {
372 Compact(); 169 InsertEntriesFromBuffer();
373 Address* new_top = old_start_; 170
374 for (Address* current = old_start_; current < old_top_; current++) { 171 Heap* heap = heap_;
375 Address addr = *current; 172 PageIterator it(heap->old_space());
376 Object** slot = reinterpret_cast<Object**>(addr); 173 MemoryChunk* chunk;
377 Object* object = *slot; 174 while (it.has_next()) {
378 if (heap_->InNewSpace(object) && object->IsHeapObject()) { 175 chunk = it.next();
379 // If the target object is not black, the source slot must be part 176 if (chunk->old_to_new_slots() != nullptr) {
380 // of a non-black (dead) object. 177 SlotSet* slots = chunk->old_to_new_slots();
381 HeapObject* heap_object = HeapObject::cast(object); 178 size_t pages = (chunk->size() + Page::kPageSize - 1) / Page::kPageSize;
382 if (Marking::IsBlack(Marking::MarkBitFrom(heap_object)) && 179 if (pages > 1) {
383 heap_->mark_compact_collector()->IsSlotInLiveObject(addr)) { 180 // Large pages were processed above.
384 *new_top++ = addr; 181 continue;
385 } 182 }
386 } 183 slots->Iterate([heap](Address addr) {
387 } 184 Object** slot = reinterpret_cast<Object**>(addr);
388 old_top_ = new_top; 185 Object* object = *slot;
389 ClearFilteringHashSets(); 186 if (heap->InNewSpace(object)) {
390 187 DCHECK(object->IsHeapObject());
391 // Don't scan on scavenge dead large objects. 188 // If the target object is not black, the source slot must be part
392 LargeObjectIterator it(heap_->lo_space()); 189 // of a non-black (dead) object.
393 for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) { 190 HeapObject* heap_object = HeapObject::cast(object);
394 MemoryChunk* chunk = MemoryChunk::FromAddress(object->address()); 191 bool live = Marking::IsBlack(Marking::MarkBitFrom(heap_object)) &&
395 if (chunk->scan_on_scavenge() && 192 heap->mark_compact_collector()->IsSlotInLiveObject(addr);
396 Marking::IsWhite(Marking::MarkBitFrom(object))) { 193 return live ? SlotSet::KEEP_SLOT : SlotSet::REMOVE_SLOT;
397 chunk->set_scan_on_scavenge(false); 194 }
195 return SlotSet::REMOVE_SLOT;
196 });
398 } 197 }
399 } 198 }
400 } 199 }
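
ClearInvalidStoreBufferEntries() applies a stricter filter during mark-compact: a slot is kept only if its target is still in new space and the slot itself lies inside a black (live) object, since otherwise the slot may sit in freed memory. A hedged sketch of the liveness half of that rule against a fake marking state (FakeMarkingState and its method are invented for illustration; they are not V8 APIs):

#include <cstdint>
#include <set>
#include <vector>

// Invented stand-in for the marking side: an object is "black" if its start
// address was recorded, and a slot is live if it lies inside a black object.
struct FakeMarkingState {
  std::set<uintptr_t> black_objects;
  bool IsSlotInLiveObject(uintptr_t slot, uintptr_t object_start,
                          uintptr_t object_size) const {
    return black_objects.count(object_start) != 0 && slot >= object_start &&
           slot < object_start + object_size;
  }
};

// Models the liveness check from the lambda passed to slots->Iterate() above:
// keep a slot only if it is inside a live object, otherwise remove it.
std::vector<uintptr_t> Prune(const std::vector<uintptr_t>& slots,
                             const FakeMarkingState& marking,
                             uintptr_t object_start, uintptr_t object_size) {
  std::vector<uintptr_t> kept;
  for (uintptr_t slot : slots) {
    if (marking.IsSlotInLiveObject(slot, object_start, object_size)) {
      kept.push_back(slot);  // KEEP_SLOT
    }                        // else: REMOVE_SLOT
  }
  return kept;
}

int main() {
  FakeMarkingState marking;
  marking.black_objects.insert(0x1000);  // one live object at [0x1000, 0x1100)
  std::vector<uintptr_t> kept = Prune({0x1008, 0x2008}, marking, 0x1000, 0x100);
  return kept.size() == 1 ? 0 : 1;
}
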
401 200
402 201
403 void StoreBuffer::VerifyValidStoreBufferEntries() { 202 void StoreBuffer::VerifyValidStoreBufferEntries() {
404 for (Address* current = old_start_; current < old_top_; current++) { 203 Heap* heap = heap_;
405 Object** slot = reinterpret_cast<Object**>(*current); 204 Iterate([heap](Address addr) {
205 Object** slot = reinterpret_cast<Object**>(addr);
406 Object* object = *slot; 206 Object* object = *slot;
407 CHECK(object->IsHeapObject()); 207 if (Page::FromAddress(addr)->owner() != nullptr &&
408 CHECK(heap_->InNewSpace(object)); 208 Page::FromAddress(addr)->owner()->identity() == OLD_SPACE) {
409 heap_->mark_compact_collector()->VerifyIsSlotInLiveObject( 209 CHECK(object->IsHeapObject());
410 reinterpret_cast<Address>(slot), HeapObject::cast(object)); 210 CHECK(heap->InNewSpace(object));
411 } 211 heap->mark_compact_collector()->VerifyIsSlotInLiveObject(
412 } 212 reinterpret_cast<Address>(slot), HeapObject::cast(object));
413
414
415 class FindPointersToNewSpaceVisitor final : public ObjectVisitor {
416 public:
417 FindPointersToNewSpaceVisitor(StoreBuffer* store_buffer,
418 ObjectSlotCallback callback)
419 : store_buffer_(store_buffer), callback_(callback) {}
420
421 V8_INLINE void VisitPointers(Object** start, Object** end) override {
422 store_buffer_->FindPointersToNewSpaceInRegion(
423 reinterpret_cast<Address>(start), reinterpret_cast<Address>(end),
424 callback_);
425 }
426
427 V8_INLINE void VisitCodeEntry(Address code_entry_slot) override {}
428
429 private:
430 StoreBuffer* store_buffer_;
431 ObjectSlotCallback callback_;
432 };
433
434
435 void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
436 // We do not sort or remove duplicated entries from the store buffer because
437 // we expect that callback will rebuild the store buffer thus removing
438 // all duplicates and pointers to old space.
439 bool some_pages_to_scan = PrepareForIteration();
440
441 // TODO(gc): we want to skip slots on evacuation candidates
442 // but we can't simply figure that out from slot address
443 // because slot can belong to a large object.
444 IteratePointersInStoreBuffer(slot_callback);
445
446 // We are done scanning all the pointers that were in the store buffer, but
447 // there may be some pages marked scan_on_scavenge that have pointers to new
448 // space that are not in the store buffer. We must scan them now. As we
449 // scan, the surviving pointers to new space will be added to the store
450 // buffer. If there are still a lot of pointers to new space then we will
451 // keep the scan_on_scavenge flag on the page and discard the pointers that
452 // were added to the store buffer. If there are not many pointers to new
453 // space left on the page we will keep the pointers in the store buffer and
454 // remove the flag from the page.
455 if (some_pages_to_scan) {
456 if (callback_ != NULL) {
457 (*callback_)(heap_, NULL, kStoreBufferStartScanningPagesEvent);
458 } 213 }
459 PointerChunkIterator it(heap_); 214 return SlotSet::KEEP_SLOT;
460 MemoryChunk* chunk; 215 });
461 FindPointersToNewSpaceVisitor visitor(this, slot_callback);
462 while ((chunk = it.next()) != NULL) {
463 if (chunk->scan_on_scavenge()) {
464 chunk->set_scan_on_scavenge(false);
465 if (callback_ != NULL) {
466 (*callback_)(heap_, chunk, kStoreBufferScanningPageEvent);
467 }
468 if (chunk->owner() == heap_->lo_space()) {
469 LargePage* large_page = reinterpret_cast<LargePage*>(chunk);
470 HeapObject* array = large_page->GetObject();
471 DCHECK(array->IsFixedArray());
472 Address start = array->address();
473 Address end = start + array->Size();
474 FindPointersToNewSpaceInRegion(start, end, slot_callback);
475 } else {
476 Page* page = reinterpret_cast<Page*>(chunk);
477 PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
478 if (owner == heap_->map_space()) {
479 DCHECK(page->SweepingDone());
480 HeapObjectIterator iterator(page);
481 for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
482 heap_object = iterator.Next()) {
483 // We skip free space objects.
484 if (!heap_object->IsFiller()) {
485 DCHECK(heap_object->IsMap());
486 FindPointersToNewSpaceInRegion(
487 heap_object->address() + Map::kPointerFieldsBeginOffset,
488 heap_object->address() + Map::kPointerFieldsEndOffset,
489 slot_callback);
490 }
491 }
492 } else {
493 if (page->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
494 // Aborted pages require iterating using mark bits because they
495 // don't have an iterable object layout before sweeping (which can
496 // only happen later). Note that we can never reach an
497 // aborted page through the scavenger.
498 DCHECK_EQ(heap_->gc_state(), Heap::MARK_COMPACT);
499 heap_->mark_compact_collector()->VisitLiveObjectsBody(page,
500 &visitor);
501 } else {
502 heap_->mark_compact_collector()
503 ->SweepOrWaitUntilSweepingCompleted(page);
504 HeapObjectIterator iterator(page);
505 for (HeapObject* heap_object = iterator.Next();
506 heap_object != nullptr; heap_object = iterator.Next()) {
507 // We iterate over objects that contain new space pointers only.
508 heap_object->IterateBody(&visitor);
509 }
510 }
511 }
512 }
513 }
514 }
515 if (callback_ != NULL) {
516 (*callback_)(heap_, NULL, kStoreBufferScanningPageEvent);
517 }
518 }
519 }
520
521
522 void StoreBuffer::Compact() {
523 Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
524
525 if (top == start_) return;
526
527 // There's no check of the limit in the loop below so we check here for
528 // the worst case (compaction doesn't eliminate any pointers).
529 DCHECK(top <= limit_);
530 heap_->set_store_buffer_top(reinterpret_cast<Smi*>(start_));
531 EnsureSpace(top - start_);
532 DCHECK(may_move_store_buffer_entries_);
533 // Goes through the addresses in the store buffer attempting to remove
534 // duplicates. In the interest of speed this is a lossy operation. Some
535 // duplicates will remain. We have two hash sets with different hash
536 // functions to reduce the number of unnecessary clashes.
537 hash_sets_are_empty_ = false; // Hash sets are in use.
538 for (Address* current = start_; current < top; current++) {
539 DCHECK(!heap_->code_space()->Contains(*current));
540 uintptr_t int_addr = reinterpret_cast<uintptr_t>(*current);
541 // Shift out the last bits including any tags.
542 int_addr >>= kPointerSizeLog2;
543 // The upper part of an address is basically random because of ASLR and OS
544 // non-determinism, so we use only the bits within a page for hashing to
545 // make v8's behavior (more) deterministic.
546 uintptr_t hash_addr =
547 int_addr & (Page::kPageAlignmentMask >> kPointerSizeLog2);
548 int hash1 = ((hash_addr ^ (hash_addr >> kHashSetLengthLog2)) &
549 (kHashSetLength - 1));
550 if (hash_set_1_[hash1] == int_addr) continue;
551 uintptr_t hash2 = (hash_addr - (hash_addr >> kHashSetLengthLog2));
552 hash2 ^= hash2 >> (kHashSetLengthLog2 * 2);
553 hash2 &= (kHashSetLength - 1);
554 if (hash_set_2_[hash2] == int_addr) continue;
555 if (hash_set_1_[hash1] == 0) {
556 hash_set_1_[hash1] = int_addr;
557 } else if (hash_set_2_[hash2] == 0) {
558 hash_set_2_[hash2] = int_addr;
559 } else {
560 // Rather than slowing down we just throw away some entries. This will
561 // cause some duplicates to remain undetected.
562 hash_set_1_[hash1] = int_addr;
563 hash_set_2_[hash2] = 0;
564 }
565 old_buffer_is_sorted_ = false;
566 old_buffer_is_filtered_ = false;
567 *old_top_++ = reinterpret_cast<Address>(int_addr << kPointerSizeLog2);
568 DCHECK(old_top_ <= old_limit_);
569 }
570 heap_->isolate()->counters()->store_buffer_compactions()->Increment();
571 }
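
The two hash sets in the old Compact() are a deliberately lossy duplicate filter: two direct-mapped tables with different hash functions, a hit in either one means the address was probably recorded already, and on a double collision an entry is simply overwritten so some duplicates survive. A toy version with a hypothetical table size (and without the page-mask step the real code applies before hashing):

#include <cstdint>

constexpr int kHashSetLengthLog2 = 10;  // hypothetical; the real value is in store-buffer.h
constexpr int kHashSetLength = 1 << kHashSetLengthLog2;

static uintptr_t hash_set_1[kHashSetLength];  // zero-initialized static storage
static uintptr_t hash_set_2[kHashSetLength];

// Returns true if the (word-shifted) address was probably recorded before.
bool ProbablySeen(uintptr_t int_addr) {
  uintptr_t h = int_addr;
  int hash1 =
      static_cast<int>((h ^ (h >> kHashSetLengthLog2)) & (kHashSetLength - 1));
  if (hash_set_1[hash1] == int_addr) return true;
  uintptr_t hash2 = h - (h >> kHashSetLengthLog2);
  hash2 ^= hash2 >> (kHashSetLengthLog2 * 2);
  hash2 &= kHashSetLength - 1;
  if (hash_set_2[hash2] == int_addr) return true;
  if (hash_set_1[hash1] == 0) {
    hash_set_1[hash1] = int_addr;
  } else if (hash_set_2[hash2] == 0) {
    hash_set_2[hash2] = int_addr;
  } else {
    // Rather than probing further, evict: speed over completeness, so a few
    // duplicates remain undetected, as the comment in Compact() says.
    hash_set_1[hash1] = int_addr;
    hash_set_2[hash2] = 0;
  }
  return false;
}

int main() {
  // First sighting inserts, second sighting is filtered out.
  return (!ProbablySeen(0x1234) && ProbablySeen(0x1234)) ? 0 : 1;
}
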
572
573
574 void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
575 if (event == kStoreBufferStartScanningPagesEvent) {
576 start_of_current_page_ = NULL;
577 current_page_ = NULL;
578 } else if (event == kStoreBufferScanningPageEvent) {
579 if (current_page_ != NULL) {
580 // If this page already overflowed the store buffer during this iteration.
581 if (current_page_->scan_on_scavenge()) {
582 // Then we should wipe out the entries that have been added for it.
583 store_buffer_->SetTop(start_of_current_page_);
584 } else if (store_buffer_->Top() - start_of_current_page_ >=
585 (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
586 // Did we find too many pointers in the previous page? The heuristic is
 587 // that no page can take more than 1/5 the remaining slots in the store
588 // buffer.
589 current_page_->set_scan_on_scavenge(true);
590 store_buffer_->SetTop(start_of_current_page_);
591 } else {
592 // In this case the page we scanned took a reasonable number of slots in
593 // the store buffer. It has now been rehabilitated and is no longer
594 // marked scan_on_scavenge.
595 DCHECK(!current_page_->scan_on_scavenge());
596 }
597 }
598 start_of_current_page_ = store_buffer_->Top();
599 current_page_ = page;
600 } else if (event == kStoreBufferFullEvent) {
601 // The current page overflowed the store buffer again. Wipe out its entries
602 // in the store buffer and mark it scan-on-scavenge again. This may happen
603 // several times while scanning.
604 if (current_page_ == NULL) {
605 // Store Buffer overflowed while scanning promoted objects. These are not
606 // in any particular page, though they are likely to be clustered by the
607 // allocation routines.
608 store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize / 2);
609 } else {
610 // Store Buffer overflowed while scanning a particular old space page for
611 // pointers to new space.
612 DCHECK(current_page_ == page);
613 DCHECK(page != NULL);
614 current_page_->set_scan_on_scavenge(true);
615 DCHECK(start_of_current_page_ != store_buffer_->Top());
616 store_buffer_->SetTop(start_of_current_page_);
617 }
618 } else {
619 UNREACHABLE();
620 }
621 } 216 }
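
The 1/5 heuristic in the old StoreBufferRebuilder::Callback() follows from the check Top - start_of_page >= (Limit - Top) >> 2: a page that contributed at least a quarter of the space still free after its scan has consumed at least a fifth of what was free when its scan started. A numeric illustration with made-up sizes:

#include <cstdio>

int main() {
  // Made-up numbers, in slots, just to exercise the comparison.
  const long limit = 32768;               // buffer capacity
  const long top_at_page_start = 10000;   // slots used before scanning the page
  const long entries_from_page = 5000;    // slots this page added
  const long top = top_at_page_start + entries_from_page;
  // Same comparison as the old Callback(): entries from this page versus a
  // quarter of the slots still free after scanning it.
  bool too_popular = (top - top_at_page_start) >= ((limit - top) >> 2);
  // 5000 >= (32768 - 15000) / 4 = 4442, so this page would be flagged
  // scan-on-scavenge and its 5000 entries wiped from the buffer.
  printf("page stays scan-on-scavenge: %s\n", too_popular ? "yes" : "no");
  return 0;
}
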
622 217
623 } // namespace internal 218 } // namespace internal
624 } // namespace v8 219 } // namespace v8
