// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/store-buffer.h"

#include <algorithm>

#include "src/v8.h"

#include "src/base/atomicops.h"
#include "src/counters.h"
#include "src/store-buffer-inl.h"

namespace v8 {
namespace internal {

StoreBuffer::StoreBuffer(Heap* heap)
    : heap_(heap),
      start_(NULL),
      limit_(NULL),
      old_start_(NULL),
      old_limit_(NULL),
      old_top_(NULL),
      old_reserved_limit_(NULL),
      old_buffer_is_sorted_(false),
      old_buffer_is_filtered_(false),
      during_gc_(false),
      store_buffer_rebuilding_enabled_(false),
      callback_(NULL),
      may_move_store_buffer_entries_(true),
      virtual_memory_(NULL),
      hash_set_1_(NULL),
      hash_set_2_(NULL),
      hash_sets_are_empty_(true) {
}


void StoreBuffer::SetUp() {
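  // The buffer for new entries is reserved with enough slack that start_ can
  // be rounded up to a 2 * kStoreBufferSize boundary. The constants are
  // chosen so that, with this alignment, the first address past the buffer
  // (limit_) has kStoreBufferOverflowBit set while addresses inside the
  // buffer do not (see the DCHECKs below), so an overflow can be detected
  // from the store buffer top pointer alone.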
  virtual_memory_ = new base::VirtualMemory(kStoreBufferSize * 3);
  uintptr_t start_as_int =
      reinterpret_cast<uintptr_t>(virtual_memory_->address());
  start_ =
      reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize * 2));
  limit_ = start_ + (kStoreBufferSize / kPointerSize);

  old_virtual_memory_ =
      new base::VirtualMemory(kOldStoreBufferLength * kPointerSize);
  old_top_ = old_start_ =
      reinterpret_cast<Address*>(old_virtual_memory_->address());
  // We do not know the exact alignment requirements of the OS, but the
  // reservation is certainly aligned to at least a 4K (0x1000) boundary.
  DCHECK((reinterpret_cast<uintptr_t>(old_start_) & 0xfff) == 0);
  int initial_length =
      static_cast<int>(base::OS::CommitPageSize() / kPointerSize);
  DCHECK(initial_length > 0);
  DCHECK(initial_length <= kOldStoreBufferLength);
  old_limit_ = old_start_ + initial_length;
  old_reserved_limit_ = old_start_ + kOldStoreBufferLength;

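  // Only the first OS page of the old buffer is committed up front;
  // EnsureSpace() commits more (doubling the committed region each time) as
  // the buffer fills, up to old_reserved_limit_.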
  CHECK(old_virtual_memory_->Commit(
      reinterpret_cast<void*>(old_start_),
      (old_limit_ - old_start_) * kPointerSize,
      false));

  DCHECK(reinterpret_cast<Address>(start_) >= virtual_memory_->address());
  DCHECK(reinterpret_cast<Address>(limit_) >= virtual_memory_->address());
  Address* vm_limit = reinterpret_cast<Address*>(
      reinterpret_cast<char*>(virtual_memory_->address()) +
      virtual_memory_->size());
  DCHECK(start_ <= vm_limit);
  DCHECK(limit_ <= vm_limit);
  USE(vm_limit);
  DCHECK((reinterpret_cast<uintptr_t>(limit_) & kStoreBufferOverflowBit) != 0);
  DCHECK((reinterpret_cast<uintptr_t>(limit_ - 1) & kStoreBufferOverflowBit) ==
         0);

  CHECK(virtual_memory_->Commit(reinterpret_cast<Address>(start_),
                                kStoreBufferSize,
                                false));  // Not executable.
  heap_->public_set_store_buffer_top(start_);

  hash_set_1_ = new uintptr_t[kHashSetLength];
  hash_set_2_ = new uintptr_t[kHashSetLength];
  hash_sets_are_empty_ = false;

  ClearFilteringHashSets();
}


void StoreBuffer::TearDown() {
  delete virtual_memory_;
  delete old_virtual_memory_;
  delete[] hash_set_1_;
  delete[] hash_set_2_;
  old_start_ = old_top_ = old_limit_ = old_reserved_limit_ = NULL;
  start_ = limit_ = NULL;
  heap_->public_set_store_buffer_top(start_);
}


void StoreBuffer::StoreBufferOverflow(Isolate* isolate) {
  isolate->heap()->store_buffer()->Compact();
  isolate->counters()->store_buffer_overflows()->Increment();
}


void StoreBuffer::Uniq() {
  // Remove adjacent duplicates and cells that do not point at new space.
  Address previous = NULL;
  Address* write = old_start_;
  DCHECK(may_move_store_buffer_entries_);
  for (Address* read = old_start_; read < old_top_; read++) {
    Address current = *read;
    if (current != previous) {
      if (heap_->InNewSpace(*reinterpret_cast<Object**>(current))) {
        *write++ = current;
      }
    }
    previous = current;
  }
  old_top_ = write;
}


bool StoreBuffer::SpaceAvailable(intptr_t space_needed) {
  return old_limit_ - old_top_ >= space_needed;
}


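// Makes room in the old buffer for space_needed more entries: first by
// committing more of the reserved memory (doubling the committed region each
// time), then by compacting and filtering the buffer, and finally by turning
// the pages with the most entries into scan-on-scavenge pages and dropping
// their entries from the buffer.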
void StoreBuffer::EnsureSpace(intptr_t space_needed) {
  while (old_limit_ - old_top_ < space_needed &&
         old_limit_ < old_reserved_limit_) {
    size_t grow = old_limit_ - old_start_;  // Double size.
    CHECK(old_virtual_memory_->Commit(reinterpret_cast<void*>(old_limit_),
                                      grow * kPointerSize,
                                      false));
    old_limit_ += grow;
  }

  if (SpaceAvailable(space_needed)) return;

  if (old_buffer_is_filtered_) return;
  DCHECK(may_move_store_buffer_entries_);
  Compact();

  old_buffer_is_filtered_ = true;
  bool page_has_scan_on_scavenge_flag = false;

  PointerChunkIterator it(heap_);
  MemoryChunk* chunk;
  while ((chunk = it.next()) != NULL) {
    if (chunk->scan_on_scavenge()) {
      page_has_scan_on_scavenge_flag = true;
      break;
    }
  }

  if (page_has_scan_on_scavenge_flag) {
    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
  }

  if (SpaceAvailable(space_needed)) return;

  // Sample 1 entry in 97 and filter out the pages where we estimate that more
  // than 1 in 8 pointers are to new space.
  static const int kSampleFinenesses = 5;
  static const struct Samples {
    int prime_sample_step;
    int threshold;
  } samples[kSampleFinenesses] = {
    { 97, ((Page::kPageSize / kPointerSize) / 97) / 8 },
    { 23, ((Page::kPageSize / kPointerSize) / 23) / 16 },
    { 7, ((Page::kPageSize / kPointerSize) / 7) / 32 },
    { 3, ((Page::kPageSize / kPointerSize) / 3) / 256 },
    { 1, 0}
  };
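  // Each successive pass samples more densely and uses a lower threshold, so
  // progressively less popular pages are exempted. The final pass ({ 1, 0 })
  // exempts every page that still has an entry in the buffer, which
  // guarantees that the buffer is emptied and the loop below terminates.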
  for (int i = 0; i < kSampleFinenesses; i++) {
    ExemptPopularPages(samples[i].prime_sample_step, samples[i].threshold);
    // As a last resort we mark all pages as being exempt from the store
    // buffer.
    DCHECK(i != (kSampleFinenesses - 1) || old_top_ == old_start_);
    if (SpaceAvailable(space_needed)) return;
  }
  UNREACHABLE();
}


// Sample the store buffer to see if some pages are taking up a lot of space
// in the store buffer.
void StoreBuffer::ExemptPopularPages(int prime_sample_step, int threshold) {
  PointerChunkIterator it(heap_);
  MemoryChunk* chunk;
  while ((chunk = it.next()) != NULL) {
    chunk->set_store_buffer_counter(0);
  }
  bool created_new_scan_on_scavenge_pages = false;
  MemoryChunk* previous_chunk = NULL;
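  // Visit every prime_sample_step'th entry and count hits per page; any page
  // whose sampled count reaches the threshold is switched to scan-on-scavenge
  // mode, and its entries are then filtered out of the buffer below.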
  for (Address* p = old_start_; p < old_top_; p += prime_sample_step) {
    Address addr = *p;
    MemoryChunk* containing_chunk = NULL;
    if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
      containing_chunk = previous_chunk;
    } else {
      containing_chunk = MemoryChunk::FromAnyPointerAddress(heap_, addr);
    }
    int old_counter = containing_chunk->store_buffer_counter();
    if (old_counter >= threshold) {
      containing_chunk->set_scan_on_scavenge(true);
      created_new_scan_on_scavenge_pages = true;
    }
    containing_chunk->set_store_buffer_counter(old_counter + 1);
    previous_chunk = containing_chunk;
  }
  if (created_new_scan_on_scavenge_pages) {
    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
  }
  old_buffer_is_filtered_ = true;
}


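// Removes from the old buffer every entry whose containing page has the given
// flag set.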
void StoreBuffer::Filter(int flag) {
  Address* new_top = old_start_;
  MemoryChunk* previous_chunk = NULL;
  for (Address* p = old_start_; p < old_top_; p++) {
    Address addr = *p;
    MemoryChunk* containing_chunk = NULL;
    if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
      containing_chunk = previous_chunk;
    } else {
      containing_chunk = MemoryChunk::FromAnyPointerAddress(heap_, addr);
      previous_chunk = containing_chunk;
    }
    if (!containing_chunk->IsFlagSet(flag)) {
      *new_top++ = addr;
    }
  }
  old_top_ = new_top;

  // Filtering hash sets are inconsistent with the store buffer after this
  // operation.
  ClearFilteringHashSets();
}


void StoreBuffer::SortUniq() {
  Compact();
  if (old_buffer_is_sorted_) return;
  std::sort(old_start_, old_top_);
  Uniq();

  old_buffer_is_sorted_ = true;

  // Filtering hash sets are inconsistent with the store buffer after this
  // operation.
  ClearFilteringHashSets();
}


bool StoreBuffer::PrepareForIteration() {
  Compact();
  PointerChunkIterator it(heap_);
  MemoryChunk* chunk;
  bool page_has_scan_on_scavenge_flag = false;
  while ((chunk = it.next()) != NULL) {
    if (chunk->scan_on_scavenge()) {
      page_has_scan_on_scavenge_flag = true;
      break;
    }
  }

  if (page_has_scan_on_scavenge_flag) {
    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
  }

  // Filtering hash sets are inconsistent with the store buffer after
  // iteration.
  ClearFilteringHashSets();

  return page_has_scan_on_scavenge_flag;
}


#ifdef DEBUG
void StoreBuffer::Clean() {
  ClearFilteringHashSets();
  Uniq();  // Also removes things that no longer point to new space.
  EnsureSpace(kStoreBufferSize / 2);
}


static Address* in_store_buffer_1_element_cache = NULL;


bool StoreBuffer::CellIsInStoreBuffer(Address cell_address) {
  if (!FLAG_enable_slow_asserts) return true;
  if (in_store_buffer_1_element_cache != NULL &&
      *in_store_buffer_1_element_cache == cell_address) {
    return true;
  }
  Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
  for (Address* current = top - 1; current >= start_; current--) {
    if (*current == cell_address) {
      in_store_buffer_1_element_cache = current;
      return true;
    }
  }
  for (Address* current = old_top_ - 1; current >= old_start_; current--) {
    if (*current == cell_address) {
      in_store_buffer_1_element_cache = current;
      return true;
    }
  }
  return false;
}
#endif


void StoreBuffer::ClearFilteringHashSets() {
  if (!hash_sets_are_empty_) {
    memset(reinterpret_cast<void*>(hash_set_1_),
           0,
           sizeof(uintptr_t) * kHashSetLength);
    memset(reinterpret_cast<void*>(hash_set_2_),
           0,
           sizeof(uintptr_t) * kHashSetLength);
    hash_sets_are_empty_ = true;
  }
}


void StoreBuffer::GCPrologue() {
  ClearFilteringHashSets();
  during_gc_ = true;
}


#ifdef VERIFY_HEAP
void StoreBuffer::VerifyPointers(LargeObjectSpace* space) {
  LargeObjectIterator it(space);
  for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
    if (object->IsFixedArray()) {
      Address slot_address = object->address();
      Address end = object->address() + object->Size();

      while (slot_address < end) {
        HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
        // When we are not in GC the Heap::InNewSpace() predicate
        // checks that pointers which satisfy the predicate point into
        // the active semispace.
        Object* object = reinterpret_cast<Object*>(
            base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
        heap_->InNewSpace(object);
        slot_address += kPointerSize;
      }
    }
  }
}
#endif


void StoreBuffer::Verify() {
#ifdef VERIFY_HEAP
  VerifyPointers(heap_->lo_space());
#endif
}


void StoreBuffer::GCEpilogue() {
  during_gc_ = false;
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    Verify();
  }
#endif
}


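// Scans the region [start, end) one word at a time and invokes slot_callback
// for every slot that points into new space. If a slot still points into new
// space after the callback has run (i.e. the target was not promoted), the
// slot is recorded in the store buffer again.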
void StoreBuffer::FindPointersToNewSpaceInRegion(
    Address start,
    Address end,
    ObjectSlotCallback slot_callback,
    bool clear_maps) {
  for (Address slot_address = start;
       slot_address < end;
       slot_address += kPointerSize) {
    Object** slot = reinterpret_cast<Object**>(slot_address);
    Object* object = reinterpret_cast<Object*>(
        base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
    if (heap_->InNewSpace(object)) {
      HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
      DCHECK(heap_object->IsHeapObject());
      // The new space object was not promoted if it still contains a map
      // pointer. Clear the map field now lazily.
      if (clear_maps) ClearDeadObject(heap_object);
      slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
      object = reinterpret_cast<Object*>(
          base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
      if (heap_->InNewSpace(object)) {
        EnterDirectlyIntoStoreBuffer(slot_address);
      }
    }
  }
}


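// Processes every entry currently in the old buffer. The buffer is logically
// emptied up front; slots that still point into new space after the callback
// has run are re-entered via EnterDirectlyIntoStoreBuffer, so old_top_ can
// advance by at most one per processed entry (checked by the DCHECK in the
// loop).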
void StoreBuffer::IteratePointersInStoreBuffer(
    ObjectSlotCallback slot_callback,
    bool clear_maps) {
  Address* limit = old_top_;
  old_top_ = old_start_;
  {
    DontMoveStoreBufferEntriesScope scope(this);
    for (Address* current = old_start_; current < limit; current++) {
#ifdef DEBUG
      Address* saved_top = old_top_;
#endif
      Object** slot = reinterpret_cast<Object**>(*current);
      Object* object = reinterpret_cast<Object*>(
          base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
      if (heap_->InFromSpace(object)) {
        HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
        // The new space object was not promoted if it still contains a map
        // pointer. Clear the map field now lazily.
        if (clear_maps) ClearDeadObject(heap_object);
        slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
        object = reinterpret_cast<Object*>(
            base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
        if (heap_->InNewSpace(object)) {
          EnterDirectlyIntoStoreBuffer(reinterpret_cast<Address>(slot));
        }
      }
      DCHECK(old_top_ == saved_top + 1 || old_top_ == saved_top);
    }
  }
}


void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
  IteratePointersToNewSpace(slot_callback, false);
}


void StoreBuffer::IteratePointersToNewSpaceAndClearMaps(
    ObjectSlotCallback slot_callback) {
  IteratePointersToNewSpace(slot_callback, true);
}


void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback,
                                            bool clear_maps) {
  // We do not sort or remove duplicated entries from the store buffer because
  // we expect that the callback will rebuild the store buffer, thus removing
  // all duplicates and pointers to old space.
  bool some_pages_to_scan = PrepareForIteration();

  // TODO(gc): we want to skip slots on evacuation candidates
  // but we can't simply figure that out from slot address
  // because slot can belong to a large object.
  IteratePointersInStoreBuffer(slot_callback, clear_maps);

  // We are done scanning all the pointers that were in the store buffer, but
  // there may be some pages marked scan_on_scavenge that have pointers to new
  // space that are not in the store buffer. We must scan them now. As we
  // scan, the surviving pointers to new space will be added to the store
  // buffer. If there are still a lot of pointers to new space then we will
  // keep the scan_on_scavenge flag on the page and discard the pointers that
  // were added to the store buffer. If there are not many pointers to new
  // space left on the page we will keep the pointers in the store buffer and
  // remove the flag from the page.
  if (some_pages_to_scan) {
    if (callback_ != NULL) {
      (*callback_)(heap_, NULL, kStoreBufferStartScanningPagesEvent);
    }
    PointerChunkIterator it(heap_);
    MemoryChunk* chunk;
    while ((chunk = it.next()) != NULL) {
      if (chunk->scan_on_scavenge()) {
        chunk->set_scan_on_scavenge(false);
        if (callback_ != NULL) {
          (*callback_)(heap_, chunk, kStoreBufferScanningPageEvent);
        }
        if (chunk->owner() == heap_->lo_space()) {
          LargePage* large_page = reinterpret_cast<LargePage*>(chunk);
          HeapObject* array = large_page->GetObject();
          DCHECK(array->IsFixedArray());
          Address start = array->address();
          Address end = start + array->Size();
          FindPointersToNewSpaceInRegion(start, end, slot_callback,
                                         clear_maps);
        } else {
          Page* page = reinterpret_cast<Page*>(chunk);
          PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
          Address start = page->area_start();
          Address end = page->area_end();
          if (owner == heap_->map_space()) {
            DCHECK(page->WasSweptPrecisely());
            HeapObjectIterator iterator(page, NULL);
            for (HeapObject* heap_object = iterator.Next();
                 heap_object != NULL; heap_object = iterator.Next()) {
              // We skip free space objects.
              if (!heap_object->IsFiller()) {
                FindPointersToNewSpaceInRegion(
                    heap_object->address() + HeapObject::kHeaderSize,
                    heap_object->address() + heap_object->Size(),
                    slot_callback, clear_maps);
              }
            }
          } else {
            if (!page->SweepingCompleted()) {
              heap_->mark_compact_collector()->SweepInParallel(page, owner);
              if (!page->SweepingCompleted()) {
                // We were not able to sweep that page, i.e., a concurrent
                // sweeper thread currently owns this page.
                // TODO(hpayer): This may introduce a huge pause here. We only
                // care about finishing the sweeping of the scan on scavenge
                // page.
                heap_->mark_compact_collector()->EnsureSweepingCompleted();
              }
            }
            // TODO(hpayer): remove the special casing and merge map and
            // pointer space handling as soon as we have removed conservative
            // sweeping.
            CHECK(page->owner() == heap_->old_pointer_space());
            if (heap_->old_pointer_space()->swept_precisely()) {
              HeapObjectIterator iterator(page, NULL);
              for (HeapObject* heap_object = iterator.Next();
                   heap_object != NULL; heap_object = iterator.Next()) {
                // We iterate over objects that contain new space pointers
                // only.
                if (heap_object->MayContainNewSpacePointers()) {
                  FindPointersToNewSpaceInRegion(
                      heap_object->address() + HeapObject::kHeaderSize,
                      heap_object->address() + heap_object->Size(),
                      slot_callback, clear_maps);
                }
              }
            } else {
              FindPointersToNewSpaceInRegion(start, end, slot_callback,
                                             clear_maps);
            }
          }
        }
      }
    }
    if (callback_ != NULL) {
      (*callback_)(heap_, NULL, kStoreBufferScanningPageEvent);
    }
  }
}


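// Moves the entries between start_ and the current store buffer top into the
// old buffer, resetting the top pointer, and uses the two filtering hash sets
// to drop most duplicate entries along the way.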
void StoreBuffer::Compact() {
  Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());

  if (top == start_) return;

  // There's no check of the limit in the loop below so we check here for
  // the worst case (compaction doesn't eliminate any pointers).
  DCHECK(top <= limit_);
  heap_->public_set_store_buffer_top(start_);
  EnsureSpace(top - start_);
  DCHECK(may_move_store_buffer_entries_);
  // Goes through the addresses in the store buffer attempting to remove
  // duplicates. In the interest of speed this is a lossy operation. Some
  // duplicates will remain. We have two hash sets with different hash
  // functions to reduce the number of unnecessary clashes.
  hash_sets_are_empty_ = false;  // Hash sets are in use.
  for (Address* current = start_; current < top; current++) {
    DCHECK(!heap_->cell_space()->Contains(*current));
    DCHECK(!heap_->code_space()->Contains(*current));
    DCHECK(!heap_->old_data_space()->Contains(*current));
    uintptr_t int_addr = reinterpret_cast<uintptr_t>(*current);
    // Shift out the last bits including any tags.
    int_addr >>= kPointerSizeLog2;
    // The upper part of an address is basically random because of ASLR and OS
    // non-determinism, so we use only the bits within a page for hashing to
    // make v8's behavior (more) deterministic.
    uintptr_t hash_addr =
        int_addr & (Page::kPageAlignmentMask >> kPointerSizeLog2);
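    // Probe two small direct-mapped hash sets using two different mixes of
    // the in-page bits; an exact hit in either set means this address has
    // already been recorded since the sets were last cleared.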
    int hash1 = ((hash_addr ^ (hash_addr >> kHashSetLengthLog2)) &
                 (kHashSetLength - 1));
    if (hash_set_1_[hash1] == int_addr) continue;
    uintptr_t hash2 = (hash_addr - (hash_addr >> kHashSetLengthLog2));
    hash2 ^= hash2 >> (kHashSetLengthLog2 * 2);
    hash2 &= (kHashSetLength - 1);
    if (hash_set_2_[hash2] == int_addr) continue;
    if (hash_set_1_[hash1] == 0) {
      hash_set_1_[hash1] = int_addr;
    } else if (hash_set_2_[hash2] == 0) {
      hash_set_2_[hash2] = int_addr;
    } else {
      // Rather than slowing down we just throw away some entries. This will
      // cause some duplicates to remain undetected.
      hash_set_1_[hash1] = int_addr;
      hash_set_2_[hash2] = 0;
    }
    old_buffer_is_sorted_ = false;
    old_buffer_is_filtered_ = false;
    *old_top_++ = reinterpret_cast<Address>(int_addr << kPointerSizeLog2);
    DCHECK(old_top_ <= old_limit_);
  }
  heap_->isolate()->counters()->store_buffer_compactions()->Increment();
}

} }  // namespace v8::internal