Chromium Code Reviews

Unified diff: src/store-buffer.cc

Issue 387483002: Revert "Precisely sweeping of scan-on-scavenge pages." (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 6 years, 5 months ago
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "src/store-buffer.h"
 
 #include <algorithm>
 
 #include "src/v8.h"
 
 #include "src/base/atomicops.h"
 #include "src/counters.h"
 #include "src/store-buffer-inl.h"
-#include "src/utils.h"
 
 namespace v8 {
 namespace internal {
 
 StoreBuffer::StoreBuffer(Heap* heap)
     : heap_(heap),
       start_(NULL),
       limit_(NULL),
       old_start_(NULL),
       old_limit_(NULL),
       old_top_(NULL),
-      old_regular_limit_(NULL),
       old_reserved_limit_(NULL),
-      old_virtual_memory_(NULL),
-      old_store_buffer_length_(0),
       old_buffer_is_sorted_(false),
       old_buffer_is_filtered_(false),
-      allow_overflow_(false),
+      during_gc_(false),
       store_buffer_rebuilding_enabled_(false),
       callback_(NULL),
       may_move_store_buffer_entries_(true),
       virtual_memory_(NULL),
       hash_set_1_(NULL),
       hash_set_2_(NULL),
       hash_sets_are_empty_(true) {
 }
 
 
 void StoreBuffer::SetUp() {
   virtual_memory_ = new base::VirtualMemory(kStoreBufferSize * 3);
   uintptr_t start_as_int =
       reinterpret_cast<uintptr_t>(virtual_memory_->address());
   start_ =
       reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize * 2));
   limit_ = start_ + (kStoreBufferSize / kPointerSize);
 
-  // We set the maximum store buffer size to the maximum size of a semi-space.
-  // The store buffer may reach this limit during a full garbage collection.
-  // Note that half of the semi-space should be good enough since half of the
-  // memory in the semi-space are not object pointers.
-  old_store_buffer_length_ =
-      Max(static_cast<int>(heap_->MaxSemiSpaceSize() / sizeof(Address)),
-          kOldRegularStoreBufferLength);
-
   old_virtual_memory_ =
-      new base::VirtualMemory(old_store_buffer_length_ * kPointerSize);
+      new base::VirtualMemory(kOldStoreBufferLength * kPointerSize);
   old_top_ = old_start_ =
       reinterpret_cast<Address*>(old_virtual_memory_->address());
   // Don't know the alignment requirements of the OS, but it is certainly not
   // less than 0xfff.
   ASSERT((reinterpret_cast<uintptr_t>(old_start_) & 0xfff) == 0);
   int initial_length =
       static_cast<int>(base::OS::CommitPageSize() / kPointerSize);
   ASSERT(initial_length > 0);
-  ASSERT(initial_length <= kOldRegularStoreBufferLength);
-  ASSERT(initial_length <= old_store_buffer_length_);
-  ASSERT(kOldRegularStoreBufferLength <= old_store_buffer_length_);
+  ASSERT(initial_length <= kOldStoreBufferLength);
   old_limit_ = old_start_ + initial_length;
-  old_regular_limit_ = old_start_ + kOldRegularStoreBufferLength;
-  old_reserved_limit_ = old_start_ + old_store_buffer_length_;
+  old_reserved_limit_ = old_start_ + kOldStoreBufferLength;
 
   CHECK(old_virtual_memory_->Commit(
             reinterpret_cast<void*>(old_start_),
             (old_limit_ - old_start_) * kPointerSize,
             false));
 
   ASSERT(reinterpret_cast<Address>(start_) >= virtual_memory_->address());
   ASSERT(reinterpret_cast<Address>(limit_) >= virtual_memory_->address());
   Address* vm_limit = reinterpret_cast<Address*>(
       reinterpret_cast<char*>(virtual_memory_->address()) +
(...skipping 16 matching lines...)
 
   ClearFilteringHashSets();
 }
 
 
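A note on the alignment trick in SetUp() above: the code reserves three times kStoreBufferSize but rounds the start up to a boundary of twice that size, which guarantees that the aligned buffer still fits inside the reservation. The standalone sketch below illustrates the same arithmetic; kBufferSize and the malloc-based reservation are illustrative stand-ins, not part of this CL:

    #include <cassert>
    #include <cstdint>
    #include <cstdlib>

    int main() {
      const uintptr_t kBufferSize = 1 << 16;  // Stand-in for kStoreBufferSize.
      // Over-reserve 3x so an aligned 1x window must exist inside.
      void* reservation = malloc(kBufferSize * 3);
      uintptr_t start_as_int = reinterpret_cast<uintptr_t>(reservation);
      // Round up to a 2x boundary, mirroring RoundUp() in SetUp().
      uintptr_t alignment = kBufferSize * 2;
      uintptr_t start = (start_as_int + alignment - 1) & ~(alignment - 1);
      // Rounding up adds less than 2x, so the window ends inside the 3x block.
      assert(start + kBufferSize <= start_as_int + kBufferSize * 3);
      free(reservation);
      return 0;
    }
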
 void StoreBuffer::TearDown() {
   delete virtual_memory_;
   delete old_virtual_memory_;
   delete[] hash_set_1_;
   delete[] hash_set_2_;
-  old_start_ = NULL;
-  old_top_ = NULL;
-  old_limit_ = NULL;
-  old_reserved_limit_ = NULL;
-  old_regular_limit_ = NULL;
-  start_ = NULL;
-  limit_ = NULL;
+  old_start_ = old_top_ = old_limit_ = old_reserved_limit_ = NULL;
+  start_ = limit_ = NULL;
   heap_->public_set_store_buffer_top(start_);
 }
 
 
 void StoreBuffer::StoreBufferOverflow(Isolate* isolate) {
   isolate->heap()->store_buffer()->Compact();
   isolate->counters()->store_buffer_overflows()->Increment();
 }
 
 
(...skipping 13 matching lines...)
   }
   old_top_ = write;
 }
 
 
 bool StoreBuffer::SpaceAvailable(intptr_t space_needed) {
   return old_limit_ - old_top_ >= space_needed;
 }
 
 
-template<StoreBuffer::ExemptPopularPagesMode mode>
-void StoreBuffer::IterativelyExemptPopularPages(intptr_t space_needed) {
-  // Sample 1 entry in 97 and filter out the pages where we estimate that more
-  // than 1 in 8 pointers are to new space.
-  static const int kSampleFinenesses = 5;
-  static const struct Samples {
-    int prime_sample_step;
-    int threshold;
-  } samples[kSampleFinenesses] = {
-    { 97, ((Page::kPageSize / kPointerSize) / 97) / 8 },
-    { 23, ((Page::kPageSize / kPointerSize) / 23) / 16 },
-    { 7, ((Page::kPageSize / kPointerSize) / 7) / 32 },
-    { 3, ((Page::kPageSize / kPointerSize) / 3) / 256 },
-    { 1, 0}
-  };
-  for (int i = 0; i < kSampleFinenesses; i++) {
-    ExemptPopularPages(samples[i].prime_sample_step, samples[i].threshold);
-    // As a last resort we mark all pages as being exempt from the store buffer.
-    ASSERT(i != (kSampleFinenesses - 1) || old_top_ == old_start_);
-    if (mode == ENSURE_SPACE && SpaceAvailable(space_needed)) return;
-    else if (mode == SHRINK_TO_REGULAR_SIZE && old_top_ < old_limit_) return;
-  }
-}
-
-
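The helper deleted above folded two loop-exit policies into one body by dispatching on a compile-time template parameter (ENSURE_SPACE vs. SHRINK_TO_REGULAR_SIZE). Here is a minimal sketch of that dispatch pattern with invented names and toy conditions; it shows the mechanics only, not the store-buffer logic itself:

    #include <cstdio>

    enum Mode { ENSURE_SPACE, SHRINK_TO_REGULAR_SIZE };

    // Each instantiation is compiled with `mode` fixed, so the dead branch
    // is folded away and only one exit test remains at runtime.
    template <Mode mode>
    int FirstAcceptableStep(int needed) {
      for (int step = 1; step <= 5; step++) {
        if (mode == ENSURE_SPACE && step * 10 >= needed) return step;
        if (mode == SHRINK_TO_REGULAR_SIZE && step >= 3) return step;
      }
      return -1;
    }

    int main() {
      printf("%d\n", FirstAcceptableStep<ENSURE_SPACE>(25));           // 3
      printf("%d\n", FirstAcceptableStep<SHRINK_TO_REGULAR_SIZE>(0));  // 3
      return 0;
    }
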
 void StoreBuffer::EnsureSpace(intptr_t space_needed) {
   while (old_limit_ - old_top_ < space_needed &&
-         ((!allow_overflow_ && old_limit_ < old_regular_limit_) ||
-          (allow_overflow_ && old_limit_ < old_reserved_limit_))) {
+         old_limit_ < old_reserved_limit_) {
     size_t grow = old_limit_ - old_start_;  // Double size.
     CHECK(old_virtual_memory_->Commit(reinterpret_cast<void*>(old_limit_),
                                       grow * kPointerSize,
                                       false));
     old_limit_ += grow;
   }
 
   if (SpaceAvailable(space_needed)) return;
 
   if (old_buffer_is_filtered_) return;
(...skipping 11 matching lines...)
       break;
     }
   }
 
   if (page_has_scan_on_scavenge_flag) {
     Filter(MemoryChunk::SCAN_ON_SCAVENGE);
   }
 
   if (SpaceAvailable(space_needed)) return;
 
-  IterativelyExemptPopularPages<ENSURE_SPACE>(space_needed);
-  ASSERT(SpaceAvailable(space_needed));
+  // Sample 1 entry in 97 and filter out the pages where we estimate that more
+  // than 1 in 8 pointers are to new space.
+  static const int kSampleFinenesses = 5;
+  static const struct Samples {
+    int prime_sample_step;
+    int threshold;
+  } samples[kSampleFinenesses] = {
+    { 97, ((Page::kPageSize / kPointerSize) / 97) / 8 },
+    { 23, ((Page::kPageSize / kPointerSize) / 23) / 16 },
+    { 7, ((Page::kPageSize / kPointerSize) / 7) / 32 },
+    { 3, ((Page::kPageSize / kPointerSize) / 3) / 256 },
+    { 1, 0}
+  };
+  for (int i = 0; i < kSampleFinenesses; i++) {
+    ExemptPopularPages(samples[i].prime_sample_step, samples[i].threshold);
+    // As a last resort we mark all pages as being exempt from the store buffer.
+    ASSERT(i != (kSampleFinenesses - 1) || old_top_ == old_start_);
+    if (SpaceAvailable(space_needed)) return;
+  }
+  UNREACHABLE();
 }
 
 
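For a feel of the thresholds the restored samples[] table produces: assuming 1 MB pages and 8-byte pointers (stated here as an assumption, not taken from this CL), a page holds 131072 pointer slots and the integer arithmetic works out as in this sketch:

    #include <cstdio>

    int main() {
      // Assumed for illustration: 1 MB pages, 8-byte pointers.
      const int kPageSize = 1 << 20;
      const int kPointerSize = 8;
      const int steps[] = {97, 23, 7, 3};
      const int divisors[] = {8, 16, 32, 256};
      for (int i = 0; i < 4; i++) {
        // Same integer arithmetic as the samples[] table in EnsureSpace().
        int threshold = ((kPageSize / kPointerSize) / steps[i]) / divisors[i];
        printf("step %2d -> threshold %d\n", steps[i], threshold);
      }
      // Prints 168, 356, 585, and 170; the final row {1, 0} samples every
      // entry and exempts any page that appears at all.
      return 0;
    }
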
 // Sample the store buffer to see if some pages are taking up a lot of space
 // in the store buffer.
 void StoreBuffer::ExemptPopularPages(int prime_sample_step, int threshold) {
   PointerChunkIterator it(heap_);
   MemoryChunk* chunk;
   while ((chunk = it.next()) != NULL) {
     chunk->set_store_buffer_counter(0);
(...skipping 126 matching lines...)
            0,
            sizeof(uintptr_t) * kHashSetLength);
     memset(reinterpret_cast<void*>(hash_set_2_),
            0,
            sizeof(uintptr_t) * kHashSetLength);
     hash_sets_are_empty_ = true;
   }
 }
 
 
-void StoreBuffer::GCPrologue(bool allow_overflow) {
+void StoreBuffer::GCPrologue() {
   ClearFilteringHashSets();
-  allow_overflow_ = allow_overflow;
+  during_gc_ = true;
 }
 
 
 #ifdef VERIFY_HEAP
 void StoreBuffer::VerifyPointers(LargeObjectSpace* space) {
   LargeObjectIterator it(space);
   for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
     if (object->IsFixedArray()) {
       Address slot_address = object->address();
       Address end = object->address() + object->Size();
(...skipping 15 matching lines...)
 
 
 void StoreBuffer::Verify() {
 #ifdef VERIFY_HEAP
   VerifyPointers(heap_->lo_space());
 #endif
 }
 
 
 void StoreBuffer::GCEpilogue() {
-  if (allow_overflow_ && old_limit_ > old_regular_limit_) {
-    IterativelyExemptPopularPages<SHRINK_TO_REGULAR_SIZE>(0);
-    ASSERT(old_limit_ < old_regular_limit_);
-    old_virtual_memory_->Uncommit(old_limit_, old_regular_limit_ - old_limit_);
-  }
-
-  allow_overflow_ = false;
+  during_gc_ = false;
 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap) {
     Verify();
   }
 #endif
 }
 
 
 void StoreBuffer::FindPointersToNewSpaceInRegion(
     Address start,
(...skipping 101 matching lines...)
         }
         if (chunk->owner() == heap_->lo_space()) {
           LargePage* large_page = reinterpret_cast<LargePage*>(chunk);
           HeapObject* array = large_page->GetObject();
           ASSERT(array->IsFixedArray());
           Address start = array->address();
           Address end = start + array->Size();
           FindPointersToNewSpaceInRegion(start, end, slot_callback, clear_maps);
         } else {
           Page* page = reinterpret_cast<Page*>(chunk);
-          ASSERT(page->owner() == heap_->map_space() ||
-                 page->owner() == heap_->old_pointer_space());
-          CHECK(!page->WasSweptConservatively());
-
-          HeapObjectIterator iterator(page, NULL);
-          for (HeapObject* heap_object = iterator.Next();
-               heap_object != NULL;
-               heap_object = iterator.Next()) {
-            // We iterate over objects that contain pointers only.
-            if (heap_object->ContainsPointers()) {
-              FindPointersToNewSpaceInRegion(
-                  heap_object->address() + HeapObject::kHeaderSize,
-                  heap_object->address() + heap_object->Size(),
-                  slot_callback,
-                  clear_maps);
-            }
-          }
+          PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
+          Address start = page->area_start();
+          Address end = page->area_end();
+          if (owner == heap_->map_space()) {
+            ASSERT(page->WasSweptPrecisely());
+            HeapObjectIterator iterator(page, NULL);
+            for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
+                 heap_object = iterator.Next()) {
+              // We skip free space objects.
+              if (!heap_object->IsFiller()) {
+                FindPointersToNewSpaceInRegion(
+                    heap_object->address() + HeapObject::kHeaderSize,
+                    heap_object->address() + heap_object->Size(), slot_callback,
+                    clear_maps);
+              }
+            }
+          } else {
+            FindPointersToNewSpaceInRegion(
+                start, end, slot_callback, clear_maps);
+          }
         }
       }
     }
     if (callback_ != NULL) {
       (*callback_)(heap_, NULL, kStoreBufferScanningPageEvent);
     }
   }
 }
 
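The restored branch above walks a precisely swept page object by object and skips filler objects rather than blindly scanning the whole page area. The following is a simplified stand-in for that iteration pattern; the Object struct and the sizes are invented for illustration:

    #include <cstdio>

    // Hypothetical stand-in for heap objects laid out contiguously on a page.
    struct Object {
      bool is_filler;  // Free-space placeholder left behind by a precise sweep.
      int size;        // Object size in bytes, including the header.
    };

    int main() {
      Object page[] = {{false, 32}, {true, 64}, {false, 16}};
      int address = 0;  // Offset of the current object within the page.
      for (const Object& obj : page) {
        if (!obj.is_filler) {
          // The real code scans [address + header size, address + size)
          // for new-space pointers; here we just report the range.
          printf("scan [%d, %d)\n", address, address + obj.size);
        }
        address += obj.size;
      }
      return 0;
    }
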
(...skipping 45 matching lines...)
     }
     old_buffer_is_sorted_ = false;
     old_buffer_is_filtered_ = false;
     *old_top_++ = reinterpret_cast<Address>(int_addr << kPointerSizeLog2);
     ASSERT(old_top_ <= old_limit_);
   }
   heap_->isolate()->counters()->store_buffer_compactions()->Increment();
 }
 
 } }  // namespace v8::internal
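In the Compact() tail above, entries are stored with the pointer-size bits shifted off (int_addr) and reconstructed with a left shift; pointer alignment makes the round trip lossless. A small sketch of that invariant, with an assumed 8-byte pointer size and a made-up sample address:

    #include <cassert>
    #include <cstdint>

    int main() {
      const int kPointerSizeLog2 = 3;  // Assumed: 8-byte pointers.
      uint64_t slot_address = 0x7f00deadbee8ULL;  // Pointer-aligned sample.
      assert((slot_address & ((uint64_t(1) << kPointerSizeLog2) - 1)) == 0);
      // Drop the always-zero low bits, as Compact() does before hashing.
      uint64_t int_addr = slot_address >> kPointerSizeLog2;
      // Shifting back restores the original address exactly.
      assert((int_addr << kPointerSizeLog2) == slot_address);
      return 0;
    }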