Chromium Code Reviews

Unified Diff: src/store-buffer.cc

Issue 372193002: Avoid scan-on-scavenge pages during full collection. (Closed)
Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 6 years, 5 months ago
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "src/store-buffer.h"

 #include <algorithm>

 #include "src/v8.h"

 #include "src/base/atomicops.h"
 #include "src/counters.h"
 #include "src/store-buffer-inl.h"

 namespace v8 {
 namespace internal {

 StoreBuffer::StoreBuffer(Heap* heap)
     : heap_(heap),
       start_(NULL),
       limit_(NULL),
       old_start_(NULL),
       old_limit_(NULL),
       old_top_(NULL),
+      old_regular_limit_(NULL),
       old_reserved_limit_(NULL),
+      old_virtual_memory_(NULL),
+      old_store_buffer_length_(0),
       old_buffer_is_sorted_(false),
       old_buffer_is_filtered_(false),
-      during_gc_(false),
+      allow_overflow_(false),
       store_buffer_rebuilding_enabled_(false),
       callback_(NULL),
       may_move_store_buffer_entries_(true),
       virtual_memory_(NULL),
       hash_set_1_(NULL),
       hash_set_2_(NULL),
       hash_sets_are_empty_(true) {
 }

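The new fields split the old buffer's single cap in two: old_regular_limit_ bounds normal operation, while old_reserved_limit_ (sized in SetUp() below) bounds the overflow mode that full collections may use. A hedged sketch of the ordering the rest of the file appears to maintain; this helper is illustrative and not part of the patch:

#include <cassert>
#include <cstdint>

// Illustrative only, not part of the patch: the limit ordering that
// SetUp(), EnsureSpace(), and GCEpilogue() below appear to maintain.
void CheckLimitOrdering(uintptr_t old_start, uintptr_t old_top,
                        uintptr_t old_limit, uintptr_t old_regular_limit,
                        uintptr_t old_reserved_limit, bool allow_overflow) {
  assert(old_start <= old_top);      // used entries start at old_start_
  assert(old_top <= old_limit);      // and stay below the committed limit
  assert(old_limit <= old_reserved_limit);  // never past the reservation
  assert(old_regular_limit <= old_reserved_limit);
  // Only an overflow-allowed (full) GC may commit past the regular cap.
  if (!allow_overflow) assert(old_limit <= old_regular_limit);
}
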
 void StoreBuffer::SetUp() {
   virtual_memory_ = new base::VirtualMemory(kStoreBufferSize * 3);
   uintptr_t start_as_int =
       reinterpret_cast<uintptr_t>(virtual_memory_->address());
   start_ =
       reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize * 2));
   limit_ = start_ + (kStoreBufferSize / kPointerSize);

+  // We set the maximum store buffer size to the maximum size of a semi-space.
+  // The store buffer may reach this limit during a full garbage collection.
+  // Note that half of the semi-space should be good enough, since half of the
+  // memory in the semi-space is not object pointers.
+  old_store_buffer_length_ = heap_->MaxSemiSpaceSize() / sizeof(Address);
+
   old_virtual_memory_ =
-      new base::VirtualMemory(kOldStoreBufferLength * kPointerSize);
+      new base::VirtualMemory(old_store_buffer_length_ * kPointerSize);
   old_top_ = old_start_ =
       reinterpret_cast<Address*>(old_virtual_memory_->address());
   // Don't know the alignment requirements of the OS, but it is certainly not
   // less than 0xfff.
   ASSERT((reinterpret_cast<uintptr_t>(old_start_) & 0xfff) == 0);
   int initial_length =
       static_cast<int>(base::OS::CommitPageSize() / kPointerSize);
   ASSERT(initial_length > 0);
-  ASSERT(initial_length <= kOldStoreBufferLength);
+  ASSERT(initial_length <= kOldRegularStoreBufferLength);
+  ASSERT(initial_length <= old_store_buffer_length_);
+  ASSERT(kOldRegularStoreBufferLength <= old_store_buffer_length_);
   old_limit_ = old_start_ + initial_length;
-  old_reserved_limit_ = old_start_ + kOldStoreBufferLength;
+  old_regular_limit_ = old_start_ + kOldRegularStoreBufferLength;
+  old_reserved_limit_ = old_start_ + old_store_buffer_length_;

   CHECK(old_virtual_memory_->Commit(
       reinterpret_cast<void*>(old_start_),
       (old_limit_ - old_start_) * kPointerSize,
       false));

   ASSERT(reinterpret_cast<Address>(start_) >= virtual_memory_->address());
   ASSERT(reinterpret_cast<Address>(limit_) >= virtual_memory_->address());
   Address* vm_limit = reinterpret_cast<Address*>(
       reinterpret_cast<char*>(virtual_memory_->address()) +
(...skipping 16 matching lines...)

   ClearFilteringHashSets();
 }

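To make the new sizing concrete, a standalone arithmetic sketch follows. The 8 MB maximum semi-space and 8-byte pointer size are assumed values for illustration, not taken from this CL:

#include <cstddef>
#include <cstdio>

int main() {
  // Assumed inputs; the real values come from Heap::MaxSemiSpaceSize() and
  // the platform pointer size.
  const size_t kMaxSemiSpaceSize = 8 * 1024 * 1024;  // 8 MB (assumption)
  const size_t kPointerSize = 8;                     // 64-bit build

  // Mirrors: old_store_buffer_length_ = MaxSemiSpaceSize() / sizeof(Address).
  const size_t old_store_buffer_length = kMaxSemiSpaceSize / kPointerSize;
  // Mirrors: new base::VirtualMemory(old_store_buffer_length_ * kPointerSize).
  const size_t reserved_bytes = old_store_buffer_length * kPointerSize;

  printf("entries: %zu, reserved: %zu MB\n",
         old_store_buffer_length, reserved_bytes / (1024 * 1024));
  // Prints: entries: 1048576, reserved: 8 MB
  return 0;
}

Since each entry is one pointer-sized slot address, the reservation is exactly as many bytes as the semi-space it is sized against; only one OS page of it is committed up front (initial_length above).
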
 void StoreBuffer::TearDown() {
   delete virtual_memory_;
   delete old_virtual_memory_;
   delete[] hash_set_1_;
   delete[] hash_set_2_;
-  old_start_ = old_top_ = old_limit_ = old_reserved_limit_ = NULL;
-  start_ = limit_ = NULL;
+  old_start_ = NULL;
+  old_top_ = NULL;
+  old_limit_ = NULL;
+  old_reserved_limit_ = NULL;
+  old_regular_limit_ = NULL;
+  start_ = NULL;
+  limit_ = NULL;
   heap_->public_set_store_buffer_top(start_);
 }

 void StoreBuffer::StoreBufferOverflow(Isolate* isolate) {
   isolate->heap()->store_buffer()->Compact();
   isolate->counters()->store_buffer_overflows()->Increment();
 }

(...skipping 13 matching lines...)
   }
   old_top_ = write;
 }


 bool StoreBuffer::SpaceAvailable(intptr_t space_needed) {
   return old_limit_ - old_top_ >= space_needed;
 }

+template<StoreBuffer::ExemptPopularPagesMode mode>
+void StoreBuffer::IterativelyExemptPopularPages(intptr_t space_needed) {
+  // Sample 1 entry in 97 and filter out the pages where we estimate that more
+  // than 1 in 8 pointers are to new space.
+  static const int kSampleFinenesses = 5;
+  static const struct Samples {
+    int prime_sample_step;
+    int threshold;
+  } samples[kSampleFinenesses] = {
+    { 97, ((Page::kPageSize / kPointerSize) / 97) / 8 },
+    { 23, ((Page::kPageSize / kPointerSize) / 23) / 16 },
+    { 7, ((Page::kPageSize / kPointerSize) / 7) / 32 },
+    { 3, ((Page::kPageSize / kPointerSize) / 3) / 256 },
+    { 1, 0 }
+  };
+  for (int i = 0; i < kSampleFinenesses; i++) {
+    ExemptPopularPages(samples[i].prime_sample_step, samples[i].threshold);
+    // As a last resort we mark all pages as being exempt from the store buffer.
+    ASSERT(i != (kSampleFinenesses - 1) || old_top_ == old_start_);
+    if (mode == ENSURE_SPACE && SpaceAvailable(space_needed)) return;
+    else if (mode == SHRINK_TO_REGULAR_SIZE && old_top_ < old_limit_) return;
+  }
+}

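Each row pairs a coarser sampling stride with a looser per-page threshold, and the final { 1, 0 } row samples every entry with a zero threshold, which presumably exempts any page that has entries at all (hence the ASSERT that the buffer is empty on the last iteration). Assuming 1 MB pages and 8-byte pointers (illustrative build parameters, not taken from this CL), the thresholds evaluate as follows:

#include <cstdio>

int main() {
  // Illustrative parameters; the real Page::kPageSize and kPointerSize
  // depend on the build configuration.
  const int kPageSize = 1024 * 1024;           // assumed 1 MB pages
  const int kPointerSize = 8;                  // assumed 64-bit build
  const int slots = kPageSize / kPointerSize;  // 131072 slots per page

  struct { int step; int divisor; } rows[] = {
      {97, 8}, {23, 16}, {7, 32}, {3, 256}};
  for (auto& r : rows) {
    // threshold = ((kPageSize / kPointerSize) / step) / divisor
    printf("step %2d -> threshold %d\n", r.step, (slots / r.step) / r.divisor);
  }
  // step 97 -> threshold 168
  // step 23 -> threshold 356
  // step  7 -> threshold 585
  // step  3 -> threshold 170
  return 0;
}
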
 void StoreBuffer::EnsureSpace(intptr_t space_needed) {
   while (old_limit_ - old_top_ < space_needed &&
-         old_limit_ < old_reserved_limit_) {
+         ((!allow_overflow_ && old_limit_ < old_regular_limit_) ||
+          (allow_overflow_ && old_limit_ < old_reserved_limit_))) {
     size_t grow = old_limit_ - old_start_;  // Double size.
     CHECK(old_virtual_memory_->Commit(reinterpret_cast<void*>(old_limit_),
                                       grow * kPointerSize,
                                       false));
     old_limit_ += grow;
   }

   if (SpaceAvailable(space_needed)) return;

   if (old_buffer_is_filtered_) return;
(...skipping 11 matching lines...)
       break;
     }
   }

   if (page_has_scan_on_scavenge_flag) {
     Filter(MemoryChunk::SCAN_ON_SCAVENGE);
   }

   if (SpaceAvailable(space_needed)) return;

-  // Sample 1 entry in 97 and filter out the pages where we estimate that more
-  // than 1 in 8 pointers are to new space.
-  static const int kSampleFinenesses = 5;
-  static const struct Samples {
-    int prime_sample_step;
-    int threshold;
-  } samples[kSampleFinenesses] = {
-    { 97, ((Page::kPageSize / kPointerSize) / 97) / 8 },
-    { 23, ((Page::kPageSize / kPointerSize) / 23) / 16 },
-    { 7, ((Page::kPageSize / kPointerSize) / 7) / 32 },
-    { 3, ((Page::kPageSize / kPointerSize) / 3) / 256 },
-    { 1, 0 }
-  };
-  for (int i = 0; i < kSampleFinenesses; i++) {
-    ExemptPopularPages(samples[i].prime_sample_step, samples[i].threshold);
-    // As a last resort we mark all pages as being exempt from the store buffer.
-    ASSERT(i != (kSampleFinenesses - 1) || old_top_ == old_start_);
-    if (SpaceAvailable(space_needed)) return;
-  }
-  UNREACHABLE();
+  IterativelyExemptPopularPages<ENSURE_SPACE>(space_needed);
+  ASSERT(SpaceAvailable(space_needed));
 }

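The commit loop grows the committed region geometrically ("Double size"), so only logarithmically many Commit calls are needed before a cap is hit. A standalone sketch of that growth pattern; all the entry counts below are assumptions for illustration:

#include <cstddef>
#include <cstdio>

int main() {
  // Assumed sizes in pointer-sized entries (illustrative only).
  const size_t regular_limit = 16 * 1024;   // stand-in for old_regular_limit_
  const size_t reserved_limit = 64 * 1024;  // stand-in for old_reserved_limit_
  const size_t space_needed = 40 * 1024;
  const bool allow_overflow = true;         // full GC: may pass regular_limit
  size_t committed = 512;                   // initial_length: one OS page

  while (committed < space_needed &&
         ((!allow_overflow && committed < regular_limit) ||
          (allow_overflow && committed < reserved_limit))) {
    committed += committed;  // "Double size", as in EnsureSpace().
    printf("committed %zu entries\n", committed);
  }
  // Prints 1024, 2048, ..., 65536: seven Commit calls to cover 40K entries.
  return 0;
}
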
 // Sample the store buffer to see if some pages are taking up a lot of space
 // in the store buffer.
 void StoreBuffer::ExemptPopularPages(int prime_sample_step, int threshold) {
   PointerChunkIterator it(heap_);
   MemoryChunk* chunk;
   while ((chunk = it.next()) != NULL) {
     chunk->set_store_buffer_counter(0);
(...skipping 126 matching lines...)
            0,
            sizeof(uintptr_t) * kHashSetLength);
     memset(reinterpret_cast<void*>(hash_set_2_),
            0,
            sizeof(uintptr_t) * kHashSetLength);
     hash_sets_are_empty_ = true;
   }
 }


-void StoreBuffer::GCPrologue() {
+void StoreBuffer::GCPrologue(bool allow_overflow) {
   ClearFilteringHashSets();
-  during_gc_ = true;
+  allow_overflow_ = allow_overflow;
 }

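The call sites are not part of this file, so the following is an assumption based on the CL title and the SetUp() comment: presumably scavenges keep the strict limit while full collections opt into overflow. A hypothetical mock of the protocol, not V8 code:

// Hypothetical mock: how the two collectors presumably drive the new flag.
// Real call sites live outside this file (e.g. heap.cc).
struct StoreBufferLike {
  bool allow_overflow_ = false;
  void GCPrologue(bool allow_overflow) { allow_overflow_ = allow_overflow; }
  void GCEpilogue() { allow_overflow_ = false; }
};

void Scavenge(StoreBufferLike* sb) {
  sb->GCPrologue(false);  // a scavenge must not overflow the store buffer
  // ... evacuate new space ...
  sb->GCEpilogue();
}

void MarkCompact(StoreBufferLike* sb) {
  sb->GCPrologue(true);   // a full GC may grow past old_regular_limit_
  // ... mark, sweep, compact ...
  sb->GCEpilogue();
}
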
 #ifdef VERIFY_HEAP
 void StoreBuffer::VerifyPointers(LargeObjectSpace* space) {
   LargeObjectIterator it(space);
   for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
     if (object->IsFixedArray()) {
       Address slot_address = object->address();
       Address end = object->address() + object->Size();
(...skipping 15 matching lines...)

 void StoreBuffer::Verify() {
 #ifdef VERIFY_HEAP
   VerifyPointers(heap_->lo_space());
 #endif
 }


 void StoreBuffer::GCEpilogue() {
-  during_gc_ = false;
+  if (allow_overflow_ && old_limit_ > old_regular_limit_) {
+    IterativelyExemptPopularPages<SHRINK_TO_REGULAR_SIZE>(0);
+    ASSERT(old_limit_ < old_regular_limit_);
+    old_virtual_memory_->Uncommit(old_limit_, old_regular_limit_ - old_limit_);
+  }
+
+  allow_overflow_ = false;
 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap) {
     Verify();
   }
 #endif
 }

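Two details here may be worth a second look: the Commit calls elsewhere in this file convert entry counts to bytes with * kPointerSize, but this Uncommit passes the raw pointer difference, and old_regular_limit_ - old_limit_ is negative whenever the surrounding guard old_limit_ > old_regular_limit_ holds (old_limit_ is not reduced first). A hedged sketch of what the shrink presumably intends, uncommitting the overflow tail and pulling old_limit_ back to the regular cap:

// Sketch only; not the patch as written. Assumes the goal is to release
// the committed overflow region [old_regular_limit_, old_limit_).
if (allow_overflow_ && old_limit_ > old_regular_limit_) {
  IterativelyExemptPopularPages<SHRINK_TO_REGULAR_SIZE>(0);
  ASSERT(old_top_ <= old_regular_limit_);
  old_virtual_memory_->Uncommit(
      reinterpret_cast<void*>(old_regular_limit_),
      (old_limit_ - old_regular_limit_) * kPointerSize);
  old_limit_ = old_regular_limit_;
}
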
 void StoreBuffer::FindPointersToNewSpaceInRegion(
     Address start,
(...skipping 185 matching lines...)
     }
     old_buffer_is_sorted_ = false;
     old_buffer_is_filtered_ = false;
     *old_top_++ = reinterpret_cast<Address>(int_addr << kPointerSizeLog2);
     ASSERT(old_top_ <= old_limit_);
   }
   heap_->isolate()->counters()->store_buffer_compactions()->Increment();
 }

 } }  // namespace v8::internal