Chromium Code Reviews

Unified Diff: src/heap/store-buffer.cc

Issue 957273002: Remove slots that point to unboxed doubles from the StoreBuffer/SlotsBuffer. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Addressed comments Created 5 years, 9 months ago
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include <algorithm>

 #include "src/v8.h"

 #include "src/base/atomicops.h"
 #include "src/counters.h"
(...skipping 97 matching lines...)
 }


 void StoreBuffer::Uniq() {
   // Remove adjacent duplicates and cells that do not point at new space.
   Address previous = NULL;
   Address* write = old_start_;
   DCHECK(may_move_store_buffer_entries_);
   for (Address* read = old_start_; read < old_top_; read++) {
     Address current = *read;
+    DCHECK_NOT_NULL(current);
     if (current != previous) {
       Object* object = reinterpret_cast<Object*>(
           base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(current)));
       if (heap_->InNewSpace(object)) {
         *write++ = current;
       }
     }
     previous = current;
   }
   old_top_ = write;
(...skipping 67 matching lines...)
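For context: Uniq() above is the classic read/write-pointer compaction — survivors are packed toward the front and the top pointer is lowered. A minimal standalone sketch of the same pattern, with std::vector<uintptr_t> standing in for the raw Address array and a caller-supplied predicate in place of heap_->InNewSpace() (names are illustrative, not V8 API):

```cpp
// Sketch of the read/write-pointer compaction used by Uniq(): adjacent
// duplicates and entries failing keep() are dropped in one pass.
#include <cstddef>
#include <cstdint>
#include <vector>

std::size_t CompactInPlace(std::vector<std::uintptr_t>* buf,
                           bool (*keep)(std::uintptr_t)) {
  std::uintptr_t previous = 0;  // Plays the role of Uniq()'s NULL `previous`.
  std::size_t write = 0;
  for (std::size_t read = 0; read < buf->size(); read++) {
    std::uintptr_t current = (*buf)[read];
    if (current != previous && keep(current)) {
      (*buf)[write++] = current;  // Survivors are packed to the front.
    }
    previous = current;
  }
  buf->resize(write);  // Corresponds to `old_top_ = write`.
  return write;
}
```

It is essentially std::unique plus a filter, done in a single pass so the buffer never has to be copied.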
 void StoreBuffer::ExemptPopularPages(int prime_sample_step, int threshold) {
   PointerChunkIterator it(heap_);
   MemoryChunk* chunk;
   while ((chunk = it.next()) != NULL) {
     chunk->set_store_buffer_counter(0);
   }
   bool created_new_scan_on_scavenge_pages = false;
   MemoryChunk* previous_chunk = NULL;
   for (Address* p = old_start_; p < old_top_; p += prime_sample_step) {
     Address addr = *p;
+    if (addr == NULL) continue;
     MemoryChunk* containing_chunk = NULL;
     if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
       containing_chunk = previous_chunk;
     } else {
       containing_chunk = MemoryChunk::FromAnyPointerAddress(heap_, addr);
     }
     int old_counter = containing_chunk->store_buffer_counter();
     if (old_counter >= threshold) {
       containing_chunk->set_scan_on_scavenge(true);
       created_new_scan_on_scavenge_pages = true;
     }
     containing_chunk->set_store_buffer_counter(old_counter + 1);
     previous_chunk = containing_chunk;
   }
   if (created_new_scan_on_scavenge_pages) {
     Filter(MemoryChunk::SCAN_ON_SCAVENGE);
   }
   old_buffer_is_filtered_ = true;
 }


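ExemptPopularPages() above takes every prime_sample_step-th slot as a sample and, once a page collects threshold samples, marks it scan-on-scavenge so its slots can be dropped wholesale via Filter(). A hedged sketch of the counting scheme; Page, PageOf(), and the page size are illustrative stand-ins for MemoryChunk and MemoryChunk::FromAnyPointerAddress, not V8's types:

```cpp
// Sketch of the popularity-sampling scheme. Every `step`-th slot votes for
// its page; a page that reaches `threshold` votes is flagged for wholesale
// rescanning instead of per-slot bookkeeping.
#include <cstddef>
#include <cstdint>
#include <unordered_map>
#include <vector>

struct Page { int counter = 0; bool scan_on_scavenge = false; };

constexpr std::uintptr_t kPageSize = std::uintptr_t{1} << 20;  // Assumed size.
std::unordered_map<std::uintptr_t, Page> pages;

// Hypothetical stand-in for MemoryChunk::FromAnyPointerAddress.
Page* PageOf(std::uintptr_t addr) { return &pages[addr & ~(kPageSize - 1)]; }

void ExemptPopular(const std::vector<std::uintptr_t>& slots,
                   std::size_t step, int threshold) {
  for (std::size_t i = 0; i < slots.size(); i += step) {
    if (slots[i] == 0) continue;  // Skip removed (NULL) slots, as the patch does.
    Page* page = PageOf(slots[i]);
    // Mirror the original: compare the old count, then increment.
    if (page->counter++ >= threshold) page->scan_on_scavenge = true;
  }
}
```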
 void StoreBuffer::Filter(int flag) {
   Address* new_top = old_start_;
   MemoryChunk* previous_chunk = NULL;
   for (Address* p = old_start_; p < old_top_; p++) {
     Address addr = *p;
+    if (addr == NULL) continue;
     MemoryChunk* containing_chunk = NULL;
     if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
       containing_chunk = previous_chunk;
     } else {
       containing_chunk = MemoryChunk::FromAnyPointerAddress(heap_, addr);
       previous_chunk = containing_chunk;
     }
     if (!containing_chunk->IsFlagSet(flag)) {
       *new_top++ = addr;
     }
   }
   old_top_ = new_top;

   // Filtering hash sets are inconsistent with the store buffer after this
   // operation.
   ClearFilteringHashSets();
 }


+void StoreBuffer::RemoveSlots(Address start_address, Address end_address) {
+  struct IsValueInRangePredicate {
+    Address start_address_;
+    Address end_address_;
+
+    IsValueInRangePredicate(Address start_address, Address end_address)
+        : start_address_(start_address), end_address_(end_address) {}
+
+    bool operator()(Address addr) {
+      return start_address_ <= addr && addr < end_address_;
+    }
+  };
+
+  IsValueInRangePredicate predicate(start_address, end_address);
+  const Address kRemovedSlot = NULL;
Hannes Payer (out of office) 2015/03/05 21:34:43 The NULL value is not much fun. Maybe we should po
Igor Sheludko 2015/03/06 09:06:59 Good point!
+
+  {
+    Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
+    std::replace_if(start_, top, predicate, kRemovedSlot);
+  }
+
+  if (old_buffer_is_sorted_) {
+    // Remove slots from an old buffer preserving the order.
+    Address* lower = std::lower_bound(old_start_, old_top_, start_address);
+    if (lower != old_top_) {
+      Address* upper = std::lower_bound(lower, old_top_, end_address);
+      // Remove [lower, upper) from the buffer.
+      if (upper == old_top_) {
+        old_top_ = lower;
+      } else {
+        Address* new_top = lower;
+        for (Address* p = upper; p < old_top_; p++) {
+          *new_top++ = *p;
+        }
+        old_top_ = new_top;
Hannes Payer (out of office) 2015/03/05 21:34:43 Why do you set old_top_ here, the old top should r
Igor Sheludko 2015/03/06 09:06:59 Sorry, I didn't get the question. Added more comme
+      }
+    }
+  } else {
+    std::replace_if(old_start_, old_top_, predicate, kRemovedSlot);
+  }
+}
+
+
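The heart of this patch is the new RemoveSlots() above. When the old buffer is sorted, every slot in [start_address, end_address) sits in one contiguous run, so two binary searches bound the run and a single tail shift closes the gap; when unsorted, matches are overwritten with a NULL sentinel that later passes skip. A minimal sketch of both branches, assuming std::vector<uintptr_t> in place of the raw Address arrays and 0 as the sentinel:

```cpp
// Sketch of RemoveSlots(): sorted buffers erase a contiguous run found by
// binary search; unsorted buffers blank out matches in place.
#include <algorithm>
#include <cstdint>
#include <vector>

void RemoveRange(std::vector<std::uintptr_t>* buf, bool sorted,
                 std::uintptr_t begin_addr, std::uintptr_t end_addr) {
  if (sorted) {
    // Two O(log n) searches bound the doomed run [lower, upper); erase()
    // performs the same tail shift the original spells out with new_top.
    auto lower = std::lower_bound(buf->begin(), buf->end(), begin_addr);
    auto upper = std::lower_bound(lower, buf->end(), end_addr);
    buf->erase(lower, upper);
  } else {
    // Order unknown: overwrite matches with a sentinel instead of compacting.
    std::replace_if(buf->begin(), buf->end(),
                    [begin_addr, end_addr](std::uintptr_t a) {
                      return begin_addr <= a && a < end_addr;
                    },
                    std::uintptr_t{0});
  }
}
```

The sentinel approach keeps removal O(n) with no shifting, at the cost of every later pass over the buffer having to tolerate NULL entries — which is exactly what the `if (addr == NULL) continue;` additions throughout this file provide.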
 void StoreBuffer::SortUniq() {
   Compact();
   if (old_buffer_is_sorted_) return;
   std::sort(old_start_, old_top_);
   Uniq();

   old_buffer_is_sorted_ = true;

   // Filtering hash sets are inconsistent with the store buffer after this
   // operation.
(...skipping 30 matching lines...)
   ClearFilteringHashSets();
   Uniq();  // Also removes things that no longer point to new space.
   EnsureSpace(kStoreBufferSize / 2);
 }


 static Address* in_store_buffer_1_element_cache = NULL;


 bool StoreBuffer::CellIsInStoreBuffer(Address cell_address) {
-  if (!FLAG_enable_slow_asserts) return true;
+  DCHECK_NOT_NULL(cell_address);
+  Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
   if (in_store_buffer_1_element_cache != NULL &&
       *in_store_buffer_1_element_cache == cell_address) {
-    return true;
+    // Check if the cache still points into the active part of the buffer.
+    if ((start_ <= in_store_buffer_1_element_cache &&
+         in_store_buffer_1_element_cache < top) ||
+        (old_start_ <= in_store_buffer_1_element_cache &&
+         in_store_buffer_1_element_cache < old_top_)) {
+      return true;
+    }
   }
-  Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
   for (Address* current = top - 1; current >= start_; current--) {
     if (*current == cell_address) {
       in_store_buffer_1_element_cache = current;
       return true;
     }
   }
   for (Address* current = old_top_ - 1; current >= old_start_; current--) {
     if (*current == cell_address) {
       in_store_buffer_1_element_cache = current;
       return true;
(...skipping 101 matching lines...)
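The change to CellIsInStoreBuffer() above guards the one-element cache: since slots can now be removed and the tops move as the buffer fills and drains, a value match through the cached pointer is no longer proof of membership unless the pointer still lies inside an active region. A small sketch of the guard, with illustrative names rather than V8's:

```cpp
// Sketch of the guarded one-element cache: a hit counts only while the
// cached pointer is still inside one of the two active regions.
#include <cstdint>

struct CachedSlotLookup {
  const std::uintptr_t* cache = nullptr;

  bool CheckedHit(std::uintptr_t value,
                  const std::uintptr_t* begin_a, const std::uintptr_t* end_a,
                  const std::uintptr_t* begin_b, const std::uintptr_t* end_b) {
    if (cache == nullptr || *cache != value) return false;
    // Re-validate against the current bounds on every query; a pointer past
    // the live top is stale even if the memory still holds the old value.
    return (begin_a <= cache && cache < end_a) ||
           (begin_b <= cache && cache < end_b);
  }
};
```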
 }


 void StoreBuffer::IteratePointersInStoreBuffer(ObjectSlotCallback slot_callback,
                                                bool clear_maps) {
   Address* limit = old_top_;
   old_top_ = old_start_;
   {
     DontMoveStoreBufferEntriesScope scope(this);
     for (Address* current = old_start_; current < limit; current++) {
+      Address addr = *current;
+      if (addr == NULL) continue;
 #ifdef DEBUG
       Address* saved_top = old_top_;
 #endif
-      ProcessOldToNewSlot(*current, slot_callback, clear_maps);
+      ProcessOldToNewSlot(addr, slot_callback, clear_maps);
       DCHECK(old_top_ == saved_top + 1 || old_top_ == saved_top);
     }
   }
 }


 void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
   IteratePointersToNewSpace(slot_callback, false);
 }

(...skipping 134 matching lines...)
   DCHECK(top <= limit_);
   heap_->public_set_store_buffer_top(start_);
   EnsureSpace(top - start_);
   DCHECK(may_move_store_buffer_entries_);
   // Goes through the addresses in the store buffer attempting to remove
   // duplicates. In the interest of speed this is a lossy operation. Some
   // duplicates will remain. We have two hash sets with different hash
   // functions to reduce the number of unnecessary clashes.
   hash_sets_are_empty_ = false;  // Hash sets are in use.
   for (Address* current = start_; current < top; current++) {
-    DCHECK(!heap_->cell_space()->Contains(*current));
-    DCHECK(!heap_->code_space()->Contains(*current));
-    DCHECK(!heap_->old_data_space()->Contains(*current));
-    uintptr_t int_addr = reinterpret_cast<uintptr_t>(*current);
+    Address addr = *current;
+    if (addr == NULL) continue;
+    DCHECK(!heap_->cell_space()->Contains(addr));
+    DCHECK(!heap_->code_space()->Contains(addr));
+    DCHECK(!heap_->old_data_space()->Contains(addr));
+    uintptr_t int_addr = reinterpret_cast<uintptr_t>(addr);
     // Shift out the last bits including any tags.
     int_addr >>= kPointerSizeLog2;
     // The upper part of an address is basically random because of ASLR and OS
     // non-determinism, so we use only the bits within a page for hashing to
     // make v8's behavior (more) deterministic.
     uintptr_t hash_addr =
         int_addr & (Page::kPageAlignmentMask >> kPointerSizeLog2);
     int hash1 = ((hash_addr ^ (hash_addr >> kHashSetLengthLog2)) &
                  (kHashSetLength - 1));
     if (hash_set_1_[hash1] == int_addr) continue;
(...skipping 13 matching lines...)
     }
     old_buffer_is_sorted_ = false;
     old_buffer_is_filtered_ = false;
     *old_top_++ = reinterpret_cast<Address>(int_addr << kPointerSizeLog2);
     DCHECK(old_top_ <= old_limit_);
   }
   heap_->isolate()->counters()->store_buffer_compactions()->Increment();
 }
 }
 }  // namespace v8::internal
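Compact() relies on the two direct-mapped hash sets described in its comment: an address is dropped as a duplicate if either set already holds it, and on a clash one older entry is simply evicted, so the filter is deliberately lossy. A standalone sketch of that scheme; the set size and the second hash function are assumptions for illustration, not V8's exact constants:

```cpp
// Sketch of Compact()'s lossy duplicate filter: two small direct-mapped
// sets with different hash functions. Clashes evict rather than chain, so
// some duplicates slip through -- the accepted trade-off for speed.
#include <cstdint>

constexpr int kLog = 10;            // Assumed set size: 2^10 entries.
constexpr int kLen = 1 << kLog;
static std::uintptr_t set1[kLen];   // Zero means "empty slot", so this
static std::uintptr_t set2[kLen];   // scheme assumes keys are nonzero.

// Returns true if `key` was (approximately) seen recently.
bool ProbablySeen(std::uintptr_t key) {
  int h1 = static_cast<int>((key ^ (key >> kLog)) & (kLen - 1));
  int h2 = static_cast<int>(((key >> kLog) ^ (key >> (2 * kLog))) & (kLen - 1));
  if (set1[h1] == key || set2[h2] == key) return true;  // Duplicate: drop it.
  if (set1[h1] == 0) {
    set1[h1] = key;
  } else if (set2[h2] == 0) {
    set2[h2] = key;
  } else {
    set1[h1] = key;  // Both slots busy: evict, losing an older entry.
    set2[h2] = 0;
  }
  return false;
}
```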
