Chromium Code Reviews

Unified Diff: src/store-buffer.cc

Issue 430503007: Rename ASSERT* to DCHECK*. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: REBASE and fixes Created 6 years, 4 months ago
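Note on the rename: this CL adopts the Chromium convention in V8, where DCHECK* assertions are compiled only into debug builds while CHECK* stays active in release builds, which is why the CHECK calls around VirtualMemory::Commit in this file are left untouched. A minimal sketch of that distinction, using simplified hypothetical macro definitions (the real V8 macros also report the file, line and failed condition):

    #include <cstdio>
    #include <cstdlib>

    // Always-on check: survives into release builds.
    #define CHECK(condition)                                        \
      do {                                                          \
        if (!(condition)) {                                         \
          std::fprintf(stderr, "Check failed: %s\n", #condition);   \
          std::abort();                                             \
        }                                                           \
      } while (false)

    #ifdef DEBUG
    // Debug-only check: identical to CHECK in debug builds...
    #define DCHECK(condition) CHECK(condition)
    #else
    // ...and compiled away entirely in release builds.
    #define DCHECK(condition) ((void) 0)
    #endif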
@@ -1,10 +1,10 @@
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "src/store-buffer.h"
 
 #include <algorithm>
 
 #include "src/v8.h"
 
(...skipping 32 matching lines...)
@@ -43,43 +43,43 @@
   start_ =
       reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize * 2));
   limit_ = start_ + (kStoreBufferSize / kPointerSize);
 
   old_virtual_memory_ =
       new base::VirtualMemory(kOldStoreBufferLength * kPointerSize);
   old_top_ = old_start_ =
       reinterpret_cast<Address*>(old_virtual_memory_->address());
   // Don't know the alignment requirements of the OS, but it is certainly not
   // less than 0xfff.
-  ASSERT((reinterpret_cast<uintptr_t>(old_start_) & 0xfff) == 0);
+  DCHECK((reinterpret_cast<uintptr_t>(old_start_) & 0xfff) == 0);
   int initial_length =
       static_cast<int>(base::OS::CommitPageSize() / kPointerSize);
-  ASSERT(initial_length > 0);
-  ASSERT(initial_length <= kOldStoreBufferLength);
+  DCHECK(initial_length > 0);
+  DCHECK(initial_length <= kOldStoreBufferLength);
   old_limit_ = old_start_ + initial_length;
   old_reserved_limit_ = old_start_ + kOldStoreBufferLength;
 
   CHECK(old_virtual_memory_->Commit(
       reinterpret_cast<void*>(old_start_),
       (old_limit_ - old_start_) * kPointerSize,
       false));
 
-  ASSERT(reinterpret_cast<Address>(start_) >= virtual_memory_->address());
-  ASSERT(reinterpret_cast<Address>(limit_) >= virtual_memory_->address());
+  DCHECK(reinterpret_cast<Address>(start_) >= virtual_memory_->address());
+  DCHECK(reinterpret_cast<Address>(limit_) >= virtual_memory_->address());
   Address* vm_limit = reinterpret_cast<Address*>(
       reinterpret_cast<char*>(virtual_memory_->address()) +
       virtual_memory_->size());
-  ASSERT(start_ <= vm_limit);
-  ASSERT(limit_ <= vm_limit);
+  DCHECK(start_ <= vm_limit);
+  DCHECK(limit_ <= vm_limit);
   USE(vm_limit);
-  ASSERT((reinterpret_cast<uintptr_t>(limit_) & kStoreBufferOverflowBit) != 0);
-  ASSERT((reinterpret_cast<uintptr_t>(limit_ - 1) & kStoreBufferOverflowBit) ==
+  DCHECK((reinterpret_cast<uintptr_t>(limit_) & kStoreBufferOverflowBit) != 0);
+  DCHECK((reinterpret_cast<uintptr_t>(limit_ - 1) & kStoreBufferOverflowBit) ==
          0);
 
   CHECK(virtual_memory_->Commit(reinterpret_cast<Address>(start_),
                                 kStoreBufferSize,
                                 false));  // Not executable.
   heap_->public_set_store_buffer_top(start_);
 
   hash_set_1_ = new uintptr_t[kHashSetLength];
   hash_set_2_ = new uintptr_t[kHashSetLength];
   hash_sets_are_empty_ = false;
(...skipping 16 matching lines...)
@@ -102,21 +102,21 @@
 void StoreBuffer::StoreBufferOverflow(Isolate* isolate) {
   isolate->heap()->store_buffer()->Compact();
   isolate->counters()->store_buffer_overflows()->Increment();
 }
 
 
 void StoreBuffer::Uniq() {
   // Remove adjacent duplicates and cells that do not point at new space.
   Address previous = NULL;
   Address* write = old_start_;
-  ASSERT(may_move_store_buffer_entries_);
+  DCHECK(may_move_store_buffer_entries_);
   for (Address* read = old_start_; read < old_top_; read++) {
     Address current = *read;
     if (current != previous) {
       if (heap_->InNewSpace(*reinterpret_cast<Object**>(current))) {
         *write++ = current;
       }
     }
     previous = current;
   }
   old_top_ = write;
(...skipping 11 matching lines...)
@@ -134,21 +134,21 @@
     size_t grow = old_limit_ - old_start_;  // Double size.
     CHECK(old_virtual_memory_->Commit(reinterpret_cast<void*>(old_limit_),
                                       grow * kPointerSize,
                                       false));
     old_limit_ += grow;
   }
 
   if (SpaceAvailable(space_needed)) return;
 
   if (old_buffer_is_filtered_) return;
-  ASSERT(may_move_store_buffer_entries_);
+  DCHECK(may_move_store_buffer_entries_);
   Compact();
 
   old_buffer_is_filtered_ = true;
   bool page_has_scan_on_scavenge_flag = false;
 
   PointerChunkIterator it(heap_);
   MemoryChunk* chunk;
   while ((chunk = it.next()) != NULL) {
     if (chunk->scan_on_scavenge()) {
       page_has_scan_on_scavenge_flag = true;
(...skipping 16 matching lines...)
@@ -171,21 +171,21 @@
   } samples[kSampleFinenesses] = {
     { 97, ((Page::kPageSize / kPointerSize) / 97) / 8 },
     { 23, ((Page::kPageSize / kPointerSize) / 23) / 16 },
     { 7, ((Page::kPageSize / kPointerSize) / 7) / 32 },
     { 3, ((Page::kPageSize / kPointerSize) / 3) / 256 },
     { 1, 0}
   };
   for (int i = 0; i < kSampleFinenesses; i++) {
     ExemptPopularPages(samples[i].prime_sample_step, samples[i].threshold);
     // As a last resort we mark all pages as being exempt from the store buffer.
-    ASSERT(i != (kSampleFinenesses - 1) || old_top_ == old_start_);
+    DCHECK(i != (kSampleFinenesses - 1) || old_top_ == old_start_);
     if (SpaceAvailable(space_needed)) return;
   }
   UNREACHABLE();
 }
 
 
 // Sample the store buffer to see if some pages are taking up a lot of space
 // in the store buffer.
 void StoreBuffer::ExemptPopularPages(int prime_sample_step, int threshold) {
   PointerChunkIterator it(heap_);
(...skipping 189 matching lines...)
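The body of ExemptPopularPages() is elided in this diff, but the sampling table in EnsureSpace() above hints at how it works: the old store buffer is walked with a prime stride to estimate how many entries each page contributes, and pages over the threshold are flagged scan-on-scavenge so their entries can be dropped. A standalone sketch of that prime-stride counting idea (illustrative only; the names, containers and page_mask parameter are assumptions, not the elided V8 code):

    #include <cstdint>
    #include <unordered_map>
    #include <vector>

    // Count store-buffer entries per page by visiting only every
    // prime_sample_step-th slot; a prime stride avoids systematically missing
    // entries that recur with a regular period in the buffer.
    std::vector<uintptr_t> FindPopularPages(const std::vector<uintptr_t>& slots,
                                            size_t prime_sample_step,
                                            size_t threshold,
                                            uintptr_t page_mask) {
      std::unordered_map<uintptr_t, size_t> counts;
      for (size_t i = 0; i < slots.size(); i += prime_sample_step) {
        counts[slots[i] & ~page_mask]++;  // bucket by page base address
      }
      std::vector<uintptr_t> popular;
      for (const auto& entry : counts) {
        // Pages whose sampled count exceeds the threshold would be exempted
        // (marked scan-on-scavenge) and their entries removed from the buffer.
        if (entry.second > threshold) popular.push_back(entry.first);
      }
      return popular;
    }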
@@ -381,21 +381,21 @@
                                                  ObjectSlotCallback slot_callback,
                                                  bool clear_maps) {
   for (Address slot_address = start;
        slot_address < end;
        slot_address += kPointerSize) {
     Object** slot = reinterpret_cast<Object**>(slot_address);
     Object* object = reinterpret_cast<Object*>(
         base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
     if (heap_->InNewSpace(object)) {
       HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
-      ASSERT(heap_object->IsHeapObject());
+      DCHECK(heap_object->IsHeapObject());
       // The new space object was not promoted if it still contains a map
       // pointer. Clear the map field now lazily.
       if (clear_maps) ClearDeadObject(heap_object);
       slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
       object = reinterpret_cast<Object*>(
           base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
       if (heap_->InNewSpace(object)) {
         EnterDirectlyIntoStoreBuffer(slot_address);
       }
     }
(...skipping 20 matching lines...)
@@ -422,21 +422,21 @@
         // The new space object was not promoted if it still contains a map
         // pointer. Clear the map field now lazily.
         if (clear_maps) ClearDeadObject(heap_object);
         slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
         object = reinterpret_cast<Object*>(
             base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
         if (heap_->InNewSpace(object)) {
           EnterDirectlyIntoStoreBuffer(reinterpret_cast<Address>(slot));
         }
       }
-      ASSERT(old_top_ == saved_top + 1 || old_top_ == saved_top);
+      DCHECK(old_top_ == saved_top + 1 || old_top_ == saved_top);
     }
   }
 }
 
 
 void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
   IteratePointersToNewSpace(slot_callback, false);
 }
 
 
(...skipping 32 matching lines...)
@@ -475,31 +475,31 @@
   MemoryChunk* chunk;
   while ((chunk = it.next()) != NULL) {
     if (chunk->scan_on_scavenge()) {
       chunk->set_scan_on_scavenge(false);
       if (callback_ != NULL) {
         (*callback_)(heap_, chunk, kStoreBufferScanningPageEvent);
       }
       if (chunk->owner() == heap_->lo_space()) {
         LargePage* large_page = reinterpret_cast<LargePage*>(chunk);
         HeapObject* array = large_page->GetObject();
-        ASSERT(array->IsFixedArray());
+        DCHECK(array->IsFixedArray());
         Address start = array->address();
         Address end = start + array->Size();
         FindPointersToNewSpaceInRegion(start, end, slot_callback, clear_maps);
       } else {
         Page* page = reinterpret_cast<Page*>(chunk);
         PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
         Address start = page->area_start();
         Address end = page->area_end();
         if (owner == heap_->map_space()) {
-          ASSERT(page->WasSweptPrecisely());
+          DCHECK(page->WasSweptPrecisely());
           HeapObjectIterator iterator(page, NULL);
           for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
                heap_object = iterator.Next()) {
             // We skip free space objects.
             if (!heap_object->IsFiller()) {
               FindPointersToNewSpaceInRegion(
                   heap_object->address() + HeapObject::kHeaderSize,
                   heap_object->address() + heap_object->Size(), slot_callback,
                   clear_maps);
             }
(...skipping 39 matching lines...)
@@ -545,57 +545,57 @@
 }
 
 
 void StoreBuffer::Compact() {
   Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
 
   if (top == start_) return;
 
   // There's no check of the limit in the loop below so we check here for
   // the worst case (compaction doesn't eliminate any pointers).
-  ASSERT(top <= limit_);
+  DCHECK(top <= limit_);
   heap_->public_set_store_buffer_top(start_);
   EnsureSpace(top - start_);
-  ASSERT(may_move_store_buffer_entries_);
+  DCHECK(may_move_store_buffer_entries_);
   // Goes through the addresses in the store buffer attempting to remove
   // duplicates. In the interest of speed this is a lossy operation. Some
   // duplicates will remain. We have two hash sets with different hash
   // functions to reduce the number of unnecessary clashes.
   hash_sets_are_empty_ = false;  // Hash sets are in use.
   for (Address* current = start_; current < top; current++) {
-    ASSERT(!heap_->cell_space()->Contains(*current));
-    ASSERT(!heap_->code_space()->Contains(*current));
-    ASSERT(!heap_->old_data_space()->Contains(*current));
+    DCHECK(!heap_->cell_space()->Contains(*current));
+    DCHECK(!heap_->code_space()->Contains(*current));
+    DCHECK(!heap_->old_data_space()->Contains(*current));
     uintptr_t int_addr = reinterpret_cast<uintptr_t>(*current);
     // Shift out the last bits including any tags.
    int_addr >>= kPointerSizeLog2;
     // The upper part of an address is basically random because of ASLR and OS
     // non-determinism, so we use only the bits within a page for hashing to
     // make v8's behavior (more) deterministic.
     uintptr_t hash_addr =
         int_addr & (Page::kPageAlignmentMask >> kPointerSizeLog2);
     int hash1 = ((hash_addr ^ (hash_addr >> kHashSetLengthLog2)) &
                  (kHashSetLength - 1));
     if (hash_set_1_[hash1] == int_addr) continue;
     uintptr_t hash2 = (hash_addr - (hash_addr >> kHashSetLengthLog2));
     hash2 ^= hash2 >> (kHashSetLengthLog2 * 2);
     hash2 &= (kHashSetLength - 1);
     if (hash_set_2_[hash2] == int_addr) continue;
     if (hash_set_1_[hash1] == 0) {
       hash_set_1_[hash1] = int_addr;
     } else if (hash_set_2_[hash2] == 0) {
       hash_set_2_[hash2] = int_addr;
     } else {
       // Rather than slowing down we just throw away some entries. This will
       // cause some duplicates to remain undetected.
       hash_set_1_[hash1] = int_addr;
       hash_set_2_[hash2] = 0;
     }
     old_buffer_is_sorted_ = false;
     old_buffer_is_filtered_ = false;
     *old_top_++ = reinterpret_cast<Address>(int_addr << kPointerSizeLog2);
-    ASSERT(old_top_ <= old_limit_);
+    DCHECK(old_top_ <= old_limit_);
   }
   heap_->isolate()->counters()->store_buffer_compactions()->Increment();
 }
 
 } }  // namespace v8::internal
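The two-hash-set filtering in Compact() above can be easier to follow outside diff form. A standalone sketch of the same approximate deduplication idea (a hypothetical simplification: it hashes the key directly, whereas the real code hashes only the within-page bits for determinism and stores the pointer-shifted address):

    #include <cstddef>
    #include <cstdint>

    // Illustrative stand-ins for kHashSetLengthLog2 / kHashSetLength.
    constexpr int kLengthLog2 = 10;
    constexpr size_t kLength = size_t{1} << kLengthLog2;

    // Returns true if the entry should be kept. Lossy by design: a clash in
    // both tables evicts an older entry, so some duplicates slip through, but
    // no address is ever dropped from the output.
    bool ProbablyUnseen(uintptr_t key, uintptr_t* set1, uintptr_t* set2) {
      size_t hash1 = (key ^ (key >> kLengthLog2)) & (kLength - 1);
      if (set1[hash1] == key) return false;  // recently seen
      uintptr_t h2 = key - (key >> kLengthLog2);
      h2 ^= h2 >> (kLengthLog2 * 2);
      size_t hash2 = h2 & (kLength - 1);
      if (set2[hash2] == key) return false;  // recently seen
      if (set1[hash1] == 0) {
        set1[hash1] = key;
      } else if (set2[hash2] == 0) {
        set2[hash2] = key;
      } else {
        set1[hash1] = key;  // evict rather than slow down
        set2[hash2] = 0;
      }
      return true;
    }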
