| OLD | NEW |
| --- | --- |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/heap/store-buffer.h" | 5 #include "src/heap/store-buffer.h" |
| 6 | 6 |
| 7 #include <algorithm> | 7 #include <algorithm> |
| 8 | 8 |
| 9 #include "src/counters.h" | 9 #include "src/counters.h" |
| 10 #include "src/heap/incremental-marking.h" | 10 #include "src/heap/incremental-marking.h" |
| 11 #include "src/isolate.h" | 11 #include "src/isolate.h" |
| 12 #include "src/objects-inl.h" | 12 #include "src/objects-inl.h" |
| 13 #include "src/v8.h" | 13 #include "src/v8.h" |
| 14 | 14 |
| 15 namespace v8 { | 15 namespace v8 { |
| 16 namespace internal { | 16 namespace internal { |
| 17 | 17 |
| 18 StoreBuffer::StoreBuffer(Heap* heap) | 18 StoreBuffer::StoreBuffer(Heap* heap) |
| 19 : heap_(heap), top_(nullptr), current_(0), virtual_memory_(nullptr) { | 19 : heap_(heap), |
| 20 for (int i = 0; i < kStoreBuffers; i++) { | 20 top_(nullptr), |
| 21 start_[i] = nullptr; | 21 start_(nullptr), |
| 22 limit_[i] = nullptr; | 22 limit_(nullptr), |
| 23 lazy_top_[i] = nullptr; | 23 virtual_memory_(nullptr) {} |
| 24 } | |
| 25 task_running_ = false; | |
| 26 } | |
| 27 | 24 |
| 28 void StoreBuffer::SetUp() { | 25 void StoreBuffer::SetUp() { |
| 29 // Allocate 3x the buffer size, so that we can start the new store buffer | 26 // Allocate 2x the buffer size, so that we can start the new store buffer |
| 30 // aligned to 2x the size. This lets us use a bit test to detect the end of | 27 // aligned to the buffer size. This lets us use a bit test to detect the |
| 31 // the area. | 28 // end of the area. |
| 32 virtual_memory_ = new base::VirtualMemory(kStoreBufferSize * 3); | 29 virtual_memory_ = new base::VirtualMemory(kStoreBufferSize * 2); |
| 33 uintptr_t start_as_int = | 30 uintptr_t start_as_int = |
| 34 reinterpret_cast<uintptr_t>(virtual_memory_->address()); | 31 reinterpret_cast<uintptr_t>(virtual_memory_->address()); |
| 35 start_[0] = | 32 start_ = reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize)); |
| 36 reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize)); | 33 limit_ = start_ + (kStoreBufferSize / kPointerSize); |
| 37 limit_[0] = start_[0] + (kStoreBufferSize / kPointerSize); | |
| 38 start_[1] = limit_[0]; | |
| 39 limit_[1] = start_[1] + (kStoreBufferSize / kPointerSize); | |
| 40 | 34 |
| | 35 DCHECK(reinterpret_cast<Address>(start_) >= virtual_memory_->address()); |
| | 36 DCHECK(reinterpret_cast<Address>(limit_) >= virtual_memory_->address()); |
| 41 Address* vm_limit = reinterpret_cast<Address*>( | 37 Address* vm_limit = reinterpret_cast<Address*>( |
| 42 reinterpret_cast<char*>(virtual_memory_->address()) + | 38 reinterpret_cast<char*>(virtual_memory_->address()) + |
| 43 virtual_memory_->size()); | 39 virtual_memory_->size()); |
| | 40 DCHECK(start_ <= vm_limit); |
| | 41 DCHECK(limit_ <= vm_limit); |
| | 42 USE(vm_limit); |
| | 43 DCHECK((reinterpret_cast<uintptr_t>(limit_) & kStoreBufferMask) == 0); |
| 44 | 44 |
| 45 USE(vm_limit); | 45 if (!virtual_memory_->Commit(reinterpret_cast<Address>(start_), |
| 46 for (int i = 0; i < kStoreBuffers; i++) { | 46 kStoreBufferSize, |
| 47 DCHECK(reinterpret_cast<Address>(start_[i]) >= virtual_memory_->address()); | |
| 48 DCHECK(reinterpret_cast<Address>(limit_[i]) >= virtual_memory_->address()); | |
| 49 DCHECK(start_[i] <= vm_limit); | |
| 50 DCHECK(limit_[i] <= vm_limit); | |
| 51 DCHECK((reinterpret_cast<uintptr_t>(limit_[i]) & kStoreBufferMask) == 0); | |
| 52 } | |
| 53 | |
| 54 if (!virtual_memory_->Commit(reinterpret_cast<Address>(start_[0]), | |
| 55 kStoreBufferSize * kStoreBuffers, | |
| 56 false)) { // Not executable. | 47 false)) { // Not executable. |
| 57 V8::FatalProcessOutOfMemory("StoreBuffer::SetUp"); | 48 V8::FatalProcessOutOfMemory("StoreBuffer::SetUp"); |
| 58 } | 49 } |
| 59 current_ = 0; | 50 top_ = start_; |
| 60 top_ = start_[current_]; | |
| 61 } | 51 } |
| 62 | 52 |
| 63 | 53 |
| 64 void StoreBuffer::TearDown() { | 54 void StoreBuffer::TearDown() { |
| 65 delete virtual_memory_; | 55 delete virtual_memory_; |
| 66 top_ = nullptr; | 56 top_ = start_ = limit_ = nullptr; |
| 67 for (int i = 0; i < kStoreBuffers; i++) { | |
| 68 start_[i] = nullptr; | |
| 69 limit_[i] = nullptr; | |
| 70 lazy_top_[i] = nullptr; | |
| 71 } | |
| 72 } | 57 } |
| 73 | 58 |
| 74 | 59 |
| 75 void StoreBuffer::StoreBufferOverflow(Isolate* isolate) { | 60 void StoreBuffer::StoreBufferOverflow(Isolate* isolate) { |
| 76 isolate->heap()->store_buffer()->FlipStoreBuffers(); | 61 isolate->heap()->store_buffer()->MoveEntriesToRememberedSet(); |
| 77 isolate->counters()->store_buffer_overflows()->Increment(); | 62 isolate->counters()->store_buffer_overflows()->Increment(); |
| 78 } | 63 } |
| 79 | 64 |
| 80 void StoreBuffer::FlipStoreBuffers() { | 65 void StoreBuffer::MoveEntriesToRememberedSet() { |
| 81 base::LockGuard<base::Mutex> guard(&mutex_); | 66 if (top_ == start_) return; |
| 82 int other = (current_ + 1) % kStoreBuffers; | 67 DCHECK(top_ <= limit_); |
| 83 MoveEntriesToRememberedSet(other); | 68 for (Address* current = start_; current < top_; current++) { |
| 84 lazy_top_[current_] = top_; | |
| 85 current_ = other; | |
| 86 top_ = start_[current_]; | |
| 87 | |
| 88 if (!task_running_) { | |
| 89 task_running_ = true; | |
| 90 Task* task = new Task(heap_->isolate(), this); | |
| 91 V8::GetCurrentPlatform()->CallOnBackgroundThread( | |
| 92 task, v8::Platform::kShortRunningTask); | |
| 93 } | |
| 94 } | |
| 95 | |
| 96 void StoreBuffer::MoveEntriesToRememberedSet(int index) { | |
| 97 if (!lazy_top_[index]) return; | |
| 98 DCHECK_GE(index, 0); | |
| 99 DCHECK_LT(index, kStoreBuffers); | |
| 100 for (Address* current = start_[index]; current < lazy_top_[index]; | |
| 101 current++) { | |
| 102 DCHECK(!heap_->code_space()->Contains(*current)); | 69 DCHECK(!heap_->code_space()->Contains(*current)); |
| 103 Address addr = *current; | 70 Address addr = *current; |
| 104 Page* page = Page::FromAnyPointerAddress(heap_, addr); | 71 Page* page = Page::FromAnyPointerAddress(heap_, addr); |
| 105 if (IsDeletionAddress(addr)) { | 72 RememberedSet<OLD_TO_NEW>::Insert(page, addr); |
| 106 current++; | |
| 107 Address end = *current; | |
| 108 DCHECK(!IsDeletionAddress(end)); | |
| 109 addr = UnmarkDeletionAddress(addr); | |
| 110 if (end) { | |
| 111 RememberedSet<OLD_TO_NEW>::RemoveRange(page, addr, end, | |
| 112 SlotSet::PREFREE_EMPTY_BUCKETS); | |
| 113 } else { | |
| 114 RememberedSet<OLD_TO_NEW>::Remove(page, addr); | |
| 115 } | |
| 116 } else { | |
| 117 DCHECK(!IsDeletionAddress(addr)); | |
| 118 RememberedSet<OLD_TO_NEW>::Insert(page, addr); | |
| 119 } | |
| 120 } | 73 } |
| 121 lazy_top_[index] = nullptr; | 74 top_ = start_; |
| 122 } | |
| 123 | |
| 124 void StoreBuffer::MoveAllEntriesToRememberedSet() { | |
| 125 base::LockGuard<base::Mutex> guard(&mutex_); | |
| 126 int other = (current_ + 1) % kStoreBuffers; | |
| 127 MoveEntriesToRememberedSet(other); | |
| 128 lazy_top_[current_] = top_; | |
| 129 MoveEntriesToRememberedSet(current_); | |
| 130 top_ = start_[current_]; | |
| 131 } | |
| 132 | |
| 133 void StoreBuffer::ConcurrentlyProcessStoreBuffer() { | |
| 134 base::LockGuard<base::Mutex> guard(&mutex_); | |
| 135 int other = (current_ + 1) % kStoreBuffers; | |
| 136 MoveEntriesToRememberedSet(other); | |
| 137 task_running_ = false; | |
| 138 } | |
| 139 | |
| 140 void StoreBuffer::DeleteEntry(Address start, Address end) { | |
| 141 if (top_ + sizeof(Address) * 2 > limit_[current_]) { | |
| 142 StoreBufferOverflow(heap_->isolate()); | |
| 143 } | |
| 144 *top_ = MarkDeletionAddress(start); | |
| 145 top_++; | |
| 146 *top_ = end; | |
| 147 top_++; | |
| 148 } | 75 } |
| 149 | 76 |
| 150 } // namespace internal | 77 } // namespace internal |
| 151 } // namespace v8 | 78 } // namespace v8 |
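
For context, the NEW column collapses the OLD column's two-buffer scheme (flip buffers on overflow, drain the inactive buffer on a background task) into a single buffer that is drained synchronously. The sketch below is a minimal, self-contained illustration of that single-buffer flow, not V8's real API: `SimpleRememberedSet`, its page keying, and all sizes are hypothetical stand-ins.

```cpp
// Minimal sketch of the single-buffer scheme in the NEW column: a fixed
// block of slot addresses, drained synchronously into a per-page remembered
// set when it fills. All types and sizes below are illustrative, not V8's.
#include <cstdint>
#include <map>
#include <set>
#include <vector>

using Address = uintptr_t;

// Hypothetical stand-in for RememberedSet<OLD_TO_NEW>: one slot set per
// page, keyed by the page's base address.
struct SimpleRememberedSet {
  std::map<Address, std::set<Address>> slots_by_page;
  void Insert(Address page_base, Address slot) {
    slots_by_page[page_base].insert(slot);
  }
};

class SimpleStoreBuffer {
 public:
  static constexpr size_t kPageSize = size_t{1} << 19;       // assumed
  static constexpr size_t kBufferEntries = size_t{1} << 10;  // assumed

  explicit SimpleStoreBuffer(SimpleRememberedSet* rs)
      : buffer_(kBufferEntries), remembered_set_(rs) {
    top_ = buffer_.data();
    limit_ = buffer_.data() + buffer_.size();
  }

  // Record one old-to-new slot. On overflow, drain synchronously; this
  // mirrors StoreBufferOverflow() -> MoveEntriesToRememberedSet() in the
  // NEW column, where OLD instead flipped buffers and posted a task.
  void Insert(Address slot) {
    if (top_ == limit_) MoveEntriesToRememberedSet();
    *top_++ = slot;
  }

  void MoveEntriesToRememberedSet() {
    for (Address* current = buffer_.data(); current < top_; current++) {
      Address slot = *current;
      // Page::FromAnyPointerAddress stand-in: mask down to a page base.
      Address page_base = slot & ~static_cast<Address>(kPageSize - 1);
      remembered_set_->Insert(page_base, slot);
    }
    top_ = buffer_.data();  // reset, as NEW does with top_ = start_
  }

 private:
  std::vector<Address> buffer_;
  Address* top_ = nullptr;
  Address* limit_ = nullptr;
  SimpleRememberedSet* remembered_set_ = nullptr;
};
```

Note the trade-off the NEW column makes: no mutex_, no task_running_, and no deletion entries, at the cost of draining on the mutator thread whenever the buffer overflows.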
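The "bit test" mentioned in the SetUp() comment also deserves a concrete illustration. Because start_ is rounded up to a kStoreBufferSize boundary, the only pointer in (start_, limit_] whose low bits are all zero is limit_ itself, so a full buffer can be detected by masking top_ rather than comparing it against limit_. The demo below assumes kStoreBufferMask == kStoreBufferSize - 1 (consistent with the DCHECK on limit_ in the NEW column); the sizes are illustrative.

```cpp
// Demo of the overflow bit test behind SetUp()'s over-allocation: round the
// reservation up to a kStoreBufferSize boundary, and "top_ reached limit_"
// becomes a single mask test. Sizes are assumptions for illustration.
#include <cassert>
#include <cstdint>
#include <vector>

int main() {
  constexpr uintptr_t kStoreBufferSize = uintptr_t{1} << 16;
  constexpr uintptr_t kStoreBufferMask = kStoreBufferSize - 1;  // assumed
  constexpr uintptr_t kPointerSize = sizeof(void*);

  // Stand-in for the 2x reservation from base::VirtualMemory.
  std::vector<char> reservation(2 * kStoreBufferSize);
  uintptr_t raw = reinterpret_cast<uintptr_t>(reservation.data());
  uintptr_t start = (raw + kStoreBufferMask) & ~kStoreBufferMask;  // RoundUp
  uintptr_t limit = start + kStoreBufferSize;

  // The DCHECK from SetUp(): limit_ sits exactly on a buffer-size boundary.
  assert((limit & kStoreBufferMask) == 0);

  // Simulate pushing entries: only the final top_ position trips the mask
  // test, so no comparison against limit_ is needed in the fast path.
  for (uintptr_t top = start + kPointerSize; top <= limit;
       top += kPointerSize) {
    bool overflow = (top & kStoreBufferMask) == 0;
    assert(overflow == (top == limit));
  }
  return 0;
}
```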