Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 // Copyright 2014 The Chromium Authors. All rights reserved. | 1 // Copyright 2014 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "platform/heap/CallbackStack.h" | 5 #include "platform/heap/CallbackStack.h" |
| 6 #include "wtf/PtrUtil.h" | |
| 6 #include "wtf/allocator/PageAllocator.h" | 7 #include "wtf/allocator/PageAllocator.h" |
| 7 | 8 |
| 8 namespace blink { | 9 namespace blink { |
| 9 | 10 |
| 10 size_t const CallbackStack::kMinimalBlockSize = WTF::kPageAllocationGranularity / sizeof(CallbackStack::Item); | 11 CallbackStackMemoryPool& CallbackStackMemoryPool::instance() |
|
haraken
2016/07/06 02:01:38
I noticed that WTF::kPageAllocationGranularity is
sof
2016/07/06 06:03:21
I wouldn't worry too much -- the non-main thread t
| |
| 12 { | |
| 13 DEFINE_STATIC_LOCAL(CallbackStackMemoryPool, memoryPool, ()); | |
| 14 return memoryPool; | |
| 15 } | |
| 11 | 16 |
| 12 CallbackStack::Block::Block(Block* next, size_t blockSize) | 17 void CallbackStackMemoryPool::initialize() |
| 13 : m_blockSize(blockSize) | |
| 14 { | 18 { |
| 15 // Allocated block size must be a multiple of WTF::kPageAllocationGranularity. | 19 m_freeListFirst = 0; |
| 16 ASSERT((m_blockSize * sizeof(Item)) % WTF::kPageAllocationGranularity == 0); | 20 for (size_t index = 0; index < kPooledBlockCount - 1; ++index) { |
| 17 m_buffer = static_cast<Item*>(WTF::allocPages(nullptr, m_blockSize * sizeof(Item), WTF::kPageAllocationGranularity, WTF::PageAccessible)); | 21 m_freeListNext[index] = index + 1; |
| 18 RELEASE_ASSERT(m_buffer); | 22 } |
| 23 m_freeListNext[kPooledBlockCount - 1] = -1; | |
| 24 m_pooledMemory = static_cast<CallbackStack::Item*>(WTF::allocPages(nullptr, kBlockBytes * kPooledBlockCount, WTF::kPageAllocationGranularity, WTF::PageAccessible)); | |
| 25 CHECK(m_pooledMemory); | |
| 26 } | |
| 19 | 27 |
| 28 void CallbackStackMemoryPool::shutdown() | |
| 29 { | |
| 30 WTF::freePages(m_pooledMemory, kBlockBytes * kPooledBlockCount); | |
| 31 m_pooledMemory = nullptr; | |
| 32 m_freeListFirst = 0; | |
| 33 } | |
| 34 | |
| 35 CallbackStack::Item* CallbackStackMemoryPool::allocate() | |
| 36 { | |
| 37 MutexLocker locker(m_mutex); | |
| 38 // Allocate from a free list if available. | |
| 39 if (m_freeListFirst != -1) { | |
| 40 size_t index = m_freeListFirst; | |
| 41 DCHECK(0 <= index && index < CallbackStackMemoryPool::kPooledBlockCount); | |
| 42 m_freeListFirst = m_freeListNext[index]; | |
| 43 m_freeListNext[index] = -1; | |
| 44 return m_pooledMemory + kBlockSize * index; | |
| 45 } | |
| 46 // Otherwise, allocate a new memory region. | |
| 47 CallbackStack::Item* memory = static_cast<CallbackStack::Item*>(WTF::allocPages(nullptr, kBlockBytes, WTF::kPageAllocationGranularity, WTF::PageAccessible)); | |
|
sof
2016/07/06 13:22:17
The block size/length isn't a multiple of kPageAll
haraken
2016/07/06 13:27:29
On Windows, kPageAllocationGranularity is 64 KB. D
sof
2016/07/06 13:33:07
If you want to use allocPages().. but why insist o
| |
| 48 CHECK(memory); | |
| 49 return memory; | |
| 50 } | |
| 51 | |
| 52 void CallbackStackMemoryPool::free(CallbackStack::Item* memory) | |
| 53 { | |
| 54 MutexLocker locker(m_mutex); | |
| 55 int index = (reinterpret_cast<uintptr_t>(memory) - reinterpret_cast<uintptr_t>(m_pooledMemory)) / (kBlockSize * sizeof(CallbackStack::Item)); | |
| 56 // If the memory is a newly allocated region, free the memory. | |
| 57 if (index < 0 || static_cast<int>(kPooledBlockCount) <= index) { | |
| 58 WTF::freePages(memory, kBlockBytes); | |
| 59 return; | |
| 60 } | |
| 61 // Otherwise, return the memory back to the free list. | |
| 62 DCHECK_EQ(m_freeListNext[index], -1); | |
| 63 m_freeListNext[index] = m_freeListFirst; | |
| 64 m_freeListFirst = index; | |
| 65 } | |
| 66 | |
| 67 CallbackStack::Block::Block(Block* next) | |
| 68 { | |
| 69 m_buffer = CallbackStackMemoryPool::instance().allocate(); | |
| 20 #if ENABLE(ASSERT) | 70 #if ENABLE(ASSERT) |
| 21 clear(); | 71 clear(); |
| 22 #endif | 72 #endif |
| 23 | 73 |
| 24 m_limit = &(m_buffer[m_blockSize]); | 74 m_limit = &(m_buffer[CallbackStackMemoryPool::kBlockSize]); |
| 25 m_current = &(m_buffer[0]); | 75 m_current = &(m_buffer[0]); |
| 26 m_next = next; | 76 m_next = next; |
| 27 } | 77 } |
| 28 | 78 |
| 29 CallbackStack::Block::~Block() | 79 CallbackStack::Block::~Block() |
| 30 { | 80 { |
| 31 WTF::freePages(m_buffer, m_blockSize * sizeof(Item)); | 81 CallbackStackMemoryPool::instance().free(m_buffer); |
| 32 m_buffer = nullptr; | 82 m_buffer = nullptr; |
| 33 m_limit = nullptr; | 83 m_limit = nullptr; |
| 34 m_current = nullptr; | 84 m_current = nullptr; |
| 35 m_next = nullptr; | 85 m_next = nullptr; |
| 36 } | 86 } |
| 37 | 87 |
| 38 #if ENABLE(ASSERT) | 88 #if ENABLE(ASSERT) |
| 39 void CallbackStack::Block::clear() | 89 void CallbackStack::Block::clear() |
| 40 { | 90 { |
| 41 for (size_t i = 0; i < m_blockSize; i++) | 91 for (size_t i = 0; i < CallbackStackMemoryPool::kBlockSize; i++) |
| 42 m_buffer[i] = Item(0, 0); | 92 m_buffer[i] = Item(0, 0); |
| 43 } | 93 } |
| 44 #endif | 94 #endif |
| 45 | 95 |
| 46 void CallbackStack::Block::decommit() | |
| 47 { | |
| 48 reset(); | |
| 49 WTF::discardSystemPages(m_buffer, m_blockSize * sizeof(Item)); | |
| 50 } | |
| 51 | |
| 52 void CallbackStack::Block::reset() | |
| 53 { | |
| 54 #if ENABLE(ASSERT) | |
| 55 clear(); | |
| 56 #endif | |
| 57 m_current = &m_buffer[0]; | |
| 58 m_next = nullptr; | |
| 59 } | |
| 60 | |
| 61 void CallbackStack::Block::invokeEphemeronCallbacks(Visitor* visitor) | 96 void CallbackStack::Block::invokeEphemeronCallbacks(Visitor* visitor) |
| 62 { | 97 { |
| 63 // This loop can tolerate entries being added by the callbacks after | 98 // This loop can tolerate entries being added by the callbacks after |
| 64 // iteration starts. | 99 // iteration starts. |
| 65 for (unsigned i = 0; m_buffer + i < m_current; i++) { | 100 for (unsigned i = 0; m_buffer + i < m_current; i++) { |
| 66 Item& item = m_buffer[i]; | 101 Item& item = m_buffer[i]; |
| 67 item.call(visitor); | 102 item.call(visitor); |
| 68 } | 103 } |
| 69 } | 104 } |
| 70 | 105 |
| 71 #if ENABLE(ASSERT) | 106 #if ENABLE(ASSERT) |
| 72 bool CallbackStack::Block::hasCallbackForObject(const void* object) | 107 bool CallbackStack::Block::hasCallbackForObject(const void* object) |
| 73 { | 108 { |
| 74 for (unsigned i = 0; m_buffer + i < m_current; i++) { | 109 for (unsigned i = 0; m_buffer + i < m_current; i++) { |
| 75 Item* item = &m_buffer[i]; | 110 Item* item = &m_buffer[i]; |
| 76 if (item->object() == object) | 111 if (item->object() == object) |
| 77 return true; | 112 return true; |
| 78 } | 113 } |
| 79 return false; | 114 return false; |
| 80 } | 115 } |
| 81 #endif | 116 #endif |
| 82 | 117 |
| 83 CallbackStack::CallbackStack(size_t blockSize) | 118 std::unique_ptr<CallbackStack> CallbackStack::create() |
| 84 : m_first(new Block(nullptr, blockSize)) | 119 { |
| 85 , m_last(m_first) | 120 return wrapUnique(new CallbackStack()); |
| 121 } | |
| 122 | |
| 123 CallbackStack::CallbackStack() | |
| 124 : m_first(nullptr) | |
| 125 , m_last(nullptr) | |
| 86 { | 126 { |
| 87 } | 127 } |
| 88 | 128 |
| 89 CallbackStack::~CallbackStack() | 129 CallbackStack::~CallbackStack() |
| 90 { | 130 { |
| 91 RELEASE_ASSERT(isEmpty()); | 131 CHECK(isEmpty()); |
| 92 delete m_first; | |
| 93 m_first = nullptr; | 132 m_first = nullptr; |
| 94 m_last = nullptr; | 133 m_last = nullptr; |
| 95 } | 134 } |
| 96 | 135 |
| 97 void CallbackStack::clear() | 136 void CallbackStack::commit() |
| 98 { | 137 { |
| 138 DCHECK(!m_first); | |
| 139 m_first = new Block(m_first); | |
| 140 m_last = m_first; | |
| 141 } | |
| 142 | |
| 143 void CallbackStack::decommit() | |
| 144 { | |
| 145 if (!m_first) | |
| 146 return; | |
| 99 Block* next; | 147 Block* next; |
| 100 for (Block* current = m_first->next(); current; current = next) { | 148 for (Block* current = m_first->next(); current; current = next) { |
| 101 next = current->next(); | 149 next = current->next(); |
| 102 delete current; | 150 delete current; |
| 103 } | 151 } |
| 104 m_first->reset(); | 152 delete m_first; |
| 105 m_last = m_first; | 153 m_last = m_first = nullptr; |
| 106 } | |
| 107 | |
| 108 void CallbackStack::decommit() | |
| 109 { | |
| 110 Block* next; | |
| 111 for (Block* current = m_first->next(); current; current = next) { | |
| 112 next = current->next(); | |
| 113 delete current; | |
| 114 } | |
| 115 m_first->decommit(); | |
| 116 m_last = m_first; | |
| 117 } | 154 } |
| 118 | 155 |
| 119 bool CallbackStack::isEmpty() const | 156 bool CallbackStack::isEmpty() const |
| 120 { | 157 { |
| 121 return hasJustOneBlock() && m_first->isEmptyBlock(); | 158 return !m_first || (hasJustOneBlock() && m_first->isEmptyBlock()); |
| 122 } | 159 } |
| 123 | 160 |
| 124 CallbackStack::Item* CallbackStack::allocateEntrySlow() | 161 CallbackStack::Item* CallbackStack::allocateEntrySlow() |
| 125 { | 162 { |
| 126 ASSERT(!m_first->allocateEntry()); | 163 DCHECK(m_first); |
| 127 m_first = new Block(m_first, m_first->blockSize()); | 164 DCHECK(!m_first->allocateEntry()); |
| 165 m_first = new Block(m_first); | |
| 128 return m_first->allocateEntry(); | 166 return m_first->allocateEntry(); |
| 129 } | 167 } |
| 130 | 168 |
| 131 CallbackStack::Item* CallbackStack::popSlow() | 169 CallbackStack::Item* CallbackStack::popSlow() |
| 132 { | 170 { |
| 133 ASSERT(m_first->isEmptyBlock()); | 171 DCHECK(m_first); |
| 172 DCHECK(m_first->isEmptyBlock()); | |
| 134 | 173 |
| 135 for (;;) { | 174 for (;;) { |
| 136 Block* next = m_first->next(); | 175 Block* next = m_first->next(); |
| 137 if (!next) { | 176 if (!next) { |
| 138 #if ENABLE(ASSERT) | 177 #if ENABLE(ASSERT) |
| 139 m_first->clear(); | 178 m_first->clear(); |
| 140 #endif | 179 #endif |
| 141 return nullptr; | 180 return nullptr; |
| 142 } | 181 } |
| 143 delete m_first; | 182 delete m_first; |
| (...skipping 18 matching lines...) Expand all Loading... | |
| 162 upto = from; | 201 upto = from; |
| 163 from = m_first; | 202 from = m_first; |
| 164 invokeOldestCallbacks(from, upto, visitor); | 203 invokeOldestCallbacks(from, upto, visitor); |
| 165 } | 204 } |
| 166 } | 205 } |
| 167 | 206 |
| 168 void CallbackStack::invokeOldestCallbacks(Block* from, Block* upto, Visitor* visitor) | 207 void CallbackStack::invokeOldestCallbacks(Block* from, Block* upto, Visitor* visitor) |
| 169 { | 208 { |
| 170 if (from == upto) | 209 if (from == upto) |
| 171 return; | 210 return; |
| 172 ASSERT(from); | 211 DCHECK(from); |
| 173 // Recurse first so we get to the newly added entries last. | 212 // Recurse first so we get to the newly added entries last. |
| 174 invokeOldestCallbacks(from->next(), upto, visitor); | 213 invokeOldestCallbacks(from->next(), upto, visitor); |
| 175 from->invokeEphemeronCallbacks(visitor); | 214 from->invokeEphemeronCallbacks(visitor); |
| 176 } | 215 } |
| 177 | 216 |
| 178 bool CallbackStack::hasJustOneBlock() const | 217 bool CallbackStack::hasJustOneBlock() const |
| 179 { | 218 { |
| 219 DCHECK(m_first); | |
| 180 return !m_first->next(); | 220 return !m_first->next(); |
| 181 } | 221 } |
| 182 | 222 |
| 183 #if ENABLE(ASSERT) | 223 #if ENABLE(ASSERT) |
| 184 bool CallbackStack::hasCallbackForObject(const void* object) | 224 bool CallbackStack::hasCallbackForObject(const void* object) |
| 185 { | 225 { |
| 186 for (Block* current = m_first; current; current = current->next()) { | 226 for (Block* current = m_first; current; current = current->next()) { |
| 187 if (current->hasCallbackForObject(object)) | 227 if (current->hasCallbackForObject(object)) |
| 188 return true; | 228 return true; |
| 189 } | 229 } |
| 190 return false; | 230 return false; |
| 191 } | 231 } |
| 192 #endif | 232 #endif |
| 193 | 233 |
| 194 } // namespace blink | 234 } // namespace blink |
| OLD | NEW |