Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 // Copyright 2014 The Chromium Authors. All rights reserved. | 1 // Copyright 2014 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "platform/heap/CallbackStack.h" | 5 #include "platform/heap/CallbackStack.h" |
| 6 #include "wtf/allocator/PageAllocator.h" | 6 #include "wtf/allocator/PageAllocator.h" |
| 7 | 7 |
| 8 namespace blink { | 8 namespace blink { |
| 9 | 9 |
| 10 size_t const CallbackStack::kMinimalBlockSize = WTF::kPageAllocationGranularity / sizeof(CallbackStack::Item); | 10 CallbackStackMemoryPool& CallbackStackMemoryPool::instance() |
| 11 { | |
| 12 DEFINE_STATIC_LOCAL(CallbackStackMemoryPool, memoryPool, ()); | |
| 13 return memoryPool; | |
| 14 } | |
| 11 | 15 |
| 12 CallbackStack::Block::Block(Block* next, size_t blockSize) | 16 void CallbackStackMemoryPool::initialize() |
| 13 : m_blockSize(blockSize) | |
| 14 { | 17 { |
| 15 // Allocated block size must be a multiple of WTF::kPageAllocationGranularity. | 18 MutexLocker locker(m_mutex); |
|
sof
2016/07/05 14:19:02
Do you need to take this lock?
| |
| 16 ASSERT((m_blockSize * sizeof(Item)) % WTF::kPageAllocationGranularity == 0); | 19 m_freeListFirst = 0; |
| 17 m_buffer = static_cast<Item*>(WTF::allocPages(nullptr, m_blockSize * sizeof(Item), WTF::kPageAllocationGranularity, WTF::PageAccessible)); | 20 for (size_t index = 0; index < kPooledBlockCount - 1; ++index) {
| 18 RELEASE_ASSERT(m_buffer); | 21 m_freeListNext[index] = index + 1; |
| 22 } | |
| 23 m_freeListNext[kPooledBlockCount - 1] = -1; | |
| 24 m_pooledMemory = static_cast<CallbackStack::Item*>(WTF::allocPages(nullptr, kBlockSize * kPooledBlockCount * sizeof(CallbackStack::Item), WTF::kPageAllocationGranularity, WTF::PageAccessible)); | |
| 25 CHECK(m_pooledMemory); | |
| 26 } | |
| 19 | 27 |
| 28 void CallbackStackMemoryPool::shutdown() | |
| 29 { | |
| 30 MutexLocker locker(m_mutex); | |
| 31 WTF::freePages(m_pooledMemory, kBlockSize * kPooledBlockCount * sizeof(CallbackStack::Item)); | |
|
sof
2016/07/05 14:19:01
Could you tidy up the use of the expressions (kPoo
sof
2016/07/05 14:19:01
clear m_pooledMemory after freeing?
| |
| 32 } | |
| 33 | |
| 34 CallbackStack::Item* CallbackStackMemoryPool::allocate() | |
| 35 { | |
| 36 MutexLocker locker(m_mutex); | |
| 37 static_assert((kBlockSize * sizeof(CallbackStack::Item)) % WTF::kPageAllocationGranularity == 0, "Allocated block size must be a multiple of WTF::kPageAllocationGranularity"); | |
|
haraken
2016/07/06 01:58:11
I don't think this assert is needed. So removed.
| |
| 38 if (m_freeListFirst != -1) { | |
|
sof
2016/07/05 14:19:02
(A short comment explaining the two code paths her
| |
| 39 size_t index = m_freeListFirst; | |
| 40 DCHECK(0 <= index && index < CallbackStackMemoryPool::kPooledBlockCount); | |
| 41 m_freeListFirst = m_freeListNext[index]; | |
| 42 m_freeListNext[index] = -1; | |
| 43 return m_pooledMemory + kBlockSize * index; | |
| 44 } | |
| 45 CallbackStack::Item* memory = static_cast<CallbackStack::Item*>(WTF::allocPages(nullptr, kBlockSize * sizeof(CallbackStack::Item), WTF::kPageAllocationGranularity, WTF::PageAccessible)); | |
| 46 CHECK(memory); | |
| 47 return memory; | |
| 48 } | |
| 49 | |
| 50 void CallbackStackMemoryPool::free(CallbackStack::Item* memory) | |
| 51 { | |
| 52 MutexLocker locker(m_mutex); | |
| 53 int index = (reinterpret_cast<uintptr_t>(memory) - reinterpret_cast<uintptr_t>(m_pooledMemory)) / (kBlockSize * sizeof(CallbackStack::Item)); | |
| 54 if (index < 0 || static_cast<int>(kPooledBlockCount) <= index) { | |
| 55 WTF::freePages(memory, kBlockSize * sizeof(CallbackStack::Item)); | |
| 56 return; | |
| 57 } | |
| 58 DCHECK_EQ(m_freeListNext[index], -1); | |
| 59 m_freeListNext[index] = m_freeListFirst; | |
| 60 m_freeListFirst = index; | |
| 61 } | |
| 62 | |
| 63 CallbackStack::Block::Block(Block* next) | |
| 64 { | |
| 65 m_buffer = CallbackStackMemoryPool::instance().allocate(); | |
| 20 #if ENABLE(ASSERT) | 66 #if ENABLE(ASSERT) |
| 21 clear(); | 67 clear(); |
| 22 #endif | 68 #endif |
| 23 | 69 |
| 24 m_limit = &(m_buffer[m_blockSize]); | 70 m_limit = &(m_buffer[CallbackStackMemoryPool::kBlockSize]); |
| 25 m_current = &(m_buffer[0]); | 71 m_current = &(m_buffer[0]); |
| 26 m_next = next; | 72 m_next = next; |
| 27 } | 73 } |
| 28 | 74 |
| 29 CallbackStack::Block::~Block() | 75 CallbackStack::Block::~Block() |
| 30 { | 76 { |
| 31 WTF::freePages(m_buffer, m_blockSize * sizeof(Item)); | 77 CallbackStackMemoryPool::instance().free(m_buffer); |
| 32 m_buffer = nullptr; | 78 m_buffer = nullptr; |
| 33 m_limit = nullptr; | 79 m_limit = nullptr; |
| 34 m_current = nullptr; | 80 m_current = nullptr; |
| 35 m_next = nullptr; | 81 m_next = nullptr; |
| 36 } | 82 } |
| 37 | 83 |
| 38 #if ENABLE(ASSERT) | 84 #if ENABLE(ASSERT) |
| 39 void CallbackStack::Block::clear() | 85 void CallbackStack::Block::clear() |
| 40 { | 86 { |
| 41 for (size_t i = 0; i < m_blockSize; i++) | 87 for (size_t i = 0; i < CallbackStackMemoryPool::kBlockSize; i++) |
| 42 m_buffer[i] = Item(0, 0); | 88 m_buffer[i] = Item(0, 0); |
| 43 } | 89 } |
| 44 #endif | 90 #endif |
| 45 | 91 |
| 46 void CallbackStack::Block::decommit() | |
| 47 { | |
| 48 reset(); | |
| 49 WTF::discardSystemPages(m_buffer, m_blockSize * sizeof(Item)); | |
| 50 } | |
| 51 | |
| 52 void CallbackStack::Block::reset() | |
| 53 { | |
| 54 #if ENABLE(ASSERT) | |
| 55 clear(); | |
| 56 #endif | |
| 57 m_current = &m_buffer[0]; | |
| 58 m_next = nullptr; | |
| 59 } | |
| 60 | |
| 61 void CallbackStack::Block::invokeEphemeronCallbacks(Visitor* visitor) | 92 void CallbackStack::Block::invokeEphemeronCallbacks(Visitor* visitor) |
| 62 { | 93 { |
| 63 // This loop can tolerate entries being added by the callbacks after | 94 // This loop can tolerate entries being added by the callbacks after |
| 64 // iteration starts. | 95 // iteration starts. |
| 65 for (unsigned i = 0; m_buffer + i < m_current; i++) { | 96 for (unsigned i = 0; m_buffer + i < m_current; i++) { |
| 66 Item& item = m_buffer[i]; | 97 Item& item = m_buffer[i]; |
| 67 item.call(visitor); | 98 item.call(visitor); |
| 68 } | 99 } |
| 69 } | 100 } |
| 70 | 101 |
| 71 #if ENABLE(ASSERT) | 102 #if ENABLE(ASSERT) |
| 72 bool CallbackStack::Block::hasCallbackForObject(const void* object) | 103 bool CallbackStack::Block::hasCallbackForObject(const void* object) |
| 73 { | 104 { |
| 74 for (unsigned i = 0; m_buffer + i < m_current; i++) { | 105 for (unsigned i = 0; m_buffer + i < m_current; i++) { |
| 75 Item* item = &m_buffer[i]; | 106 Item* item = &m_buffer[i]; |
| 76 if (item->object() == object) | 107 if (item->object() == object) |
| 77 return true; | 108 return true; |
| 78 } | 109 } |
| 79 return false; | 110 return false; |
| 80 } | 111 } |
| 81 #endif | 112 #endif |
| 82 | 113 |
| 83 CallbackStack::CallbackStack(size_t blockSize) | 114 CallbackStack::CallbackStack() |
| 84 : m_first(new Block(nullptr, blockSize)) | 115 : m_first(nullptr) |
| 85 , m_last(m_first) | 116 , m_last(nullptr) |
| 86 { | 117 { |
| 87 } | 118 } |
| 88 | 119 |
| 89 CallbackStack::~CallbackStack() | 120 CallbackStack::~CallbackStack() |
| 90 { | 121 { |
| 91 RELEASE_ASSERT(isEmpty()); | 122 CHECK(isEmpty()); |
| 92 delete m_first; | |
| 93 m_first = nullptr; | 123 m_first = nullptr; |
| 94 m_last = nullptr; | 124 m_last = nullptr; |
| 95 } | 125 } |
| 96 | 126 |
| 97 void CallbackStack::clear() | 127 void CallbackStack::commit() |
| 98 { | 128 { |
| 129 DCHECK(!m_first); | |
| 130 m_first = new Block(m_first); | |
| 131 m_last = m_first; | |
| 132 } | |
| 133 | |
| 134 void CallbackStack::decommit() | |
| 135 { | |
| 136 if (!m_first) | |
| 137 return; | |
| 99 Block* next; | 138 Block* next; |
| 100 for (Block* current = m_first->next(); current; current = next) { | 139 for (Block* current = m_first->next(); current; current = next) { |
| 101 next = current->next(); | 140 next = current->next(); |
| 102 delete current; | 141 delete current; |
| 103 } | 142 } |
| 104 m_first->reset(); | 143 delete m_first; |
| 105 m_last = m_first; | 144 m_last = m_first = nullptr; |
| 106 } | |
| 107 | |
| 108 void CallbackStack::decommit() | |
| 109 { | |
| 110 Block* next; | |
| 111 for (Block* current = m_first->next(); current; current = next) { | |
| 112 next = current->next(); | |
| 113 delete current; | |
| 114 } | |
| 115 m_first->decommit(); | |
| 116 m_last = m_first; | |
| 117 } | 145 } |
| 118 | 146 |
| 119 bool CallbackStack::isEmpty() const | 147 bool CallbackStack::isEmpty() const |
| 120 { | 148 { |
| 121 return hasJustOneBlock() && m_first->isEmptyBlock(); | 149 return !m_first || (hasJustOneBlock() && m_first->isEmptyBlock()); |
| 122 } | 150 } |
| 123 | 151 |
| 124 CallbackStack::Item* CallbackStack::allocateEntrySlow() | 152 CallbackStack::Item* CallbackStack::allocateEntrySlow() |
| 125 { | 153 { |
| 126 ASSERT(!m_first->allocateEntry()); | 154 DCHECK(m_first); |
| 127 m_first = new Block(m_first, m_first->blockSize()); | 155 DCHECK(!m_first->allocateEntry()); |
| 156 m_first = new Block(m_first); | |
| 128 return m_first->allocateEntry(); | 157 return m_first->allocateEntry(); |
| 129 } | 158 } |
| 130 | 159 |
| 131 CallbackStack::Item* CallbackStack::popSlow() | 160 CallbackStack::Item* CallbackStack::popSlow() |
| 132 { | 161 { |
| 133 ASSERT(m_first->isEmptyBlock()); | 162 DCHECK(m_first); |
| 163 DCHECK(m_first->isEmptyBlock()); | |
| 134 | 164 |
| 135 for (;;) { | 165 for (;;) { |
| 136 Block* next = m_first->next(); | 166 Block* next = m_first->next(); |
| 137 if (!next) { | 167 if (!next) { |
| 138 #if ENABLE(ASSERT) | 168 #if ENABLE(ASSERT) |
| 139 m_first->clear(); | 169 m_first->clear(); |
| 140 #endif | 170 #endif |
| 141 return nullptr; | 171 return nullptr; |
| 142 } | 172 } |
| 143 delete m_first; | 173 delete m_first; |
| (...skipping 18 matching lines...) Expand all Loading... | |
| 162 upto = from; | 192 upto = from; |
| 163 from = m_first; | 193 from = m_first; |
| 164 invokeOldestCallbacks(from, upto, visitor); | 194 invokeOldestCallbacks(from, upto, visitor); |
| 165 } | 195 } |
| 166 } | 196 } |
| 167 | 197 |
| 168 void CallbackStack::invokeOldestCallbacks(Block* from, Block* upto, Visitor* visitor) | 198 void CallbackStack::invokeOldestCallbacks(Block* from, Block* upto, Visitor* visitor) |
| 169 { | 199 { |
| 170 if (from == upto) | 200 if (from == upto) |
| 171 return; | 201 return; |
| 172 ASSERT(from); | 202 DCHECK(from); |
| 173 // Recurse first so we get to the newly added entries last. | 203 // Recurse first so we get to the newly added entries last. |
| 174 invokeOldestCallbacks(from->next(), upto, visitor); | 204 invokeOldestCallbacks(from->next(), upto, visitor); |
| 175 from->invokeEphemeronCallbacks(visitor); | 205 from->invokeEphemeronCallbacks(visitor); |
| 176 } | 206 } |
| 177 | 207 |
| 178 bool CallbackStack::hasJustOneBlock() const | 208 bool CallbackStack::hasJustOneBlock() const |
| 179 { | 209 { |
| 210 DCHECK(m_first); | |
| 180 return !m_first->next(); | 211 return !m_first->next(); |
| 181 } | 212 } |
| 182 | 213 |
| 183 #if ENABLE(ASSERT) | 214 #if ENABLE(ASSERT) |
| 184 bool CallbackStack::hasCallbackForObject(const void* object) | 215 bool CallbackStack::hasCallbackForObject(const void* object) |
| 185 { | 216 { |
| 186 for (Block* current = m_first; current; current = current->next()) { | 217 for (Block* current = m_first; current; current = current->next()) { |
| 187 if (current->hasCallbackForObject(object)) | 218 if (current->hasCallbackForObject(object)) |
| 188 return true; | 219 return true; |
| 189 } | 220 } |
| 190 return false; | 221 return false; |
| 191 } | 222 } |
| 192 #endif | 223 #endif |
| 193 | 224 |
| 194 } // namespace blink | 225 } // namespace blink |
| OLD | NEW |