OLD | NEW |
1 // Copyright 2014 The Chromium Authors. All rights reserved. | 1 // Copyright 2014 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "platform/heap/CallbackStack.h" | 5 #include "platform/heap/CallbackStack.h" |
6 #include "wtf/PageAllocator.h" | 6 #include "wtf/PageAllocator.h" |
7 | 7 |
8 namespace blink { | 8 namespace blink { |
9 | 9 |
10 CallbackStack::Block::Block(Block* next) | 10 CallbackStack::Block::Block(Block* next, size_t blockSize) |
| 11 : m_blockSize(blockSize) |
11 { | 12 { |
12 static_assert((blockSize * sizeof(Item)) % WTF::kPageAllocationGranularity == 0, "CallbackStack::blockSize * sizeof(Item) must be a multiple of WTF::kPageAllocationGranularity"); | 13 // Allocated block size must be a multiple of WTF::kPageAllocationGranularity. |
13 m_buffer = static_cast<Item*>(WTF::allocPages(nullptr, blockSize * sizeof(Item), WTF::kPageAllocationGranularity, WTF::PageAccessible)); | 14 ASSERT((m_blockSize * sizeof(Item)) % WTF::kPageAllocationGranularity == 0); |
 | 15 m_buffer = static_cast<Item*>(WTF::allocPages(nullptr, m_blockSize * sizeof(Item), WTF::kPageAllocationGranularity, WTF::PageAccessible)); |
14 RELEASE_ASSERT(m_buffer); | 16 RELEASE_ASSERT(m_buffer); |
15 | 17 |
16 #if ENABLE(ASSERT) | 18 #if ENABLE(ASSERT) |
17 clear(); | 19 clear(); |
18 #endif | 20 #endif |
19 | 21 |
20 m_limit = &(m_buffer[blockSize]); | 22 m_limit = &(m_buffer[m_blockSize]); |
21 m_current = &(m_buffer[0]); | 23 m_current = &(m_buffer[0]); |
22 m_next = next; | 24 m_next = next; |
23 } | 25 } |
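Note: the compile-time static_assert becomes a runtime ASSERT because blockSize is no longer a class-level constant; as a constructor argument it is only known at run time. A worked example of the granularity constraint, assuming sizeof(Item) is 16 bytes (two pointers on a 64-bit build) and WTF::kPageAllocationGranularity is 4096; both numbers are assumptions for illustration, not taken from this diff:

    // With the assumed sizes, blockSize * 16 must fill whole 4096-byte
    // granules, i.e. blockSize must be a multiple of 4096 / 16 = 256.
    size_t blockSize = 256;  // 256 * 16 == 4096: one granule, ASSERT passes
    size_t tooSmall = 100;   // 100 * 16 == 1600: not a multiple, ASSERT fires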
24 | 26 |
25 CallbackStack::Block::~Block() | 27 CallbackStack::Block::~Block() |
26 { | 28 { |
27 WTF::freePages(m_buffer, blockSize * sizeof(Item)); | 29 WTF::freePages(m_buffer, m_blockSize * sizeof(Item)); |
28 m_buffer = nullptr; | 30 m_buffer = nullptr; |
29 m_limit = nullptr; | 31 m_limit = nullptr; |
30 m_current = nullptr; | 32 m_current = nullptr; |
31 m_next = nullptr; | 33 m_next = nullptr; |
32 } | 34 } |
33 | 35 |
34 #if ENABLE(ASSERT) | 36 #if ENABLE(ASSERT) |
35 void CallbackStack::Block::clear() | 37 void CallbackStack::Block::clear() |
36 { | 38 { |
37 for (size_t i = 0; i < blockSize; i++) | 39 for (size_t i = 0; i < m_blockSize; i++) |
38 m_buffer[i] = Item(0, 0); | 40 m_buffer[i] = Item(0, 0); |
39 } | 41 } |
40 #endif | 42 #endif |
41 | 43 |
42 void CallbackStack::Block::decommit() | 44 void CallbackStack::Block::decommit() |
43 { | 45 { |
44 #if ENABLE(ASSERT) | 46 #if ENABLE(ASSERT) |
45 clear(); | 47 clear(); |
46 #endif | 48 #endif |
47 WTF::discardSystemPages(m_buffer, blockSize * sizeof(Item)); | 49 WTF::discardSystemPages(m_buffer, m_blockSize * sizeof(Item)); |
48 | 50 |
49 m_current = &m_buffer[0]; | 51 m_current = &m_buffer[0]; |
50 m_next = nullptr; | 52 m_next = nullptr; |
51 } | 53 } |
52 | 54 |
53 void CallbackStack::Block::invokeEphemeronCallbacks(Visitor* visitor) | 55 void CallbackStack::Block::invokeEphemeronCallbacks(Visitor* visitor) |
54 { | 56 { |
55 // This loop can tolerate entries being added by the callbacks after | 57 // This loop can tolerate entries being added by the callbacks after |
56 // iteration starts. | 58 // iteration starts. |
57 for (unsigned i = 0; m_buffer + i < m_current; i++) { | 59 for (unsigned i = 0; m_buffer + i < m_current; i++) { |
58 Item& item = m_buffer[i]; | 60 Item& item = m_buffer[i]; |
59 item.call(visitor); | 61 item.call(visitor); |
60 } | 62 } |
61 } | 63 } |
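The loop above tolerates appends because its bound, m_current, is re-read on every iteration: an ephemeron callback that pushes new items into this same block advances m_current, and the loop visits those items before exiting. A sketch of the variant this guards against (written for illustration, not code from this change):

    // WRONG: snapshotting the bound would skip items the callbacks append.
    Item* end = m_current;
    for (unsigned i = 0; m_buffer + i < end; i++)
        m_buffer[i].call(visitor);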
62 | 64 |
63 #if ENABLE(ASSERT) | 65 #if ENABLE(ASSERT) |
64 bool CallbackStack::Block::hasCallbackForObject(const void* object) | 66 bool CallbackStack::Block::hasCallbackForObject(const void* object) |
65 { | 67 { |
66 for (unsigned i = 0; m_buffer + i < m_current; i++) { | 68 for (unsigned i = 0; m_buffer + i < m_current; i++) { |
67 Item* item = &m_buffer[i]; | 69 Item* item = &m_buffer[i]; |
68 if (item->object() == object) | 70 if (item->object() == object) |
69 return true; | 71 return true; |
70 } | 72 } |
71 return false; | 73 return false; |
72 } | 74 } |
73 #endif | 75 #endif |
74 | 76 |
75 CallbackStack::CallbackStack() | 77 CallbackStack::CallbackStack(size_t blockSize) |
76 : m_first(new Block(0)) | 78 : m_first(new Block(nullptr, blockSize)) |
77 , m_last(m_first) | 79 , m_last(m_first) |
78 { | 80 { |
79 } | 81 } |
80 | 82 |
81 CallbackStack::~CallbackStack() | 83 CallbackStack::~CallbackStack() |
82 { | 84 { |
83 RELEASE_ASSERT(isEmpty()); | 85 RELEASE_ASSERT(isEmpty()); |
84 delete m_first; | 86 delete m_first; |
85 m_first = nullptr; | 87 m_first = nullptr; |
86 m_last = nullptr; | 88 m_last = nullptr; |
(...skipping 11 matching lines...) |

98 } | 100 } |
99 | 101 |
100 bool CallbackStack::isEmpty() const | 102 bool CallbackStack::isEmpty() const |
101 { | 103 { |
102 return hasJustOneBlock() && m_first->isEmptyBlock(); | 104 return hasJustOneBlock() && m_first->isEmptyBlock(); |
103 } | 105 } |
104 | 106 |
105 CallbackStack::Item* CallbackStack::allocateEntrySlow() | 107 CallbackStack::Item* CallbackStack::allocateEntrySlow() |
106 { | 108 { |
107 ASSERT(!m_first->allocateEntry()); | 109 ASSERT(!m_first->allocateEntry()); |
108 m_first = new Block(m_first); | 110 m_first = new Block(m_first, m_first->blockSize()); |
109 return m_first->allocateEntry(); | 111 return m_first->allocateEntry(); |
110 } | 112 } |
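allocateEntrySlow is the overflow path: when the front block is full, a fresh block of the same size is pushed onto the head of the chain, so m_first always points at the newest block. A plausible sketch of the inline fast path in the header, which is not part of this diff and is a hypothetical reconstruction:

    // Assumed header-side fast path (hypothetical):
    CallbackStack::Item* CallbackStack::allocateEntry()
    {
        if (Item* item = m_first->allocateEntry())  // bump-pointer within the front block
            return item;
        return allocateEntrySlow();  // front block full: chain a new one
    }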
111 | 113 |
112 CallbackStack::Item* CallbackStack::popSlow() | 114 CallbackStack::Item* CallbackStack::popSlow() |
113 { | 115 { |
114 ASSERT(m_first->isEmptyBlock()); | 116 ASSERT(m_first->isEmptyBlock()); |
115 | 117 |
116 for (;;) { | 118 for (;;) { |
117 Block* next = m_first->next(); | 119 Block* next = m_first->next(); |
118 if (!next) { | 120 if (!next) { |
(...skipping 25 matching lines...) |
144 from = m_first; | 146 from = m_first; |
145 invokeOldestCallbacks(from, upto, visitor); | 147 invokeOldestCallbacks(from, upto, visitor); |
146 } | 148 } |
147 } | 149 } |
148 | 150 |
149 void CallbackStack::invokeOldestCallbacks(Block* from, Block* upto, Visitor* visitor) | 151 void CallbackStack::invokeOldestCallbacks(Block* from, Block* upto, Visitor* visitor) |
150 { | 152 { |
151 if (from == upto) | 153 if (from == upto) |
152 return; | 154 return; |
153 ASSERT(from); | 155 ASSERT(from); |
154 // Recurse first (blockSize at a time) so we get to the newly added entries last. | 156 // Recurse first so we get to the newly added entries last. |
155 invokeOldestCallbacks(from->next(), upto, visitor); | 157 invokeOldestCallbacks(from->next(), upto, visitor); |
156 from->invokeEphemeronCallbacks(visitor); | 158 from->invokeEphemeronCallbacks(visitor); |
157 } | 159 } |
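Because new blocks are pushed onto the front of the chain (see allocateEntrySlow above), m_first is the newest block and the tail is the oldest; recursing before invoking therefore replays the chain oldest-first. An equivalent iterative sketch, assumed for illustration only (WTF::Vector from wtf/Vector.h):

    // Collect the chain, then replay it back-to-front (oldest first).
    Vector<Block*> chain;
    for (Block* block = from; block != upto; block = block->next())
        chain.append(block);
    for (size_t i = chain.size(); i > 0; --i)
        chain[i - 1]->invokeEphemeronCallbacks(visitor);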
158 | 160 |
159 bool CallbackStack::hasJustOneBlock() const | 161 bool CallbackStack::hasJustOneBlock() const |
160 { | 162 { |
161 return !m_first->next(); | 163 return !m_first->next(); |
162 } | 164 } |
163 | 165 |
164 #if ENABLE(ASSERT) | 166 #if ENABLE(ASSERT) |
165 bool CallbackStack::hasCallbackForObject(const void* object) | 167 bool CallbackStack::hasCallbackForObject(const void* object) |
166 { | 168 { |
167 for (Block* current = m_first; current; current = current->next()) { | 169 for (Block* current = m_first; current; current = current->next()) { |
168 if (current->hasCallbackForObject(object)) | 170 if (current->hasCallbackForObject(object)) |
169 return true; | 171 return true; |
170 } | 172 } |
171 return false; | 173 return false; |
172 } | 174 } |
173 #endif | 175 #endif |
174 | 176 |
175 } // namespace blink | 177 } // namespace blink |
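A minimal usage sketch of the new runtime block size; the call site and the constant name are invented for illustration, since the callers are outside this diff:

    // Hypothetical caller, e.g. somewhere in ThreadState setup.
    static const size_t kMarkingBlockSize = 256;  // invented value
    CallbackStack* markingStack = new CallbackStack(kMarkingBlockSize);
    // ... push and drain entries during marking ...
    delete markingStack;  // RELEASE_ASSERT(isEmpty()) must hold by now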