OLD | NEW |
1 /* | 1 /* |
2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
6 * met: | 6 * met: |
7 * | 7 * |
8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
(...skipping 44 matching lines...)
55 #ifdef ANNOTATE_CONTIGUOUS_CONTAINER | 55 #ifdef ANNOTATE_CONTIGUOUS_CONTAINER |
56 // FIXME: have ContainerAnnotations.h define an ENABLE_-style name instead. | 56 // FIXME: have ContainerAnnotations.h define an ENABLE_-style name instead. |
57 #define ENABLE_ASAN_CONTAINER_ANNOTATIONS 1 | 57 #define ENABLE_ASAN_CONTAINER_ANNOTATIONS 1 |
58 | 58 |
59 // When finalizing a non-inlined vector backing store/container, remove | 59 // When finalizing a non-inlined vector backing store/container, remove |
60 // its contiguous container annotation. Required as it will not be destructed | 60 // its contiguous container annotation. Required as it will not be destructed |
61 // from its Vector. | 61 // from its Vector. |
62 #define ASAN_RETIRE_CONTAINER_ANNOTATION(object, objectSize) \ | 62 #define ASAN_RETIRE_CONTAINER_ANNOTATION(object, objectSize) \ |
63 do { \ | 63 do { \ |
64 BasePage* page = pageFromObject(object); \ | 64 BasePage* page = pageFromObject(object); \ |
65 ASSERT(page); \ | 65 DCHECK(page); \ |
66 bool isContainer = \ | 66 bool isContainer = \ |
67 ThreadState::isVectorArenaIndex(page->arena()->arenaIndex()); \ | 67 ThreadState::isVectorArenaIndex(page->arena()->arenaIndex()); \ |
68 if (!isContainer && page->isLargeObjectPage()) \ | 68 if (!isContainer && page->isLargeObjectPage()) \ |
69 isContainer = \ | 69 isContainer = \ |
70 static_cast<LargeObjectPage*>(page)->isVectorBackingPage(); \ | 70 static_cast<LargeObjectPage*>(page)->isVectorBackingPage(); \ |
71 if (isContainer) \ | 71 if (isContainer) \ |
72 ANNOTATE_DELETE_BUFFER(object, objectSize, 0); \ | 72 ANNOTATE_DELETE_BUFFER(object, objectSize, 0); \ |
73 } while (0) | 73 } while (0) |
74 | 74 |
75 // A vector backing store represented by a large object is marked | 75 // A vector backing store represented by a large object is marked |
76 // so that when it is finalized, its ASan annotation will be | 76 // so that when it is finalized, its ASan annotation will be |
77 // correctly retired. | 77 // correctly retired. |
78 #define ASAN_MARK_LARGE_VECTOR_CONTAINER(arena, largeObject) \ | 78 #define ASAN_MARK_LARGE_VECTOR_CONTAINER(arena, largeObject) \ |
79 if (ThreadState::isVectorArenaIndex(arena->arenaIndex())) { \ | 79 if (ThreadState::isVectorArenaIndex(arena->arenaIndex())) { \ |
80 BasePage* largePage = pageFromObject(largeObject); \ | 80 BasePage* largePage = pageFromObject(largeObject); \ |
81 ASSERT(largePage->isLargeObjectPage()); \ | 81 DCHECK(largePage->isLargeObjectPage()); \ |
82 static_cast<LargeObjectPage*>(largePage)->setIsVectorBackingPage(); \ | 82 static_cast<LargeObjectPage*>(largePage)->setIsVectorBackingPage(); \ |
83 } | 83 } |
84 #else | 84 #else |
85 #define ENABLE_ASAN_CONTAINER_ANNOTATIONS 0 | 85 #define ENABLE_ASAN_CONTAINER_ANNOTATIONS 0 |
86 #define ASAN_RETIRE_CONTAINER_ANNOTATION(payload, payloadSize) | 86 #define ASAN_RETIRE_CONTAINER_ANNOTATION(payload, payloadSize) |
87 #define ASAN_MARK_LARGE_VECTOR_CONTAINER(arena, largeObject) | 87 #define ASAN_MARK_LARGE_VECTOR_CONTAINER(arena, largeObject) |
88 #endif | 88 #endif |
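The annotation macros above ultimately drive ASan's contiguous-container interface. As a rough, hedged sketch of what "retiring" an annotation amounts to (assuming ANNOTATE_DELETE_BUFFER expands to __sanitizer_annotate_contiguous_container() as in WTF's ContainerAnnotations.h; the helper name and parameters below are illustrative, and the header is only available in an ASan build):

#include <cstddef>
#include <sanitizer/common_interface_defs.h>  // ASan build only.

// A vector backing store of `capacity` bytes tells ASan that only the first
// `usedSize` bytes are valid; the unused tail is poisoned. Retiring the
// annotation re-validates the whole buffer so that freeing the backing store
// outside of Vector's destructor does not trip the poisoning.
static void retireContainerAnnotation(char* buffer, size_t capacity,
                                      size_t usedSize) {
  __sanitizer_annotate_contiguous_container(
      buffer,              // beg: start of the backing store
      buffer + capacity,   // end: one past the end of the backing store
      buffer + usedSize,   // old_mid: end of the previously valid prefix
      buffer + capacity);  // new_mid: the whole buffer is valid again
}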
89 | 89 |
90 namespace blink { | 90 namespace blink { |
91 | 91 |
92 #if ENABLE(ASSERT) | 92 #if DCHECK_IS_ON() |
93 NO_SANITIZE_ADDRESS | 93 NO_SANITIZE_ADDRESS |
94 void HeapObjectHeader::zapMagic() { | 94 void HeapObjectHeader::zapMagic() { |
95 ASSERT(checkHeader()); | 95 DCHECK(checkHeader()); |
96 m_magic = zappedMagic; | 96 m_magic = zappedMagic; |
97 } | 97 } |
98 #endif | 98 #endif |
99 | 99 |
100 void HeapObjectHeader::finalize(Address object, size_t objectSize) { | 100 void HeapObjectHeader::finalize(Address object, size_t objectSize) { |
101 HeapAllocHooks::freeHookIfEnabled(object); | 101 HeapAllocHooks::freeHookIfEnabled(object); |
102 const GCInfo* gcInfo = ThreadHeap::gcInfo(gcInfoIndex()); | 102 const GCInfo* gcInfo = ThreadHeap::gcInfo(gcInfoIndex()); |
103 if (gcInfo->hasFinalizer()) | 103 if (gcInfo->hasFinalizer()) |
104 gcInfo->m_finalize(object); | 104 gcInfo->m_finalize(object); |
105 | 105 |
106 ASAN_RETIRE_CONTAINER_ANNOTATION(object, objectSize); | 106 ASAN_RETIRE_CONTAINER_ANNOTATION(object, objectSize); |
107 } | 107 } |
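HeapObjectHeader::finalize() above is a table-driven dispatch: a per-type GCInfo record decides whether a finalizer runs. A minimal sketch of that pattern with simplified, illustrative types (this is not Blink's actual GCInfo layout):

#include <cstddef>

// Simplified stand-in for a per-type GCInfo record.
struct GCInfoLike {
  void (*finalize)(void* object);  // Null for trivially destructible types.
  bool hasFinalizer() const { return finalize != nullptr; }
};

// Mirrors the dispatch above: look up the record by index and invoke the
// finalizer only if the type registered one.
void finalizeObject(const GCInfoLike* table, size_t gcInfoIndex, void* object) {
  const GCInfoLike& info = table[gcInfoIndex];
  if (info.hasFinalizer())
    info.finalize(object);
}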
108 | 108 |
109 BaseArena::BaseArena(ThreadState* state, int index) | 109 BaseArena::BaseArena(ThreadState* state, int index) |
110 : m_firstPage(nullptr), | 110 : m_firstPage(nullptr), |
111 m_firstUnsweptPage(nullptr), | 111 m_firstUnsweptPage(nullptr), |
112 m_threadState(state), | 112 m_threadState(state), |
113 m_index(index) {} | 113 m_index(index) {} |
114 | 114 |
115 BaseArena::~BaseArena() { | 115 BaseArena::~BaseArena() { |
116 ASSERT(!m_firstPage); | 116 DCHECK(!m_firstPage); |
117 ASSERT(!m_firstUnsweptPage); | 117 DCHECK(!m_firstUnsweptPage); |
118 } | 118 } |
119 | 119 |
120 void BaseArena::cleanupPages() { | 120 void BaseArena::cleanupPages() { |
121 clearFreeLists(); | 121 clearFreeLists(); |
122 | 122 |
123 ASSERT(!m_firstUnsweptPage); | 123 DCHECK(!m_firstUnsweptPage); |
124 // Add the BaseArena's pages to the orphanedPagePool. | 124 // Add the BaseArena's pages to the orphanedPagePool. |
125 for (BasePage* page = m_firstPage; page; page = page->next()) { | 125 for (BasePage* page = m_firstPage; page; page = page->next()) { |
126 getThreadState()->heap().heapStats().decreaseAllocatedSpace(page->size()); | 126 getThreadState()->heap().heapStats().decreaseAllocatedSpace(page->size()); |
127 getThreadState()->heap().getOrphanedPagePool()->addOrphanedPage( | 127 getThreadState()->heap().getOrphanedPagePool()->addOrphanedPage( |
128 arenaIndex(), page); | 128 arenaIndex(), page); |
129 } | 129 } |
130 m_firstPage = nullptr; | 130 m_firstPage = nullptr; |
131 } | 131 } |
132 | 132 |
133 void BaseArena::takeSnapshot(const String& dumpBaseName, | 133 void BaseArena::takeSnapshot(const String& dumpBaseName, |
(...skipping 17 matching lines...)
151 allocatorDump->AddScalar("blink_page_count", "objects", pageCount); | 151 allocatorDump->AddScalar("blink_page_count", "objects", pageCount); |
152 | 152 |
153 // When taking a full dump (w/ freelist), both the /buckets and /pages | 153 // When taking a full dump (w/ freelist), both the /buckets and /pages |
154 // report their free size but they are not meant to be added together. | 154 // report their free size but they are not meant to be added together. |
155 // Therefore, here we override the free_size of the parent heap to be | 155 // Therefore, here we override the free_size of the parent heap to be |
156 // equal to the free_size of the sum of its heap pages. | 156 // equal to the free_size of the sum of its heap pages. |
157 allocatorDump->AddScalar("free_size", "bytes", heapInfo.freeSize); | 157 allocatorDump->AddScalar("free_size", "bytes", heapInfo.freeSize); |
158 allocatorDump->AddScalar("free_count", "objects", heapInfo.freeCount); | 158 allocatorDump->AddScalar("free_count", "objects", heapInfo.freeCount); |
159 } | 159 } |
160 | 160 |
161 #if ENABLE(ASSERT) | 161 #if DCHECK_IS_ON() |
162 BasePage* BaseArena::findPageFromAddress(Address address) { | 162 BasePage* BaseArena::findPageFromAddress(Address address) { |
163 for (BasePage* page = m_firstPage; page; page = page->next()) { | 163 for (BasePage* page = m_firstPage; page; page = page->next()) { |
164 if (page->contains(address)) | 164 if (page->contains(address)) |
165 return page; | 165 return page; |
166 } | 166 } |
167 for (BasePage* page = m_firstUnsweptPage; page; page = page->next()) { | 167 for (BasePage* page = m_firstUnsweptPage; page; page = page->next()) { |
168 if (page->contains(address)) | 168 if (page->contains(address)) |
169 return page; | 169 return page; |
170 } | 170 } |
171 return nullptr; | 171 return nullptr; |
172 } | 172 } |
173 #endif | 173 #endif |
174 | 174 |
175 void BaseArena::makeConsistentForGC() { | 175 void BaseArena::makeConsistentForGC() { |
176 clearFreeLists(); | 176 clearFreeLists(); |
177 ASSERT(isConsistentForGC()); | 177 DCHECK(isConsistentForGC()); |
178 for (BasePage* page = m_firstPage; page; page = page->next()) { | 178 for (BasePage* page = m_firstPage; page; page = page->next()) { |
179 page->markAsUnswept(); | 179 page->markAsUnswept(); |
180 page->invalidateObjectStartBitmap(); | 180 page->invalidateObjectStartBitmap(); |
181 } | 181 } |
182 | 182 |
183 // If a new GC is requested before this thread got around to sweep, | 183 // If a new GC is requested before this thread got around to sweep, |
184 // ie. due to the thread doing a long running operation, we clear | 184 // ie. due to the thread doing a long running operation, we clear |
185 // the mark bits and mark any of the dead objects as dead. The latter | 185 // the mark bits and mark any of the dead objects as dead. The latter |
186 // is used to ensure the next GC marking does not trace already dead | 186 // is used to ensure the next GC marking does not trace already dead |
187 // objects. If we trace a dead object we could end up tracing into | 187 // objects. If we trace a dead object we could end up tracing into |
188 // garbage or the middle of another object via the newly conservatively | 188 // garbage or the middle of another object via the newly conservatively |
189 // found object. | 189 // found object. |
190 BasePage* previousPage = nullptr; | 190 BasePage* previousPage = nullptr; |
191 for (BasePage *page = m_firstUnsweptPage; page; | 191 for (BasePage *page = m_firstUnsweptPage; page; |
192 previousPage = page, page = page->next()) { | 192 previousPage = page, page = page->next()) { |
193 page->makeConsistentForGC(); | 193 page->makeConsistentForGC(); |
194 ASSERT(!page->hasBeenSwept()); | 194 DCHECK(!page->hasBeenSwept()); |
195 page->invalidateObjectStartBitmap(); | 195 page->invalidateObjectStartBitmap(); |
196 } | 196 } |
197 if (previousPage) { | 197 if (previousPage) { |
198 ASSERT(m_firstUnsweptPage); | 198 DCHECK(m_firstUnsweptPage); |
199 previousPage->m_next = m_firstPage; | 199 previousPage->m_next = m_firstPage; |
200 m_firstPage = m_firstUnsweptPage; | 200 m_firstPage = m_firstUnsweptPage; |
201 m_firstUnsweptPage = nullptr; | 201 m_firstUnsweptPage = nullptr; |
202 } | 202 } |
203 ASSERT(!m_firstUnsweptPage); | 203 DCHECK(!m_firstUnsweptPage); |
204 | 204 |
205 HeapCompact* heapCompactor = getThreadState()->heap().compaction(); | 205 HeapCompact* heapCompactor = getThreadState()->heap().compaction(); |
206 if (!heapCompactor->isCompactingArena(arenaIndex())) | 206 if (!heapCompactor->isCompactingArena(arenaIndex())) |
207 return; | 207 return; |
208 | 208 |
209 BasePage* nextPage = m_firstPage; | 209 BasePage* nextPage = m_firstPage; |
210 while (nextPage) { | 210 while (nextPage) { |
211 if (!nextPage->isLargeObjectPage()) | 211 if (!nextPage->isLargeObjectPage()) |
212 heapCompactor->addCompactingPage(nextPage); | 212 heapCompactor->addCompactingPage(nextPage); |
213 nextPage = nextPage->next(); | 213 nextPage = nextPage->next(); |
214 } | 214 } |
215 } | 215 } |
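Both makeConsistentForGC() and makeConsistentForMutator() finish with the same list splice: the loop remembers the tail of the unswept chain and then prepends the whole chain to the swept page list. A generic sketch of that splice (Node is an illustrative stand-in for BasePage):

// Intrusive singly linked list splice, as used above.
struct Node { Node* next = nullptr; };

void spliceUnsweptOntoFront(Node*& firstPage, Node*& firstUnsweptPage) {
  Node* previousPage = nullptr;
  for (Node* page = firstUnsweptPage; page;
       previousPage = page, page = page->next) {
    // Per-page work (clearing mark bits, invalidating bitmaps) happens here.
  }
  if (previousPage) {
    previousPage->next = firstPage;  // Tail of the unswept chain -> old head.
    firstPage = firstUnsweptPage;    // The unswept chain becomes the new front.
    firstUnsweptPage = nullptr;
  }
}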
216 | 216 |
217 void BaseArena::makeConsistentForMutator() { | 217 void BaseArena::makeConsistentForMutator() { |
218 clearFreeLists(); | 218 clearFreeLists(); |
219 ASSERT(isConsistentForGC()); | 219 DCHECK(isConsistentForGC()); |
220 ASSERT(!m_firstPage); | 220 DCHECK(!m_firstPage); |
221 | 221 |
222 // Drop marks from marked objects and rebuild free lists in preparation for | 222 // Drop marks from marked objects and rebuild free lists in preparation for |
223 // resuming the executions of mutators. | 223 // resuming the executions of mutators. |
224 BasePage* previousPage = nullptr; | 224 BasePage* previousPage = nullptr; |
225 for (BasePage *page = m_firstUnsweptPage; page; | 225 for (BasePage *page = m_firstUnsweptPage; page; |
226 previousPage = page, page = page->next()) { | 226 previousPage = page, page = page->next()) { |
227 page->makeConsistentForMutator(); | 227 page->makeConsistentForMutator(); |
228 page->markAsSwept(); | 228 page->markAsSwept(); |
229 page->invalidateObjectStartBitmap(); | 229 page->invalidateObjectStartBitmap(); |
230 } | 230 } |
231 if (previousPage) { | 231 if (previousPage) { |
232 ASSERT(m_firstUnsweptPage); | 232 DCHECK(m_firstUnsweptPage); |
233 previousPage->m_next = m_firstPage; | 233 previousPage->m_next = m_firstPage; |
234 m_firstPage = m_firstUnsweptPage; | 234 m_firstPage = m_firstUnsweptPage; |
235 m_firstUnsweptPage = nullptr; | 235 m_firstUnsweptPage = nullptr; |
236 } | 236 } |
237 ASSERT(!m_firstUnsweptPage); | 237 DCHECK(!m_firstUnsweptPage); |
238 } | 238 } |
239 | 239 |
240 size_t BaseArena::objectPayloadSizeForTesting() { | 240 size_t BaseArena::objectPayloadSizeForTesting() { |
241 ASSERT(isConsistentForGC()); | 241 DCHECK(isConsistentForGC()); |
242 ASSERT(!m_firstUnsweptPage); | 242 DCHECK(!m_firstUnsweptPage); |
243 | 243 |
244 size_t objectPayloadSize = 0; | 244 size_t objectPayloadSize = 0; |
245 for (BasePage* page = m_firstPage; page; page = page->next()) | 245 for (BasePage* page = m_firstPage; page; page = page->next()) |
246 objectPayloadSize += page->objectPayloadSizeForTesting(); | 246 objectPayloadSize += page->objectPayloadSizeForTesting(); |
247 return objectPayloadSize; | 247 return objectPayloadSize; |
248 } | 248 } |
249 | 249 |
250 void BaseArena::prepareHeapForTermination() { | 250 void BaseArena::prepareHeapForTermination() { |
251 ASSERT(!m_firstUnsweptPage); | 251 DCHECK(!m_firstUnsweptPage); |
252 for (BasePage* page = m_firstPage; page; page = page->next()) { | 252 for (BasePage* page = m_firstPage; page; page = page->next()) { |
253 page->setTerminating(); | 253 page->setTerminating(); |
254 } | 254 } |
255 } | 255 } |
256 | 256 |
257 void BaseArena::prepareForSweep() { | 257 void BaseArena::prepareForSweep() { |
258 ASSERT(getThreadState()->isInGC()); | 258 DCHECK(getThreadState()->isInGC()); |
259 ASSERT(!m_firstUnsweptPage); | 259 DCHECK(!m_firstUnsweptPage); |
260 | 260 |
261 // Move all pages to a list of unswept pages. | 261 // Move all pages to a list of unswept pages. |
262 m_firstUnsweptPage = m_firstPage; | 262 m_firstUnsweptPage = m_firstPage; |
263 m_firstPage = nullptr; | 263 m_firstPage = nullptr; |
264 } | 264 } |
265 | 265 |
266 #if defined(ADDRESS_SANITIZER) | 266 #if defined(ADDRESS_SANITIZER) |
267 void BaseArena::poisonArena() { | 267 void BaseArena::poisonArena() { |
268 for (BasePage* page = m_firstUnsweptPage; page; page = page->next()) | 268 for (BasePage* page = m_firstUnsweptPage; page; page = page->next()) |
269 page->poisonUnmarkedObjects(); | 269 page->poisonUnmarkedObjects(); |
270 } | 270 } |
271 #endif | 271 #endif |
272 | 272 |
273 Address BaseArena::lazySweep(size_t allocationSize, size_t gcInfoIndex) { | 273 Address BaseArena::lazySweep(size_t allocationSize, size_t gcInfoIndex) { |
274 // If there are no pages to be swept, return immediately. | 274 // If there are no pages to be swept, return immediately. |
275 if (!m_firstUnsweptPage) | 275 if (!m_firstUnsweptPage) |
276 return nullptr; | 276 return nullptr; |
277 | 277 |
278 RELEASE_ASSERT(getThreadState()->isSweepingInProgress()); | 278 CHECK(getThreadState()->isSweepingInProgress()); |
279 | 279 |
280 // lazySweepPages() can be called recursively if finalizers invoked in | 280 // lazySweepPages() can be called recursively if finalizers invoked in |
281 // page->sweep() allocate memory and the allocation triggers | 281 // page->sweep() allocate memory and the allocation triggers |
282 // lazySweepPages(). This check prevents the sweeping from being executed | 282 // lazySweepPages(). This check prevents the sweeping from being executed |
283 // recursively. | 283 // recursively. |
284 if (getThreadState()->sweepForbidden()) | 284 if (getThreadState()->sweepForbidden()) |
285 return nullptr; | 285 return nullptr; |
286 | 286 |
287 TRACE_EVENT0("blink_gc", "BaseArena::lazySweepPages"); | 287 TRACE_EVENT0("blink_gc", "BaseArena::lazySweepPages"); |
288 ThreadState::SweepForbiddenScope sweepForbidden(getThreadState()); | 288 ThreadState::SweepForbiddenScope sweepForbidden(getThreadState()); |
(...skipping 22 matching lines...)
311 } | 311 } |
312 } | 312 } |
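The recursion guard described in the comment above is a scoped flag: a finalizer that allocates while sweeping is in progress re-enters lazySweep(), sees the flag set, and bails out. A hedged sketch, with the bool and the guard type as stand-ins for ThreadState::sweepForbidden() and SweepForbiddenScope:

// Scoped re-entrancy guard, illustrative only.
class SweepForbiddenGuard {
 public:
  explicit SweepForbiddenGuard(bool& flag) : m_flag(flag) { m_flag = true; }
  ~SweepForbiddenGuard() { m_flag = false; }

 private:
  bool& m_flag;
};

void* lazySweepSketch(bool& sweepForbidden) {
  if (sweepForbidden)
    return nullptr;  // Nested call from a finalizer: refuse to sweep again.
  SweepForbiddenGuard guard(sweepForbidden);
  // ... sweep pages; finalizers running here may allocate and re-enter ...
  return nullptr;
}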
313 | 313 |
314 bool BaseArena::lazySweepWithDeadline(double deadlineSeconds) { | 314 bool BaseArena::lazySweepWithDeadline(double deadlineSeconds) { |
315 // It might be heavy to call | 315 // It might be heavy to call |
316 // Platform::current()->monotonicallyIncreasingTimeSeconds() per page (i.e., | 316 // Platform::current()->monotonicallyIncreasingTimeSeconds() per page (i.e., |
317 // 128 KB sweep or one LargeObject sweep), so we check the deadline per 10 | 317 // 128 KB sweep or one LargeObject sweep), so we check the deadline per 10 |
318 // pages. | 318 // pages. |
319 static const int deadlineCheckInterval = 10; | 319 static const int deadlineCheckInterval = 10; |
320 | 320 |
321 RELEASE_ASSERT(getThreadState()->isSweepingInProgress()); | 321 CHECK(getThreadState()->isSweepingInProgress()); |
322 ASSERT(getThreadState()->sweepForbidden()); | 322 DCHECK(getThreadState()->sweepForbidden()); |
323 ASSERT(!getThreadState()->isMainThread() || | 323 DCHECK(!getThreadState()->isMainThread() || |
324 ScriptForbiddenScope::isScriptForbidden()); | 324 ScriptForbiddenScope::isScriptForbidden()); |
325 | 325 |
326 NormalPageArena* normalArena = nullptr; | 326 NormalPageArena* normalArena = nullptr; |
327 if (m_firstUnsweptPage && !m_firstUnsweptPage->isLargeObjectPage()) { | 327 if (m_firstUnsweptPage && !m_firstUnsweptPage->isLargeObjectPage()) { |
328 // Mark this NormalPageArena as being lazily swept. | 328 // Mark this NormalPageArena as being lazily swept. |
329 NormalPage* normalPage = reinterpret_cast<NormalPage*>(m_firstUnsweptPage); | 329 NormalPage* normalPage = reinterpret_cast<NormalPage*>(m_firstUnsweptPage); |
330 normalArena = normalPage->arenaForNormalPage(); | 330 normalArena = normalPage->arenaForNormalPage(); |
331 normalArena->setIsLazySweeping(true); | 331 normalArena->setIsLazySweeping(true); |
332 } | 332 } |
333 int pageCount = 1; | 333 int pageCount = 1; |
(...skipping 10 matching lines...)
344 } | 344 } |
345 pageCount++; | 345 pageCount++; |
346 } | 346 } |
347 ThreadHeap::reportMemoryUsageForTracing(); | 347 ThreadHeap::reportMemoryUsageForTracing(); |
348 if (normalArena) | 348 if (normalArena) |
349 normalArena->setIsLazySweeping(false); | 349 normalArena->setIsLazySweeping(false); |
350 return true; | 350 return true; |
351 } | 351 } |
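The deadline handling above boils down to: do a fixed number of work items per clock check, since reading the clock for every page would be too costly. A hedged sketch of that pattern; nowSeconds() and sweepOnePage() are illustrative stand-ins for the platform clock and the per-page sweep step:

#include <functional>

bool sweepWithDeadline(double deadlineSeconds,
                       const std::function<double()>& nowSeconds,
                       const std::function<bool()>& sweepOnePage) {
  static const int deadlineCheckInterval = 10;
  int pageCount = 1;
  while (sweepOnePage()) {  // Returns false once no unswept pages remain.
    if (pageCount % deadlineCheckInterval == 0 &&
        deadlineSeconds <= nowSeconds()) {
      return false;  // Budget exhausted; the caller reschedules the rest.
    }
    pageCount++;
  }
  return true;  // Everything was swept within the deadline.
}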
352 | 352 |
353 void BaseArena::completeSweep() { | 353 void BaseArena::completeSweep() { |
354 RELEASE_ASSERT(getThreadState()->isSweepingInProgress()); | 354 CHECK(getThreadState()->isSweepingInProgress()); |
355 ASSERT(getThreadState()->sweepForbidden()); | 355 DCHECK(getThreadState()->sweepForbidden()); |
356 ASSERT(!getThreadState()->isMainThread() || | 356 DCHECK(!getThreadState()->isMainThread() || |
357 ScriptForbiddenScope::isScriptForbidden()); | 357 ScriptForbiddenScope::isScriptForbidden()); |
358 | 358 |
359 while (m_firstUnsweptPage) { | 359 while (m_firstUnsweptPage) { |
360 sweepUnsweptPage(); | 360 sweepUnsweptPage(); |
361 } | 361 } |
362 ThreadHeap::reportMemoryUsageForTracing(); | 362 ThreadHeap::reportMemoryUsageForTracing(); |
363 } | 363 } |
364 | 364 |
365 Address BaseArena::allocateLargeObject(size_t allocationSize, | 365 Address BaseArena::allocateLargeObject(size_t allocationSize, |
366 size_t gcInfoIndex) { | 366 size_t gcInfoIndex) { |
(...skipping 53 matching lines...)
420 // => |objectPointer| will not be lazily swept. | 420 // => |objectPointer| will not be lazily swept. |
421 // | 421 // |
422 // Notice that |objectPointer| might be pointer to a GarbageCollectedMixin, | 422 // Notice that |objectPointer| might be pointer to a GarbageCollectedMixin, |
423 // hence using fromPayload() to derive the HeapObjectHeader isn't possible | 423 // hence using fromPayload() to derive the HeapObjectHeader isn't possible |
424 // (and use its value to check if |headerAddress| is equal to it.) | 424 // (and use its value to check if |headerAddress| is equal to it.) |
425 if (headerAddress > objectPointer) | 425 if (headerAddress > objectPointer) |
426 return false; | 426 return false; |
427 if (!header->isFree() && header->isMarked()) { | 427 if (!header->isFree() && header->isMarked()) { |
428 // There must be a marked object on this page and the one located must | 428 // There must be a marked object on this page and the one located must |
429 // have room after it for the unmarked |objectPointer| object. | 429 // have room after it for the unmarked |objectPointer| object. |
430 DCHECK(headerAddress + size < pageEnd); | 430 DCHECK_LT(headerAddress + size, pageEnd); |
431 return true; | 431 return true; |
432 } | 432 } |
433 headerAddress += size; | 433 headerAddress += size; |
434 } | 434 } |
435 NOTREACHED(); | 435 NOTREACHED(); |
436 return true; | 436 return true; |
437 } | 437 } |
438 | 438 |
439 NormalPageArena::NormalPageArena(ThreadState* state, int index) | 439 NormalPageArena::NormalPageArena(ThreadState* state, int index) |
440 : BaseArena(state, index), | 440 : BaseArena(state, index), |
(...skipping 114 matching lines...)
555 size_t pageSize = availablePages->size(); | 555 size_t pageSize = availablePages->size(); |
556 #if DEBUG_HEAP_COMPACTION | 556 #if DEBUG_HEAP_COMPACTION |
557 if (!freedPageCount) | 557 if (!freedPageCount) |
558 LOG_HEAP_COMPACTION("Releasing:"); | 558 LOG_HEAP_COMPACTION("Releasing:"); |
559 LOG_HEAP_COMPACTION(" [%p, %p]", availablePages, availablePages + pageSize); | 559 LOG_HEAP_COMPACTION(" [%p, %p]", availablePages, availablePages + pageSize); |
560 #endif | 560 #endif |
561 freedSize += pageSize; | 561 freedSize += pageSize; |
562 freedPageCount++; | 562 freedPageCount++; |
563 BasePage* nextPage; | 563 BasePage* nextPage; |
564 availablePages->unlink(&nextPage); | 564 availablePages->unlink(&nextPage); |
565 #if !(ENABLE(ASSERT) || defined(LEAK_SANITIZER) || \ | 565 #if !(DCHECK_IS_ON() || defined(LEAK_SANITIZER) || \ |
566 defined(ADDRESS_SANITIZER) || defined(MEMORY_SANITIZER)) | 566 defined(ADDRESS_SANITIZER) || defined(MEMORY_SANITIZER)) |
567 // Clear out the page before adding it to the free page pool, which | 567 // Clear out the page before adding it to the free page pool, which |
568 // decommits it. Recommitting the page must find a zeroed page later. | 568 // decommits it. Recommitting the page must find a zeroed page later. |
569 // We cannot assume that the OS will hand back a zeroed page across | 569 // We cannot assume that the OS will hand back a zeroed page across |
570 // its "decommit" operation. | 570 // its "decommit" operation. |
571 // | 571 // |
572 // If in a debug setting, the unused page contents will have been | 572 // If in a debug setting, the unused page contents will have been |
573 // zapped already; leave it in that state. | 573 // zapped already; leave it in that state. |
574 DCHECK(!availablePages->isLargeObjectPage()); | 574 DCHECK(!availablePages->isLargeObjectPage()); |
575 NormalPage* unusedPage = reinterpret_cast<NormalPage*>(availablePages); | 575 NormalPage* unusedPage = reinterpret_cast<NormalPage*>(availablePages); |
576 memset(unusedPage->payload(), 0, unusedPage->payloadSize()); | 576 memset(unusedPage->payload(), 0, unusedPage->payloadSize()); |
577 #endif | 577 #endif |
578 availablePages->removeFromHeap(); | 578 availablePages->removeFromHeap(); |
579 availablePages = static_cast<NormalPage*>(nextPage); | 579 availablePages = static_cast<NormalPage*>(nextPage); |
580 } | 580 } |
581 if (freedPageCount) | 581 if (freedPageCount) |
582 LOG_HEAP_COMPACTION("\n"); | 582 LOG_HEAP_COMPACTION("\n"); |
583 heap.compaction()->finishedArenaCompaction(this, freedPageCount, freedSize); | 583 heap.compaction()->finishedArenaCompaction(this, freedPageCount, freedSize); |
584 } | 584 } |
585 | 585 |
586 #if ENABLE(ASSERT) | 586 #if DCHECK_IS_ON() |
587 bool NormalPageArena::isConsistentForGC() { | 587 bool NormalPageArena::isConsistentForGC() { |
588 // A thread heap is consistent for sweeping if none of the pages to be swept | 588 // A thread heap is consistent for sweeping if none of the pages to be swept |
589 // contain a freelist block or the current allocation point. | 589 // contain a freelist block or the current allocation point. |
590 for (size_t i = 0; i < blinkPageSizeLog2; ++i) { | 590 for (size_t i = 0; i < blinkPageSizeLog2; ++i) { |
591 for (FreeListEntry* freeListEntry = m_freeList.m_freeLists[i]; | 591 for (FreeListEntry* freeListEntry = m_freeList.m_freeLists[i]; |
592 freeListEntry; freeListEntry = freeListEntry->next()) { | 592 freeListEntry; freeListEntry = freeListEntry->next()) { |
593 if (pagesToBeSweptContains(freeListEntry->getAddress())) | 593 if (pagesToBeSweptContains(freeListEntry->getAddress())) |
594 return false; | 594 return false; |
595 } | 595 } |
596 } | 596 } |
(...skipping 42 matching lines...)
639 getThreadState()->heap().getRegionTree()); | 639 getThreadState()->heap().getRegionTree()); |
640 | 640 |
641 // Setup the PageMemory object for each of the pages in the region. | 641 // Setup the PageMemory object for each of the pages in the region. |
642 for (size_t i = 0; i < blinkPagesPerRegion; ++i) { | 642 for (size_t i = 0; i < blinkPagesPerRegion; ++i) { |
643 PageMemory* memory = PageMemory::setupPageMemoryInRegion( | 643 PageMemory* memory = PageMemory::setupPageMemoryInRegion( |
644 region, i * blinkPageSize, blinkPagePayloadSize()); | 644 region, i * blinkPageSize, blinkPagePayloadSize()); |
645 // Take the first possible page ensuring that this thread actually | 645 // Take the first possible page ensuring that this thread actually |
646 // gets a page and add the rest to the page pool. | 646 // gets a page and add the rest to the page pool. |
647 if (!pageMemory) { | 647 if (!pageMemory) { |
648 bool result = memory->commit(); | 648 bool result = memory->commit(); |
649 // If you hit the ASSERT, it will mean that you're hitting | 649 // If you hit the CHECK, it will mean that you're hitting |
650 // the limit of the number of mmapped regions OS can support | 650 // the limit of the number of mmapped regions OS can support |
651 // (e.g., /proc/sys/vm/max_map_count in Linux). | 651 // (e.g., /proc/sys/vm/max_map_count in Linux). |
652 RELEASE_ASSERT(result); | 652 CHECK(result); |
653 pageMemory = memory; | 653 pageMemory = memory; |
654 } else { | 654 } else { |
655 getThreadState()->heap().getFreePagePool()->addFreePage(arenaIndex(), | 655 getThreadState()->heap().getFreePagePool()->addFreePage(arenaIndex(), |
656 memory); | 656 memory); |
657 } | 657 } |
658 } | 658 } |
659 } | 659 } |
660 NormalPage* page = | 660 NormalPage* page = |
661 new (pageMemory->writableStart()) NormalPage(pageMemory, this); | 661 new (pageMemory->writableStart()) NormalPage(pageMemory, this); |
662 page->link(&m_firstPage); | 662 page->link(&m_firstPage); |
663 | 663 |
664 getThreadState()->heap().heapStats().increaseAllocatedSpace(page->size()); | 664 getThreadState()->heap().heapStats().increaseAllocatedSpace(page->size()); |
665 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) | 665 #if DCHECK_IS_ON() || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) |
666 // Allow the following addToFreeList() to add the newly allocated memory | 666 // Allow the following addToFreeList() to add the newly allocated memory |
667 // to the free list. | 667 // to the free list. |
668 ASAN_UNPOISON_MEMORY_REGION(page->payload(), page->payloadSize()); | 668 ASAN_UNPOISON_MEMORY_REGION(page->payload(), page->payloadSize()); |
669 Address address = page->payload(); | 669 Address address = page->payload(); |
670 for (size_t i = 0; i < page->payloadSize(); i++) | 670 for (size_t i = 0; i < page->payloadSize(); i++) |
671 address[i] = reuseAllowedZapValue; | 671 address[i] = reuseAllowedZapValue; |
672 ASAN_POISON_MEMORY_REGION(page->payload(), page->payloadSize()); | 672 ASAN_POISON_MEMORY_REGION(page->payload(), page->payloadSize()); |
673 #endif | 673 #endif |
674 addToFreeList(page->payload(), page->payloadSize()); | 674 addToFreeList(page->payload(), page->payloadSize()); |
675 } | 675 } |
(...skipping 26 matching lines...)
702 // | 702 // |
703 // FIXME: This threshold is determined just to optimize blink_perf | 703 // FIXME: This threshold is determined just to optimize blink_perf |
704 // benchmarks. Coalescing is very sensitive to the threshold and | 704 // benchmarks. Coalescing is very sensitive to the threshold and |
705 // we need further investigations on the coalescing scheme. | 705 // we need further investigations on the coalescing scheme. |
706 if (m_promptlyFreedSize < 1024 * 1024) | 706 if (m_promptlyFreedSize < 1024 * 1024) |
707 return false; | 707 return false; |
708 | 708 |
709 if (getThreadState()->sweepForbidden()) | 709 if (getThreadState()->sweepForbidden()) |
710 return false; | 710 return false; |
711 | 711 |
712 ASSERT(!hasCurrentAllocationArea()); | 712 DCHECK(!hasCurrentAllocationArea()); |
713 TRACE_EVENT0("blink_gc", "BaseArena::coalesce"); | 713 TRACE_EVENT0("blink_gc", "BaseArena::coalesce"); |
714 | 714 |
715 // Rebuild free lists. | 715 // Rebuild free lists. |
716 m_freeList.clear(); | 716 m_freeList.clear(); |
717 size_t freedSize = 0; | 717 size_t freedSize = 0; |
718 for (NormalPage* page = static_cast<NormalPage*>(m_firstPage); page; | 718 for (NormalPage* page = static_cast<NormalPage*>(m_firstPage); page; |
719 page = static_cast<NormalPage*>(page->next())) { | 719 page = static_cast<NormalPage*>(page->next())) { |
720 Address startOfGap = page->payload(); | 720 Address startOfGap = page->payload(); |
721 for (Address headerAddress = startOfGap; | 721 for (Address headerAddress = startOfGap; |
722 headerAddress < page->payloadEnd();) { | 722 headerAddress < page->payloadEnd();) { |
723 HeapObjectHeader* header = | 723 HeapObjectHeader* header = |
724 reinterpret_cast<HeapObjectHeader*>(headerAddress); | 724 reinterpret_cast<HeapObjectHeader*>(headerAddress); |
725 size_t size = header->size(); | 725 size_t size = header->size(); |
726 ASSERT(size > 0); | 726 DCHECK_GT(size, 0UL); |
727 ASSERT(size < blinkPagePayloadSize()); | 727 DCHECK_LT(size, blinkPagePayloadSize()); |
728 | 728 |
729 if (header->isPromptlyFreed()) { | 729 if (header->isPromptlyFreed()) { |
730 ASSERT(size >= sizeof(HeapObjectHeader)); | 730 DCHECK_GE(size, sizeof(HeapObjectHeader)); |
731 // Zero the memory in the free list header to maintain the | 731 // Zero the memory in the free list header to maintain the |
732 // invariant that memory on the free list is zero filled. | 732 // invariant that memory on the free list is zero filled. |
733 // The rest of the memory is already on the free list and is | 733 // The rest of the memory is already on the free list and is |
734 // therefore already zero filled. | 734 // therefore already zero filled. |
735 SET_MEMORY_INACCESSIBLE(headerAddress, sizeof(HeapObjectHeader)); | 735 SET_MEMORY_INACCESSIBLE(headerAddress, sizeof(HeapObjectHeader)); |
736 CHECK_MEMORY_INACCESSIBLE(headerAddress, size); | 736 CHECK_MEMORY_INACCESSIBLE(headerAddress, size); |
737 freedSize += size; | 737 freedSize += size; |
738 headerAddress += size; | 738 headerAddress += size; |
739 continue; | 739 continue; |
740 } | 740 } |
741 if (header->isFree()) { | 741 if (header->isFree()) { |
742 // Zero the memory in the free list header to maintain the | 742 // Zero the memory in the free list header to maintain the |
743 // invariant that memory on the free list is zero filled. | 743 // invariant that memory on the free list is zero filled. |
744 // The rest of the memory is already on the free list and is | 744 // The rest of the memory is already on the free list and is |
745 // therefore already zero filled. | 745 // therefore already zero filled. |
746 SET_MEMORY_INACCESSIBLE(headerAddress, size < sizeof(FreeListEntry) | 746 SET_MEMORY_INACCESSIBLE(headerAddress, size < sizeof(FreeListEntry) |
747 ? size | 747 ? size |
748 : sizeof(FreeListEntry)); | 748 : sizeof(FreeListEntry)); |
749 CHECK_MEMORY_INACCESSIBLE(headerAddress, size); | 749 CHECK_MEMORY_INACCESSIBLE(headerAddress, size); |
750 headerAddress += size; | 750 headerAddress += size; |
751 continue; | 751 continue; |
752 } | 752 } |
753 ASSERT(header->checkHeader()); | 753 DCHECK(header->checkHeader()); |
754 if (startOfGap != headerAddress) | 754 if (startOfGap != headerAddress) |
755 addToFreeList(startOfGap, headerAddress - startOfGap); | 755 addToFreeList(startOfGap, headerAddress - startOfGap); |
756 | 756 |
757 headerAddress += size; | 757 headerAddress += size; |
758 startOfGap = headerAddress; | 758 startOfGap = headerAddress; |
759 } | 759 } |
760 | 760 |
761 if (startOfGap != page->payloadEnd()) | 761 if (startOfGap != page->payloadEnd()) |
762 addToFreeList(startOfGap, page->payloadEnd() - startOfGap); | 762 addToFreeList(startOfGap, page->payloadEnd() - startOfGap); |
763 } | 763 } |
764 getThreadState()->decreaseAllocatedObjectSize(freedSize); | 764 getThreadState()->decreaseAllocatedObjectSize(freedSize); |
765 ASSERT(m_promptlyFreedSize == freedSize); | 765 DCHECK_EQ(m_promptlyFreedSize, freedSize); |
766 m_promptlyFreedSize = 0; | 766 m_promptlyFreedSize = 0; |
767 return true; | 767 return true; |
768 } | 768 } |
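The walk in coalesce() is a back-to-back header scan: dead blocks (promptly freed or already free) extend the current gap, and a live object flushes the gap to the free list. A distilled sketch with illustrative types (HeaderLike stands in for HeapObjectHeader; the free-list callback is passed in):

#include <cstddef>
#include <cstdint>

struct HeaderLike {
  size_t size;    // Total block size, header included.
  bool reusable;  // Promptly freed, or already a free-list entry.
};

template <typename AddToFreeList>
void coalescePage(uint8_t* payload, uint8_t* payloadEnd,
                  AddToFreeList addToFreeList) {
  uint8_t* startOfGap = payload;
  for (uint8_t* headerAddress = payload; headerAddress < payloadEnd;) {
    HeaderLike* header = reinterpret_cast<HeaderLike*>(headerAddress);
    if (header->reusable) {  // Dead block: the current gap keeps growing.
      headerAddress += header->size;
      continue;
    }
    if (startOfGap != headerAddress)  // Live object: flush the gap before it.
      addToFreeList(startOfGap, static_cast<size_t>(headerAddress - startOfGap));
    headerAddress += header->size;
    startOfGap = headerAddress;
  }
  if (startOfGap != payloadEnd)
    addToFreeList(startOfGap, static_cast<size_t>(payloadEnd - startOfGap));
}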
769 | 769 |
770 void NormalPageArena::promptlyFreeObject(HeapObjectHeader* header) { | 770 void NormalPageArena::promptlyFreeObject(HeapObjectHeader* header) { |
771 ASSERT(!getThreadState()->sweepForbidden()); | 771 DCHECK(!getThreadState()->sweepForbidden()); |
772 ASSERT(header->checkHeader()); | 772 DCHECK(header->checkHeader()); |
773 Address address = reinterpret_cast<Address>(header); | 773 Address address = reinterpret_cast<Address>(header); |
774 Address payload = header->payload(); | 774 Address payload = header->payload(); |
775 size_t size = header->size(); | 775 size_t size = header->size(); |
776 size_t payloadSize = header->payloadSize(); | 776 size_t payloadSize = header->payloadSize(); |
777 ASSERT(size > 0); | 777 DCHECK_GT(size, 0UL); |
778 ASSERT(pageFromObject(address) == findPageFromAddress(address)); | 778 DCHECK_EQ(pageFromObject(address), findPageFromAddress(address)); |
779 | 779 |
780 { | 780 { |
781 ThreadState::SweepForbiddenScope forbiddenScope(getThreadState()); | 781 ThreadState::SweepForbiddenScope forbiddenScope(getThreadState()); |
782 header->finalize(payload, payloadSize); | 782 header->finalize(payload, payloadSize); |
783 if (address + size == m_currentAllocationPoint) { | 783 if (address + size == m_currentAllocationPoint) { |
784 m_currentAllocationPoint = address; | 784 m_currentAllocationPoint = address; |
785 setRemainingAllocationSize(m_remainingAllocationSize + size); | 785 setRemainingAllocationSize(m_remainingAllocationSize + size); |
786 SET_MEMORY_INACCESSIBLE(address, size); | 786 SET_MEMORY_INACCESSIBLE(address, size); |
787 return; | 787 return; |
788 } | 788 } |
789 SET_MEMORY_INACCESSIBLE(payload, payloadSize); | 789 SET_MEMORY_INACCESSIBLE(payload, payloadSize); |
790 header->markPromptlyFreed(); | 790 header->markPromptlyFreed(); |
791 } | 791 } |
792 | 792 |
793 m_promptlyFreedSize += size; | 793 m_promptlyFreedSize += size; |
794 } | 794 } |
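promptlyFreeObject() has a fast case worth noting: if the freed object ends exactly at the current allocation point, the bump pointer is simply rewound and the bytes become reusable immediately, bypassing m_promptlyFreedSize and the later coalescing pass. A minimal sketch of that rewind (names are illustrative):

#include <cstddef>
#include <cstdint>

struct BumpState {
  uint8_t* currentAllocationPoint;
  size_t remainingAllocationSize;
};

bool tryRewindOnFree(BumpState& area, uint8_t* objectAddress, size_t objectSize) {
  if (objectAddress + objectSize != area.currentAllocationPoint)
    return false;  // Not the most recent allocation: defer to coalescing.
  area.currentAllocationPoint = objectAddress;
  area.remainingAllocationSize += objectSize;
  return true;
}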
795 | 795 |
796 bool NormalPageArena::expandObject(HeapObjectHeader* header, size_t newSize) { | 796 bool NormalPageArena::expandObject(HeapObjectHeader* header, size_t newSize) { |
797 // It's possible that Vector requests a smaller expanded size because | 797 // It's possible that Vector requests a smaller expanded size because |
798 // Vector::shrinkCapacity can set a capacity smaller than the actual payload | 798 // Vector::shrinkCapacity can set a capacity smaller than the actual payload |
799 // size. | 799 // size. |
800 ASSERT(header->checkHeader()); | 800 DCHECK(header->checkHeader()); |
801 if (header->payloadSize() >= newSize) | 801 if (header->payloadSize() >= newSize) |
802 return true; | 802 return true; |
803 size_t allocationSize = ThreadHeap::allocationSizeFromSize(newSize); | 803 size_t allocationSize = ThreadHeap::allocationSizeFromSize(newSize); |
804 ASSERT(allocationSize > header->size()); | 804 DCHECK_GT(allocationSize, header->size()); |
805 size_t expandSize = allocationSize - header->size(); | 805 size_t expandSize = allocationSize - header->size(); |
806 if (isObjectAllocatedAtAllocationPoint(header) && | 806 if (isObjectAllocatedAtAllocationPoint(header) && |
807 expandSize <= m_remainingAllocationSize) { | 807 expandSize <= m_remainingAllocationSize) { |
808 m_currentAllocationPoint += expandSize; | 808 m_currentAllocationPoint += expandSize; |
809 ASSERT(m_remainingAllocationSize >= expandSize); | 809 DCHECK_GE(m_remainingAllocationSize, expandSize); |
810 setRemainingAllocationSize(m_remainingAllocationSize - expandSize); | 810 setRemainingAllocationSize(m_remainingAllocationSize - expandSize); |
811 // Unpoison the memory used for the object (payload). | 811 // Unpoison the memory used for the object (payload). |
812 SET_MEMORY_ACCESSIBLE(header->payloadEnd(), expandSize); | 812 SET_MEMORY_ACCESSIBLE(header->payloadEnd(), expandSize); |
813 header->setSize(allocationSize); | 813 header->setSize(allocationSize); |
814 ASSERT(findPageFromAddress(header->payloadEnd() - 1)); | 814 DCHECK(findPageFromAddress(header->payloadEnd() - 1)); |
815 return true; | 815 return true; |
816 } | 816 } |
817 return false; | 817 return false; |
818 } | 818 } |
819 | 819 |
820 bool NormalPageArena::shrinkObject(HeapObjectHeader* header, size_t newSize) { | 820 bool NormalPageArena::shrinkObject(HeapObjectHeader* header, size_t newSize) { |
821 ASSERT(header->checkHeader()); | 821 DCHECK(header->checkHeader()); |
822 ASSERT(header->payloadSize() > newSize); | 822 DCHECK_GT(header->payloadSize(), newSize); |
823 size_t allocationSize = ThreadHeap::allocationSizeFromSize(newSize); | 823 size_t allocationSize = ThreadHeap::allocationSizeFromSize(newSize); |
824 ASSERT(header->size() > allocationSize); | 824 DCHECK_GT(header->size(), allocationSize); |
825 size_t shrinkSize = header->size() - allocationSize; | 825 size_t shrinkSize = header->size() - allocationSize; |
826 if (isObjectAllocatedAtAllocationPoint(header)) { | 826 if (isObjectAllocatedAtAllocationPoint(header)) { |
827 m_currentAllocationPoint -= shrinkSize; | 827 m_currentAllocationPoint -= shrinkSize; |
828 setRemainingAllocationSize(m_remainingAllocationSize + shrinkSize); | 828 setRemainingAllocationSize(m_remainingAllocationSize + shrinkSize); |
829 SET_MEMORY_INACCESSIBLE(m_currentAllocationPoint, shrinkSize); | 829 SET_MEMORY_INACCESSIBLE(m_currentAllocationPoint, shrinkSize); |
830 header->setSize(allocationSize); | 830 header->setSize(allocationSize); |
831 return true; | 831 return true; |
832 } | 832 } |
833 ASSERT(shrinkSize >= sizeof(HeapObjectHeader)); | 833 DCHECK_GE(shrinkSize, sizeof(HeapObjectHeader)); |
834 ASSERT(header->gcInfoIndex() > 0); | 834 DCHECK_GT(header->gcInfoIndex(), 0UL); |
835 Address shrinkAddress = header->payloadEnd() - shrinkSize; | 835 Address shrinkAddress = header->payloadEnd() - shrinkSize; |
836 HeapObjectHeader* freedHeader = new (NotNull, shrinkAddress) | 836 HeapObjectHeader* freedHeader = new (NotNull, shrinkAddress) |
837 HeapObjectHeader(shrinkSize, header->gcInfoIndex()); | 837 HeapObjectHeader(shrinkSize, header->gcInfoIndex()); |
838 freedHeader->markPromptlyFreed(); | 838 freedHeader->markPromptlyFreed(); |
839 ASSERT(pageFromObject(reinterpret_cast<Address>(header)) == | 839 DCHECK_EQ(pageFromObject(reinterpret_cast<Address>(header)), |
840 findPageFromAddress(reinterpret_cast<Address>(header))); | 840 findPageFromAddress(reinterpret_cast<Address>(header))); |
841 m_promptlyFreedSize += shrinkSize; | 841 m_promptlyFreedSize += shrinkSize; |
842 header->setSize(allocationSize); | 842 header->setSize(allocationSize); |
843 SET_MEMORY_INACCESSIBLE(shrinkAddress + sizeof(HeapObjectHeader), | 843 SET_MEMORY_INACCESSIBLE(shrinkAddress + sizeof(HeapObjectHeader), |
844 shrinkSize - sizeof(HeapObjectHeader)); | 844 shrinkSize - sizeof(HeapObjectHeader)); |
845 return false; | 845 return false; |
846 } | 846 } |
847 | 847 |
848 Address NormalPageArena::lazySweepPages(size_t allocationSize, | 848 Address NormalPageArena::lazySweepPages(size_t allocationSize, |
849 size_t gcInfoIndex) { | 849 size_t gcInfoIndex) { |
850 ASSERT(!hasCurrentAllocationArea()); | 850 DCHECK(!hasCurrentAllocationArea()); |
851 AutoReset<bool> isLazySweeping(&m_isLazySweeping, true); | 851 AutoReset<bool> isLazySweeping(&m_isLazySweeping, true); |
852 Address result = nullptr; | 852 Address result = nullptr; |
853 while (m_firstUnsweptPage) { | 853 while (m_firstUnsweptPage) { |
854 BasePage* page = m_firstUnsweptPage; | 854 BasePage* page = m_firstUnsweptPage; |
855 if (page->isEmpty()) { | 855 if (page->isEmpty()) { |
856 page->unlink(&m_firstUnsweptPage); | 856 page->unlink(&m_firstUnsweptPage); |
857 page->removeFromHeap(); | 857 page->removeFromHeap(); |
858 } else { | 858 } else { |
859 // Sweep a page and move the page from m_firstUnsweptPages to | 859 // Sweep a page and move the page from m_firstUnsweptPages to |
860 // m_firstPages. | 860 // m_firstPages. |
(...skipping 28 matching lines...)
889 m_remainingAllocationSize - m_lastRemainingAllocationSize); | 889 m_remainingAllocationSize - m_lastRemainingAllocationSize); |
890 m_lastRemainingAllocationSize = m_remainingAllocationSize; | 890 m_lastRemainingAllocationSize = m_remainingAllocationSize; |
891 } | 891 } |
892 | 892 |
893 void NormalPageArena::updateRemainingAllocationSize() { | 893 void NormalPageArena::updateRemainingAllocationSize() { |
894 if (m_lastRemainingAllocationSize > remainingAllocationSize()) { | 894 if (m_lastRemainingAllocationSize > remainingAllocationSize()) { |
895 getThreadState()->increaseAllocatedObjectSize( | 895 getThreadState()->increaseAllocatedObjectSize( |
896 m_lastRemainingAllocationSize - remainingAllocationSize()); | 896 m_lastRemainingAllocationSize - remainingAllocationSize()); |
897 m_lastRemainingAllocationSize = remainingAllocationSize(); | 897 m_lastRemainingAllocationSize = remainingAllocationSize(); |
898 } | 898 } |
899 ASSERT(m_lastRemainingAllocationSize == remainingAllocationSize()); | 899 DCHECK_EQ(m_lastRemainingAllocationSize, remainingAllocationSize()); |
900 } | 900 } |
901 | 901 |
902 void NormalPageArena::setAllocationPoint(Address point, size_t size) { | 902 void NormalPageArena::setAllocationPoint(Address point, size_t size) { |
903 #if ENABLE(ASSERT) | 903 #if DCHECK_IS_ON() |
904 if (point) { | 904 if (point) { |
905 ASSERT(size); | 905 DCHECK(size); |
906 BasePage* page = pageFromObject(point); | 906 BasePage* page = pageFromObject(point); |
907 ASSERT(!page->isLargeObjectPage()); | 907 DCHECK(!page->isLargeObjectPage()); |
908 ASSERT(size <= static_cast<NormalPage*>(page)->payloadSize()); | 908 DCHECK_LE(size, static_cast<NormalPage*>(page)->payloadSize()); |
909 } | 909 } |
910 #endif | 910 #endif |
911 if (hasCurrentAllocationArea()) { | 911 if (hasCurrentAllocationArea()) { |
912 addToFreeList(currentAllocationPoint(), remainingAllocationSize()); | 912 addToFreeList(currentAllocationPoint(), remainingAllocationSize()); |
913 } | 913 } |
914 updateRemainingAllocationSize(); | 914 updateRemainingAllocationSize(); |
915 m_currentAllocationPoint = point; | 915 m_currentAllocationPoint = point; |
916 m_lastRemainingAllocationSize = m_remainingAllocationSize = size; | 916 m_lastRemainingAllocationSize = m_remainingAllocationSize = size; |
917 } | 917 } |
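setAllocationPoint() installs the linear area that the allocation fast path bumps through; once it is exhausted, outOfLineAllocate() below takes over. A minimal sketch of that bump fast path (the struct and method names are illustrative, not Blink's API):

#include <cstddef>
#include <cstdint>

struct BumpArea {
  uint8_t* currentAllocationPoint = nullptr;
  size_t remainingAllocationSize = 0;

  // Carve the allocation off the front of the current linear area, or return
  // null so that the caller can fall back to the out-of-line slow path.
  uint8_t* tryAllocate(size_t allocationSize) {
    if (allocationSize > remainingAllocationSize)
      return nullptr;
    uint8_t* result = currentAllocationPoint;
    currentAllocationPoint += allocationSize;
    remainingAllocationSize -= allocationSize;
    return result;
  }
};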
918 | 918 |
919 Address NormalPageArena::outOfLineAllocate(size_t allocationSize, | 919 Address NormalPageArena::outOfLineAllocate(size_t allocationSize, |
920 size_t gcInfoIndex) { | 920 size_t gcInfoIndex) { |
921 ASSERT(allocationSize > remainingAllocationSize()); | 921 DCHECK_GT(allocationSize, remainingAllocationSize()); |
922 ASSERT(allocationSize >= allocationGranularity); | 922 DCHECK_GE(allocationSize, allocationGranularity); |
923 | 923 |
924 // 1. If this allocation is big enough, allocate a large object. | 924 // 1. If this allocation is big enough, allocate a large object. |
925 if (allocationSize >= largeObjectSizeThreshold) | 925 if (allocationSize >= largeObjectSizeThreshold) |
926 return allocateLargeObject(allocationSize, gcInfoIndex); | 926 return allocateLargeObject(allocationSize, gcInfoIndex); |
927 | 927 |
928 // 2. Try to allocate from a free list. | 928 // 2. Try to allocate from a free list. |
929 updateRemainingAllocationSize(); | 929 updateRemainingAllocationSize(); |
930 Address result = allocateFromFreeList(allocationSize, gcInfoIndex); | 930 Address result = allocateFromFreeList(allocationSize, gcInfoIndex); |
931 if (result) | 931 if (result) |
932 return result; | 932 return result; |
(...skipping 19 matching lines...)
952 getThreadState()->completeSweep(); | 952 getThreadState()->completeSweep(); |
953 | 953 |
954 // 7. Check if we should trigger a GC. | 954 // 7. Check if we should trigger a GC. |
955 getThreadState()->scheduleGCIfNeeded(); | 955 getThreadState()->scheduleGCIfNeeded(); |
956 | 956 |
957 // 8. Add a new page to this heap. | 957 // 8. Add a new page to this heap. |
958 allocatePage(); | 958 allocatePage(); |
959 | 959 |
960 // 9. Try to allocate from a free list. This allocation must succeed. | 960 // 9. Try to allocate from a free list. This allocation must succeed. |
961 result = allocateFromFreeList(allocationSize, gcInfoIndex); | 961 result = allocateFromFreeList(allocationSize, gcInfoIndex); |
962 RELEASE_ASSERT(result); | 962 CHECK(result); |
963 return result; | 963 return result; |
964 } | 964 } |
965 | 965 |
966 Address NormalPageArena::allocateFromFreeList(size_t allocationSize, | 966 Address NormalPageArena::allocateFromFreeList(size_t allocationSize, |
967 size_t gcInfoIndex) { | 967 size_t gcInfoIndex) { |
968 // Try reusing a block from the largest bin. The underlying reasoning | 968 // Try reusing a block from the largest bin. The underlying reasoning |
969 // being that we want to amortize this slow allocation call by carving | 969 // being that we want to amortize this slow allocation call by carving |
970 // off as large a free block as possible in one go; a block that will | 970 // off as large a free block as possible in one go; a block that will |
971 // service this block and let following allocations be serviced quickly | 971 // service this block and let following allocations be serviced quickly |
972 // by bump allocation. | 972 // by bump allocation. |
973 size_t bucketSize = static_cast<size_t>(1) | 973 size_t bucketSize = static_cast<size_t>(1) |
974 << m_freeList.m_biggestFreeListIndex; | 974 << m_freeList.m_biggestFreeListIndex; |
975 int index = m_freeList.m_biggestFreeListIndex; | 975 int index = m_freeList.m_biggestFreeListIndex; |
976 for (; index > 0; --index, bucketSize >>= 1) { | 976 for (; index > 0; --index, bucketSize >>= 1) { |
977 FreeListEntry* entry = m_freeList.m_freeLists[index]; | 977 FreeListEntry* entry = m_freeList.m_freeLists[index]; |
978 if (allocationSize > bucketSize) { | 978 if (allocationSize > bucketSize) { |
979 // Final bucket candidate; check initial entry if it is able | 979 // Final bucket candidate; check initial entry if it is able |
980 // to service this allocation. Do not perform a linear scan, | 980 // to service this allocation. Do not perform a linear scan, |
981 // as it is considered too costly. | 981 // as it is considered too costly. |
982 if (!entry || entry->size() < allocationSize) | 982 if (!entry || entry->size() < allocationSize) |
983 break; | 983 break; |
984 } | 984 } |
985 if (entry) { | 985 if (entry) { |
986 entry->unlink(&m_freeList.m_freeLists[index]); | 986 entry->unlink(&m_freeList.m_freeLists[index]); |
987 setAllocationPoint(entry->getAddress(), entry->size()); | 987 setAllocationPoint(entry->getAddress(), entry->size()); |
988 ASSERT(hasCurrentAllocationArea()); | 988 DCHECK(hasCurrentAllocationArea()); |
989 ASSERT(remainingAllocationSize() >= allocationSize); | 989 DCHECK_GE(remainingAllocationSize(), allocationSize); |
990 m_freeList.m_biggestFreeListIndex = index; | 990 m_freeList.m_biggestFreeListIndex = index; |
991 return allocateObject(allocationSize, gcInfoIndex); | 991 return allocateObject(allocationSize, gcInfoIndex); |
992 } | 992 } |
993 } | 993 } |
994 m_freeList.m_biggestFreeListIndex = index; | 994 m_freeList.m_biggestFreeListIndex = index; |
995 return nullptr; | 995 return nullptr; |
996 } | 996 } |
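The bucket scan in allocateFromFreeList() starts at the biggest non-empty bucket (blocks of at least 1 << index bytes) and never does a linear scan: for the one bucket whose guaranteed size falls below the request, only the head entry is considered. A distilled sketch with illustrative types:

#include <cstddef>

struct EntryLike {
  size_t size;
  EntryLike* next;
};

EntryLike* pickFreeListEntry(EntryLike* freeLists[], int biggestIndex,
                             size_t allocationSize) {
  size_t bucketSize = static_cast<size_t>(1) << biggestIndex;
  for (int index = biggestIndex; index > 0; --index, bucketSize >>= 1) {
    EntryLike* entry = freeLists[index];
    if (allocationSize > bucketSize) {
      // Final candidate bucket: the head entry either serves the request or
      // the free-list path gives up (no linear scan).
      if (!entry || entry->size < allocationSize)
        return nullptr;
    }
    if (entry)
      return entry;  // Caller unlinks it and makes it the new bump area.
  }
  return nullptr;
}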
997 | 997 |
998 LargeObjectArena::LargeObjectArena(ThreadState* state, int index) | 998 LargeObjectArena::LargeObjectArena(ThreadState* state, int index) |
999 : BaseArena(state, index) {} | 999 : BaseArena(state, index) {} |
1000 | 1000 |
1001 Address LargeObjectArena::allocateLargeObjectPage(size_t allocationSize, | 1001 Address LargeObjectArena::allocateLargeObjectPage(size_t allocationSize, |
1002 size_t gcInfoIndex) { | 1002 size_t gcInfoIndex) { |
1003 // Caller already added space for object header and rounded up to allocation | 1003 // Caller already added space for object header and rounded up to allocation |
1004 // alignment | 1004 // alignment |
1005 ASSERT(!(allocationSize & allocationMask)); | 1005 DCHECK(!(allocationSize & allocationMask)); |
1006 | 1006 |
1007 // 1. Try to sweep large objects more than allocationSize bytes | 1007 // 1. Try to sweep large objects more than allocationSize bytes |
1008 // before allocating a new large object. | 1008 // before allocating a new large object. |
1009 Address result = lazySweep(allocationSize, gcInfoIndex); | 1009 Address result = lazySweep(allocationSize, gcInfoIndex); |
1010 if (result) | 1010 if (result) |
1011 return result; | 1011 return result; |
1012 | 1012 |
1013 // 2. If we have failed in sweeping allocationSize bytes, | 1013 // 2. If we have failed in sweeping allocationSize bytes, |
1014 // we complete sweeping before allocating this large object. | 1014 // we complete sweeping before allocating this large object. |
1015 getThreadState()->completeSweep(); | 1015 getThreadState()->completeSweep(); |
(...skipping 12 matching lines...)
1028 #if defined(ADDRESS_SANITIZER) | 1028 #if defined(ADDRESS_SANITIZER) |
1029 largeObjectSize += allocationGranularity; | 1029 largeObjectSize += allocationGranularity; |
1030 #endif | 1030 #endif |
1031 | 1031 |
1032 getThreadState()->shouldFlushHeapDoesNotContainCache(); | 1032 getThreadState()->shouldFlushHeapDoesNotContainCache(); |
1033 PageMemory* pageMemory = PageMemory::allocate( | 1033 PageMemory* pageMemory = PageMemory::allocate( |
1034 largeObjectSize, getThreadState()->heap().getRegionTree()); | 1034 largeObjectSize, getThreadState()->heap().getRegionTree()); |
1035 Address largeObjectAddress = pageMemory->writableStart(); | 1035 Address largeObjectAddress = pageMemory->writableStart(); |
1036 Address headerAddress = | 1036 Address headerAddress = |
1037 largeObjectAddress + LargeObjectPage::pageHeaderSize(); | 1037 largeObjectAddress + LargeObjectPage::pageHeaderSize(); |
1038 #if ENABLE(ASSERT) | 1038 #if DCHECK_IS_ON() |
1039 // Verify that the allocated PageMemory is expectedly zeroed. | 1039 // Verify that the allocated PageMemory is expectedly zeroed. |
1040 for (size_t i = 0; i < largeObjectSize; ++i) | 1040 for (size_t i = 0; i < largeObjectSize; ++i) |
1041 ASSERT(!largeObjectAddress[i]); | 1041 DCHECK(!largeObjectAddress[i]); |
1042 #endif | 1042 #endif |
1043 ASSERT(gcInfoIndex > 0); | 1043 DCHECK_GT(gcInfoIndex, 0UL); |
1044 HeapObjectHeader* header = new (NotNull, headerAddress) | 1044 HeapObjectHeader* header = new (NotNull, headerAddress) |
1045 HeapObjectHeader(largeObjectSizeInHeader, gcInfoIndex); | 1045 HeapObjectHeader(largeObjectSizeInHeader, gcInfoIndex); |
1046 Address result = headerAddress + sizeof(*header); | 1046 Address result = headerAddress + sizeof(*header); |
1047 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); | 1047 DCHECK(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); |
1048 LargeObjectPage* largeObject = new (largeObjectAddress) | 1048 LargeObjectPage* largeObject = new (largeObjectAddress) |
1049 LargeObjectPage(pageMemory, this, allocationSize); | 1049 LargeObjectPage(pageMemory, this, allocationSize); |
1050 ASSERT(header->checkHeader()); | 1050 DCHECK(header->checkHeader()); |
1051 | 1051 |
1052 // Poison the object header and allocationGranularity bytes after the object | 1052 // Poison the object header and allocationGranularity bytes after the object |
1053 ASAN_POISON_MEMORY_REGION(header, sizeof(*header)); | 1053 ASAN_POISON_MEMORY_REGION(header, sizeof(*header)); |
1054 ASAN_POISON_MEMORY_REGION(largeObject->getAddress() + largeObject->size(), | 1054 ASAN_POISON_MEMORY_REGION(largeObject->getAddress() + largeObject->size(), |
1055 allocationGranularity); | 1055 allocationGranularity); |
1056 | 1056 |
1057 largeObject->link(&m_firstPage); | 1057 largeObject->link(&m_firstPage); |
1058 | 1058 |
1059 getThreadState()->heap().heapStats().increaseAllocatedSpace( | 1059 getThreadState()->heap().heapStats().increaseAllocatedSpace( |
1060 largeObject->size()); | 1060 largeObject->size()); |
1061 getThreadState()->increaseAllocatedObjectSize(largeObject->size()); | 1061 getThreadState()->increaseAllocatedObjectSize(largeObject->size()); |
1062 return result; | 1062 return result; |
1063 } | 1063 } |
1064 | 1064 |
1065 void LargeObjectArena::freeLargeObjectPage(LargeObjectPage* object) { | 1065 void LargeObjectArena::freeLargeObjectPage(LargeObjectPage* object) { |
1066 ASAN_UNPOISON_MEMORY_REGION(object->payload(), object->payloadSize()); | 1066 ASAN_UNPOISON_MEMORY_REGION(object->payload(), object->payloadSize()); |
1067 object->heapObjectHeader()->finalize(object->payload(), | 1067 object->heapObjectHeader()->finalize(object->payload(), |
1068 object->payloadSize()); | 1068 object->payloadSize()); |
1069 getThreadState()->heap().heapStats().decreaseAllocatedSpace(object->size()); | 1069 getThreadState()->heap().heapStats().decreaseAllocatedSpace(object->size()); |
1070 | 1070 |
1071 // Unpoison the object header and allocationGranularity bytes after the | 1071 // Unpoison the object header and allocationGranularity bytes after the |
1072 // object before freeing. | 1072 // object before freeing. |
1073 ASAN_UNPOISON_MEMORY_REGION(object->heapObjectHeader(), | 1073 ASAN_UNPOISON_MEMORY_REGION(object->heapObjectHeader(), |
1074 sizeof(HeapObjectHeader)); | 1074 sizeof(HeapObjectHeader)); |
1075 ASAN_UNPOISON_MEMORY_REGION(object->getAddress() + object->size(), | 1075 ASAN_UNPOISON_MEMORY_REGION(object->getAddress() + object->size(), |
1076 allocationGranularity); | 1076 allocationGranularity); |
1077 | 1077 |
1078 if (object->terminating()) { | 1078 if (object->terminating()) { |
1079 ASSERT(ThreadState::current()->isTerminating()); | 1079 DCHECK(ThreadState::current()->isTerminating()); |
1080 // The thread is shutting down and this page is being removed as a part | 1080 // The thread is shutting down and this page is being removed as a part |
1081 // of the thread local GC. In that case the object could be traced in | 1081 // of the thread local GC. In that case the object could be traced in |
1082 // the next global GC if there is a dangling pointer from a live thread | 1082 // the next global GC if there is a dangling pointer from a live thread |
1083 // heap to this dead thread heap. To guard against this, we put the | 1083 // heap to this dead thread heap. To guard against this, we put the |
1084 // page into the orphaned page pool and zap the page memory. This | 1084 // page into the orphaned page pool and zap the page memory. This |
1085 // ensures that tracing the dangling pointer in the next global GC just | 1085 // ensures that tracing the dangling pointer in the next global GC just |
1086 // crashes instead of causing use-after-frees. After the next global | 1086 // crashes instead of causing use-after-frees. After the next global |
1087 // GC, the orphaned pages are removed. | 1087 // GC, the orphaned pages are removed. |
1088 getThreadState()->heap().getOrphanedPagePool()->addOrphanedPage( | 1088 getThreadState()->heap().getOrphanedPagePool()->addOrphanedPage( |
1089 arenaIndex(), object); | 1089 arenaIndex(), object); |
1090 } else { | 1090 } else { |
1091 ASSERT(!ThreadState::current()->isTerminating()); | 1091 DCHECK(!ThreadState::current()->isTerminating()); |
1092 PageMemory* memory = object->storage(); | 1092 PageMemory* memory = object->storage(); |
1093 object->~LargeObjectPage(); | 1093 object->~LargeObjectPage(); |
1094 delete memory; | 1094 delete memory; |
1095 } | 1095 } |
1096 } | 1096 } |
1097 | 1097 |
1098 Address LargeObjectArena::lazySweepPages(size_t allocationSize, | 1098 Address LargeObjectArena::lazySweepPages(size_t allocationSize, |
1099 size_t gcInfoIndex) { | 1099 size_t gcInfoIndex) { |
1100 Address result = nullptr; | 1100 Address result = nullptr; |
1101 size_t sweptSize = 0; | 1101 size_t sweptSize = 0; |
1102 while (m_firstUnsweptPage) { | 1102 while (m_firstUnsweptPage) { |
1103 BasePage* page = m_firstUnsweptPage; | 1103 BasePage* page = m_firstUnsweptPage; |
1104 if (page->isEmpty()) { | 1104 if (page->isEmpty()) { |
1105 sweptSize += static_cast<LargeObjectPage*>(page)->payloadSize() + | 1105 sweptSize += static_cast<LargeObjectPage*>(page)->payloadSize() + |
1106 sizeof(HeapObjectHeader); | 1106 sizeof(HeapObjectHeader); |
1107 page->unlink(&m_firstUnsweptPage); | 1107 page->unlink(&m_firstUnsweptPage); |
1108 page->removeFromHeap(); | 1108 page->removeFromHeap(); |
1109 // For LargeObjectPage, stop lazy sweeping once we have swept | 1109 // For LargeObjectPage, stop lazy sweeping once we have swept |
1110 // more than allocationSize bytes. | 1110 // more than allocationSize bytes. |
1111 if (sweptSize >= allocationSize) { | 1111 if (sweptSize >= allocationSize) { |
1112 result = doAllocateLargeObjectPage(allocationSize, gcInfoIndex); | 1112 result = doAllocateLargeObjectPage(allocationSize, gcInfoIndex); |
1113 ASSERT(result); | 1113 DCHECK(result); |
1114 break; | 1114 break; |
1115 } | 1115 } |
1116 } else { | 1116 } else { |
1117       // Sweep a page and move the page from m_firstUnsweptPage to | 1117       // Sweep a page and move the page from m_firstUnsweptPage to |
1118       // m_firstPage. | 1118       // m_firstPage. |
1119 page->sweep(); | 1119 page->sweep(); |
1120 page->unlink(&m_firstUnsweptPage); | 1120 page->unlink(&m_firstUnsweptPage); |
1121 page->link(&m_firstPage); | 1121 page->link(&m_firstPage); |
1122 page->markAsSwept(); | 1122 page->markAsSwept(); |
1123 } | 1123 } |
1124 } | 1124 } |
1125 return result; | 1125 return result; |
1126 } | 1126 } |
1127 | 1127 |
1128 FreeList::FreeList() : m_biggestFreeListIndex(0) {} | 1128 FreeList::FreeList() : m_biggestFreeListIndex(0) {} |
1129 | 1129 |
1130 void FreeList::addToFreeList(Address address, size_t size) { | 1130 void FreeList::addToFreeList(Address address, size_t size) { |
1131 ASSERT(size < blinkPagePayloadSize()); | 1131 DCHECK_LT(size, blinkPagePayloadSize()); |
1132 // The free list entries are only pointer aligned (but when we allocate | 1132 // The free list entries are only pointer aligned (but when we allocate |
1133 // from them we are 8 byte aligned due to the header size). | 1133 // from them we are 8 byte aligned due to the header size). |
1134 ASSERT(!((reinterpret_cast<uintptr_t>(address) + sizeof(HeapObjectHeader)) & | 1134 DCHECK(!((reinterpret_cast<uintptr_t>(address) + sizeof(HeapObjectHeader)) & |
1135 allocationMask)); | 1135 allocationMask)); |
1136 ASSERT(!(size & allocationMask)); | 1136 DCHECK(!(size & allocationMask)); |
1137 ASAN_UNPOISON_MEMORY_REGION(address, size); | 1137 ASAN_UNPOISON_MEMORY_REGION(address, size); |
1138 FreeListEntry* entry; | 1138 FreeListEntry* entry; |
1139 if (size < sizeof(*entry)) { | 1139 if (size < sizeof(*entry)) { |
1140 // Create a dummy header with only a size and freelist bit set. | 1140 // Create a dummy header with only a size and freelist bit set. |
1141 ASSERT(size >= sizeof(HeapObjectHeader)); | 1141 DCHECK_GE(size, sizeof(HeapObjectHeader)); |
1142     // Encode the size to mark the lost memory as freelist memory. | 1142     // Encode the size to mark the lost memory as freelist memory. |
1143 new (NotNull, address) HeapObjectHeader(size, gcInfoIndexForFreeListHeader); | 1143 new (NotNull, address) HeapObjectHeader(size, gcInfoIndexForFreeListHeader); |
1144 | 1144 |
1145 ASAN_POISON_MEMORY_REGION(address, size); | 1145 ASAN_POISON_MEMORY_REGION(address, size); |
1146 // This memory gets lost. Sweeping can reclaim it. | 1146 // This memory gets lost. Sweeping can reclaim it. |
1147 return; | 1147 return; |
1148 } | 1148 } |
1149 entry = new (NotNull, address) FreeListEntry(size); | 1149 entry = new (NotNull, address) FreeListEntry(size); |
1150 | 1150 |
1151 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) | 1151 #if DCHECK_IS_ON() || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) |
1152 // The following logic delays reusing free lists for (at least) one GC | 1152 // The following logic delays reusing free lists for (at least) one GC |
1153 // cycle or coalescing. This is helpful to detect use-after-free errors | 1153 // cycle or coalescing. This is helpful to detect use-after-free errors |
1154 // that could be caused by lazy sweeping etc. | 1154 // that could be caused by lazy sweeping etc. |
1155 size_t allowedCount = 0; | 1155 size_t allowedCount = 0; |
1156 size_t forbiddenCount = 0; | 1156 size_t forbiddenCount = 0; |
1157 for (size_t i = sizeof(FreeListEntry); i < size; i++) { | 1157 for (size_t i = sizeof(FreeListEntry); i < size; i++) { |
1158 if (address[i] == reuseAllowedZapValue) | 1158 if (address[i] == reuseAllowedZapValue) |
1159 allowedCount++; | 1159 allowedCount++; |
1160 else if (address[i] == reuseForbiddenZapValue) | 1160 else if (address[i] == reuseForbiddenZapValue) |
1161 forbiddenCount++; | 1161 forbiddenCount++; |
1162 else | 1162 else |
1163 ASSERT_NOT_REACHED(); | 1163 NOTREACHED(); |
1164 } | 1164 } |
1165 size_t entryCount = size - sizeof(FreeListEntry); | 1165 size_t entryCount = size - sizeof(FreeListEntry); |
1166 if (forbiddenCount == entryCount) { | 1166 if (forbiddenCount == entryCount) { |
1167 // If all values in the memory region are reuseForbiddenZapValue, | 1167 // If all values in the memory region are reuseForbiddenZapValue, |
1168 // we flip them to reuseAllowedZapValue. This allows the next | 1168 // we flip them to reuseAllowedZapValue. This allows the next |
1169 // addToFreeList() to add the memory region to the free list | 1169 // addToFreeList() to add the memory region to the free list |
1170 // (unless someone concatenates the memory region with another memory | 1170 // (unless someone concatenates the memory region with another memory |
1171 // region that contains reuseForbiddenZapValue.) | 1171 // region that contains reuseForbiddenZapValue.) |
1172 for (size_t i = sizeof(FreeListEntry); i < size; i++) | 1172 for (size_t i = sizeof(FreeListEntry); i < size; i++) |
1173 address[i] = reuseAllowedZapValue; | 1173 address[i] = reuseAllowedZapValue; |
(...skipping 17 matching lines...) |
1191 // region to the free list and reuse it for another object. | 1191 // region to the free list and reuse it for another object. |
1192 #endif | 1192 #endif |
1193 ASAN_POISON_MEMORY_REGION(address, size); | 1193 ASAN_POISON_MEMORY_REGION(address, size); |
1194 | 1194 |
1195 int index = bucketIndexForSize(size); | 1195 int index = bucketIndexForSize(size); |
1196 entry->link(&m_freeLists[index]); | 1196 entry->link(&m_freeLists[index]); |
1197 if (index > m_biggestFreeListIndex) | 1197 if (index > m_biggestFreeListIndex) |
1198 m_biggestFreeListIndex = index; | 1198 m_biggestFreeListIndex = index; |
1199 } | 1199 } |
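A minimal standalone model of the reuse-delay protocol described in the comments above, under the assumption that zapFreedMemory() stamps freed bytes with the forbidden zap value and that a region is only linked once every byte already carries the allowed value (names and byte values here are illustrative, not Blink API):

#include <algorithm>
#include <cstdint>
#include <vector>

constexpr uint8_t kReuseAllowed = 0x2a;    // assumed "reuse allowed" zap byte
constexpr uint8_t kReuseForbidden = 0x2c;  // assumed "reuse forbidden" zap byte

// Analogous to FreeList::zapFreedMemory(): stamp freed bytes as forbidden,
// leaving bytes that were already marked allowed untouched.
void zapFreed(std::vector<uint8_t>& region) {
  for (uint8_t& b : region) {
    if (b != kReuseAllowed)
      b = kReuseForbidden;
  }
}

// Analogous to the gate in addToFreeList(): a region whose bytes are all
// forbidden is flipped to allowed but skipped this round; only a region that
// is already all allowed may be linked and reused.
bool mayReuse(std::vector<uint8_t>& region) {
  bool allForbidden = std::all_of(region.begin(), region.end(),
                                  [](uint8_t b) { return b == kReuseForbidden; });
  if (allForbidden) {
    std::fill(region.begin(), region.end(), kReuseAllowed);
    return false;  // delay reuse for (at least) one more round
  }
  return std::all_of(region.begin(), region.end(),
                     [](uint8_t b) { return b == kReuseAllowed; });
}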
1200 | 1200 |
1201 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) || \ | 1201 #if DCHECK_IS_ON() || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) || \ |
1202 defined(MEMORY_SANITIZER) | 1202 defined(MEMORY_SANITIZER) |
1203 NO_SANITIZE_ADDRESS | 1203 NO_SANITIZE_ADDRESS |
1204 NO_SANITIZE_MEMORY | 1204 NO_SANITIZE_MEMORY |
1205 void NEVER_INLINE FreeList::zapFreedMemory(Address address, size_t size) { | 1205 void NEVER_INLINE FreeList::zapFreedMemory(Address address, size_t size) { |
1206 for (size_t i = 0; i < size; i++) { | 1206 for (size_t i = 0; i < size; i++) { |
1207 // See the comment in addToFreeList(). | 1207 // See the comment in addToFreeList(). |
1208 if (address[i] != reuseAllowedZapValue) | 1208 if (address[i] != reuseAllowedZapValue) |
1209 address[i] = reuseForbiddenZapValue; | 1209 address[i] = reuseForbiddenZapValue; |
1210 } | 1210 } |
1211 } | 1211 } |
1212 | 1212 |
1213 void NEVER_INLINE FreeList::checkFreedMemoryIsZapped(Address address, | 1213 void NEVER_INLINE FreeList::checkFreedMemoryIsZapped(Address address, |
1214 size_t size) { | 1214 size_t size) { |
1215 for (size_t i = 0; i < size; i++) { | 1215 for (size_t i = 0; i < size; i++) { |
1216 ASSERT(address[i] == reuseAllowedZapValue || | 1216 DCHECK(address[i] == reuseAllowedZapValue || |
1217 address[i] == reuseForbiddenZapValue); | 1217 address[i] == reuseForbiddenZapValue); |
1218 } | 1218 } |
1219 } | 1219 } |
1220 #endif | 1220 #endif |
1221 | 1221 |
1222 size_t FreeList::freeListSize() const { | 1222 size_t FreeList::freeListSize() const { |
1223 size_t freeSize = 0; | 1223 size_t freeSize = 0; |
1224 for (unsigned i = 0; i < blinkPageSizeLog2; ++i) { | 1224 for (unsigned i = 0; i < blinkPageSizeLog2; ++i) { |
1225 FreeListEntry* entry = m_freeLists[i]; | 1225 FreeListEntry* entry = m_freeLists[i]; |
1226 while (entry) { | 1226 while (entry) { |
(...skipping 23 matching lines...) |
1250 return freeSize; | 1250 return freeSize; |
1251 } | 1251 } |
1252 | 1252 |
1253 void FreeList::clear() { | 1253 void FreeList::clear() { |
1254 m_biggestFreeListIndex = 0; | 1254 m_biggestFreeListIndex = 0; |
1255 for (size_t i = 0; i < blinkPageSizeLog2; ++i) | 1255 for (size_t i = 0; i < blinkPageSizeLog2; ++i) |
1256 m_freeLists[i] = nullptr; | 1256 m_freeLists[i] = nullptr; |
1257 } | 1257 } |
1258 | 1258 |
1259 int FreeList::bucketIndexForSize(size_t size) { | 1259 int FreeList::bucketIndexForSize(size_t size) { |
1260 ASSERT(size > 0); | 1260 DCHECK_GT(size, 0UL); |
1261 int index = -1; | 1261 int index = -1; |
1262 while (size) { | 1262 while (size) { |
1263 size >>= 1; | 1263 size >>= 1; |
1264 index++; | 1264 index++; |
1265 } | 1265 } |
1266 return index; | 1266 return index; |
1267 } | 1267 } |
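In other words, the bucket index is the position of the highest set bit of the entry size (floor(log2(size))), so bucket i collects free entries whose size lies in [2^i, 2^(i+1)). A small standalone check of that mapping (a sketch only; the helper name is hypothetical):

#include <cassert>
#include <cstddef>

static int bucketIndexForSizeSketch(size_t size) {
  assert(size > 0);
  int index = -1;
  while (size) {
    size >>= 1;
    index++;
  }
  return index;
}

int main() {
  assert(bucketIndexForSizeSketch(32) == 5);  // 2^5 starts bucket 5
  assert(bucketIndexForSizeSketch(63) == 5);  // still bucket 5
  assert(bucketIndexForSizeSketch(64) == 6);  // next power of two
  return 0;
}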
1268 | 1268 |
1269 bool FreeList::takeSnapshot(const String& dumpBaseName) { | 1269 bool FreeList::takeSnapshot(const String& dumpBaseName) { |
1270 bool didDumpBucketStats = false; | 1270 bool didDumpBucketStats = false; |
(...skipping 17 matching lines...) |
1288 } | 1288 } |
1289 return didDumpBucketStats; | 1289 return didDumpBucketStats; |
1290 } | 1290 } |
1291 | 1291 |
1292 BasePage::BasePage(PageMemory* storage, BaseArena* arena) | 1292 BasePage::BasePage(PageMemory* storage, BaseArena* arena) |
1293 : m_storage(storage), | 1293 : m_storage(storage), |
1294 m_arena(arena), | 1294 m_arena(arena), |
1295 m_next(nullptr), | 1295 m_next(nullptr), |
1296 m_terminating(false), | 1296 m_terminating(false), |
1297 m_swept(true) { | 1297 m_swept(true) { |
1298 ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this))); | 1298 DCHECK(isPageHeaderAddress(reinterpret_cast<Address>(this))); |
1299 } | 1299 } |
1300 | 1300 |
1301 void BasePage::markOrphaned() { | 1301 void BasePage::markOrphaned() { |
1302 m_arena = nullptr; | 1302 m_arena = nullptr; |
1303 m_terminating = false; | 1303 m_terminating = false; |
1304 // Since we zap the page payload for orphaned pages we need to mark it as | 1304 // Since we zap the page payload for orphaned pages we need to mark it as |
1305 // unused so a conservative pointer won't interpret the object headers. | 1305 // unused so a conservative pointer won't interpret the object headers. |
1306 storage()->markUnused(); | 1306 storage()->markUnused(); |
1307 } | 1307 } |
1308 | 1308 |
1309 NormalPage::NormalPage(PageMemory* storage, BaseArena* arena) | 1309 NormalPage::NormalPage(PageMemory* storage, BaseArena* arena) |
1310 : BasePage(storage, arena), m_objectStartBitMapComputed(false) { | 1310 : BasePage(storage, arena), m_objectStartBitMapComputed(false) { |
1311 ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this))); | 1311 DCHECK(isPageHeaderAddress(reinterpret_cast<Address>(this))); |
1312 } | 1312 } |
1313 | 1313 |
1314 size_t NormalPage::objectPayloadSizeForTesting() { | 1314 size_t NormalPage::objectPayloadSizeForTesting() { |
1315 size_t objectPayloadSize = 0; | 1315 size_t objectPayloadSize = 0; |
1316 Address headerAddress = payload(); | 1316 Address headerAddress = payload(); |
1317 markAsSwept(); | 1317 markAsSwept(); |
1318 ASSERT(headerAddress != payloadEnd()); | 1318 DCHECK_NE(headerAddress, payloadEnd()); |
1319 do { | 1319 do { |
1320 HeapObjectHeader* header = | 1320 HeapObjectHeader* header = |
1321 reinterpret_cast<HeapObjectHeader*>(headerAddress); | 1321 reinterpret_cast<HeapObjectHeader*>(headerAddress); |
1322 if (!header->isFree()) { | 1322 if (!header->isFree()) { |
1323 ASSERT(header->checkHeader()); | 1323 DCHECK(header->checkHeader()); |
1324 objectPayloadSize += header->payloadSize(); | 1324 objectPayloadSize += header->payloadSize(); |
1325 } | 1325 } |
1326 ASSERT(header->size() < blinkPagePayloadSize()); | 1326 DCHECK_LT(header->size(), blinkPagePayloadSize()); |
1327 headerAddress += header->size(); | 1327 headerAddress += header->size(); |
1328 ASSERT(headerAddress <= payloadEnd()); | 1328 DCHECK_LE(headerAddress, payloadEnd()); |
1329 } while (headerAddress < payloadEnd()); | 1329 } while (headerAddress < payloadEnd()); |
1330 return objectPayloadSize; | 1330 return objectPayloadSize; |
1331 } | 1331 } |
1332 | 1332 |
1333 bool NormalPage::isEmpty() { | 1333 bool NormalPage::isEmpty() { |
1334 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(payload()); | 1334 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(payload()); |
1335 return header->isFree() && header->size() == payloadSize(); | 1335 return header->isFree() && header->size() == payloadSize(); |
1336 } | 1336 } |
1337 | 1337 |
1338 void NormalPage::removeFromHeap() { | 1338 void NormalPage::removeFromHeap() { |
1339 arenaForNormalPage()->freePage(this); | 1339 arenaForNormalPage()->freePage(this); |
1340 } | 1340 } |
1341 | 1341 |
1342 #if !ENABLE(ASSERT) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER) | 1342 #if !DCHECK_IS_ON() && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER) |
1343 static void discardPages(Address begin, Address end) { | 1343 static void discardPages(Address begin, Address end) { |
1344 uintptr_t beginAddress = | 1344 uintptr_t beginAddress = |
1345 WTF::roundUpToSystemPage(reinterpret_cast<uintptr_t>(begin)); | 1345 WTF::roundUpToSystemPage(reinterpret_cast<uintptr_t>(begin)); |
1346 uintptr_t endAddress = | 1346 uintptr_t endAddress = |
1347 WTF::roundDownToSystemPage(reinterpret_cast<uintptr_t>(end)); | 1347 WTF::roundDownToSystemPage(reinterpret_cast<uintptr_t>(end)); |
1348 if (beginAddress < endAddress) | 1348 if (beginAddress < endAddress) |
1349 WTF::discardSystemPages(reinterpret_cast<void*>(beginAddress), | 1349 WTF::discardSystemPages(reinterpret_cast<void*>(beginAddress), |
1350 endAddress - beginAddress); | 1350 endAddress - beginAddress); |
1351 } | 1351 } |
1352 #endif | 1352 #endif |
1353 | 1353 |
1354 void NormalPage::sweep() { | 1354 void NormalPage::sweep() { |
1355 size_t markedObjectSize = 0; | 1355 size_t markedObjectSize = 0; |
1356 Address startOfGap = payload(); | 1356 Address startOfGap = payload(); |
1357 NormalPageArena* pageArena = arenaForNormalPage(); | 1357 NormalPageArena* pageArena = arenaForNormalPage(); |
1358 for (Address headerAddress = startOfGap; headerAddress < payloadEnd();) { | 1358 for (Address headerAddress = startOfGap; headerAddress < payloadEnd();) { |
1359 HeapObjectHeader* header = | 1359 HeapObjectHeader* header = |
1360 reinterpret_cast<HeapObjectHeader*>(headerAddress); | 1360 reinterpret_cast<HeapObjectHeader*>(headerAddress); |
1361 size_t size = header->size(); | 1361 size_t size = header->size(); |
1362 ASSERT(size > 0); | 1362 DCHECK_GT(size, 0UL); |
1363 ASSERT(size < blinkPagePayloadSize()); | 1363 DCHECK_LT(size, blinkPagePayloadSize()); |
1364 | 1364 |
1365 if (header->isPromptlyFreed()) | 1365 if (header->isPromptlyFreed()) |
1366 pageArena->decreasePromptlyFreedSize(size); | 1366 pageArena->decreasePromptlyFreedSize(size); |
1367 if (header->isFree()) { | 1367 if (header->isFree()) { |
1368 // Zero the memory in the free list header to maintain the | 1368 // Zero the memory in the free list header to maintain the |
1369 // invariant that memory on the free list is zero filled. | 1369 // invariant that memory on the free list is zero filled. |
1370 // The rest of the memory is already on the free list and is | 1370 // The rest of the memory is already on the free list and is |
1371 // therefore already zero filled. | 1371 // therefore already zero filled. |
1372 SET_MEMORY_INACCESSIBLE(headerAddress, size < sizeof(FreeListEntry) | 1372 SET_MEMORY_INACCESSIBLE(headerAddress, size < sizeof(FreeListEntry) |
1373 ? size | 1373 ? size |
(...skipping 14 matching lines...) |
1388 ASAN_UNPOISON_MEMORY_REGION(payload, payloadSize); | 1388 ASAN_UNPOISON_MEMORY_REGION(payload, payloadSize); |
1389 header->finalize(payload, payloadSize); | 1389 header->finalize(payload, payloadSize); |
1390 // This memory will be added to the freelist. Maintain the invariant | 1390 // This memory will be added to the freelist. Maintain the invariant |
1391 // that memory on the freelist is zero filled. | 1391 // that memory on the freelist is zero filled. |
1392 SET_MEMORY_INACCESSIBLE(headerAddress, size); | 1392 SET_MEMORY_INACCESSIBLE(headerAddress, size); |
1393 headerAddress += size; | 1393 headerAddress += size; |
1394 continue; | 1394 continue; |
1395 } | 1395 } |
1396 if (startOfGap != headerAddress) { | 1396 if (startOfGap != headerAddress) { |
1397 pageArena->addToFreeList(startOfGap, headerAddress - startOfGap); | 1397 pageArena->addToFreeList(startOfGap, headerAddress - startOfGap); |
1398 #if !ENABLE(ASSERT) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER) | 1398 #if !DCHECK_IS_ON() && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER) |
1399 // Discarding pages increases page faults and may regress performance. | 1399 // Discarding pages increases page faults and may regress performance. |
1400 // So we enable this only on low-RAM devices. | 1400 // So we enable this only on low-RAM devices. |
1401 if (MemoryCoordinator::isLowEndDevice()) | 1401 if (MemoryCoordinator::isLowEndDevice()) |
1402 discardPages(startOfGap + sizeof(FreeListEntry), headerAddress); | 1402 discardPages(startOfGap + sizeof(FreeListEntry), headerAddress); |
1403 #endif | 1403 #endif |
1404 } | 1404 } |
1405 header->unmark(); | 1405 header->unmark(); |
1406 headerAddress += size; | 1406 headerAddress += size; |
1407 markedObjectSize += size; | 1407 markedObjectSize += size; |
1408 startOfGap = headerAddress; | 1408 startOfGap = headerAddress; |
1409 } | 1409 } |
1410 if (startOfGap != payloadEnd()) { | 1410 if (startOfGap != payloadEnd()) { |
1411 pageArena->addToFreeList(startOfGap, payloadEnd() - startOfGap); | 1411 pageArena->addToFreeList(startOfGap, payloadEnd() - startOfGap); |
1412 #if !ENABLE(ASSERT) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER) | 1412 #if !DCHECK_IS_ON() && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER) |
1413 if (MemoryCoordinator::isLowEndDevice()) | 1413 if (MemoryCoordinator::isLowEndDevice()) |
1414 discardPages(startOfGap + sizeof(FreeListEntry), payloadEnd()); | 1414 discardPages(startOfGap + sizeof(FreeListEntry), payloadEnd()); |
1415 #endif | 1415 #endif |
1416 } | 1416 } |
1417 | 1417 |
1418 if (markedObjectSize) | 1418 if (markedObjectSize) |
1419 pageArena->getThreadState()->increaseMarkedObjectSize(markedObjectSize); | 1419 pageArena->getThreadState()->increaseMarkedObjectSize(markedObjectSize); |
1420 } | 1420 } |
1421 | 1421 |
1422 void NormalPage::sweepAndCompact(CompactionContext& context) { | 1422 void NormalPage::sweepAndCompact(CompactionContext& context) { |
(...skipping 29 matching lines...) |
1452 // finalized object will be zero-filled and poison'ed afterwards. | 1452 // finalized object will be zero-filled and poison'ed afterwards. |
1453 // Given all other unmarked objects are poisoned, ASan will detect | 1453 // Given all other unmarked objects are poisoned, ASan will detect |
1454 // an error if the finalizer touches any other on-heap object that | 1454 // an error if the finalizer touches any other on-heap object that |
1455       // dies in the same GC cycle. | 1455       // dies in the same GC cycle. |
1456 ASAN_UNPOISON_MEMORY_REGION(headerAddress, size); | 1456 ASAN_UNPOISON_MEMORY_REGION(headerAddress, size); |
1457 header->finalize(payload, payloadSize); | 1457 header->finalize(payload, payloadSize); |
1458 | 1458 |
1459 // As compaction is under way, leave the freed memory accessible | 1459 // As compaction is under way, leave the freed memory accessible |
1460 // while compacting the rest of the page. We just zap the payload | 1460 // while compacting the rest of the page. We just zap the payload |
1461 // to catch out other finalizers trying to access it. | 1461 // to catch out other finalizers trying to access it. |
1462 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) || \ | 1462 #if DCHECK_IS_ON() || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) || \ |
1463 defined(MEMORY_SANITIZER) | 1463 defined(MEMORY_SANITIZER) |
1464 FreeList::zapFreedMemory(payload, payloadSize); | 1464 FreeList::zapFreedMemory(payload, payloadSize); |
1465 #endif | 1465 #endif |
1466 headerAddress += size; | 1466 headerAddress += size; |
1467 continue; | 1467 continue; |
1468 } | 1468 } |
1469 header->unmark(); | 1469 header->unmark(); |
1470 // Allocate and copy over the live object. | 1470 // Allocate and copy over the live object. |
1471 Address compactFrontier = currentPage->payload() + allocationPoint; | 1471 Address compactFrontier = currentPage->payload() + allocationPoint; |
1472 if (compactFrontier + size > currentPage->payloadEnd()) { | 1472 if (compactFrontier + size > currentPage->payloadEnd()) { |
(...skipping 29 matching lines...) |
1502 // Use a non-overlapping copy, if possible. | 1502 // Use a non-overlapping copy, if possible. |
1503 if (currentPage == this) | 1503 if (currentPage == this) |
1504 memmove(compactFrontier, headerAddress, size); | 1504 memmove(compactFrontier, headerAddress, size); |
1505 else | 1505 else |
1506 memcpy(compactFrontier, headerAddress, size); | 1506 memcpy(compactFrontier, headerAddress, size); |
1507 compact->relocate(payload, compactFrontier + sizeof(HeapObjectHeader)); | 1507 compact->relocate(payload, compactFrontier + sizeof(HeapObjectHeader)); |
1508 } | 1508 } |
1509 headerAddress += size; | 1509 headerAddress += size; |
1510 markedObjectSize += size; | 1510 markedObjectSize += size; |
1511 allocationPoint += size; | 1511 allocationPoint += size; |
1512 DCHECK(allocationPoint <= currentPage->payloadSize()); | 1512 DCHECK_LE(allocationPoint, currentPage->payloadSize()); |
1513 } | 1513 } |
1514 if (markedObjectSize) | 1514 if (markedObjectSize) |
1515 pageArena->getThreadState()->increaseMarkedObjectSize(markedObjectSize); | 1515 pageArena->getThreadState()->increaseMarkedObjectSize(markedObjectSize); |
1516 | 1516 |
1517 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) || \ | 1517 #if DCHECK_IS_ON() || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) || \ |
1518 defined(MEMORY_SANITIZER) | 1518 defined(MEMORY_SANITIZER) |
1519 // Zap the unused portion, until it is either compacted into or freed. | 1519 // Zap the unused portion, until it is either compacted into or freed. |
1520 if (currentPage != this) { | 1520 if (currentPage != this) { |
1521 FreeList::zapFreedMemory(payload(), payloadSize()); | 1521 FreeList::zapFreedMemory(payload(), payloadSize()); |
1522 } else { | 1522 } else { |
1523 FreeList::zapFreedMemory(payload() + allocationPoint, | 1523 FreeList::zapFreedMemory(payload() + allocationPoint, |
1524 payloadSize() - allocationPoint); | 1524 payloadSize() - allocationPoint); |
1525 } | 1525 } |
1526 #endif | 1526 #endif |
1527 } | 1527 } |
1528 | 1528 |
1529 void NormalPage::makeConsistentForGC() { | 1529 void NormalPage::makeConsistentForGC() { |
1530 size_t markedObjectSize = 0; | 1530 size_t markedObjectSize = 0; |
1531 for (Address headerAddress = payload(); headerAddress < payloadEnd();) { | 1531 for (Address headerAddress = payload(); headerAddress < payloadEnd();) { |
1532 HeapObjectHeader* header = | 1532 HeapObjectHeader* header = |
1533 reinterpret_cast<HeapObjectHeader*>(headerAddress); | 1533 reinterpret_cast<HeapObjectHeader*>(headerAddress); |
1534 ASSERT(header->size() < blinkPagePayloadSize()); | 1534 DCHECK_LT(header->size(), blinkPagePayloadSize()); |
1535     // Check if this is a free list entry first, since we cannot call | 1535     // Check if this is a free list entry first, since we cannot call |
1536 // isMarked on a free list entry. | 1536 // isMarked on a free list entry. |
1537 if (header->isFree()) { | 1537 if (header->isFree()) { |
1538 headerAddress += header->size(); | 1538 headerAddress += header->size(); |
1539 continue; | 1539 continue; |
1540 } | 1540 } |
1541 if (header->isMarked()) { | 1541 if (header->isMarked()) { |
1542 header->unmark(); | 1542 header->unmark(); |
1543 markedObjectSize += header->size(); | 1543 markedObjectSize += header->size(); |
1544 } else { | 1544 } else { |
1545 header->markDead(); | 1545 header->markDead(); |
1546 } | 1546 } |
1547 headerAddress += header->size(); | 1547 headerAddress += header->size(); |
1548 } | 1548 } |
1549 if (markedObjectSize) | 1549 if (markedObjectSize) |
1550 arenaForNormalPage()->getThreadState()->increaseMarkedObjectSize( | 1550 arenaForNormalPage()->getThreadState()->increaseMarkedObjectSize( |
1551 markedObjectSize); | 1551 markedObjectSize); |
1552 } | 1552 } |
1553 | 1553 |
1554 void NormalPage::makeConsistentForMutator() { | 1554 void NormalPage::makeConsistentForMutator() { |
1555 Address startOfGap = payload(); | 1555 Address startOfGap = payload(); |
1556 NormalPageArena* normalArena = arenaForNormalPage(); | 1556 NormalPageArena* normalArena = arenaForNormalPage(); |
1557 for (Address headerAddress = payload(); headerAddress < payloadEnd();) { | 1557 for (Address headerAddress = payload(); headerAddress < payloadEnd();) { |
1558 HeapObjectHeader* header = | 1558 HeapObjectHeader* header = |
1559 reinterpret_cast<HeapObjectHeader*>(headerAddress); | 1559 reinterpret_cast<HeapObjectHeader*>(headerAddress); |
1560 size_t size = header->size(); | 1560 size_t size = header->size(); |
1561 ASSERT(size < blinkPagePayloadSize()); | 1561 DCHECK_LT(size, blinkPagePayloadSize()); |
1562 if (header->isPromptlyFreed()) | 1562 if (header->isPromptlyFreed()) |
1563 arenaForNormalPage()->decreasePromptlyFreedSize(size); | 1563 arenaForNormalPage()->decreasePromptlyFreedSize(size); |
1564 if (header->isFree()) { | 1564 if (header->isFree()) { |
1565 // Zero the memory in the free list header to maintain the | 1565 // Zero the memory in the free list header to maintain the |
1566 // invariant that memory on the free list is zero filled. | 1566 // invariant that memory on the free list is zero filled. |
1567 // The rest of the memory is already on the free list and is | 1567 // The rest of the memory is already on the free list and is |
1568 // therefore already zero filled. | 1568 // therefore already zero filled. |
1569 SET_MEMORY_INACCESSIBLE(headerAddress, size < sizeof(FreeListEntry) | 1569 SET_MEMORY_INACCESSIBLE(headerAddress, size < sizeof(FreeListEntry) |
1570 ? size | 1570 ? size |
1571 : sizeof(FreeListEntry)); | 1571 : sizeof(FreeListEntry)); |
1572 CHECK_MEMORY_INACCESSIBLE(headerAddress, size); | 1572 CHECK_MEMORY_INACCESSIBLE(headerAddress, size); |
1573 headerAddress += size; | 1573 headerAddress += size; |
1574 continue; | 1574 continue; |
1575 } | 1575 } |
1576 if (startOfGap != headerAddress) | 1576 if (startOfGap != headerAddress) |
1577 normalArena->addToFreeList(startOfGap, headerAddress - startOfGap); | 1577 normalArena->addToFreeList(startOfGap, headerAddress - startOfGap); |
1578 if (header->isMarked()) | 1578 if (header->isMarked()) |
1579 header->unmark(); | 1579 header->unmark(); |
1580 headerAddress += size; | 1580 headerAddress += size; |
1581 startOfGap = headerAddress; | 1581 startOfGap = headerAddress; |
1582 ASSERT(headerAddress <= payloadEnd()); | 1582 DCHECK_LE(headerAddress, payloadEnd()); |
1583 } | 1583 } |
1584 if (startOfGap != payloadEnd()) | 1584 if (startOfGap != payloadEnd()) |
1585 normalArena->addToFreeList(startOfGap, payloadEnd() - startOfGap); | 1585 normalArena->addToFreeList(startOfGap, payloadEnd() - startOfGap); |
1586 } | 1586 } |
1587 | 1587 |
1588 #if defined(ADDRESS_SANITIZER) | 1588 #if defined(ADDRESS_SANITIZER) |
1589 void NormalPage::poisonUnmarkedObjects() { | 1589 void NormalPage::poisonUnmarkedObjects() { |
1590 for (Address headerAddress = payload(); headerAddress < payloadEnd();) { | 1590 for (Address headerAddress = payload(); headerAddress < payloadEnd();) { |
1591 HeapObjectHeader* header = | 1591 HeapObjectHeader* header = |
1592 reinterpret_cast<HeapObjectHeader*>(headerAddress); | 1592 reinterpret_cast<HeapObjectHeader*>(headerAddress); |
1593 ASSERT(header->size() < blinkPagePayloadSize()); | 1593 DCHECK_LT(header->size(), blinkPagePayloadSize()); |
1594     // Check if this is a free list entry first, since we cannot call | 1594     // Check if this is a free list entry first, since we cannot call |
1595 // isMarked on a free list entry. | 1595 // isMarked on a free list entry. |
1596 if (header->isFree()) { | 1596 if (header->isFree()) { |
1597 headerAddress += header->size(); | 1597 headerAddress += header->size(); |
1598 continue; | 1598 continue; |
1599 } | 1599 } |
1600 if (!header->isMarked()) | 1600 if (!header->isMarked()) |
1601 ASAN_POISON_MEMORY_REGION(header->payload(), header->payloadSize()); | 1601 ASAN_POISON_MEMORY_REGION(header->payload(), header->payloadSize()); |
1602 headerAddress += header->size(); | 1602 headerAddress += header->size(); |
1603 } | 1603 } |
1604 } | 1604 } |
1605 #endif | 1605 #endif |
1606 | 1606 |
1607 void NormalPage::populateObjectStartBitMap() { | 1607 void NormalPage::populateObjectStartBitMap() { |
1608 memset(&m_objectStartBitMap, 0, objectStartBitMapSize); | 1608 memset(&m_objectStartBitMap, 0, objectStartBitMapSize); |
1609 Address start = payload(); | 1609 Address start = payload(); |
1610 for (Address headerAddress = start; headerAddress < payloadEnd();) { | 1610 for (Address headerAddress = start; headerAddress < payloadEnd();) { |
1611 HeapObjectHeader* header = | 1611 HeapObjectHeader* header = |
1612 reinterpret_cast<HeapObjectHeader*>(headerAddress); | 1612 reinterpret_cast<HeapObjectHeader*>(headerAddress); |
1613 size_t objectOffset = headerAddress - start; | 1613 size_t objectOffset = headerAddress - start; |
1614 ASSERT(!(objectOffset & allocationMask)); | 1614 DCHECK(!(objectOffset & allocationMask)); |
1615 size_t objectStartNumber = objectOffset / allocationGranularity; | 1615 size_t objectStartNumber = objectOffset / allocationGranularity; |
1616 size_t mapIndex = objectStartNumber / 8; | 1616 size_t mapIndex = objectStartNumber / 8; |
1617 ASSERT(mapIndex < objectStartBitMapSize); | 1617 DCHECK_LT(mapIndex, objectStartBitMapSize); |
1618 m_objectStartBitMap[mapIndex] |= (1 << (objectStartNumber & 7)); | 1618 m_objectStartBitMap[mapIndex] |= (1 << (objectStartNumber & 7)); |
1619 headerAddress += header->size(); | 1619 headerAddress += header->size(); |
1620 ASSERT(headerAddress <= payloadEnd()); | 1620 DCHECK_LE(headerAddress, payloadEnd()); |
1621 } | 1621 } |
1622 m_objectStartBitMapComputed = true; | 1622 m_objectStartBitMapComputed = true; |
1623 } | 1623 } |
1624 | 1624 |
1625 static int numberOfLeadingZeroes(uint8_t byte) { | 1625 static int numberOfLeadingZeroes(uint8_t byte) { |
1626 if (!byte) | 1626 if (!byte) |
1627 return 8; | 1627 return 8; |
1628 int result = 0; | 1628 int result = 0; |
1629 if (byte <= 0x0F) { | 1629 if (byte <= 0x0F) { |
1630 result += 4; | 1630 result += 4; |
1631 byte = byte << 4; | 1631 byte = byte << 4; |
1632 } | 1632 } |
1633 if (byte <= 0x3F) { | 1633 if (byte <= 0x3F) { |
1634 result += 2; | 1634 result += 2; |
1635 byte = byte << 2; | 1635 byte = byte << 2; |
1636 } | 1636 } |
1637 if (byte <= 0x7F) | 1637 if (byte <= 0x7F) |
1638 result++; | 1638 result++; |
1639 return result; | 1639 return result; |
1640 } | 1640 } |
1641 | 1641 |
1642 HeapObjectHeader* NormalPage::findHeaderFromAddress(Address address) { | 1642 HeapObjectHeader* NormalPage::findHeaderFromAddress(Address address) { |
1643 if (address < payload()) | 1643 if (address < payload()) |
1644 return nullptr; | 1644 return nullptr; |
1645 if (!m_objectStartBitMapComputed) | 1645 if (!m_objectStartBitMapComputed) |
1646 populateObjectStartBitMap(); | 1646 populateObjectStartBitMap(); |
1647 size_t objectOffset = address - payload(); | 1647 size_t objectOffset = address - payload(); |
1648 size_t objectStartNumber = objectOffset / allocationGranularity; | 1648 size_t objectStartNumber = objectOffset / allocationGranularity; |
1649 size_t mapIndex = objectStartNumber / 8; | 1649 size_t mapIndex = objectStartNumber / 8; |
1650 ASSERT(mapIndex < objectStartBitMapSize); | 1650 DCHECK_LT(mapIndex, objectStartBitMapSize); |
1651 size_t bit = objectStartNumber & 7; | 1651 size_t bit = objectStartNumber & 7; |
1652 uint8_t byte = m_objectStartBitMap[mapIndex] & ((1 << (bit + 1)) - 1); | 1652 uint8_t byte = m_objectStartBitMap[mapIndex] & ((1 << (bit + 1)) - 1); |
1653 while (!byte) { | 1653 while (!byte) { |
1654 ASSERT(mapIndex > 0); | 1654 DCHECK_GT(mapIndex, 0UL); |
1655 byte = m_objectStartBitMap[--mapIndex]; | 1655 byte = m_objectStartBitMap[--mapIndex]; |
1656 } | 1656 } |
1657 int leadingZeroes = numberOfLeadingZeroes(byte); | 1657 int leadingZeroes = numberOfLeadingZeroes(byte); |
1658 objectStartNumber = (mapIndex * 8) + 7 - leadingZeroes; | 1658 objectStartNumber = (mapIndex * 8) + 7 - leadingZeroes; |
1659 objectOffset = objectStartNumber * allocationGranularity; | 1659 objectOffset = objectStartNumber * allocationGranularity; |
1660 Address objectAddress = objectOffset + payload(); | 1660 Address objectAddress = objectOffset + payload(); |
1661 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(objectAddress); | 1661 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(objectAddress); |
1662 if (header->isFree()) | 1662 if (header->isFree()) |
1663 return nullptr; | 1663 return nullptr; |
1664 ASSERT(header->checkHeader()); | 1664 DCHECK(header->checkHeader()); |
1665 return header; | 1665 return header; |
1666 } | 1666 } |
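The lookup above relies on the object start bitmap: one bit per allocation granule, set where an object's header begins. An interior address is mapped to its granule, bits above it in the same bitmap byte are masked off, and the scan walks backwards to the nearest set bit, whose position gives the object's start offset. A standalone sketch of that index math (assuming an 8-byte granularity; names are illustrative, not Blink API):

#include <cstddef>
#include <cstdint>

constexpr size_t kGranularity = 8;  // assumed allocationGranularity

// Given the byte offset of an interior pointer from the page payload and the
// page's object start bitmap, return the byte offset of the enclosing
// object's start. Assumes at least one start bit is set at or below the
// queried granule.
size_t findObjectStartOffset(const uint8_t* startBitMap, size_t byteOffset) {
  size_t granule = byteOffset / kGranularity;
  size_t mapIndex = granule / 8;
  size_t bit = granule & 7;
  // Keep only start bits at or below the queried granule within this byte.
  uint8_t byte = startBitMap[mapIndex] & ((1u << (bit + 1)) - 1);
  while (!byte)  // walk backwards to the nearest byte containing a start bit
    byte = startBitMap[--mapIndex];
  int highest = 7;  // the highest set bit marks the closest preceding start
  while (!(byte & (1u << highest)))
    --highest;
  return (mapIndex * 8 + highest) * kGranularity;
}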
1667 | 1667 |
1668 #if ENABLE(ASSERT) | 1668 #if DCHECK_IS_ON() |
1669 static bool isUninitializedMemory(void* objectPointer, size_t objectSize) { | 1669 static bool isUninitializedMemory(void* objectPointer, size_t objectSize) { |
1670 // Scan through the object's fields and check that they are all zero. | 1670 // Scan through the object's fields and check that they are all zero. |
1671 Address* objectFields = reinterpret_cast<Address*>(objectPointer); | 1671 Address* objectFields = reinterpret_cast<Address*>(objectPointer); |
1672 for (size_t i = 0; i < objectSize / sizeof(Address); ++i) { | 1672 for (size_t i = 0; i < objectSize / sizeof(Address); ++i) { |
1673 if (objectFields[i] != 0) | 1673 if (objectFields[i] != 0) |
1674 return false; | 1674 return false; |
1675 } | 1675 } |
1676 return true; | 1676 return true; |
1677 } | 1677 } |
1678 #endif | 1678 #endif |
1679 | 1679 |
1680 static void markPointer(Visitor* visitor, HeapObjectHeader* header) { | 1680 static void markPointer(Visitor* visitor, HeapObjectHeader* header) { |
1681 ASSERT(header->checkHeader()); | 1681 DCHECK(header->checkHeader()); |
1682 const GCInfo* gcInfo = ThreadHeap::gcInfo(header->gcInfoIndex()); | 1682 const GCInfo* gcInfo = ThreadHeap::gcInfo(header->gcInfoIndex()); |
1683 if (gcInfo->hasVTable() && !vTableInitialized(header->payload())) { | 1683 if (gcInfo->hasVTable() && !vTableInitialized(header->payload())) { |
1684 // We hit this branch when a GC strikes before GarbageCollected<>'s | 1684 // We hit this branch when a GC strikes before GarbageCollected<>'s |
1685 // constructor runs. | 1685 // constructor runs. |
1686 // | 1686 // |
1687 // class A : public GarbageCollected<A> { virtual void f() = 0; }; | 1687 // class A : public GarbageCollected<A> { virtual void f() = 0; }; |
1688 // class B : public A { | 1688 // class B : public A { |
1689 // B() : A(foo()) { }; | 1689 // B() : A(foo()) { }; |
1690 // }; | 1690 // }; |
1691 // | 1691 // |
1692 // If foo() allocates something and triggers a GC, the vtable of A | 1692 // If foo() allocates something and triggers a GC, the vtable of A |
1693 // has not yet been initialized. In this case, we should mark the A | 1693 // has not yet been initialized. In this case, we should mark the A |
1694 // object without tracing any member of the A object. | 1694 // object without tracing any member of the A object. |
1695 visitor->markHeaderNoTracing(header); | 1695 visitor->markHeaderNoTracing(header); |
1696 ASSERT(isUninitializedMemory(header->payload(), header->payloadSize())); | 1696 DCHECK(isUninitializedMemory(header->payload(), header->payloadSize())); |
1697 } else { | 1697 } else { |
1698 visitor->markHeader(header, gcInfo->m_trace); | 1698 visitor->markHeader(header, gcInfo->m_trace); |
1699 } | 1699 } |
1700 } | 1700 } |
1701 | 1701 |
1702 void NormalPage::checkAndMarkPointer(Visitor* visitor, Address address) { | 1702 void NormalPage::checkAndMarkPointer(Visitor* visitor, Address address) { |
1703 ASSERT(contains(address)); | 1703 DCHECK(contains(address)); |
1704 HeapObjectHeader* header = findHeaderFromAddress(address); | 1704 HeapObjectHeader* header = findHeaderFromAddress(address); |
1705 if (!header || header->isDead()) | 1705 if (!header || header->isDead()) |
1706 return; | 1706 return; |
1707 markPointer(visitor, header); | 1707 markPointer(visitor, header); |
1708 } | 1708 } |
1709 | 1709 |
1710 void NormalPage::markOrphaned() { | 1710 void NormalPage::markOrphaned() { |
1711 // Zap the payload with a recognizable value to detect any incorrect | 1711 // Zap the payload with a recognizable value to detect any incorrect |
1712 // cross thread pointer usage. | 1712 // cross thread pointer usage. |
1713 #if defined(ADDRESS_SANITIZER) | 1713 #if defined(ADDRESS_SANITIZER) |
(...skipping 42 matching lines...) |
1756 pageDump->AddScalar("live_count", "objects", liveCount); | 1756 pageDump->AddScalar("live_count", "objects", liveCount); |
1757 pageDump->AddScalar("dead_count", "objects", deadCount); | 1757 pageDump->AddScalar("dead_count", "objects", deadCount); |
1758 pageDump->AddScalar("free_count", "objects", freeCount); | 1758 pageDump->AddScalar("free_count", "objects", freeCount); |
1759 pageDump->AddScalar("live_size", "bytes", liveSize); | 1759 pageDump->AddScalar("live_size", "bytes", liveSize); |
1760 pageDump->AddScalar("dead_size", "bytes", deadSize); | 1760 pageDump->AddScalar("dead_size", "bytes", deadSize); |
1761 pageDump->AddScalar("free_size", "bytes", freeSize); | 1761 pageDump->AddScalar("free_size", "bytes", freeSize); |
1762 heapInfo.freeSize += freeSize; | 1762 heapInfo.freeSize += freeSize; |
1763 heapInfo.freeCount += freeCount; | 1763 heapInfo.freeCount += freeCount; |
1764 } | 1764 } |
1765 | 1765 |
1766 #if ENABLE(ASSERT) | 1766 #if DCHECK_IS_ON() |
1767 bool NormalPage::contains(Address addr) { | 1767 bool NormalPage::contains(Address addr) { |
1768 Address blinkPageStart = roundToBlinkPageStart(getAddress()); | 1768 Address blinkPageStart = roundToBlinkPageStart(getAddress()); |
1769 // Page is at aligned address plus guard page size. | 1769 // Page is at aligned address plus guard page size. |
1770 ASSERT(blinkPageStart == getAddress() - blinkGuardPageSize); | 1770 DCHECK_EQ(blinkPageStart, getAddress() - blinkGuardPageSize); |
1771 return blinkPageStart <= addr && addr < blinkPageStart + blinkPageSize; | 1771 return blinkPageStart <= addr && addr < blinkPageStart + blinkPageSize; |
1772 } | 1772 } |
1773 #endif | 1773 #endif |
1774 | 1774 |
1775 LargeObjectPage::LargeObjectPage(PageMemory* storage, | 1775 LargeObjectPage::LargeObjectPage(PageMemory* storage, |
1776 BaseArena* arena, | 1776 BaseArena* arena, |
1777 size_t payloadSize) | 1777 size_t payloadSize) |
1778 : BasePage(storage, arena), | 1778 : BasePage(storage, arena), |
1779 m_payloadSize(payloadSize) | 1779 m_payloadSize(payloadSize) |
1780 #if ENABLE(ASAN_CONTAINER_ANNOTATIONS) | 1780 #if ENABLE(ASAN_CONTAINER_ANNOTATIONS) |
(...skipping 39 matching lines...) |
1820 | 1820 |
1821 #if defined(ADDRESS_SANITIZER) | 1821 #if defined(ADDRESS_SANITIZER) |
1822 void LargeObjectPage::poisonUnmarkedObjects() { | 1822 void LargeObjectPage::poisonUnmarkedObjects() { |
1823 HeapObjectHeader* header = heapObjectHeader(); | 1823 HeapObjectHeader* header = heapObjectHeader(); |
1824 if (!header->isMarked()) | 1824 if (!header->isMarked()) |
1825 ASAN_POISON_MEMORY_REGION(header->payload(), header->payloadSize()); | 1825 ASAN_POISON_MEMORY_REGION(header->payload(), header->payloadSize()); |
1826 } | 1826 } |
1827 #endif | 1827 #endif |
1828 | 1828 |
1829 void LargeObjectPage::checkAndMarkPointer(Visitor* visitor, Address address) { | 1829 void LargeObjectPage::checkAndMarkPointer(Visitor* visitor, Address address) { |
1830 ASSERT(contains(address)); | 1830 DCHECK(contains(address)); |
1831 if (!containedInObjectPayload(address) || heapObjectHeader()->isDead()) | 1831 if (!containedInObjectPayload(address) || heapObjectHeader()->isDead()) |
1832 return; | 1832 return; |
1833 markPointer(visitor, heapObjectHeader()); | 1833 markPointer(visitor, heapObjectHeader()); |
1834 } | 1834 } |
1835 | 1835 |
1836 void LargeObjectPage::markOrphaned() { | 1836 void LargeObjectPage::markOrphaned() { |
1837 // Zap the payload with a recognizable value to detect any incorrect | 1837 // Zap the payload with a recognizable value to detect any incorrect |
1838 // cross thread pointer usage. | 1838 // cross thread pointer usage. |
1839 OrphanedPagePool::asanDisabledMemset( | 1839 OrphanedPagePool::asanDisabledMemset( |
1840 payload(), OrphanedPagePool::orphanedZapValue, payloadSize()); | 1840 payload(), OrphanedPagePool::orphanedZapValue, payloadSize()); |
(...skipping 22 matching lines...) |
1863 info.deadCount[gcInfoIndex]++; | 1863 info.deadCount[gcInfoIndex]++; |
1864 info.deadSize[gcInfoIndex] += payloadSize; | 1864 info.deadSize[gcInfoIndex] += payloadSize; |
1865 } | 1865 } |
1866 | 1866 |
1867 pageDump->AddScalar("live_count", "objects", liveCount); | 1867 pageDump->AddScalar("live_count", "objects", liveCount); |
1868 pageDump->AddScalar("dead_count", "objects", deadCount); | 1868 pageDump->AddScalar("dead_count", "objects", deadCount); |
1869 pageDump->AddScalar("live_size", "bytes", liveSize); | 1869 pageDump->AddScalar("live_size", "bytes", liveSize); |
1870 pageDump->AddScalar("dead_size", "bytes", deadSize); | 1870 pageDump->AddScalar("dead_size", "bytes", deadSize); |
1871 } | 1871 } |
1872 | 1872 |
1873 #if ENABLE(ASSERT) | 1873 #if DCHECK_IS_ON() |
1874 bool LargeObjectPage::contains(Address object) { | 1874 bool LargeObjectPage::contains(Address object) { |
1875 return roundToBlinkPageStart(getAddress()) <= object && | 1875 return roundToBlinkPageStart(getAddress()) <= object && |
1876 object < roundToBlinkPageEnd(getAddress() + size()); | 1876 object < roundToBlinkPageEnd(getAddress() + size()); |
1877 } | 1877 } |
1878 #endif | 1878 #endif |
1879 | 1879 |
1880 void HeapDoesNotContainCache::flush() { | 1880 void HeapDoesNotContainCache::flush() { |
1881 if (m_hasEntries) { | 1881 if (m_hasEntries) { |
1882 for (int i = 0; i < numberOfEntries; ++i) | 1882 for (int i = 0; i < numberOfEntries; ++i) |
1883 m_entries[i] = nullptr; | 1883 m_entries[i] = nullptr; |
1884 m_hasEntries = false; | 1884 m_hasEntries = false; |
1885 } | 1885 } |
1886 } | 1886 } |
1887 | 1887 |
1888 size_t HeapDoesNotContainCache::hash(Address address) { | 1888 size_t HeapDoesNotContainCache::hash(Address address) { |
1889 size_t value = (reinterpret_cast<size_t>(address) >> blinkPageSizeLog2); | 1889 size_t value = (reinterpret_cast<size_t>(address) >> blinkPageSizeLog2); |
1890 value ^= value >> numberOfEntriesLog2; | 1890 value ^= value >> numberOfEntriesLog2; |
1891 value ^= value >> (numberOfEntriesLog2 * 2); | 1891 value ^= value >> (numberOfEntriesLog2 * 2); |
1892 value &= numberOfEntries - 1; | 1892 value &= numberOfEntries - 1; |
1893   return value & ~1;  // Returns only even numbers. | 1893   return value & ~1;  // Returns only even numbers. |
1894 } | 1894 } |
1895 | 1895 |
1896 bool HeapDoesNotContainCache::lookup(Address address) { | 1896 bool HeapDoesNotContainCache::lookup(Address address) { |
1897 ASSERT(ThreadState::current()->isInGC()); | 1897 DCHECK(ThreadState::current()->isInGC()); |
1898 | 1898 |
1899 size_t index = hash(address); | 1899 size_t index = hash(address); |
1900 ASSERT(!(index & 1)); | 1900 DCHECK(!(index & 1)); |
1901 Address cachePage = roundToBlinkPageStart(address); | 1901 Address cachePage = roundToBlinkPageStart(address); |
1902 if (m_entries[index] == cachePage) | 1902 if (m_entries[index] == cachePage) |
1903 return m_entries[index]; | 1903 return m_entries[index]; |
1904 if (m_entries[index + 1] == cachePage) | 1904 if (m_entries[index + 1] == cachePage) |
1905 return m_entries[index + 1]; | 1905 return m_entries[index + 1]; |
1906 return false; | 1906 return false; |
1907 } | 1907 } |
1908 | 1908 |
1909 void HeapDoesNotContainCache::addEntry(Address address) { | 1909 void HeapDoesNotContainCache::addEntry(Address address) { |
1910 ASSERT(ThreadState::current()->isInGC()); | 1910 DCHECK(ThreadState::current()->isInGC()); |
1911 | 1911 |
1912 m_hasEntries = true; | 1912 m_hasEntries = true; |
1913 size_t index = hash(address); | 1913 size_t index = hash(address); |
1914 ASSERT(!(index & 1)); | 1914 DCHECK(!(index & 1)); |
1915 Address cachePage = roundToBlinkPageStart(address); | 1915 Address cachePage = roundToBlinkPageStart(address); |
1916 m_entries[index + 1] = m_entries[index]; | 1916 m_entries[index + 1] = m_entries[index]; |
1917 m_entries[index] = cachePage; | 1917 m_entries[index] = cachePage; |
1918 } | 1918 } |
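HeapDoesNotContainCache behaves like a small two-way set-associative negative cache: hash() always returns the even slot of a pair, lookup() probes both slots, and addEntry() demotes the previous occupant to the odd slot before installing the new page. A self-contained sketch of that shape (sizes and names are assumptions, not the Blink constants; callers pass the blink-page-aligned address):

#include <cstddef>
#include <cstdint>

constexpr size_t kEntriesLog2 = 12;   // assumed numberOfEntriesLog2
constexpr size_t kEntries = 1 << kEntriesLog2;
constexpr size_t kPageSizeLog2 = 17;  // assumed blinkPageSizeLog2

struct NegativeCacheSketch {
  uintptr_t entries[kEntries] = {};

  static size_t slot(uintptr_t page) {
    size_t value = page >> kPageSizeLog2;
    value ^= value >> kEntriesLog2;
    value ^= value >> (kEntriesLog2 * 2);
    value &= kEntries - 1;
    return value & ~static_cast<size_t>(1);  // even slot of the pair
  }

  bool lookup(uintptr_t page) const {
    size_t index = slot(page);
    return entries[index] == page || entries[index + 1] == page;
  }

  void add(uintptr_t page) {
    size_t index = slot(page);
    entries[index + 1] = entries[index];  // keep the previous entry one round
    entries[index] = page;
  }
};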
1919 | 1919 |
1920 } // namespace blink | 1920 } // namespace blink |