| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 14 matching lines...) |
| 25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | 25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| 26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 29 */ | 29 */ |
| 30 | 30 |
| 31 #include "config.h" | 31 #include "config.h" |
| 32 #include "platform/heap/Heap.h" | 32 #include "platform/heap/Heap.h" |
| 33 | 33 |
| 34 #include "platform/ScriptForbiddenScope.h" | 34 #include "platform/ScriptForbiddenScope.h" |
| 35 #include "platform/Task.h" | |
| 36 #include "platform/TraceEvent.h" | 35 #include "platform/TraceEvent.h" |
| 37 #include "platform/heap/BlinkGCMemoryDumpProvider.h" | 36 #include "platform/heap/BlinkGCMemoryDumpProvider.h" |
| 38 #include "platform/heap/CallbackStack.h" | 37 #include "platform/heap/CallbackStack.h" |
| 39 #include "platform/heap/MarkingVisitor.h" | 38 #include "platform/heap/MarkingVisitor.h" |
| 40 #include "platform/heap/PageMemory.h" | 39 #include "platform/heap/PageMemory.h" |
| 41 #include "platform/heap/PagePool.h" | 40 #include "platform/heap/PagePool.h" |
| 42 #include "platform/heap/SafePoint.h" | 41 #include "platform/heap/SafePoint.h" |
| 43 #include "platform/heap/ThreadState.h" | 42 #include "platform/heap/ThreadState.h" |
| 44 #include "public/platform/Platform.h" | 43 #include "public/platform/Platform.h" |
| 45 #include "public/platform/WebMemoryAllocatorDump.h" | 44 #include "public/platform/WebMemoryAllocatorDump.h" |
| 46 #include "public/platform/WebProcessMemoryDump.h" | 45 #include "public/platform/WebProcessMemoryDump.h" |
| 47 #include "wtf/Assertions.h" | 46 #include "wtf/Assertions.h" |
| 48 #include "wtf/ContainerAnnotations.h" | |
| 49 #include "wtf/LeakAnnotations.h" | 47 #include "wtf/LeakAnnotations.h" |
| 50 #include "wtf/MainThread.h" | 48 #include "wtf/MainThread.h" |
| 51 #include "wtf/PageAllocator.h" | |
| 52 #include "wtf/Partitions.h" | 49 #include "wtf/Partitions.h" |
| 53 #include "wtf/PassOwnPtr.h" | 50 #include "wtf/PassOwnPtr.h" |
| 54 #if ENABLE(GC_PROFILING) | 51 #if ENABLE(GC_PROFILING) |
| 55 #include "platform/TracedValue.h" | 52 #include "platform/TracedValue.h" |
| 56 #include "wtf/HashMap.h" | 53 #include "wtf/HashMap.h" |
| 57 #include "wtf/HashSet.h" | 54 #include "wtf/HashSet.h" |
| 58 #include "wtf/text/StringBuilder.h" | 55 #include "wtf/text/StringBuilder.h" |
| 59 #include "wtf/text/StringHash.h" | 56 #include "wtf/text/StringHash.h" |
| 60 #include <stdio.h> | 57 #include <stdio.h> |
| 61 #include <utility> | 58 #include <utility> |
| 62 #endif | 59 #endif |
| 63 | 60 |
| 64 #if OS(POSIX) | |
| 65 #include <sys/mman.h> | |
| 66 #include <unistd.h> | |
| 67 #elif OS(WIN) | |
| 68 #include <windows.h> | |
| 69 #endif | |
| 70 | |
| 71 #ifdef ANNOTATE_CONTIGUOUS_CONTAINER | |
| 72 // FIXME: have ContainerAnnotations.h define an ENABLE_-style name instead. | |
| 73 #define ENABLE_ASAN_CONTAINER_ANNOTATIONS 1 | |
| 74 | |
| 75 // When finalizing a non-inlined vector backing store/container, remove | |
| 76 // its contiguous container annotation. Required as it will not be destructed | |
| 77 // from its Vector. | |
| 78 #define ASAN_RETIRE_CONTAINER_ANNOTATION(object, objectSize) \ | |
| 79     do { \ | |
| 80         BasePage* page = pageFromObject(object); \ | |
| 81         ASSERT(page); \ | |
| 82         bool isContainer = ThreadState::isVectorHeapIndex(page->heap()->heapIndex()); \ | |
| 83         if (!isContainer && page->isLargeObjectPage()) \ | |
| 84             isContainer = static_cast<LargeObjectPage*>(page)->isVectorBackingPage(); \ | |
| 85         if (isContainer) \ | |
| 86             ANNOTATE_DELETE_BUFFER(object, objectSize, 0); \ | |
| 87 } while (0) | |
| 88 | |
| 89 // A vector backing store represented by a large object is marked | |
| 90 // so that when it is finalized, its ASan annotation will be | |
| 91 // correctly retired. | |
| 92 #define ASAN_MARK_LARGE_VECTOR_CONTAINER(heap, largeObject) \ | |
| 93 if (ThreadState::isVectorHeapIndex(heap->heapIndex())) { \ | |
| 94 BasePage* largePage = pageFromObject(largeObject); \ | |
| 95 ASSERT(largePage->isLargeObjectPage()); \ | |
| 96 static_cast<LargeObjectPage*>(largePage)->setIsVectorBackingPage(); \ | |
| 97 } | |
| 98 #else | |
| 99 #define ENABLE_ASAN_CONTAINER_ANNOTATIONS 0 | |
| 100 #define ASAN_RETIRE_CONTAINER_ANNOTATION(payload, payloadSize) | |
| 101 #define ASAN_MARK_LARGE_VECTOR_CONTAINER(heap, largeObject) | |
| 102 #endif | |
| 103 | |
| 104 namespace blink { | 61 namespace blink { |
| 105 | 62 |
| 106 #if ENABLE(GC_PROFILING) | |
| 107 static String classOf(const void* object) | |
| 108 { | |
| 109     if (const GCInfo* gcInfo = Heap::findGCInfo(reinterpret_cast<Address>(const_cast<void*>(object)))) | |
| 110 return gcInfo->m_className; | |
| 111 return "unknown"; | |
| 112 } | |
| 113 #endif | |
| 114 | |
| 115 class GCForbiddenScope final { | 63 class GCForbiddenScope final { |
| 116 public: | 64 public: |
| 117 explicit GCForbiddenScope(ThreadState* state) | 65 explicit GCForbiddenScope(ThreadState* state) |
| 118 : m_state(state) | 66 : m_state(state) |
| 119 { | 67 { |
| 120 // Prevent nested collectGarbage() invocations. | 68 // Prevent nested collectGarbage() invocations. |
| 121 m_state->enterGCForbiddenScope(); | 69 m_state->enterGCForbiddenScope(); |
| 122 } | 70 } |
| 123 | 71 |
| 124 ~GCForbiddenScope() | 72 ~GCForbiddenScope() |
| (...skipping 65 matching lines...) |
| 190 // before entering the safe point scope. Prior to reaching the safe point, | 138 // before entering the safe point scope. Prior to reaching the safe point, |
| 191 // ThreadState::runScheduledGC() is called. See its comment why we need | 139 // ThreadState::runScheduledGC() is called. See its comment why we need |
| 192 // to be in a GC forbidden scope when doing so. | 140 // to be in a GC forbidden scope when doing so. |
| 193 GCForbiddenScope m_gcForbiddenScope; | 141 GCForbiddenScope m_gcForbiddenScope; |
| 194 SafePointScope m_safePointScope; | 142 SafePointScope m_safePointScope; |
| 195 ThreadState::GCType m_gcType; | 143 ThreadState::GCType m_gcType; |
| 196 OwnPtr<Visitor> m_visitor; | 144 OwnPtr<Visitor> m_visitor; |
| 197 bool m_parkedAllThreads; // False if we fail to park all threads | 145 bool m_parkedAllThreads; // False if we fail to park all threads |
| 198 }; | 146 }; |
| 199 | 147 |
| 200 #if ENABLE(ASSERT) | |
| 201 NO_SANITIZE_ADDRESS | |
| 202 void HeapObjectHeader::zapMagic() | |
| 203 { | |
| 204 ASSERT(checkHeader()); | |
| 205 m_magic = zappedMagic; | |
| 206 } | |
| 207 #endif | |
| 208 | |
| 209 void HeapObjectHeader::finalize(Address object, size_t objectSize) | |
| 210 { | |
| 211 const GCInfo* gcInfo = Heap::gcInfo(gcInfoIndex()); | |
| 212 if (gcInfo->hasFinalizer()) | |
| 213 gcInfo->m_finalize(object); | |
| 214 | |
| 215 ASAN_RETIRE_CONTAINER_ANNOTATION(object, objectSize); | |
| 216 } | |
| 217 | |
| 218 BaseHeap::BaseHeap(ThreadState* state, int index) | |
| 219 : m_firstPage(nullptr) | |
| 220 , m_firstUnsweptPage(nullptr) | |
| 221 , m_threadState(state) | |
| 222 , m_index(index) | |
| 223 { | |
| 224 } | |
| 225 | |
| 226 BaseHeap::~BaseHeap() | |
| 227 { | |
| 228 ASSERT(!m_firstPage); | |
| 229 ASSERT(!m_firstUnsweptPage); | |
| 230 } | |
| 231 | |
| 232 void BaseHeap::cleanupPages() | |
| 233 { | |
| 234 clearFreeLists(); | |
| 235 | |
| 236 ASSERT(!m_firstUnsweptPage); | |
| 237 // Add the BaseHeap's pages to the orphanedPagePool. | |
| 238 for (BasePage* page = m_firstPage; page; page = page->next()) { | |
| 239 Heap::decreaseAllocatedSpace(page->size()); | |
| 240 Heap::orphanedPagePool()->addOrphanedPage(heapIndex(), page); | |
| 241 } | |
| 242 m_firstPage = nullptr; | |
| 243 } | |
| 244 | |
| 245 void BaseHeap::takeSnapshot(const String& dumpBaseName, ThreadState::GCSnapshotInfo& info) | |
| 246 { | |
| 247 // |dumpBaseName| at this point is "blink_gc/thread_X/heaps/HeapName" | |
| 248     WebMemoryAllocatorDump* allocatorDump = BlinkGCMemoryDumpProvider::instance()->createMemoryAllocatorDumpForCurrentGC(dumpBaseName); | |
| 249 size_t pageIndex = 0; | |
| 250 size_t heapTotalFreeSize = 0; | |
| 251 size_t heapTotalFreeCount = 0; | |
| 252 for (BasePage* page = m_firstUnsweptPage; page; page = page->next()) { | |
| 253 size_t heapPageFreeSize = 0; | |
| 254 size_t heapPageFreeCount = 0; | |
| 255         page->takeSnapshot(dumpBaseName, pageIndex, info, &heapPageFreeSize, &heapPageFreeCount); | |
| 256 heapTotalFreeSize += heapPageFreeSize; | |
| 257 heapTotalFreeCount += heapPageFreeCount; | |
| 258 pageIndex++; | |
| 259 } | |
| 260 allocatorDump->AddScalar("blink_page_count", "objects", pageIndex); | |
| 261 | |
| 262 // When taking a full dump (w/ freelist), both the /buckets and /pages | |
| 263 // report their free size but they are not meant to be added together. | |
| 264 // Therefore, here we override the free_size of the parent heap to be | |
| 265 // equal to the free_size of the sum of its heap pages. | |
| 266 allocatorDump->AddScalar("free_size", "bytes", heapTotalFreeSize); | |
| 267 allocatorDump->AddScalar("free_count", "objects", heapTotalFreeCount); | |
| 268 } | |
| 269 | |
| 270 #if ENABLE(ASSERT) || ENABLE(GC_PROFILING) | |
| 271 BasePage* BaseHeap::findPageFromAddress(Address address) | |
| 272 { | |
| 273 for (BasePage* page = m_firstPage; page; page = page->next()) { | |
| 274 if (page->contains(address)) | |
| 275 return page; | |
| 276 } | |
| 277 for (BasePage* page = m_firstUnsweptPage; page; page = page->next()) { | |
| 278 if (page->contains(address)) | |
| 279 return page; | |
| 280 } | |
| 281 return nullptr; | |
| 282 } | |
| 283 #endif | |
| 284 | |
| 285 #if ENABLE(GC_PROFILING) | |
| 286 #define GC_PROFILE_HEAP_PAGE_SNAPSHOT_THRESHOLD 0 | |
| 287 void BaseHeap::snapshot(TracedValue* json, ThreadState::SnapshotInfo* info) | |
| 288 { | |
| 289 ASSERT(isConsistentForGC()); | |
| 290 size_t previousPageCount = info->pageCount; | |
| 291 | |
| 292 json->beginArray("pages"); | |
| 293     for (BasePage* page = m_firstPage; page; page = page->next(), ++info->pageCount) { | |
| 294         // FIXME: To limit the size of the snapshot we only output "threshold" many page snapshots. | |
| 295 if (info->pageCount < GC_PROFILE_HEAP_PAGE_SNAPSHOT_THRESHOLD) { | |
| 296 json->beginArray(); | |
| 297 json->pushInteger(reinterpret_cast<intptr_t>(page)); | |
| 298 page->snapshot(json, info); | |
| 299 json->endArray(); | |
| 300 } else { | |
| 301 page->snapshot(nullptr, info); | |
| 302 } | |
| 303 } | |
| 304 json->endArray(); | |
| 305 | |
| 306 json->setInteger("pageCount", info->pageCount - previousPageCount); | |
| 307 } | |
| 308 | |
| 309 void BaseHeap::countMarkedObjects(ClassAgeCountsMap& classAgeCounts) const | |
| 310 { | |
| 311 for (BasePage* page = m_firstPage; page; page = page->next()) | |
| 312 page->countMarkedObjects(classAgeCounts); | |
| 313 } | |
| 314 | |
| 315 void BaseHeap::countObjectsToSweep(ClassAgeCountsMap& classAgeCounts) const | |
| 316 { | |
| 317 for (BasePage* page = m_firstPage; page; page = page->next()) | |
| 318 page->countObjectsToSweep(classAgeCounts); | |
| 319 } | |
| 320 | |
| 321 void BaseHeap::incrementMarkedObjectsAge() | |
| 322 { | |
| 323 for (BasePage* page = m_firstPage; page; page = page->next()) | |
| 324 page->incrementMarkedObjectsAge(); | |
| 325 } | |
| 326 #endif | |
| 327 | |
| 328 void BaseHeap::makeConsistentForGC() | |
| 329 { | |
| 330 clearFreeLists(); | |
| 331 ASSERT(isConsistentForGC()); | |
| 332 for (BasePage* page = m_firstPage; page; page = page->next()) | |
| 333 page->markAsUnswept(); | |
| 334 | |
| 335 // If a new GC is requested before this thread got around to sweep, | |
| 336 // ie. due to the thread doing a long running operation, we clear | |
| 337 // the mark bits and mark any of the dead objects as dead. The latter | |
| 338 // is used to ensure the next GC marking does not trace already dead | |
| 339 // objects. If we trace a dead object we could end up tracing into | |
| 340 // garbage or the middle of another object via the newly conservatively | |
| 341 // found object. | |
| 342 BasePage* previousPage = nullptr; | |
| 343     for (BasePage* page = m_firstUnsweptPage; page; previousPage = page, page = page->next()) { | |
| 344 page->makeConsistentForGC(); | |
| 345 ASSERT(!page->hasBeenSwept()); | |
| 346 } | |
| 347 if (previousPage) { | |
| 348 ASSERT(m_firstUnsweptPage); | |
| 349 previousPage->m_next = m_firstPage; | |
| 350 m_firstPage = m_firstUnsweptPage; | |
| 351 m_firstUnsweptPage = nullptr; | |
| 352 } | |
| 353 ASSERT(!m_firstUnsweptPage); | |
| 354 } | |
| 355 | |
| 356 void BaseHeap::makeConsistentForMutator() | |
| 357 { | |
| 358 clearFreeLists(); | |
| 359 ASSERT(isConsistentForGC()); | |
| 360 ASSERT(!m_firstPage); | |
| 361 | |
| 362 // Drop marks from marked objects and rebuild free lists in preparation for | |
| 363 // resuming the executions of mutators. | |
| 364 BasePage* previousPage = nullptr; | |
| 365     for (BasePage* page = m_firstUnsweptPage; page; previousPage = page, page = page->next()) { | |
| 366 page->makeConsistentForMutator(); | |
| 367 page->markAsSwept(); | |
| 368 } | |
| 369 if (previousPage) { | |
| 370 ASSERT(m_firstUnsweptPage); | |
| 371 previousPage->m_next = m_firstPage; | |
| 372 m_firstPage = m_firstUnsweptPage; | |
| 373 m_firstUnsweptPage = nullptr; | |
| 374 } | |
| 375 ASSERT(!m_firstUnsweptPage); | |
| 376 } | |
| 377 | |
| 378 size_t BaseHeap::objectPayloadSizeForTesting() | |
| 379 { | |
| 380 ASSERT(isConsistentForGC()); | |
| 381 ASSERT(!m_firstUnsweptPage); | |
| 382 | |
| 383 size_t objectPayloadSize = 0; | |
| 384 for (BasePage* page = m_firstPage; page; page = page->next()) | |
| 385 objectPayloadSize += page->objectPayloadSizeForTesting(); | |
| 386 return objectPayloadSize; | |
| 387 } | |
| 388 | |
| 389 void BaseHeap::prepareHeapForTermination() | |
| 390 { | |
| 391 ASSERT(!m_firstUnsweptPage); | |
| 392 for (BasePage* page = m_firstPage; page; page = page->next()) { | |
| 393 page->setTerminating(); | |
| 394 } | |
| 395 } | |
| 396 | |
| 397 void BaseHeap::prepareForSweep() | |
| 398 { | |
| 399 ASSERT(threadState()->isInGC()); | |
| 400 ASSERT(!m_firstUnsweptPage); | |
| 401 | |
| 402 // Move all pages to a list of unswept pages. | |
| 403 m_firstUnsweptPage = m_firstPage; | |
| 404 m_firstPage = nullptr; | |
| 405 } | |
| 406 | |
| 407 #if defined(ADDRESS_SANITIZER) | |
| 408 void BaseHeap::poisonHeap(ThreadState::ObjectsToPoison objectsToPoison, ThreadState::Poisoning poisoning) | |
| 409 { | |
| 410 // TODO(sof): support complete poisoning of all heaps. | |
| 411     ASSERT(objectsToPoison != ThreadState::MarkedAndUnmarked || heapIndex() == ThreadState::EagerSweepHeapIndex); | |
| 412 | |
| 413 // This method may either be called to poison (SetPoison) heap | |
| 414 // object payloads prior to sweeping, or it may be called at | |
| 415 // the completion of a sweep to unpoison (ClearPoison) the | |
| 416 // objects remaining in the heap. Those will all be live and unmarked. | |
| 417 // | |
| 418 // Poisoning may be limited to unmarked objects only, or apply to all. | |
| 419 if (poisoning == ThreadState::SetPoison) { | |
| 420 for (BasePage* page = m_firstUnsweptPage; page; page = page->next()) | |
| 421 page->poisonObjects(objectsToPoison, poisoning); | |
| 422 return; | |
| 423 } | |
| 424 // Support clearing of poisoning after sweeping has completed, | |
| 425 // in which case the pages of the live objects are reachable | |
| 426 // via m_firstPage. | |
| 427 ASSERT(!m_firstUnsweptPage); | |
| 428 for (BasePage* page = m_firstPage; page; page = page->next()) | |
| 429 page->poisonObjects(objectsToPoison, poisoning); | |
| 430 } | |
| 431 #endif | |
| 432 | |
| 433 Address BaseHeap::lazySweep(size_t allocationSize, size_t gcInfoIndex) | |
| 434 { | |
| 435 // If there are no pages to be swept, return immediately. | |
| 436 if (!m_firstUnsweptPage) | |
| 437 return nullptr; | |
| 438 | |
| 439 RELEASE_ASSERT(threadState()->isSweepingInProgress()); | |
| 440 | |
| 441 // lazySweepPages() can be called recursively if finalizers invoked in | |
| 442 // page->sweep() allocate memory and the allocation triggers | |
| 443 // lazySweepPages(). This check prevents the sweeping from being executed | |
| 444 // recursively. | |
| 445 if (threadState()->sweepForbidden()) | |
| 446 return nullptr; | |
| 447 | |
| 448 TRACE_EVENT0("blink_gc", "BaseHeap::lazySweepPages"); | |
| 449 ThreadState::SweepForbiddenScope scope(threadState()); | |
| 450 | |
| 451 if (threadState()->isMainThread()) | |
| 452 ScriptForbiddenScope::enter(); | |
| 453 | |
| 454 Address result = lazySweepPages(allocationSize, gcInfoIndex); | |
| 455 | |
| 456 if (threadState()->isMainThread()) | |
| 457 ScriptForbiddenScope::exit(); | |
| 458 | |
| 459 Heap::reportMemoryUsageForTracing(); | |
| 460 | |
| 461 return result; | |
| 462 } | |
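lazySweep() above guards against re-entrancy: finalizers that run while sweeping may allocate, and that allocation must not start another sweep. A minimal standalone sketch of the same guard pattern; the names (SweeperState, SweepForbiddenScopeSketch) are illustrative, not Blink API.

```cpp
#include <cassert>

struct SweeperState {
    bool sweepForbidden = false; // set for the duration of a sweep
};

class SweepForbiddenScopeSketch {
public:
    explicit SweepForbiddenScopeSketch(SweeperState& state) : m_state(state)
    {
        assert(!m_state.sweepForbidden);
        m_state.sweepForbidden = true;
    }
    ~SweepForbiddenScopeSketch() { m_state.sweepForbidden = false; }

private:
    SweeperState& m_state;
};

void* lazySweepSketch(SweeperState& state)
{
    // A nested call coming from a finalizer bails out immediately,
    // mirroring the sweepForbidden() early return above.
    if (state.sweepForbidden)
        return nullptr;
    SweepForbiddenScopeSketch scope(state);
    // ... sweep pages here; finalizers may allocate, which re-enters ...
    return nullptr;
}
```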
| 463 | |
| 464 void BaseHeap::sweepUnsweptPage() | |
| 465 { | |
| 466 BasePage* page = m_firstUnsweptPage; | |
| 467 if (page->isEmpty()) { | |
| 468 page->unlink(&m_firstUnsweptPage); | |
| 469 page->removeFromHeap(); | |
| 470 } else { | |
| 471 // Sweep a page and move the page from m_firstUnsweptPages to | |
| 472 // m_firstPages. | |
| 473 page->sweep(); | |
| 474 page->unlink(&m_firstUnsweptPage); | |
| 475 page->link(&m_firstPage); | |
| 476 page->markAsSwept(); | |
| 477 } | |
| 478 } | |
| 479 | |
| 480 bool BaseHeap::lazySweepWithDeadline(double deadlineSeconds) | |
| 481 { | |
| 482     // It might be heavy to call Platform::current()->monotonicallyIncreasingTime() | |
| 483 // per page (i.e., 128 KB sweep or one LargeObject sweep), so we check | |
| 484 // the deadline per 10 pages. | |
| 485 static const int deadlineCheckInterval = 10; | |
| 486 | |
| 487 RELEASE_ASSERT(threadState()->isSweepingInProgress()); | |
| 488 ASSERT(threadState()->sweepForbidden()); | |
| 489     ASSERT(!threadState()->isMainThread() || ScriptForbiddenScope::isScriptForbidden()); | |
| 490 | |
| 491 int pageCount = 1; | |
| 492 while (m_firstUnsweptPage) { | |
| 493 sweepUnsweptPage(); | |
| 494 if (pageCount % deadlineCheckInterval == 0) { | |
| 495             if (deadlineSeconds <= Platform::current()->monotonicallyIncreasingTime()) { | |
| 496 // Deadline has come. | |
| 497 Heap::reportMemoryUsageForTracing(); | |
| 498 return !m_firstUnsweptPage; | |
| 499 } | |
| 500 } | |
| 501 pageCount++; | |
| 502 } | |
| 503 Heap::reportMemoryUsageForTracing(); | |
| 504 return true; | |
| 505 } | |
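The loop above reads the clock only every deadlineCheckInterval pages, so the time query does not dominate the cost of sweeping small pages. A self-contained sketch of the same pattern using std::chrono; the container and function names are illustrative, not Blink types.

```cpp
#include <chrono>
#include <deque>

// Returns true if all pages were swept before the deadline.
bool sweepWithDeadlineSketch(std::deque<int>& unsweptPages, double deadlineSeconds)
{
    static const int deadlineCheckInterval = 10;
    auto nowSeconds = [] {
        using namespace std::chrono;
        return duration<double>(steady_clock::now().time_since_epoch()).count();
    };

    int pageCount = 1;
    while (!unsweptPages.empty()) {
        unsweptPages.pop_front(); // stands in for sweepUnsweptPage()
        if (pageCount % deadlineCheckInterval == 0
            && nowSeconds() >= deadlineSeconds) {
            return unsweptPages.empty(); // out of time; report progress so far
        }
        pageCount++;
    }
    return true;
}
```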
| 506 | |
| 507 void BaseHeap::completeSweep() | |
| 508 { | |
| 509 RELEASE_ASSERT(threadState()->isSweepingInProgress()); | |
| 510 ASSERT(threadState()->sweepForbidden()); | |
| 511     ASSERT(!threadState()->isMainThread() || ScriptForbiddenScope::isScriptForbidden()); | |
| 512 | |
| 513 while (m_firstUnsweptPage) { | |
| 514 sweepUnsweptPage(); | |
| 515 } | |
| 516 | |
| 517 Heap::reportMemoryUsageForTracing(); | |
| 518 } | |
| 519 | |
| 520 NormalPageHeap::NormalPageHeap(ThreadState* state, int index) | |
| 521 : BaseHeap(state, index) | |
| 522 , m_currentAllocationPoint(nullptr) | |
| 523 , m_remainingAllocationSize(0) | |
| 524 , m_lastRemainingAllocationSize(0) | |
| 525 , m_promptlyFreedSize(0) | |
| 526 #if ENABLE(GC_PROFILING) | |
| 527 , m_cumulativeAllocationSize(0) | |
| 528 , m_allocationCount(0) | |
| 529 , m_inlineAllocationCount(0) | |
| 530 #endif | |
| 531 { | |
| 532 clearFreeLists(); | |
| 533 } | |
| 534 | |
| 535 void NormalPageHeap::clearFreeLists() | |
| 536 { | |
| 537 setAllocationPoint(nullptr, 0); | |
| 538 m_freeList.clear(); | |
| 539 } | |
| 540 | |
| 541 #if ENABLE(ASSERT) | |
| 542 bool NormalPageHeap::isConsistentForGC() | |
| 543 { | |
| 544 // A thread heap is consistent for sweeping if none of the pages to be swept | |
| 545 // contain a freelist block or the current allocation point. | |
| 546 for (size_t i = 0; i < blinkPageSizeLog2; ++i) { | |
| 547         for (FreeListEntry* freeListEntry = m_freeList.m_freeLists[i]; freeListEntry; freeListEntry = freeListEntry->next()) { | |
| 548 if (pagesToBeSweptContains(freeListEntry->address())) | |
| 549 return false; | |
| 550 } | |
| 551 } | |
| 552 if (hasCurrentAllocationArea()) { | |
| 553 if (pagesToBeSweptContains(currentAllocationPoint())) | |
| 554 return false; | |
| 555 } | |
| 556 return true; | |
| 557 } | |
| 558 | |
| 559 bool NormalPageHeap::pagesToBeSweptContains(Address address) | |
| 560 { | |
| 561 for (BasePage* page = m_firstUnsweptPage; page; page = page->next()) { | |
| 562 if (page->contains(address)) | |
| 563 return true; | |
| 564 } | |
| 565 return false; | |
| 566 } | |
| 567 #endif | |
| 568 | |
| 569 void NormalPageHeap::takeFreelistSnapshot(const String& dumpName) | |
| 570 { | |
| 571 if (m_freeList.takeSnapshot(dumpName)) { | |
| 572         WebMemoryAllocatorDump* bucketsDump = BlinkGCMemoryDumpProvider::instance()->createMemoryAllocatorDumpForCurrentGC(dumpName + "/buckets"); | |
| 573         WebMemoryAllocatorDump* pagesDump = BlinkGCMemoryDumpProvider::instance()->createMemoryAllocatorDumpForCurrentGC(dumpName + "/pages"); | |
| 574         BlinkGCMemoryDumpProvider::instance()->currentProcessMemoryDump()->AddOwnershipEdge(pagesDump->guid(), bucketsDump->guid()); | |
| 575 } | |
| 576 } | |
| 577 | |
| 578 #if ENABLE(GC_PROFILING) | |
| 579 void NormalPageHeap::snapshotFreeList(TracedValue& json) | |
| 580 { | |
| 581 json.setInteger("cumulativeAllocationSize", m_cumulativeAllocationSize); | |
| 582     json.setDouble("inlineAllocationRate", static_cast<double>(m_inlineAllocationCount) / m_allocationCount); | |
| 583 json.setInteger("inlineAllocationCount", m_inlineAllocationCount); | |
| 584 json.setInteger("allocationCount", m_allocationCount); | |
| 585 size_t pageCount = 0; | |
| 586 size_t totalPageSize = 0; | |
| 587     for (NormalPage* page = static_cast<NormalPage*>(m_firstPage); page; page = static_cast<NormalPage*>(page->next())) { | |
| 588 ++pageCount; | |
| 589 totalPageSize += page->payloadSize(); | |
| 590 } | |
| 591 json.setInteger("pageCount", pageCount); | |
| 592 json.setInteger("totalPageSize", totalPageSize); | |
| 593 | |
| 594 FreeList::PerBucketFreeListStats bucketStats[blinkPageSizeLog2]; | |
| 595 size_t totalFreeSize; | |
| 596 m_freeList.getFreeSizeStats(bucketStats, totalFreeSize); | |
| 597 json.setInteger("totalFreeSize", totalFreeSize); | |
| 598 | |
| 599 json.beginArray("perBucketEntryCount"); | |
| 600 for (size_t i = 0; i < blinkPageSizeLog2; ++i) | |
| 601 json.pushInteger(bucketStats[i].entryCount); | |
| 602 json.endArray(); | |
| 603 | |
| 604 json.beginArray("perBucketFreeSize"); | |
| 605 for (size_t i = 0; i < blinkPageSizeLog2; ++i) | |
| 606 json.pushInteger(bucketStats[i].freeSize); | |
| 607 json.endArray(); | |
| 608 } | |
| 609 #endif | |
| 610 | |
| 611 void NormalPageHeap::allocatePage() | |
| 612 { | |
| 613 threadState()->shouldFlushHeapDoesNotContainCache(); | |
| 614 PageMemory* pageMemory = Heap::freePagePool()->takeFreePage(heapIndex()); | |
| 615 // We continue allocating page memory until we succeed in committing one. | |
| 616 while (!pageMemory) { | |
| 617 // Allocate a memory region for blinkPagesPerRegion pages that | |
| 618 // will each have the following layout. | |
| 619 // | |
| 620 // [ guard os page | ... payload ... | guard os page ] | |
| 621 // ^---{ aligned to blink page size } | |
| 622 PageMemoryRegion* region = PageMemoryRegion::allocateNormalPages(); | |
| 623 | |
| 624 // Setup the PageMemory object for each of the pages in the region. | |
| 625 size_t offset = 0; | |
| 626 for (size_t i = 0; i < blinkPagesPerRegion; ++i) { | |
| 627             PageMemory* memory = PageMemory::setupPageMemoryInRegion(region, offset, blinkPagePayloadSize()); | |
| 628 // Take the first possible page ensuring that this thread actually | |
| 629 // gets a page and add the rest to the page pool. | |
| 630 if (!pageMemory) { | |
| 631 if (memory->commit()) | |
| 632 pageMemory = memory; | |
| 633 else | |
| 634 delete memory; | |
| 635 } else { | |
| 636 Heap::freePagePool()->addFreePage(heapIndex(), memory); | |
| 637 } | |
| 638 offset += blinkPageSize; | |
| 639 } | |
| 640 } | |
| 641     NormalPage* page = new (pageMemory->writableStart()) NormalPage(pageMemory, this); | |
| 642 page->link(&m_firstPage); | |
| 643 | |
| 644 Heap::increaseAllocatedSpace(page->size()); | |
| 645 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) | |
| 646 // Allow the following addToFreeList() to add the newly allocated memory | |
| 647 // to the free list. | |
| 648 ASAN_UNPOISON_MEMORY_REGION(page->payload(), page->payloadSize()); | |
| 649 Address address = page->payload(); | |
| 650 for (size_t i = 0; i < page->payloadSize(); i++) | |
| 651 address[i] = reuseAllowedZapValue; | |
| 652 ASAN_POISON_MEMORY_REGION(page->payload(), page->payloadSize()); | |
| 653 #endif | |
| 654 addToFreeList(page->payload(), page->payloadSize()); | |
| 655 } | |
| 656 | |
| 657 void NormalPageHeap::freePage(NormalPage* page) | |
| 658 { | |
| 659 Heap::decreaseAllocatedSpace(page->size()); | |
| 660 | |
| 661 if (page->terminating()) { | |
| 662 // The thread is shutting down and this page is being removed as a part | |
| 663 // of the thread local GC. In that case the object could be traced in | |
| 664 // the next global GC if there is a dangling pointer from a live thread | |
| 665 // heap to this dead thread heap. To guard against this, we put the | |
| 666 // page into the orphaned page pool and zap the page memory. This | |
| 667 // ensures that tracing the dangling pointer in the next global GC just | |
| 668 // crashes instead of causing use-after-frees. After the next global | |
| 669 // GC, the orphaned pages are removed. | |
| 670 Heap::orphanedPagePool()->addOrphanedPage(heapIndex(), page); | |
| 671 } else { | |
| 672 PageMemory* memory = page->storage(); | |
| 673 page->~NormalPage(); | |
| 674 Heap::freePagePool()->addFreePage(heapIndex(), memory); | |
| 675 } | |
| 676 } | |
| 677 | |
| 678 bool NormalPageHeap::coalesce() | |
| 679 { | |
| 680 // Don't coalesce heaps if there are not enough promptly freed entries | |
| 681 // to be coalesced. | |
| 682 // | |
| 683 // FIXME: This threshold is determined just to optimize blink_perf | |
| 684     // benchmarks. Coalescing is very sensitive to the threshold and | |
| 685 // we need further investigations on the coalescing scheme. | |
| 686 if (m_promptlyFreedSize < 1024 * 1024) | |
| 687 return false; | |
| 688 | |
| 689 if (threadState()->sweepForbidden()) | |
| 690 return false; | |
| 691 | |
| 692 ASSERT(!hasCurrentAllocationArea()); | |
| 693 TRACE_EVENT0("blink_gc", "BaseHeap::coalesce"); | |
| 694 | |
| 695 // Rebuild free lists. | |
| 696 m_freeList.clear(); | |
| 697 size_t freedSize = 0; | |
| 698     for (NormalPage* page = static_cast<NormalPage*>(m_firstPage); page; page = static_cast<NormalPage*>(page->next())) { | |
| 699 page->clearObjectStartBitMap(); | |
| 700 Address startOfGap = page->payload(); | |
| 701         for (Address headerAddress = startOfGap; headerAddress < page->payloadEnd(); ) { | |
| 702             HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress); | |
| 703 size_t size = header->size(); | |
| 704 ASSERT(size > 0); | |
| 705 ASSERT(size < blinkPagePayloadSize()); | |
| 706 | |
| 707 if (header->isPromptlyFreed()) { | |
| 708 ASSERT(size >= sizeof(HeapObjectHeader)); | |
| 709 // Zero the memory in the free list header to maintain the | |
| 710 // invariant that memory on the free list is zero filled. | |
| 711 // The rest of the memory is already on the free list and is | |
| 712 // therefore already zero filled. | |
| 713                 SET_MEMORY_INACCESSIBLE(headerAddress, sizeof(HeapObjectHeader)); | |
| 714 freedSize += size; | |
| 715 headerAddress += size; | |
| 716 continue; | |
| 717 } | |
| 718 if (header->isFree()) { | |
| 719 // Zero the memory in the free list header to maintain the | |
| 720 // invariant that memory on the free list is zero filled. | |
| 721 // The rest of the memory is already on the free list and is | |
| 722 // therefore already zero filled. | |
| 723                 SET_MEMORY_INACCESSIBLE(headerAddress, size < sizeof(FreeListEntry) ? size : sizeof(FreeListEntry)); | |
| 724 headerAddress += size; | |
| 725 continue; | |
| 726 } | |
| 727 ASSERT(header->checkHeader()); | |
| 728 if (startOfGap != headerAddress) | |
| 729 addToFreeList(startOfGap, headerAddress - startOfGap); | |
| 730 | |
| 731 headerAddress += size; | |
| 732 startOfGap = headerAddress; | |
| 733 } | |
| 734 | |
| 735 if (startOfGap != page->payloadEnd()) | |
| 736 addToFreeList(startOfGap, page->payloadEnd() - startOfGap); | |
| 737 } | |
| 738 Heap::decreaseAllocatedObjectSize(freedSize); | |
| 739 ASSERT(m_promptlyFreedSize == freedSize); | |
| 740 m_promptlyFreedSize = 0; | |
| 741 return true; | |
| 742 } | |
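coalesce() rebuilds the free lists by walking each page in address order and merging runs of free or promptly freed blocks into single gaps. A minimal sketch of that merge, using an illustrative Block type rather than Blink's header layout.

```cpp
#include <cstddef>
#include <vector>

struct Block {
    size_t size;
    bool free; // covers both "free" and "promptly freed" in this sketch
};

// Merge adjacent free blocks into single entries, preserving live blocks.
std::vector<Block> coalesceSketch(const std::vector<Block>& blocks)
{
    std::vector<Block> result;
    size_t gapSize = 0;
    for (const Block& block : blocks) {
        if (block.free) {
            gapSize += block.size; // extend the current gap
            continue;
        }
        if (gapSize) {
            result.push_back({gapSize, true}); // emit the merged gap
            gapSize = 0;
        }
        result.push_back(block);
    }
    if (gapSize)
        result.push_back({gapSize, true});
    return result;
}
```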
| 743 | |
| 744 void NormalPageHeap::promptlyFreeObject(HeapObjectHeader* header) | |
| 745 { | |
| 746 ASSERT(!threadState()->sweepForbidden()); | |
| 747 ASSERT(header->checkHeader()); | |
| 748 Address address = reinterpret_cast<Address>(header); | |
| 749 Address payload = header->payload(); | |
| 750 size_t size = header->size(); | |
| 751 size_t payloadSize = header->payloadSize(); | |
| 752 ASSERT(size > 0); | |
| 753 ASSERT(pageFromObject(address) == findPageFromAddress(address)); | |
| 754 | |
| 755 { | |
| 756 ThreadState::SweepForbiddenScope forbiddenScope(threadState()); | |
| 757 header->finalize(payload, payloadSize); | |
| 758 if (address + size == m_currentAllocationPoint) { | |
| 759 m_currentAllocationPoint = address; | |
| 760 if (m_lastRemainingAllocationSize == m_remainingAllocationSize) { | |
| 761 Heap::decreaseAllocatedObjectSize(size); | |
| 762 m_lastRemainingAllocationSize += size; | |
| 763 } | |
| 764 m_remainingAllocationSize += size; | |
| 765 SET_MEMORY_INACCESSIBLE(address, size); | |
| 766 return; | |
| 767 } | |
| 768 SET_MEMORY_INACCESSIBLE(payload, payloadSize); | |
| 769 header->markPromptlyFreed(); | |
| 770 } | |
| 771 | |
| 772 m_promptlyFreedSize += size; | |
| 773 } | |
| 774 | |
| 775 bool NormalPageHeap::expandObject(HeapObjectHeader* header, size_t newSize) | |
| 776 { | |
| 777 // It's possible that Vector requests a smaller expanded size because | |
| 778 // Vector::shrinkCapacity can set a capacity smaller than the actual payload | |
| 779 // size. | |
| 780 ASSERT(header->checkHeader()); | |
| 781 if (header->payloadSize() >= newSize) | |
| 782 return true; | |
| 783 size_t allocationSize = Heap::allocationSizeFromSize(newSize); | |
| 784 ASSERT(allocationSize > header->size()); | |
| 785 size_t expandSize = allocationSize - header->size(); | |
| 786     if (header->payloadEnd() == m_currentAllocationPoint && expandSize <= m_remainingAllocationSize) { | |
| 787 m_currentAllocationPoint += expandSize; | |
| 788 m_remainingAllocationSize -= expandSize; | |
| 789 | |
| 790 // Unpoison the memory used for the object (payload). | |
| 791 SET_MEMORY_ACCESSIBLE(header->payloadEnd(), expandSize); | |
| 792 header->setSize(allocationSize); | |
| 793 ASSERT(findPageFromAddress(header->payloadEnd() - 1)); | |
| 794 return true; | |
| 795 } | |
| 796 return false; | |
| 797 } | |
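expandObject() grows an object in place only when its payload ends exactly at the current allocation point and the extra bytes fit in the remaining bump area; otherwise the caller must reallocate and copy. A sketch of that check with illustrative types, not Blink's real allocation-point bookkeeping.

```cpp
#include <cstddef>

struct BumpAreaSketch {
    char* point = nullptr;  // next address to hand out
    size_t remaining = 0;   // bytes left in the current area
};

// Returns true if the object ending at payloadEnd was grown in place.
bool expandInPlaceSketch(BumpAreaSketch& area, char* payloadEnd, size_t expandSize)
{
    if (payloadEnd != area.point || expandSize > area.remaining)
        return false; // not adjacent to the bump pointer, or not enough room
    area.point += expandSize;
    area.remaining -= expandSize;
    return true;
}
```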
| 798 | |
| 799 bool NormalPageHeap::shrinkObject(HeapObjectHeader* header, size_t newSize) | |
| 800 { | |
| 801 ASSERT(header->checkHeader()); | |
| 802 ASSERT(header->payloadSize() > newSize); | |
| 803 size_t allocationSize = Heap::allocationSizeFromSize(newSize); | |
| 804 ASSERT(header->size() > allocationSize); | |
| 805 size_t shrinkSize = header->size() - allocationSize; | |
| 806 if (header->payloadEnd() == m_currentAllocationPoint) { | |
| 807 m_currentAllocationPoint -= shrinkSize; | |
| 808 m_remainingAllocationSize += shrinkSize; | |
| 809 SET_MEMORY_INACCESSIBLE(m_currentAllocationPoint, shrinkSize); | |
| 810 header->setSize(allocationSize); | |
| 811 return true; | |
| 812 } | |
| 813 ASSERT(shrinkSize >= sizeof(HeapObjectHeader)); | |
| 814 ASSERT(header->gcInfoIndex() > 0); | |
| 815 Address shrinkAddress = header->payloadEnd() - shrinkSize; | |
| 816     HeapObjectHeader* freedHeader = new (NotNull, shrinkAddress) HeapObjectHeader(shrinkSize, header->gcInfoIndex()); | |
| 817 freedHeader->markPromptlyFreed(); | |
| 818     ASSERT(pageFromObject(reinterpret_cast<Address>(header)) == findPageFromAddress(reinterpret_cast<Address>(header))); | |
| 819 m_promptlyFreedSize += shrinkSize; | |
| 820 header->setSize(allocationSize); | |
| 821     SET_MEMORY_INACCESSIBLE(shrinkAddress + sizeof(HeapObjectHeader), shrinkSize - sizeof(HeapObjectHeader)); | |
| 822 return false; | |
| 823 } | |
| 824 | |
| 825 Address NormalPageHeap::lazySweepPages(size_t allocationSize, size_t gcInfoIndex) | |
| 826 { | |
| 827 ASSERT(!hasCurrentAllocationArea()); | |
| 828 Address result = nullptr; | |
| 829 while (m_firstUnsweptPage) { | |
| 830 BasePage* page = m_firstUnsweptPage; | |
| 831 if (page->isEmpty()) { | |
| 832 page->unlink(&m_firstUnsweptPage); | |
| 833 page->removeFromHeap(); | |
| 834 } else { | |
| 835 // Sweep a page and move the page from m_firstUnsweptPages to | |
| 836 // m_firstPages. | |
| 837 page->sweep(); | |
| 838 page->unlink(&m_firstUnsweptPage); | |
| 839 page->link(&m_firstPage); | |
| 840 page->markAsSwept(); | |
| 841 | |
| 842 // For NormalPage, stop lazy sweeping once we find a slot to | |
| 843 // allocate a new object. | |
| 844 result = allocateFromFreeList(allocationSize, gcInfoIndex); | |
| 845 if (result) | |
| 846 break; | |
| 847 } | |
| 848 } | |
| 849 return result; | |
| 850 } | |
| 851 | |
| 852 void NormalPageHeap::updateRemainingAllocationSize() | |
| 853 { | |
| 854 if (m_lastRemainingAllocationSize > remainingAllocationSize()) { | |
| 855         Heap::increaseAllocatedObjectSize(m_lastRemainingAllocationSize - remainingAllocationSize()); | |
| 856 m_lastRemainingAllocationSize = remainingAllocationSize(); | |
| 857 } | |
| 858 ASSERT(m_lastRemainingAllocationSize == remainingAllocationSize()); | |
| 859 } | |
| 860 | |
| 861 void NormalPageHeap::setAllocationPoint(Address point, size_t size) | |
| 862 { | |
| 863 #if ENABLE(ASSERT) | |
| 864 if (point) { | |
| 865 ASSERT(size); | |
| 866 BasePage* page = pageFromObject(point); | |
| 867 ASSERT(!page->isLargeObjectPage()); | |
| 868 ASSERT(size <= static_cast<NormalPage*>(page)->payloadSize()); | |
| 869 } | |
| 870 #endif | |
| 871 if (hasCurrentAllocationArea()) { | |
| 872 addToFreeList(currentAllocationPoint(), remainingAllocationSize()); | |
| 873 } | |
| 874 updateRemainingAllocationSize(); | |
| 875 m_currentAllocationPoint = point; | |
| 876 m_lastRemainingAllocationSize = m_remainingAllocationSize = size; | |
| 877 } | |
| 878 | |
| 879 Address NormalPageHeap::outOfLineAllocate(size_t allocationSize, size_t gcInfoIndex) | |
| 880 { | |
| 881 ASSERT(allocationSize > remainingAllocationSize()); | |
| 882 ASSERT(allocationSize >= allocationGranularity); | |
| 883 | |
| 884 #if ENABLE(GC_PROFILING) | |
| 885 threadState()->snapshotFreeListIfNecessary(); | |
| 886 #endif | |
| 887 | |
| 888 // Ideally we want to update the persistent count every time a persistent | |
| 889 // handle is created or destructed, but that is heavy. So we do the update | |
| 890 // only in outOfLineAllocate(). | |
| 891 threadState()->updatePersistentCounters(); | |
| 892 | |
| 893 // 1. If this allocation is big enough, allocate a large object. | |
| 894 if (allocationSize >= largeObjectSizeThreshold) { | |
| 895 // TODO(sof): support eagerly finalized large objects, if ever needed. | |
| 896 RELEASE_ASSERT(heapIndex() != ThreadState::EagerSweepHeapIndex); | |
| 897         LargeObjectHeap* largeObjectHeap = static_cast<LargeObjectHeap*>(threadState()->heap(ThreadState::LargeObjectHeapIndex)); | |
| 898         Address largeObject = largeObjectHeap->allocateLargeObjectPage(allocationSize, gcInfoIndex); | |
| 899 ASAN_MARK_LARGE_VECTOR_CONTAINER(this, largeObject); | |
| 900 return largeObject; | |
| 901 } | |
| 902 | |
| 903 // 2. Try to allocate from a free list. | |
| 904 updateRemainingAllocationSize(); | |
| 905 Address result = allocateFromFreeList(allocationSize, gcInfoIndex); | |
| 906 if (result) | |
| 907 return result; | |
| 908 | |
| 909 // 3. Reset the allocation point. | |
| 910 setAllocationPoint(nullptr, 0); | |
| 911 | |
| 912 // 4. Lazily sweep pages of this heap until we find a freed area for | |
| 913 // this allocation or we finish sweeping all pages of this heap. | |
| 914 result = lazySweep(allocationSize, gcInfoIndex); | |
| 915 if (result) | |
| 916 return result; | |
| 917 | |
| 918 // 5. Coalesce promptly freed areas and then try to allocate from a free | |
| 919 // list. | |
| 920 if (coalesce()) { | |
| 921 result = allocateFromFreeList(allocationSize, gcInfoIndex); | |
| 922 if (result) | |
| 923 return result; | |
| 924 } | |
| 925 | |
| 926 // 6. Complete sweeping. | |
| 927 threadState()->completeSweep(); | |
| 928 | |
| 929 // 7. Check if we should trigger a GC. | |
| 930 threadState()->scheduleGCIfNeeded(); | |
| 931 | |
| 932 // 8. Add a new page to this heap. | |
| 933 allocatePage(); | |
| 934 | |
| 935 // 9. Try to allocate from a free list. This allocation must succeed. | |
| 936 result = allocateFromFreeList(allocationSize, gcInfoIndex); | |
| 937 RELEASE_ASSERT(result); | |
| 938 return result; | |
| 939 } | |
| 940 | |
| 941 Address NormalPageHeap::allocateFromFreeList(size_t allocationSize, size_t gcInfoIndex) | |
| 942 { | |
| 943 // Try reusing a block from the largest bin. The underlying reasoning | |
| 944 // being that we want to amortize this slow allocation call by carving | |
| 945     // off as large a free block as possible in one go; a block that will | |
| 946 // service this block and let following allocations be serviced quickly | |
| 947 // by bump allocation. | |
| 948 size_t bucketSize = 1 << m_freeList.m_biggestFreeListIndex; | |
| 949 int index = m_freeList.m_biggestFreeListIndex; | |
| 950 for (; index > 0; --index, bucketSize >>= 1) { | |
| 951 FreeListEntry* entry = m_freeList.m_freeLists[index]; | |
| 952 if (allocationSize > bucketSize) { | |
| 953 // Final bucket candidate; check initial entry if it is able | |
| 954 // to service this allocation. Do not perform a linear scan, | |
| 955 // as it is considered too costly. | |
| 956 if (!entry || entry->size() < allocationSize) | |
| 957 break; | |
| 958 } | |
| 959 if (entry) { | |
| 960 entry->unlink(&m_freeList.m_freeLists[index]); | |
| 961 setAllocationPoint(entry->address(), entry->size()); | |
| 962 ASSERT(hasCurrentAllocationArea()); | |
| 963 ASSERT(remainingAllocationSize() >= allocationSize); | |
| 964 m_freeList.m_biggestFreeListIndex = index; | |
| 965 return allocateObject(allocationSize, gcInfoIndex); | |
| 966 } | |
| 967 } | |
| 968 m_freeList.m_biggestFreeListIndex = index; | |
| 969 return nullptr; | |
| 970 } | |
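allocateFromFreeList() installs one large carved-off free block as the current allocation area so that subsequent requests are served by bump allocation. A sketch of that fast path with illustrative names; Blink's real version also writes a HeapObjectHeader into each allocation and keeps size accounting.

```cpp
#include <cstddef>

struct AllocationAreaSketch {
    char* point = nullptr;
    size_t remaining = 0;

    // Install a free block as the bump area, as setAllocationPoint() does above.
    void setArea(char* start, size_t size)
    {
        point = start;
        remaining = size;
    }

    // Fast-path allocation: pointer arithmetic only.
    void* allocate(size_t size)
    {
        if (size > remaining)
            return nullptr; // fall back to the out-of-line slow path
        char* result = point;
        point += size;
        remaining -= size;
        return result;
    }
};
```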
| 971 | |
| 972 LargeObjectHeap::LargeObjectHeap(ThreadState* state, int index) | |
| 973 : BaseHeap(state, index) | |
| 974 { | |
| 975 } | |
| 976 | |
| 977 Address LargeObjectHeap::allocateLargeObjectPage(size_t allocationSize, size_t gcInfoIndex) | |
| 978 { | |
| 979 // Caller already added space for object header and rounded up to allocation | |
| 980 // alignment | |
| 981 ASSERT(!(allocationSize & allocationMask)); | |
| 982 | |
| 983 // 1. Try to sweep large objects more than allocationSize bytes | |
| 984 // before allocating a new large object. | |
| 985 Address result = lazySweep(allocationSize, gcInfoIndex); | |
| 986 if (result) | |
| 987 return result; | |
| 988 | |
| 989 // 2. If we have failed in sweeping allocationSize bytes, | |
| 990 // we complete sweeping before allocating this large object. | |
| 991 threadState()->completeSweep(); | |
| 992 | |
| 993 // 3. Check if we should trigger a GC. | |
| 994 threadState()->scheduleGCIfNeeded(); | |
| 995 | |
| 996 return doAllocateLargeObjectPage(allocationSize, gcInfoIndex); | |
| 997 } | |
| 998 | |
| 999 Address LargeObjectHeap::doAllocateLargeObjectPage(size_t allocationSize, size_t gcInfoIndex) | |
| 1000 { | |
| 1001 size_t largeObjectSize = LargeObjectPage::pageHeaderSize() + allocationSize; | |
| 1002 // If ASan is supported we add allocationGranularity bytes to the allocated | |
| 1003 // space and poison that to detect overflows | |
| 1004 #if defined(ADDRESS_SANITIZER) | |
| 1005 largeObjectSize += allocationGranularity; | |
| 1006 #endif | |
| 1007 | |
| 1008 threadState()->shouldFlushHeapDoesNotContainCache(); | |
| 1009 PageMemory* pageMemory = PageMemory::allocate(largeObjectSize); | |
| 1010 Address largeObjectAddress = pageMemory->writableStart(); | |
| 1011     Address headerAddress = largeObjectAddress + LargeObjectPage::pageHeaderSize(); | |
| 1012 #if ENABLE(ASSERT) | |
| 1013 // Verify that the allocated PageMemory is expectedly zeroed. | |
| 1014 for (size_t i = 0; i < largeObjectSize; ++i) | |
| 1015 ASSERT(!largeObjectAddress[i]); | |
| 1016 #endif | |
| 1017 ASSERT(gcInfoIndex > 0); | |
| 1018     HeapObjectHeader* header = new (NotNull, headerAddress) HeapObjectHeader(largeObjectSizeInHeader, gcInfoIndex); | |
| 1019 Address result = headerAddress + sizeof(*header); | |
| 1020 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); | |
| 1021     LargeObjectPage* largeObject = new (largeObjectAddress) LargeObjectPage(pageMemory, this, allocationSize); | |
| 1022 ASSERT(header->checkHeader()); | |
| 1023 | |
| 1024 // Poison the object header and allocationGranularity bytes after the object | |
| 1025 ASAN_POISON_MEMORY_REGION(header, sizeof(*header)); | |
| 1026     ASAN_POISON_MEMORY_REGION(largeObject->address() + largeObject->size(), allocationGranularity); | |
| 1027 | |
| 1028 largeObject->link(&m_firstPage); | |
| 1029 | |
| 1030 Heap::increaseAllocatedSpace(largeObject->size()); | |
| 1031 Heap::increaseAllocatedObjectSize(largeObject->size()); | |
| 1032 return result; | |
| 1033 } | |
| 1034 | |
| 1035 void LargeObjectHeap::freeLargeObjectPage(LargeObjectPage* object) | |
| 1036 { | |
| 1037 ASAN_UNPOISON_MEMORY_REGION(object->payload(), object->payloadSize()); | |
| 1038     object->heapObjectHeader()->finalize(object->payload(), object->payloadSize()); | |
| 1039 Heap::decreaseAllocatedSpace(object->size()); | |
| 1040 | |
| 1041 // Unpoison the object header and allocationGranularity bytes after the | |
| 1042 // object before freeing. | |
| 1043     ASAN_UNPOISON_MEMORY_REGION(object->heapObjectHeader(), sizeof(HeapObjectHeader)); | |
| 1044     ASAN_UNPOISON_MEMORY_REGION(object->address() + object->size(), allocationGranularity); | |
| 1045 | |
| 1046 if (object->terminating()) { | |
| 1047 ASSERT(ThreadState::current()->isTerminating()); | |
| 1048 // The thread is shutting down and this page is being removed as a part | |
| 1049 // of the thread local GC. In that case the object could be traced in | |
| 1050 // the next global GC if there is a dangling pointer from a live thread | |
| 1051 // heap to this dead thread heap. To guard against this, we put the | |
| 1052 // page into the orphaned page pool and zap the page memory. This | |
| 1053 // ensures that tracing the dangling pointer in the next global GC just | |
| 1054 // crashes instead of causing use-after-frees. After the next global | |
| 1055 // GC, the orphaned pages are removed. | |
| 1056 Heap::orphanedPagePool()->addOrphanedPage(heapIndex(), object); | |
| 1057 } else { | |
| 1058 ASSERT(!ThreadState::current()->isTerminating()); | |
| 1059 PageMemory* memory = object->storage(); | |
| 1060 object->~LargeObjectPage(); | |
| 1061 delete memory; | |
| 1062 } | |
| 1063 } | |
| 1064 | |
| 1065 Address LargeObjectHeap::lazySweepPages(size_t allocationSize, size_t gcInfoIndex) | |
| 1066 { | |
| 1067 Address result = nullptr; | |
| 1068 size_t sweptSize = 0; | |
| 1069 while (m_firstUnsweptPage) { | |
| 1070 BasePage* page = m_firstUnsweptPage; | |
| 1071 if (page->isEmpty()) { | |
| 1072             sweptSize += static_cast<LargeObjectPage*>(page)->payloadSize() + sizeof(HeapObjectHeader); | |
| 1073 page->unlink(&m_firstUnsweptPage); | |
| 1074 page->removeFromHeap(); | |
| 1075 // For LargeObjectPage, stop lazy sweeping once we have swept | |
| 1076 // more than allocationSize bytes. | |
| 1077 if (sweptSize >= allocationSize) { | |
| 1078 result = doAllocateLargeObjectPage(allocationSize, gcInfoIndex); | |
| 1079 ASSERT(result); | |
| 1080 break; | |
| 1081 } | |
| 1082 } else { | |
| 1083 // Sweep a page and move the page from m_firstUnsweptPages to | |
| 1084 // m_firstPages. | |
| 1085 page->sweep(); | |
| 1086 page->unlink(&m_firstUnsweptPage); | |
| 1087 page->link(&m_firstPage); | |
| 1088 page->markAsSwept(); | |
| 1089 } | |
| 1090 } | |
| 1091 return result; | |
| 1092 } | |
| 1093 | |
| 1094 FreeList::FreeList() | |
| 1095 : m_biggestFreeListIndex(0) | |
| 1096 { | |
| 1097 } | |
| 1098 | |
| 1099 void FreeList::addToFreeList(Address address, size_t size) | |
| 1100 { | |
| 1101 ASSERT(size < blinkPagePayloadSize()); | |
| 1102 // The free list entries are only pointer aligned (but when we allocate | |
| 1103 // from them we are 8 byte aligned due to the header size). | |
| 1104     ASSERT(!((reinterpret_cast<uintptr_t>(address) + sizeof(HeapObjectHeader)) & allocationMask)); | |
| 1105 ASSERT(!(size & allocationMask)); | |
| 1106 ASAN_UNPOISON_MEMORY_REGION(address, size); | |
| 1107 FreeListEntry* entry; | |
| 1108 if (size < sizeof(*entry)) { | |
| 1109 // Create a dummy header with only a size and freelist bit set. | |
| 1110 ASSERT(size >= sizeof(HeapObjectHeader)); | |
| 1111         // The free list encodes the size to mark the lost memory as freelist memory. | |
| 1112         new (NotNull, address) HeapObjectHeader(size, gcInfoIndexForFreeListHeader); | |
| 1113 | |
| 1114 ASAN_POISON_MEMORY_REGION(address, size); | |
| 1115 // This memory gets lost. Sweeping can reclaim it. | |
| 1116 return; | |
| 1117 } | |
| 1118 entry = new (NotNull, address) FreeListEntry(size); | |
| 1119 | |
| 1120 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) | |
| 1121 // The following logic delays reusing free lists for (at least) one GC | |
| 1122 // cycle or coalescing. This is helpful to detect use-after-free errors | |
| 1123 // that could be caused by lazy sweeping etc. | |
| 1124 size_t allowedCount = 0; | |
| 1125 size_t forbiddenCount = 0; | |
| 1126 for (size_t i = sizeof(FreeListEntry); i < size; i++) { | |
| 1127 if (address[i] == reuseAllowedZapValue) { | |
| 1128 allowedCount++; | |
| 1129 } else if (address[i] == reuseForbiddenZapValue) { | |
| 1130 forbiddenCount++; | |
| 1131 } else { | |
| 1132 ASSERT_NOT_REACHED(); | |
| 1133 } | |
| 1134 } | |
| 1135 size_t entryCount = size - sizeof(FreeListEntry); | |
| 1136 if (forbiddenCount == entryCount) { | |
| 1137 // If all values in the memory region are reuseForbiddenZapValue, | |
| 1138 // we flip them to reuseAllowedZapValue. This allows the next | |
| 1139 // addToFreeList() to add the memory region to the free list | |
| 1140 // (unless someone concatenates the memory region with another memory | |
| 1141 // region that contains reuseForbiddenZapValue.) | |
| 1142 for (size_t i = sizeof(FreeListEntry); i < size; i++) | |
| 1143 address[i] = reuseAllowedZapValue; | |
| 1144 ASAN_POISON_MEMORY_REGION(address, size); | |
| 1145 // Don't add the memory region to the free list in this addToFreeList(). | |
| 1146 return; | |
| 1147 } | |
| 1148 if (allowedCount != entryCount) { | |
| 1149 // If the memory region mixes reuseForbiddenZapValue and | |
| 1150 // reuseAllowedZapValue, we (conservatively) flip all the values | |
| 1151 // to reuseForbiddenZapValue. These values will be changed to | |
| 1152 // reuseAllowedZapValue in the next addToFreeList(). | |
| 1153 for (size_t i = sizeof(FreeListEntry); i < size; i++) | |
| 1154 address[i] = reuseForbiddenZapValue; | |
| 1155 ASAN_POISON_MEMORY_REGION(address, size); | |
| 1156 // Don't add the memory region to the free list in this addToFreeList(). | |
| 1157 return; | |
| 1158 } | |
| 1159 // We reach here only when all the values in the memory region are | |
| 1160 // reuseAllowedZapValue. In this case, we are allowed to add the memory | |
| 1161 // region to the free list and reuse it for another object. | |
| 1162 #endif | |
| 1163 ASAN_POISON_MEMORY_REGION(address, size); | |
| 1164 | |
| 1165 int index = bucketIndexForSize(size); | |
| 1166 entry->link(&m_freeLists[index]); | |
| 1167 if (index > m_biggestFreeListIndex) | |
| 1168 m_biggestFreeListIndex = index; | |
| 1169 } | |
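The zap-value protocol above delays reuse: freed payload bytes cycle from "forbidden" to "allowed", so a region is only linked back into the free list after it has sat through at least one extra addToFreeList() pass (or a mixed region is conservatively reset). A compact sketch of that state machine with illustrative names, standing in for the byte-level reuseAllowedZapValue/reuseForbiddenZapValue checks.

```cpp
#include <algorithm>
#include <vector>

enum class Reuse { Forbidden, Allowed };

// Returns true if the region may be linked into the free list now;
// otherwise flips the bytes one step and defers reuse to a later pass.
bool mayReuseSketch(std::vector<Reuse>& bytes)
{
    bool allForbidden = std::all_of(bytes.begin(), bytes.end(),
        [](Reuse r) { return r == Reuse::Forbidden; });
    bool allAllowed = std::all_of(bytes.begin(), bytes.end(),
        [](Reuse r) { return r == Reuse::Allowed; });
    if (allAllowed)
        return true; // every byte already says "allowed": safe to reuse
    // All-forbidden regions become allowed for the next pass; mixed regions
    // are conservatively reset to forbidden, matching the logic above.
    std::fill(bytes.begin(), bytes.end(),
        allForbidden ? Reuse::Allowed : Reuse::Forbidden);
    return false;
}
```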
| 1170 | |
| 1171 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) || defined(MEMORY_SANITIZER) | |
| 1172 NO_SANITIZE_ADDRESS | |
| 1173 NO_SANITIZE_MEMORY | |
| 1174 void NEVER_INLINE FreeList::zapFreedMemory(Address address, size_t size) | |
| 1175 { | |
| 1176 for (size_t i = 0; i < size; i++) { | |
| 1177 // See the comment in addToFreeList(). | |
| 1178 if (address[i] != reuseAllowedZapValue) | |
| 1179 address[i] = reuseForbiddenZapValue; | |
| 1180 } | |
| 1181 } | |
| 1182 #endif | |
| 1183 | |
| 1184 void FreeList::clear() | |
| 1185 { | |
| 1186 m_biggestFreeListIndex = 0; | |
| 1187 for (size_t i = 0; i < blinkPageSizeLog2; ++i) | |
| 1188 m_freeLists[i] = nullptr; | |
| 1189 } | |
| 1190 | |
| 1191 int FreeList::bucketIndexForSize(size_t size) | |
| 1192 { | |
| 1193 ASSERT(size > 0); | |
| 1194 int index = -1; | |
| 1195 while (size) { | |
| 1196 size >>= 1; | |
| 1197 index++; | |
| 1198 } | |
| 1199 return index; | |
| 1200 } | |
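bucketIndexForSize() above computes floor(log2(size)), i.e. one free-list bucket per power of two. A few spot checks, restated as a standalone test (the function is re-declared here only for the sketch).

```cpp
#include <cassert>
#include <cstddef>

static int bucketIndexForSizeSketch(size_t size)
{
    assert(size > 0);
    int index = -1;
    while (size) {
        size >>= 1;
        index++;
    }
    return index;
}

int main()
{
    assert(bucketIndexForSizeSketch(1) == 0);
    assert(bucketIndexForSizeSketch(255) == 7);   // still in the 2^7 bucket
    assert(bucketIndexForSizeSketch(256) == 8);   // 2^8 starts a new bucket
    assert(bucketIndexForSizeSketch(4096) == 12); // floor(log2(4096))
    return 0;
}
```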
| 1201 | |
| 1202 bool FreeList::takeSnapshot(const String& dumpBaseName) | |
| 1203 { | |
| 1204 bool didDumpBucketStats = false; | |
| 1205 for (size_t i = 0; i < blinkPageSizeLog2; ++i) { | |
| 1206 size_t entryCount = 0; | |
| 1207 size_t freeSize = 0; | |
| 1208         for (FreeListEntry* entry = m_freeLists[i]; entry; entry = entry->next()) { | |
| 1209 ++entryCount; | |
| 1210 freeSize += entry->size(); | |
| 1211 } | |
| 1212 | |
| 1213         String dumpName = dumpBaseName + String::format("/buckets/bucket_%lu", static_cast<unsigned long>(1 << i)); | |
| 1214         WebMemoryAllocatorDump* bucketDump = BlinkGCMemoryDumpProvider::instance()->createMemoryAllocatorDumpForCurrentGC(dumpName); | |
| 1215 bucketDump->AddScalar("free_count", "objects", entryCount); | |
| 1216 bucketDump->AddScalar("free_size", "bytes", freeSize); | |
| 1217 didDumpBucketStats = true; | |
| 1218 } | |
| 1219 return didDumpBucketStats; | |
| 1220 } | |
| 1221 | |
| 1222 #if ENABLE(GC_PROFILING) | |
| 1223 void FreeList::getFreeSizeStats(PerBucketFreeListStats bucketStats[], size_t& totalFreeSize) const | |
| 1224 { | |
| 1225 totalFreeSize = 0; | |
| 1226 for (size_t i = 0; i < blinkPageSizeLog2; i++) { | |
| 1227 size_t& entryCount = bucketStats[i].entryCount; | |
| 1228 size_t& freeSize = bucketStats[i].freeSize; | |
| 1229         for (FreeListEntry* entry = m_freeLists[i]; entry; entry = entry->next()) { | |
| 1230 ++entryCount; | |
| 1231 freeSize += entry->size(); | |
| 1232 } | |
| 1233 totalFreeSize += freeSize; | |
| 1234 } | |
| 1235 } | |
| 1236 #endif | |
| 1237 | |
| 1238 BasePage::BasePage(PageMemory* storage, BaseHeap* heap) | |
| 1239 : m_storage(storage) | |
| 1240 , m_heap(heap) | |
| 1241 , m_next(nullptr) | |
| 1242 , m_terminating(false) | |
| 1243 , m_swept(true) | |
| 1244 { | |
| 1245 ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this))); | |
| 1246 } | |
| 1247 | |
| 1248 void BasePage::markOrphaned() | |
| 1249 { | |
| 1250 m_heap = nullptr; | |
| 1251 m_terminating = false; | |
| 1252 // Since we zap the page payload for orphaned pages we need to mark it as | |
| 1253 // unused so a conservative pointer won't interpret the object headers. | |
| 1254 storage()->markUnused(); | |
| 1255 } | |
| 1256 | |
| 1257 NormalPage::NormalPage(PageMemory* storage, BaseHeap* heap) | |
| 1258 : BasePage(storage, heap) | |
| 1259 { | |
| 1260 m_objectStartBitMapComputed = false; | |
| 1261 ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this))); | |
| 1262 } | |
| 1263 | |
| 1264 size_t NormalPage::objectPayloadSizeForTesting() | |
| 1265 { | |
| 1266 size_t objectPayloadSize = 0; | |
| 1267 Address headerAddress = payload(); | |
| 1268 markAsSwept(); | |
| 1269 ASSERT(headerAddress != payloadEnd()); | |
| 1270 do { | |
| 1271         HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress); | |
| 1272 if (!header->isFree()) { | |
| 1273 ASSERT(header->checkHeader()); | |
| 1274 objectPayloadSize += header->payloadSize(); | |
| 1275 } | |
| 1276 ASSERT(header->size() < blinkPagePayloadSize()); | |
| 1277 headerAddress += header->size(); | |
| 1278 ASSERT(headerAddress <= payloadEnd()); | |
| 1279 } while (headerAddress < payloadEnd()); | |
| 1280 return objectPayloadSize; | |
| 1281 } | |
| 1282 | |
| 1283 bool NormalPage::isEmpty() | |
| 1284 { | |
| 1285 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(payload()); | |
| 1286 return header->isFree() && header->size() == payloadSize(); | |
| 1287 } | |
| 1288 | |
| 1289 void NormalPage::removeFromHeap() | |
| 1290 { | |
| 1291 heapForNormalPage()->freePage(this); | |
| 1292 } | |
| 1293 | |
| 1294 void NormalPage::sweep() | |
| 1295 { | |
| 1296 clearObjectStartBitMap(); | |
| 1297 | |
| 1298 size_t markedObjectSize = 0; | |
| 1299 Address startOfGap = payload(); | |
| 1300 for (Address headerAddress = startOfGap; headerAddress < payloadEnd(); ) { | |
| 1301         HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress); | |
| 1302 ASSERT(header->size() > 0); | |
| 1303 ASSERT(header->size() < blinkPagePayloadSize()); | |
| 1304 | |
| 1305 if (header->isPromptlyFreed()) | |
| 1306 heapForNormalPage()->decreasePromptlyFreedSize(header->size()); | |
| 1307 if (header->isFree()) { | |
| 1308 size_t size = header->size(); | |
| 1309 // Zero the memory in the free list header to maintain the | |
| 1310 // invariant that memory on the free list is zero filled. | |
| 1311 // The rest of the memory is already on the free list and is | |
| 1312 // therefore already zero filled. | |
| 1313             SET_MEMORY_INACCESSIBLE(headerAddress, size < sizeof(FreeListEntry) ? size : sizeof(FreeListEntry)); | |
| 1314 headerAddress += size; | |
| 1315 continue; | |
| 1316 } | |
| 1317 ASSERT(header->checkHeader()); | |
| 1318 | |
| 1319 if (!header->isMarked()) { | |
| 1320 size_t size = header->size(); | |
| 1321 // This is a fast version of header->payloadSize(). | |
| 1322 size_t payloadSize = size - sizeof(HeapObjectHeader); | |
| 1323 Address payload = header->payload(); | |
| 1324 // For ASan, unpoison the object before calling the finalizer. The | |
| 1325 // finalized object will be zero-filled and poison'ed afterwards. | |
| 1326 // Given all other unmarked objects are poisoned, ASan will detect | |
| 1327 // an error if the finalizer touches any other on-heap object that | |
| 1328             // dies in the same GC cycle. | |
| 1329 ASAN_UNPOISON_MEMORY_REGION(payload, payloadSize); | |
| 1330 header->finalize(payload, payloadSize); | |
| 1331 // This memory will be added to the freelist. Maintain the invariant | |
| 1332 // that memory on the freelist is zero filled. | |
| 1333 SET_MEMORY_INACCESSIBLE(headerAddress, size); | |
| 1334 headerAddress += size; | |
| 1335 continue; | |
| 1336 } | |
| 1337 if (startOfGap != headerAddress) | |
| 1338             heapForNormalPage()->addToFreeList(startOfGap, headerAddress - startOfGap); | |
| 1339 header->unmark(); | |
| 1340 headerAddress += header->size(); | |
| 1341 markedObjectSize += header->size(); | |
| 1342 startOfGap = headerAddress; | |
| 1343 } | |
| 1344 if (startOfGap != payloadEnd()) | |
| 1345         heapForNormalPage()->addToFreeList(startOfGap, payloadEnd() - startOfGap); | |
| 1346 | |
| 1347 if (markedObjectSize) | |
| 1348 Heap::increaseMarkedObjectSize(markedObjectSize); | |
| 1349 } | |
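
The sweep loop above coalesces each run of dead or already-free blocks between two surviving (marked) objects into a single free span bounded by startOfGap. A minimal standalone sketch of that coalescing pattern, using illustrative stand-in types (Block, sweepBlocks) rather than Blink's page and header classes:

    #include <cstddef>
    #include <iostream>
    #include <vector>

    // Toy model of the gap coalescing in NormalPage::sweep(): marked blocks
    // survive, and everything between two survivors is reported as one free span.
    // Block and sweepBlocks are illustrative stand-ins, not Blink types.
    struct Block { size_t size; bool marked; };

    static void sweepBlocks(const std::vector<Block>& blocks)
    {
        size_t offset = 0;      // current position within the page payload
        size_t startOfGap = 0;  // start of the current run of non-surviving blocks
        for (const Block& block : blocks) {
            if (!block.marked) {
                offset += block.size;   // dead or free: the gap keeps growing
                continue;
            }
            if (startOfGap != offset)   // a survivor closes the gap in one span
                std::cout << "free span [" << startOfGap << ", " << offset << ")\n";
            offset += block.size;
            startOfGap = offset;        // the next gap can only start after the survivor
        }
        if (startOfGap != offset)       // trailing gap up to the end of the payload
            std::cout << "free span [" << startOfGap << ", " << offset << ")\n";
    }

    int main()
    {
        // Prints one span covering the two middle blocks and one trailing 16-byte span.
        sweepBlocks({{16, true}, {32, false}, {48, false}, {64, true}, {16, false}});
        return 0;
    }
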
| 1350 | |
| 1351 void NormalPage::makeConsistentForGC() | |
| 1352 { | |
| 1353 size_t markedObjectSize = 0; | |
| 1354 for (Address headerAddress = payload(); headerAddress < payloadEnd();) { | |
| 1355 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress); | |
| 1356 ASSERT(header->size() < blinkPagePayloadSize()); | |
| 1357 // Check if this is a free list entry first, since we cannot call | |
| 1358 // isMarked on a free list entry. | |
| 1359 if (header->isFree()) { | |
| 1360 headerAddress += header->size(); | |
| 1361 continue; | |
| 1362 } | |
| 1363 ASSERT(header->checkHeader()); | |
| 1364 if (header->isMarked()) { | |
| 1365 header->unmark(); | |
| 1366 markedObjectSize += header->size(); | |
| 1367 } else { | |
| 1368 header->markDead(); | |
| 1369 } | |
| 1370 headerAddress += header->size(); | |
| 1371 } | |
| 1372 if (markedObjectSize) | |
| 1373 Heap::increaseMarkedObjectSize(markedObjectSize); | |
| 1374 } | |
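
makeConsistentForGC() normalizes every header before the next marking cycle: marked objects survive and lose their mark bit, previously unmarked objects are flagged dead, and free-list entries are skipped. The transition in isolation, with an illustrative enum standing in for the real packed header bits:

    #include <cassert>

    // Illustrative only: the real HeapObjectHeader packs these states into bits
    // of its encoded size field.
    enum class ObjectState { Free, Unmarked, Marked, Dead };

    static ObjectState makeConsistentForGC(ObjectState state)
    {
        switch (state) {
        case ObjectState::Marked:
            return ObjectState::Unmarked; // survivor: counted as marked, bit cleared
        case ObjectState::Unmarked:
            return ObjectState::Dead;     // unreachable in the last cycle
        case ObjectState::Free:
        case ObjectState::Dead:
            return state;                 // free-list entries and dead objects are left as-is
        }
        return state;
    }

    int main()
    {
        assert(makeConsistentForGC(ObjectState::Marked) == ObjectState::Unmarked);
        assert(makeConsistentForGC(ObjectState::Unmarked) == ObjectState::Dead);
        assert(makeConsistentForGC(ObjectState::Free) == ObjectState::Free);
    }
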
| 1375 | |
| 1376 void NormalPage::makeConsistentForMutator() | |
| 1377 { | |
| 1378 Address startOfGap = payload(); | |
| 1379 for (Address headerAddress = payload(); headerAddress < payloadEnd();) { | |
| 1380 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress); | |
| 1381 size_t size = header->size(); | |
| 1382 ASSERT(size < blinkPagePayloadSize()); | |
| 1383 if (header->isPromptlyFreed()) | |
| 1384 heapForNormalPage()->decreasePromptlyFreedSize(size); | |
| 1385 if (header->isFree()) { | |
| 1386 // Zero the memory in the free list header to maintain the | |
| 1387 // invariant that memory on the free list is zero filled. | |
| 1388 // The rest of the memory is already on the free list and is | |
| 1389 // therefore already zero filled. | |
| 1390 SET_MEMORY_INACCESSIBLE(headerAddress, size < sizeof(FreeListEntry) ? size : sizeof(FreeListEntry)); | |
| 1391 headerAddress += size; | |
| 1392 continue; | |
| 1393 } | |
| 1394 ASSERT(header->checkHeader()); | |
| 1395 | |
| 1396 if (startOfGap != headerAddress) | |
| 1397 heapForNormalPage()->addToFreeList(startOfGap, headerAddress - startOfGap); | |
| 1398 if (header->isMarked()) | |
| 1399 header->unmark(); | |
| 1400 headerAddress += size; | |
| 1401 startOfGap = headerAddress; | |
| 1402 ASSERT(headerAddress <= payloadEnd()); | |
| 1403 } | |
| 1404 if (startOfGap != payloadEnd()) | |
| 1405 heapForNormalPage()->addToFreeList(startOfGap, payloadEnd() - startOfGap); | |
| 1406 } | |
| 1407 | |
| 1408 #if defined(ADDRESS_SANITIZER) | |
| 1409 void NormalPage::poisonObjects(ThreadState::ObjectsToPoison objectsToPoison, ThreadState::Poisoning poisoning) | |
| 1410 { | |
| 1411 for (Address headerAddress = payload(); headerAddress < payloadEnd();) { | |
| 1412 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress); | |
| 1413 ASSERT(header->size() < blinkPagePayloadSize()); | |
| 1414 // Check if this is a free list entry first, since we cannot call | |
| 1415 // isMarked on a free list entry. | |
| 1416 if (header->isFree()) { | |
| 1417 headerAddress += header->size(); | |
| 1418 continue; | |
| 1419 } | |
| 1420 ASSERT(header->checkHeader()); | |
| 1421 if (objectsToPoison == ThreadState::MarkedAndUnmarked || !header->isMarked()) { | |
| 1422 if (poisoning == ThreadState::SetPoison) | |
| 1423 ASAN_POISON_MEMORY_REGION(header->payload(), header->payloadSize()); | |
| 1424 else | |
| 1425 ASAN_UNPOISON_MEMORY_REGION(header->payload(), header->payloadSize()); | |
| 1426 } | |
| 1427 headerAddress += header->size(); | |
| 1428 } | |
| 1429 } | |
| 1430 #endif | |
| 1431 | |
| 1432 void NormalPage::populateObjectStartBitMap() | |
| 1433 { | |
| 1434 memset(&m_objectStartBitMap, 0, objectStartBitMapSize); | |
| 1435 Address start = payload(); | |
| 1436 for (Address headerAddress = start; headerAddress < payloadEnd();) { | |
| 1437 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress); | |
| 1438 size_t objectOffset = headerAddress - start; | |
| 1439 ASSERT(!(objectOffset & allocationMask)); | |
| 1440 size_t objectStartNumber = objectOffset / allocationGranularity; | |
| 1441 size_t mapIndex = objectStartNumber / 8; | |
| 1442 ASSERT(mapIndex < objectStartBitMapSize); | |
| 1443 m_objectStartBitMap[mapIndex] |= (1 << (objectStartNumber & 7)); | |
| 1444 headerAddress += header->size(); | |
| 1445 ASSERT(headerAddress <= payloadEnd()); | |
| 1446 } | |
| 1447 m_objectStartBitMapComputed = true; | |
| 1448 } | |
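
Every allocation-granularity slot of the payload gets one bit in the map, so both the byte index and the bit position fall directly out of the payload offset. A small sketch of just that index arithmetic; an 8-byte allocationGranularity is assumed here for illustration, while the real constant is defined in the heap headers:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // Assumed granularity for illustration; the real constant may differ.
    static const size_t kAllocationGranularity = 8;

    struct BitIndex { size_t mapIndex; uint8_t mask; };

    // Maps a payload offset to the byte and bit used in m_objectStartBitMap.
    static BitIndex objectStartBit(size_t objectOffset)
    {
        assert(!(objectOffset % kAllocationGranularity));
        size_t objectStartNumber = objectOffset / kAllocationGranularity;
        return { objectStartNumber / 8,
                 static_cast<uint8_t>(1u << (objectStartNumber & 7)) };
    }

    int main()
    {
        BitIndex b = objectStartBit(72);  // 72 / 8 = slot 9 -> byte 1, bit 1
        assert(b.mapIndex == 1 && b.mask == 0x02);
    }
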
| 1449 | |
| 1450 void NormalPage::clearObjectStartBitMap() | |
| 1451 { | |
| 1452 m_objectStartBitMapComputed = false; | |
| 1453 } | |
| 1454 | |
| 1455 static int numberOfLeadingZeroes(uint8_t byte) | |
| 1456 { | |
| 1457 if (!byte) | |
| 1458 return 8; | |
| 1459 int result = 0; | |
| 1460 if (byte <= 0x0F) { | |
| 1461 result += 4; | |
| 1462 byte = byte << 4; | |
| 1463 } | |
| 1464 if (byte <= 0x3F) { | |
| 1465 result += 2; | |
| 1466 byte = byte << 2; | |
| 1467 } | |
| 1468 if (byte <= 0x7F) | |
| 1469 result++; | |
| 1470 return result; | |
| 1471 } | |
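
numberOfLeadingZeroes() counts the leading zero bits of a byte with three shift-and-compare steps. An exhaustive check against a naive reference, useful when reasoning about the bitmap scan in findHeaderFromAddress() below; the reference implementation is illustrative only:

    #include <cassert>
    #include <cstdint>

    // Reference implementation: test the bits from the top one by one.
    static int leadingZeroesReference(uint8_t byte)
    {
        int count = 0;
        for (int bit = 7; bit >= 0 && !(byte & (1 << bit)); --bit)
            ++count;
        return count;
    }

    // Same shape as NormalPage's helper, copied so the sketch is self-contained.
    static int numberOfLeadingZeroes(uint8_t byte)
    {
        if (!byte)
            return 8;
        int result = 0;
        if (byte <= 0x0F) { result += 4; byte <<= 4; }
        if (byte <= 0x3F) { result += 2; byte <<= 2; }
        if (byte <= 0x7F) result++;
        return result;
    }

    int main()
    {
        for (int i = 0; i < 256; ++i)
            assert(numberOfLeadingZeroes(static_cast<uint8_t>(i)) == leadingZeroesReference(static_cast<uint8_t>(i)));
    }
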
| 1472 | |
| 1473 HeapObjectHeader* NormalPage::findHeaderFromAddress(Address address) | |
| 1474 { | |
| 1475 if (address < payload()) | |
| 1476 return nullptr; | |
| 1477 if (!isObjectStartBitMapComputed()) | |
| 1478 populateObjectStartBitMap(); | |
| 1479 size_t objectOffset = address - payload(); | |
| 1480 size_t objectStartNumber = objectOffset / allocationGranularity; | |
| 1481 size_t mapIndex = objectStartNumber / 8; | |
| 1482 ASSERT(mapIndex < objectStartBitMapSize); | |
| 1483 size_t bit = objectStartNumber & 7; | |
| 1484 uint8_t byte = m_objectStartBitMap[mapIndex] & ((1 << (bit + 1)) - 1); | |
| 1485 while (!byte) { | |
| 1486 ASSERT(mapIndex > 0); | |
| 1487 byte = m_objectStartBitMap[--mapIndex]; | |
| 1488 } | |
| 1489 int leadingZeroes = numberOfLeadingZeroes(byte); | |
| 1490 objectStartNumber = (mapIndex * 8) + 7 - leadingZeroes; | |
| 1491 objectOffset = objectStartNumber * allocationGranularity; | |
| 1492 Address objectAddress = objectOffset + payload(); | |
| 1493 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(objectAddress); | |
| 1494 if (header->isFree()) | |
| 1495 return nullptr; | |
| 1496 ASSERT(header->checkHeader()); | |
| 1497 return header; | |
| 1498 } | |
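
Resolving an interior pointer masks off the bits above the pointer's slot and then scans the bitmap backwards to the nearest set bit, which is the object's start. The backward scan in isolation, over a plain byte vector instead of the page's m_objectStartBitMap:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Finds the index of the last set bit at or before `slot` in a bitmap where
    // one bit marks each object start. Mirrors the scan in findHeaderFromAddress().
    static size_t findObjectStartSlot(const std::vector<uint8_t>& bitmap, size_t slot)
    {
        size_t mapIndex = slot / 8;
        size_t bit = slot & 7;
        uint8_t byte = bitmap[mapIndex] & ((1 << (bit + 1)) - 1); // drop bits above `slot`
        while (!byte) {
            assert(mapIndex > 0);          // slot 0 is always an object start
            byte = bitmap[--mapIndex];
        }
        int leadingZeroes = 0;             // locate the highest set bit in `byte`
        for (uint8_t b = byte; !(b & 0x80); b <<= 1)
            ++leadingZeroes;
        return mapIndex * 8 + 7 - leadingZeroes;
    }

    int main()
    {
        // Objects start at slots 0 and 9; an interior pointer in slot 13 must
        // resolve to slot 9.
        std::vector<uint8_t> bitmap = { 0x01, 0x02 };
        assert(findObjectStartSlot(bitmap, 13) == 9);
    }
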
| 1499 | |
| 1500 #if ENABLE(ASSERT) | |
| 1501 static bool isUninitializedMemory(void* objectPointer, size_t objectSize) | |
| 1502 { | |
| 1503 // Scan through the object's fields and check that they are all zero. | |
| 1504 Address* objectFields = reinterpret_cast<Address*>(objectPointer); | |
| 1505 for (size_t i = 0; i < objectSize / sizeof(Address); ++i) { | |
| 1506 if (objectFields[i] != 0) | |
| 1507 return false; | |
| 1508 } | |
| 1509 return true; | |
| 1510 } | |
| 1511 #endif | |
| 1512 | |
| 1513 static void markPointer(Visitor* visitor, HeapObjectHeader* header) | |
| 1514 { | |
| 1515 ASSERT(header->checkHeader()); | |
| 1516 const GCInfo* gcInfo = Heap::gcInfo(header->gcInfoIndex()); | |
| 1517 if (gcInfo->hasVTable() && !vTableInitialized(header->payload())) { | |
| 1518 // We hit this branch when a GC strikes before GarbageCollected<>'s | |
| 1519 // constructor runs. | |
| 1520 // | |
| 1521 // class A : public GarbageCollected<A> { virtual void f() = 0; }; | |
| 1522 // class B : public A { | |
| 1523 // B() : A(foo()) { }; | |
| 1524 // }; | |
| 1525 // | |
| 1526 // If foo() allocates something and triggers a GC, the vtable of A | |
| 1527 // has not yet been initialized. In this case, we should mark the A | |
| 1528 // object without tracing any member of the A object. | |
| 1529 visitor->markHeaderNoTracing(header); | |
| 1530 ASSERT(isUninitializedMemory(header->payload(), header->payloadSize())); | |
| 1531 } else { | |
| 1532 visitor->markHeader(header, gcInfo->m_trace); | |
| 1533 } | |
| 1534 } | |
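
The vTableInitialized() guard relies on the payload of a not-yet-constructed object still being zero-filled, so its vtable slot (the first word) reads as zero. A hedged sketch of that idea; the real helper lives in the heap headers and may differ in detail:

    #include <cstdint>
    #include <cstring>

    // Illustrative stand-in: for a polymorphic object the vtable pointer occupies
    // the first word of the payload. If the constructor has not run yet, the
    // payload is still zero-filled and this returns false.
    static bool vTableInitializedSketch(const void* objectPayload)
    {
        uintptr_t firstWord = 0;
        std::memcpy(&firstWord, objectPayload, sizeof(firstWord));
        return firstWord != 0;
    }

    int main()
    {
        uintptr_t zeroed[2] = { 0, 0 };
        uintptr_t constructed[2] = { 0x1234, 0 };
        return (!vTableInitializedSketch(zeroed) && vTableInitializedSketch(constructed)) ? 0 : 1;
    }
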
| 1535 | |
| 1536 void NormalPage::checkAndMarkPointer(Visitor* visitor, Address address) | |
| 1537 { | |
| 1538 ASSERT(contains(address)); | |
| 1539 HeapObjectHeader* header = findHeaderFromAddress(address); | |
| 1540 if (!header || header->isDead()) | |
| 1541 return; | |
| 1542 markPointer(visitor, header); | |
| 1543 } | |
| 1544 | |
| 1545 void NormalPage::markOrphaned() | |
| 1546 { | |
| 1547 // Zap the payload with a recognizable value to detect any incorrect | |
| 1548 // cross thread pointer usage. | |
| 1549 #if defined(ADDRESS_SANITIZER) | |
| 1550 // This needs to zap poisoned memory as well. | |
| 1551 // Force unpoison memory before memset. | |
| 1552 ASAN_UNPOISON_MEMORY_REGION(payload(), payloadSize()); | |
| 1553 #endif | |
| 1554 OrphanedPagePool::asanDisabledMemset(payload(), OrphanedPagePool::orphanedZapValue, payloadSize()); | |
| 1555 BasePage::markOrphaned(); | |
| 1556 } | |
| 1557 | |
| 1558 void NormalPage::takeSnapshot(String dumpName, size_t pageIndex, ThreadState::GCSnapshotInfo& info, size_t* outFreeSize, size_t* outFreeCount) | |
| 1559 { | |
| 1560 dumpName.append(String::format("/pages/page_%lu", static_cast<unsigned long>(pageIndex))); | |
| 1561 WebMemoryAllocatorDump* pageDump = BlinkGCMemoryDumpProvider::instance()->createMemoryAllocatorDumpForCurrentGC(dumpName); | |
| 1562 | |
| 1563 HeapObjectHeader* header = nullptr; | |
| 1564 size_t liveCount = 0; | |
| 1565 size_t deadCount = 0; | |
| 1566 size_t freeCount = 0; | |
| 1567 size_t liveSize = 0; | |
| 1568 size_t deadSize = 0; | |
| 1569 size_t freeSize = 0; | |
| 1570 for (Address headerAddress = payload(); headerAddress < payloadEnd(); headerAddress += header->size()) { | |
| 1571 header = reinterpret_cast<HeapObjectHeader*>(headerAddress); | |
| 1572 if (header->isFree()) { | |
| 1573 freeCount++; | |
| 1574 freeSize += header->size(); | |
| 1575 } else if (header->isMarked()) { | |
| 1576 liveCount++; | |
| 1577 liveSize += header->size(); | |
| 1578 | |
| 1579 size_t gcInfoIndex = header->gcInfoIndex(); | |
| 1580 info.liveCount[gcInfoIndex]++; | |
| 1581 info.liveSize[gcInfoIndex] += header->size(); | |
| 1582 } else { | |
| 1583 deadCount++; | |
| 1584 deadSize += header->size(); | |
| 1585 | |
| 1586 size_t gcInfoIndex = header->gcInfoIndex(); | |
| 1587 info.deadCount[gcInfoIndex]++; | |
| 1588 info.deadSize[gcInfoIndex] += header->size(); | |
| 1589 } | |
| 1590 } | |
| 1591 | |
| 1592 pageDump->AddScalar("live_count", "objects", liveCount); | |
| 1593 pageDump->AddScalar("dead_count", "objects", deadCount); | |
| 1594 pageDump->AddScalar("free_count", "objects", freeCount); | |
| 1595 pageDump->AddScalar("live_size", "bytes", liveSize); | |
| 1596 pageDump->AddScalar("dead_size", "bytes", deadSize); | |
| 1597 pageDump->AddScalar("free_size", "bytes", freeSize); | |
| 1598 *outFreeSize = freeSize; | |
| 1599 *outFreeCount = freeCount; | |
| 1600 } | |
| 1601 | |
| 1602 #if ENABLE(GC_PROFILING) | |
| 1603 const GCInfo* NormalPage::findGCInfo(Address address) | |
| 1604 { | |
| 1605 if (address < payload()) | |
| 1606 return nullptr; | |
| 1607 | |
| 1608 HeapObjectHeader* header = findHeaderFromAddress(address); | |
| 1609 if (!header) | |
| 1610 return nullptr; | |
| 1611 | |
| 1612 return Heap::gcInfo(header->gcInfoIndex()); | |
| 1613 } | |
| 1614 #endif | |
| 1615 | |
| 1616 #if ENABLE(GC_PROFILING) | |
| 1617 void NormalPage::snapshot(TracedValue* json, ThreadState::SnapshotInfo* info) | |
| 1618 { | |
| 1619 HeapObjectHeader* header = nullptr; | |
| 1620 for (Address addr = payload(); addr < payloadEnd(); addr += header->size()) { | |
| 1621 header = reinterpret_cast<HeapObjectHeader*>(addr); | |
| 1622 if (json) | |
| 1623 json->pushInteger(header->encodedSize()); | |
| 1624 if (header->isFree()) { | |
| 1625 info->freeSize += header->size(); | |
| 1626 continue; | |
| 1627 } | |
| 1628 ASSERT(header->checkHeader()); | |
| 1629 | |
| 1630 size_t tag = info->getClassTag(Heap::gcInfo(header->gcInfoIndex())); | |
| 1631 size_t age = header->age(); | |
| 1632 if (json) | |
| 1633 json->pushInteger(tag); | |
| 1634 if (header->isMarked()) { | |
| 1635 info->liveCount[tag] += 1; | |
| 1636 info->liveSize[tag] += header->size(); | |
| 1637 // Count objects that are live when promoted to the final generation. | |
| 1638 if (age == maxHeapObjectAge - 1) | |
| 1639 info->generations[tag][maxHeapObjectAge] += 1; | |
| 1640 } else { | |
| 1641 info->deadCount[tag] += 1; | |
| 1642 info->deadSize[tag] += header->size(); | |
| 1643 // Count objects that are dead before the final generation. | |
| 1644 if (age < maxHeapObjectAge) | |
| 1645 info->generations[tag][age] += 1; | |
| 1646 } | |
| 1647 } | |
| 1648 } | |
| 1649 | |
| 1650 void NormalPage::incrementMarkedObjectsAge() | |
| 1651 { | |
| 1652 HeapObjectHeader* header = nullptr; | |
| 1653 for (Address address = payload(); address < payloadEnd(); address += header->size()) { | |
| 1654 header = reinterpret_cast<HeapObjectHeader*>(address); | |
| 1655 if (header->isMarked()) | |
| 1656 header->incrementAge(); | |
| 1657 } | |
| 1658 } | |
| 1659 | |
| 1660 void NormalPage::countMarkedObjects(ClassAgeCountsMap& classAgeCounts) | |
| 1661 { | |
| 1662 HeapObjectHeader* header = nullptr; | |
| 1663 for (Address address = payload(); address < payloadEnd(); address += header->size()) { | |
| 1664 header = reinterpret_cast<HeapObjectHeader*>(address); | |
| 1665 if (header->isMarked()) { | |
| 1666 String className(classOf(header->payload())); | |
| 1667 ++(classAgeCounts.add(className, AgeCounts()).storedValue->value.ages[header->age()]); | |
| 1668 } | |
| 1669 } | |
| 1670 } | |
| 1671 | |
| 1672 void NormalPage::countObjectsToSweep(ClassAgeCountsMap& classAgeCounts) | |
| 1673 { | |
| 1674 HeapObjectHeader* header = nullptr; | |
| 1675 for (Address address = payload(); address < payloadEnd(); address += header->size()) { | |
| 1676 header = reinterpret_cast<HeapObjectHeader*>(address); | |
| 1677 if (!header->isFree() && !header->isMarked()) { | |
| 1678 String className(classOf(header->payload())); | |
| 1679 ++(classAgeCounts.add(className, AgeCounts()).storedValue->value.ages[header->age()]); | |
| 1680 } | |
| 1681 } | |
| 1682 } | |
| 1683 #endif | |
| 1684 | |
| 1685 #if ENABLE(ASSERT) || ENABLE(GC_PROFILING) | |
| 1686 bool NormalPage::contains(Address addr) | |
| 1687 { | |
| 1688 Address blinkPageStart = roundToBlinkPageStart(address()); | |
| 1689 ASSERT(blinkPageStart == address() - blinkGuardPageSize); // Page is at aligned address plus guard page size. | |
| 1690 return blinkPageStart <= addr && addr < blinkPageStart + blinkPageSize; | |
| 1691 } | |
| 1692 #endif | |
| 1693 | |
| 1694 NormalPageHeap* NormalPage::heapForNormalPage() | |
| 1695 { | |
| 1696 return static_cast<NormalPageHeap*>(heap()); | |
| 1697 } | |
| 1698 | |
| 1699 LargeObjectPage::LargeObjectPage(PageMemory* storage, BaseHeap* heap, size_t payloadSize) | |
| 1700 : BasePage(storage, heap) | |
| 1701 , m_payloadSize(payloadSize) | |
| 1702 #if ENABLE(ASAN_CONTAINER_ANNOTATIONS) | |
| 1703 , m_isVectorBackingPage(false) | |
| 1704 #endif | |
| 1705 { | |
| 1706 } | |
| 1707 | |
| 1708 size_t LargeObjectPage::objectPayloadSizeForTesting() | |
| 1709 { | |
| 1710 markAsSwept(); | |
| 1711 return payloadSize(); | |
| 1712 } | |
| 1713 | |
| 1714 bool LargeObjectPage::isEmpty() | |
| 1715 { | |
| 1716 return !heapObjectHeader()->isMarked(); | |
| 1717 } | |
| 1718 | |
| 1719 void LargeObjectPage::removeFromHeap() | |
| 1720 { | |
| 1721 static_cast<LargeObjectHeap*>(heap())->freeLargeObjectPage(this); | |
| 1722 } | |
| 1723 | |
| 1724 void LargeObjectPage::sweep() | |
| 1725 { | |
| 1726 heapObjectHeader()->unmark(); | |
| 1727 Heap::increaseMarkedObjectSize(size()); | |
| 1728 } | |
| 1729 | |
| 1730 void LargeObjectPage::makeConsistentForGC() | |
| 1731 { | |
| 1732 HeapObjectHeader* header = heapObjectHeader(); | |
| 1733 if (header->isMarked()) { | |
| 1734 header->unmark(); | |
| 1735 Heap::increaseMarkedObjectSize(size()); | |
| 1736 } else { | |
| 1737 header->markDead(); | |
| 1738 } | |
| 1739 } | |
| 1740 | |
| 1741 void LargeObjectPage::makeConsistentForMutator() | |
| 1742 { | |
| 1743 HeapObjectHeader* header = heapObjectHeader(); | |
| 1744 if (header->isMarked()) | |
| 1745 header->unmark(); | |
| 1746 } | |
| 1747 | |
| 1748 #if defined(ADDRESS_SANITIZER) | |
| 1749 void LargeObjectPage::poisonObjects(ThreadState::ObjectsToPoison objectsToPoison, ThreadState::Poisoning poisoning) | |
| 1750 { | |
| 1751 HeapObjectHeader* header = heapObjectHeader(); | |
| 1752 if (objectsToPoison == ThreadState::MarkedAndUnmarked || !header->isMarked()) { | |
| 1753 if (poisoning == ThreadState::SetPoison) | |
| 1754 ASAN_POISON_MEMORY_REGION(header->payload(), header->payloadSize()); | |
| 1755 else | |
| 1756 ASAN_UNPOISON_MEMORY_REGION(header->payload(), header->payloadSize()); | |
| 1757 } | |
| 1758 } | |
| 1759 #endif | |
| 1760 | |
| 1761 void LargeObjectPage::checkAndMarkPointer(Visitor* visitor, Address address) | |
| 1762 { | |
| 1763 ASSERT(contains(address)); | |
| 1764 if (!containedInObjectPayload(address) || heapObjectHeader()->isDead()) | |
| 1765 return; | |
| 1766 markPointer(visitor, heapObjectHeader()); | |
| 1767 } | |
| 1768 | |
| 1769 void LargeObjectPage::markOrphaned() | |
| 1770 { | |
| 1771 // Zap the payload with a recognizable value to detect any incorrect | |
| 1772 // cross thread pointer usage. | |
| 1773 OrphanedPagePool::asanDisabledMemset(payload(), OrphanedPagePool::orphanedZapValue, payloadSize()); | |
| 1774 BasePage::markOrphaned(); | |
| 1775 } | |
| 1776 | |
| 1777 void LargeObjectPage::takeSnapshot(String dumpName, size_t pageIndex, ThreadState::GCSnapshotInfo& info, size_t* outFreeSize, size_t* outFreeCount) | |
| 1778 { | |
| 1779 dumpName.append(String::format("/pages/page_%lu", static_cast<unsigned long>(pageIndex))); | |
| 1780 WebMemoryAllocatorDump* pageDump = BlinkGCMemoryDumpProvider::instance()->createMemoryAllocatorDumpForCurrentGC(dumpName); | |
| 1781 | |
| 1782 size_t liveSize = 0; | |
| 1783 size_t deadSize = 0; | |
| 1784 size_t liveCount = 0; | |
| 1785 size_t deadCount = 0; | |
| 1786 HeapObjectHeader* header = heapObjectHeader(); | |
| 1787 size_t gcInfoIndex = header->gcInfoIndex(); | |
| 1788 if (header->isMarked()) { | |
| 1789 liveCount = 1; | |
| 1790 liveSize += header->payloadSize(); | |
| 1791 info.liveCount[gcInfoIndex]++; | |
| 1792 info.liveSize[gcInfoIndex] += header->size(); | |
| 1793 } else { | |
| 1794 deadCount = 1; | |
| 1795 deadSize += header->payloadSize(); | |
| 1796 info.deadCount[gcInfoIndex]++; | |
| 1797 info.deadSize[gcInfoIndex] += header->size(); | |
| 1798 } | |
| 1799 | |
| 1800 pageDump->AddScalar("live_count", "objects", liveCount); | |
| 1801 pageDump->AddScalar("dead_count", "objects", deadCount); | |
| 1802 pageDump->AddScalar("live_size", "bytes", liveSize); | |
| 1803 pageDump->AddScalar("dead_size", "bytes", deadSize); | |
| 1804 } | |
| 1805 | |
| 1806 #if ENABLE(GC_PROFILING) | |
| 1807 const GCInfo* LargeObjectPage::findGCInfo(Address address) | |
| 1808 { | |
| 1809 if (!containedInObjectPayload(address)) | |
| 1810 return nullptr; | |
| 1811 HeapObjectHeader* header = heapObjectHeader(); | |
| 1812 return Heap::gcInfo(header->gcInfoIndex()); | |
| 1813 } | |
| 1814 | |
| 1815 void LargeObjectPage::snapshot(TracedValue* json, ThreadState::SnapshotInfo* info) | |
| 1816 { | |
| 1817 HeapObjectHeader* header = heapObjectHeader(); | |
| 1818 size_t tag = info->getClassTag(Heap::gcInfo(header->gcInfoIndex())); | |
| 1819 size_t age = header->age(); | |
| 1820 if (header->isMarked()) { | |
| 1821 info->liveCount[tag] += 1; | |
| 1822 info->liveSize[tag] += header->size(); | |
| 1823 // Count objects that are live when promoted to the final generation. | |
| 1824 if (age == maxHeapObjectAge - 1) | |
| 1825 info->generations[tag][maxHeapObjectAge] += 1; | |
| 1826 } else { | |
| 1827 info->deadCount[tag] += 1; | |
| 1828 info->deadSize[tag] += header->size(); | |
| 1829 // Count objects that are dead before the final generation. | |
| 1830 if (age < maxHeapObjectAge) | |
| 1831 info->generations[tag][age] += 1; | |
| 1832 } | |
| 1833 | |
| 1834 if (json) { | |
| 1835 json->setInteger("class", tag); | |
| 1836 json->setInteger("size", header->size()); | |
| 1837 json->setInteger("isMarked", header->isMarked()); | |
| 1838 } | |
| 1839 } | |
| 1840 | |
| 1841 void LargeObjectPage::incrementMarkedObjectsAge() | |
| 1842 { | |
| 1843 HeapObjectHeader* header = heapObjectHeader(); | |
| 1844 if (header->isMarked()) | |
| 1845 header->incrementAge(); | |
| 1846 } | |
| 1847 | |
| 1848 void LargeObjectPage::countMarkedObjects(ClassAgeCountsMap& classAgeCounts) | |
| 1849 { | |
| 1850 HeapObjectHeader* header = heapObjectHeader(); | |
| 1851 if (header->isMarked()) { | |
| 1852 String className(classOf(header->payload())); | |
| 1853 ++(classAgeCounts.add(className, AgeCounts()).storedValue->value.ages[header->age()]); | |
| 1854 } | |
| 1855 } | |
| 1856 | |
| 1857 void LargeObjectPage::countObjectsToSweep(ClassAgeCountsMap& classAgeCounts) | |
| 1858 { | |
| 1859 HeapObjectHeader* header = heapObjectHeader(); | |
| 1860 if (!header->isFree() && !header->isMarked()) { | |
| 1861 String className(classOf(header->payload())); | |
| 1862 ++(classAgeCounts.add(className, AgeCounts()).storedValue->value.ages[header->age()]); | |
| 1863 } | |
| 1864 } | |
| 1865 #endif | |
| 1866 | |
| 1867 #if ENABLE(ASSERT) || ENABLE(GC_PROFILING) | |
| 1868 bool LargeObjectPage::contains(Address object) | |
| 1869 { | |
| 1870 return roundToBlinkPageStart(address()) <= object && object < roundToBlinkPageEnd(address() + size()); | |
| 1871 } | |
| 1872 #endif | |
| 1873 | |
| 1874 void HeapDoesNotContainCache::flush() | |
| 1875 { | |
| 1876 if (m_hasEntries) { | |
| 1877 for (int i = 0; i < numberOfEntries; ++i) | |
| 1878 m_entries[i] = nullptr; | |
| 1879 m_hasEntries = false; | |
| 1880 } | |
| 1881 } | |
| 1882 | |
| 1883 size_t HeapDoesNotContainCache::hash(Address address) | |
| 1884 { | |
| 1885 size_t value = (reinterpret_cast<size_t>(address) >> blinkPageSizeLog2); | |
| 1886 value ^= value >> numberOfEntriesLog2; | |
| 1887 value ^= value >> (numberOfEntriesLog2 * 2); | |
| 1888 value &= numberOfEntries - 1; | |
| 1889 return value & ~1; // Returns only even numbers (each bucket spans two consecutive entries). | |
| 1890 } | |
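
The hash folds page-granular address bits down into the table size and clears the low bit, so every probe lands on the even slot of a two-entry bucket. A quick standalone check of the "always even, always in range" property; the constants below are assumed for illustration and need not match the real cache:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // Assumed values for illustration; the real constants live in the cache class.
    static const size_t kBlinkPageSizeLog2 = 17;      // 128 KB pages
    static const size_t kNumberOfEntriesLog2 = 12;
    static const size_t kNumberOfEntries = 1 << kNumberOfEntriesLog2;

    static size_t cacheHash(uintptr_t address)
    {
        size_t value = address >> kBlinkPageSizeLog2;
        value ^= value >> kNumberOfEntriesLog2;
        value ^= value >> (kNumberOfEntriesLog2 * 2);
        value &= kNumberOfEntries - 1;
        return value & ~static_cast<size_t>(1);       // even slot: start of a 2-entry bucket
    }

    int main()
    {
        for (uintptr_t addr = 0; addr < (1u << 28); addr += (1u << 20)) {
            size_t index = cacheHash(addr);
            assert(!(index & 1) && index + 1 < kNumberOfEntries);
        }
    }
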
| 1891 | |
| 1892 bool HeapDoesNotContainCache::lookup(Address address) | |
| 1893 { | |
| 1894 ASSERT(ThreadState::current()->isInGC()); | |
| 1895 | |
| 1896 size_t index = hash(address); | |
| 1897 ASSERT(!(index & 1)); | |
| 1898 Address cachePage = roundToBlinkPageStart(address); | |
| 1899 if (m_entries[index] == cachePage) | |
| 1900 return m_entries[index]; | |
| 1901 if (m_entries[index + 1] == cachePage) | |
| 1902 return m_entries[index + 1]; | |
| 1903 return false; | |
| 1904 } | |
| 1905 | |
| 1906 void HeapDoesNotContainCache::addEntry(Address address) | |
| 1907 { | |
| 1908 ASSERT(ThreadState::current()->isInGC()); | |
| 1909 | |
| 1910 m_hasEntries = true; | |
| 1911 size_t index = hash(address); | |
| 1912 ASSERT(!(index & 1)); | |
| 1913 Address cachePage = roundToBlinkPageStart(address); | |
| 1914 m_entries[index + 1] = m_entries[index]; | |
| 1915 m_entries[index] = cachePage; | |
| 1916 } | |
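
Taken together, lookup() and addEntry() form a small negative cache keyed by Blink page: the newest page takes the even slot of its bucket and the previous occupant is demoted to the odd slot, evicting whatever was there. The replacement policy in isolation, with simplified illustrative types:

    #include <cassert>
    #include <cstdint>

    // Minimal model of the 2-way bucket replacement used by the cache: the new
    // page goes into the even slot, the previous occupant moves to the odd slot,
    // and whatever was in the odd slot is evicted.
    struct Bucket {
        uintptr_t entries[2] = { 0, 0 };

        bool lookup(uintptr_t page) const
        {
            return entries[0] == page || entries[1] == page;
        }

        void add(uintptr_t page)
        {
            entries[1] = entries[0];   // demote the previous most-recent entry
            entries[0] = page;         // new most-recent entry
        }
    };

    int main()
    {
        Bucket bucket;
        bucket.add(0x10000);
        bucket.add(0x20000);
        assert(bucket.lookup(0x10000) && bucket.lookup(0x20000));
        bucket.add(0x30000);           // evicts 0x10000
        assert(!bucket.lookup(0x10000));
    }
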
| 1917 | |
| 1918 void Heap::flushHeapDoesNotContainCache() | 148 void Heap::flushHeapDoesNotContainCache() |
| 1919 { | 149 { |
| 1920 s_heapDoesNotContainCache->flush(); | 150 s_heapDoesNotContainCache->flush(); |
| 1921 } | 151 } |
| 1922 | 152 |
| 1923 void Heap::init() | 153 void Heap::init() |
| 1924 { | 154 { |
| 1925 ThreadState::init(); | 155 ThreadState::init(); |
| 1926 s_markingStack = new CallbackStack(); | 156 s_markingStack = new CallbackStack(); |
| 1927 s_postMarkingCallbackStack = new CallbackStack(); | 157 s_postMarkingCallbackStack = new CallbackStack(); |
| (...skipping 621 matching lines...) |
| 2549 size_t Heap::s_objectSizeAtLastGC = 0; | 779 size_t Heap::s_objectSizeAtLastGC = 0; |
| 2550 size_t Heap::s_markedObjectSize = 0; | 780 size_t Heap::s_markedObjectSize = 0; |
| 2551 size_t Heap::s_markedObjectSizeAtLastCompleteSweep = 0; | 781 size_t Heap::s_markedObjectSizeAtLastCompleteSweep = 0; |
| 2552 size_t Heap::s_persistentCount = 0; | 782 size_t Heap::s_persistentCount = 0; |
| 2553 size_t Heap::s_persistentCountAtLastGC = 0; | 783 size_t Heap::s_persistentCountAtLastGC = 0; |
| 2554 size_t Heap::s_collectedPersistentCount = 0; | 784 size_t Heap::s_collectedPersistentCount = 0; |
| 2555 size_t Heap::s_partitionAllocSizeAtLastGC = 0; | 785 size_t Heap::s_partitionAllocSizeAtLastGC = 0; |
| 2556 double Heap::s_estimatedMarkingTimePerByte = 0.0; | 786 double Heap::s_estimatedMarkingTimePerByte = 0.0; |
| 2557 | 787 |
| 2558 } // namespace blink | 788 } // namespace blink |