OLD | NEW |
1 /* | 1 /* |
2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
6 * met: | 6 * met: |
7 * | 7 * |
8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
(...skipping 80 matching lines...)
91 void HeapObjectHeader::zapMagic() | 91 void HeapObjectHeader::zapMagic() |
92 { | 92 { |
93 ASSERT(checkHeader()); | 93 ASSERT(checkHeader()); |
94 m_magic = zappedMagic; | 94 m_magic = zappedMagic; |
95 } | 95 } |
96 #endif | 96 #endif |
97 | 97 |
98 void HeapObjectHeader::finalize(Address object, size_t objectSize) | 98 void HeapObjectHeader::finalize(Address object, size_t objectSize) |
99 { | 99 { |
100 HeapAllocHooks::freeHookIfEnabled(object); | 100 HeapAllocHooks::freeHookIfEnabled(object); |
101 const GCInfo* gcInfo = ThreadHeap::gcInfo(gcInfoIndex()); | 101 const GCInfo* gcInfo = Heap::gcInfo(gcInfoIndex()); |
102 if (gcInfo->hasFinalizer()) | 102 if (gcInfo->hasFinalizer()) |
103 gcInfo->m_finalize(object); | 103 gcInfo->m_finalize(object); |
104 | 104 |
105 ASAN_RETIRE_CONTAINER_ANNOTATION(object, objectSize); | 105 ASAN_RETIRE_CONTAINER_ANNOTATION(object, objectSize); |
106 } | 106 } |
107 | 107 |
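Note: the finalize() path above looks up the GCInfo record for the object's gcInfoIndex() and invokes its finalizer callback only when one is registered. A minimal sketch of that dispatch, assuming a deliberately simplified GCInfo (the real struct carries more fields such as trace and vtable flags; names other than m_finalize are illustrative):

    // Simplified stand-in for GCInfo: a nullable finalizer callback plus the
    // hasFinalizer() predicate used above. Not the real Blink layout.
    using Address = char*;

    struct GCInfoSketch {
        void (*m_finalize)(void*); // null for trivially destructible types
        bool hasFinalizer() const { return m_finalize; }
    };

    void finalizeObject(const GCInfoSketch* gcInfo, Address object)
    {
        if (gcInfo->hasFinalizer())
            gcInfo->m_finalize(object); // typically runs the object's destructor
    }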
108 BaseArena::BaseArena(ThreadState* state, int index) | 108 BaseArena::BaseArena(ThreadState* state, int index) |
109 : m_firstPage(nullptr) | 109 : m_firstPage(nullptr) |
110 , m_firstUnsweptPage(nullptr) | 110 , m_firstUnsweptPage(nullptr) |
111 , m_threadState(state) | 111 , m_threadState(state) |
112 , m_index(index) | 112 , m_index(index) |
113 { | 113 { |
114 } | 114 } |
115 | 115 |
116 BaseArena::~BaseArena() | 116 BaseArena::~BaseArena() |
117 { | 117 { |
118 ASSERT(!m_firstPage); | 118 ASSERT(!m_firstPage); |
119 ASSERT(!m_firstUnsweptPage); | 119 ASSERT(!m_firstUnsweptPage); |
120 } | 120 } |
121 | 121 |
122 void BaseArena::cleanupPages() | 122 void BaseArena::cleanupPages() |
123 { | 123 { |
124 clearFreeLists(); | 124 clearFreeLists(); |
125 | 125 |
126 ASSERT(!m_firstUnsweptPage); | 126 ASSERT(!m_firstUnsweptPage); |
127 // Add the BaseArena's pages to the orphanedPagePool. | 127 // Add the BaseArena's pages to the orphanedPagePool. |
128 for (BasePage* page = m_firstPage; page; page = page->next()) { | 128 for (BasePage* page = m_firstPage; page; page = page->next()) { |
129 ThreadHeap::heapStats().decreaseAllocatedSpace(page->size()); | 129 Heap::heapStats().decreaseAllocatedSpace(page->size()); |
130 ThreadHeap::getOrphanedPagePool()->addOrphanedPage(arenaIndex(), page); | 130 Heap::getOrphanedPagePool()->addOrphanedPage(arenaIndex(), page); |
131 } | 131 } |
132 m_firstPage = nullptr; | 132 m_firstPage = nullptr; |
133 } | 133 } |
134 | 134 |
135 void BaseArena::takeSnapshot(const String& dumpBaseName, ThreadState::GCSnapshotInfo& info) | 135 void BaseArena::takeSnapshot(const String& dumpBaseName, ThreadState::GCSnapshotInfo& info) |
136 { | 136 { |
137 // |dumpBaseName| at this point is "blink_gc/thread_X/heaps/HeapName" | 137 // |dumpBaseName| at this point is "blink_gc/thread_X/heaps/HeapName" |
138 WebMemoryAllocatorDump* allocatorDump = BlinkGCMemoryDumpProvider::instance()->createMemoryAllocatorDumpForCurrentGC(dumpBaseName); | 138 WebMemoryAllocatorDump* allocatorDump = BlinkGCMemoryDumpProvider::instance()->createMemoryAllocatorDumpForCurrentGC(dumpBaseName); |
139 size_t pageCount = 0; | 139 size_t pageCount = 0; |
140 BasePage::HeapSnapshotInfo heapInfo; | 140 BasePage::HeapSnapshotInfo heapInfo; |
(...skipping 134 matching lines...)
275 if (getThreadState()->sweepForbidden()) | 275 if (getThreadState()->sweepForbidden()) |
276 return nullptr; | 276 return nullptr; |
277 | 277 |
278 TRACE_EVENT0("blink_gc", "BaseArena::lazySweepPages"); | 278 TRACE_EVENT0("blink_gc", "BaseArena::lazySweepPages"); |
279 ThreadState::SweepForbiddenScope sweepForbidden(getThreadState()); | 279 ThreadState::SweepForbiddenScope sweepForbidden(getThreadState()); |
280 ScriptForbiddenIfMainThreadScope scriptForbidden; | 280 ScriptForbiddenIfMainThreadScope scriptForbidden; |
281 | 281 |
282 double startTime = WTF::currentTimeMS(); | 282 double startTime = WTF::currentTimeMS(); |
283 Address result = lazySweepPages(allocationSize, gcInfoIndex); | 283 Address result = lazySweepPages(allocationSize, gcInfoIndex); |
284 getThreadState()->accumulateSweepingTime(WTF::currentTimeMS() - startTime); | 284 getThreadState()->accumulateSweepingTime(WTF::currentTimeMS() - startTime); |
285 ThreadHeap::reportMemoryUsageForTracing(); | 285 Heap::reportMemoryUsageForTracing(); |
286 | 286 |
287 return result; | 287 return result; |
288 } | 288 } |
289 | 289 |
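Note: lazySweep() above wraps the actual page sweeping in an RAII SweepForbiddenScope and accumulates the elapsed wall-clock time into the thread state. A standalone sketch of that measure-around-work pattern, using illustrative names rather than the WTF/ThreadState API:

    #include <chrono>

    // Accumulate the milliseconds spent in a unit of work into a running
    // counter, mirroring accumulateSweepingTime(currentTimeMS() - startTime).
    static double g_accumulatedSweepingTimeMs = 0;

    template <typename Work>
    void sweepWithAccounting(Work doSweep)
    {
        auto start = std::chrono::steady_clock::now();
        doSweep(); // the bounded sweeping work
        auto end = std::chrono::steady_clock::now();
        g_accumulatedSweepingTimeMs +=
            std::chrono::duration<double, std::milli>(end - start).count();
    }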
290 void BaseArena::sweepUnsweptPage() | 290 void BaseArena::sweepUnsweptPage() |
291 { | 291 { |
292 BasePage* page = m_firstUnsweptPage; | 292 BasePage* page = m_firstUnsweptPage; |
293 if (page->isEmpty()) { | 293 if (page->isEmpty()) { |
294 page->unlink(&m_firstUnsweptPage); | 294 page->unlink(&m_firstUnsweptPage); |
295 page->removeFromHeap(); | 295 page->removeFromHeap(); |
(...skipping 17 matching lines...)
313 RELEASE_ASSERT(getThreadState()->isSweepingInProgress()); | 313 RELEASE_ASSERT(getThreadState()->isSweepingInProgress()); |
314 ASSERT(getThreadState()->sweepForbidden()); | 314 ASSERT(getThreadState()->sweepForbidden()); |
315 ASSERT(!getThreadState()->isMainThread() || ScriptForbiddenScope::isScriptForbidden()); | 315 ASSERT(!getThreadState()->isMainThread() || ScriptForbiddenScope::isScriptForbidden()); |
316 | 316 |
317 int pageCount = 1; | 317 int pageCount = 1; |
318 while (m_firstUnsweptPage) { | 318 while (m_firstUnsweptPage) { |
319 sweepUnsweptPage(); | 319 sweepUnsweptPage(); |
320 if (pageCount % deadlineCheckInterval == 0) { | 320 if (pageCount % deadlineCheckInterval == 0) { |
321 if (deadlineSeconds <= monotonicallyIncreasingTime()) { | 321 if (deadlineSeconds <= monotonicallyIncreasingTime()) { |
322 // Deadline has come. | 322 // Deadline has come. |
323 ThreadHeap::reportMemoryUsageForTracing(); | 323 Heap::reportMemoryUsageForTracing(); |
324 return !m_firstUnsweptPage; | 324 return !m_firstUnsweptPage; |
325 } | 325 } |
326 } | 326 } |
327 pageCount++; | 327 pageCount++; |
328 } | 328 } |
329 ThreadHeap::reportMemoryUsageForTracing(); | 329 Heap::reportMemoryUsageForTracing(); |
330 return true; | 330 return true; |
331 } | 331 } |
332 | 332 |
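Note: lazySweepWithDeadline() sweeps one unswept page per iteration and, to keep clock reads cheap, only compares against the deadline every deadlineCheckInterval pages; the return value reports whether every page was swept before the deadline hit. A self-contained sketch of that cadence (illustrative names and types, not the Blink API):

    #include <chrono>
    #include <deque>

    // Process queued work units until either the queue drains or the deadline
    // passes; the clock is consulted only every checkInterval units.
    bool processWithDeadline(std::deque<int>& workQueue,
                             std::chrono::steady_clock::time_point deadline,
                             int checkInterval = 10)
    {
        int count = 1;
        while (!workQueue.empty()) {
            workQueue.pop_front(); // "sweep" one unit
            if (count % checkInterval == 0
                && std::chrono::steady_clock::now() >= deadline)
                return workQueue.empty(); // deadline hit: report progress
            ++count;
        }
        return true; // everything swept within the deadline
    }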
333 void BaseArena::completeSweep() | 333 void BaseArena::completeSweep() |
334 { | 334 { |
335 RELEASE_ASSERT(getThreadState()->isSweepingInProgress()); | 335 RELEASE_ASSERT(getThreadState()->isSweepingInProgress()); |
336 ASSERT(getThreadState()->sweepForbidden()); | 336 ASSERT(getThreadState()->sweepForbidden()); |
337 ASSERT(!getThreadState()->isMainThread() || ScriptForbiddenScope::isScriptForbidden()); | 337 ASSERT(!getThreadState()->isMainThread() || ScriptForbiddenScope::isScriptForbidden()); |
338 | 338 |
339 while (m_firstUnsweptPage) { | 339 while (m_firstUnsweptPage) { |
340 sweepUnsweptPage(); | 340 sweepUnsweptPage(); |
341 } | 341 } |
342 ThreadHeap::reportMemoryUsageForTracing(); | 342 Heap::reportMemoryUsageForTracing(); |
343 } | 343 } |
344 | 344 |
345 NormalPageArena::NormalPageArena(ThreadState* state, int index) | 345 NormalPageArena::NormalPageArena(ThreadState* state, int index) |
346 : BaseArena(state, index) | 346 : BaseArena(state, index) |
347 , m_currentAllocationPoint(nullptr) | 347 , m_currentAllocationPoint(nullptr) |
348 , m_remainingAllocationSize(0) | 348 , m_remainingAllocationSize(0) |
349 , m_lastRemainingAllocationSize(0) | 349 , m_lastRemainingAllocationSize(0) |
350 , m_promptlyFreedSize(0) | 350 , m_promptlyFreedSize(0) |
351 { | 351 { |
352 clearFreeLists(); | 352 clearFreeLists(); |
(...skipping 38 matching lines...)
391 if (m_freeList.takeSnapshot(dumpName)) { | 391 if (m_freeList.takeSnapshot(dumpName)) { |
392 WebMemoryAllocatorDump* bucketsDump = BlinkGCMemoryDumpProvider::instance()->createMemoryAllocatorDumpForCurrentGC(dumpName + "/buckets"); | 392 WebMemoryAllocatorDump* bucketsDump = BlinkGCMemoryDumpProvider::instance()->createMemoryAllocatorDumpForCurrentGC(dumpName + "/buckets"); |
393 WebMemoryAllocatorDump* pagesDump = BlinkGCMemoryDumpProvider::instance()->createMemoryAllocatorDumpForCurrentGC(dumpName + "/pages"); | 393 WebMemoryAllocatorDump* pagesDump = BlinkGCMemoryDumpProvider::instance()->createMemoryAllocatorDumpForCurrentGC(dumpName + "/pages"); |
394 BlinkGCMemoryDumpProvider::instance()->currentProcessMemoryDump()->addOwnershipEdge(pagesDump->guid(), bucketsDump->guid()); | 394 BlinkGCMemoryDumpProvider::instance()->currentProcessMemoryDump()->addOwnershipEdge(pagesDump->guid(), bucketsDump->guid()); |
395 } | 395 } |
396 } | 396 } |
397 | 397 |
398 void NormalPageArena::allocatePage() | 398 void NormalPageArena::allocatePage() |
399 { | 399 { |
400 getThreadState()->shouldFlushHeapDoesNotContainCache(); | 400 getThreadState()->shouldFlushHeapDoesNotContainCache(); |
401 PageMemory* pageMemory = ThreadHeap::getFreePagePool()->takeFreePage(arenaIndex()); | 401 PageMemory* pageMemory = Heap::getFreePagePool()->takeFreePage(arenaIndex()); |
402 | 402 |
403 if (!pageMemory) { | 403 if (!pageMemory) { |
404 // Allocate a memory region for blinkPagesPerRegion pages that | 404 // Allocate a memory region for blinkPagesPerRegion pages that |
405 // will each have the following layout. | 405 // will each have the following layout. |
406 // | 406 // |
407 // [ guard os page | ... payload ... | guard os page ] | 407 // [ guard os page | ... payload ... | guard os page ] |
408 // ^---{ aligned to blink page size } | 408 // ^---{ aligned to blink page size } |
409 PageMemoryRegion* region = PageMemoryRegion::allocateNormalPages(ThreadHeap::getRegionTree()); | 409 PageMemoryRegion* region = PageMemoryRegion::allocateNormalPages(Heap::getRegionTree()); |
410 | 410 |
411 // Setup the PageMemory object for each of the pages in the region. | 411 // Setup the PageMemory object for each of the pages in the region. |
412 for (size_t i = 0; i < blinkPagesPerRegion; ++i) { | 412 for (size_t i = 0; i < blinkPagesPerRegion; ++i) { |
413 PageMemory* memory = PageMemory::setupPageMemoryInRegion(region, i * blinkPageSize, blinkPagePayloadSize()); | 413 PageMemory* memory = PageMemory::setupPageMemoryInRegion(region, i * blinkPageSize, blinkPagePayloadSize()); |
414 // Take the first possible page ensuring that this thread actually | 414 // Take the first possible page ensuring that this thread actually |
415 // gets a page and add the rest to the page pool. | 415 // gets a page and add the rest to the page pool. |
416 if (!pageMemory) { | 416 if (!pageMemory) { |
417 bool result = memory->commit(); | 417 bool result = memory->commit(); |
418 // If you hit the ASSERT, it will mean that you're hitting | 418 // If you hit the ASSERT, it will mean that you're hitting |
419 // the limit of the number of mmapped regions OS can support | 419 // the limit of the number of mmapped regions OS can support |
420 // (e.g., /proc/sys/vm/max_map_count in Linux). | 420 // (e.g., /proc/sys/vm/max_map_count in Linux). |
421 RELEASE_ASSERT(result); | 421 RELEASE_ASSERT(result); |
422 pageMemory = memory; | 422 pageMemory = memory; |
423 } else { | 423 } else { |
424 ThreadHeap::getFreePagePool()->addFreePage(arenaIndex(), memory); | 424 Heap::getFreePagePool()->addFreePage(arenaIndex(), memory); |
425 } | 425 } |
426 } | 426 } |
427 } | 427 } |
428 | 428 |
429 NormalPage* page = new (pageMemory->writableStart()) NormalPage(pageMemory, this); | 429 NormalPage* page = new (pageMemory->writableStart()) NormalPage(pageMemory, this); |
430 page->link(&m_firstPage); | 430 page->link(&m_firstPage); |
431 | 431 |
432 ThreadHeap::heapStats().increaseAllocatedSpace(page->size()); | 432 Heap::heapStats().increaseAllocatedSpace(page->size()); |
433 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) | 433 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) |
434 // Allow the following addToFreeList() to add the newly allocated memory | 434 // Allow the following addToFreeList() to add the newly allocated memory |
435 // to the free list. | 435 // to the free list. |
436 ASAN_UNPOISON_MEMORY_REGION(page->payload(), page->payloadSize()); | 436 ASAN_UNPOISON_MEMORY_REGION(page->payload(), page->payloadSize()); |
437 Address address = page->payload(); | 437 Address address = page->payload(); |
438 for (size_t i = 0; i < page->payloadSize(); i++) | 438 for (size_t i = 0; i < page->payloadSize(); i++) |
439 address[i] = reuseAllowedZapValue; | 439 address[i] = reuseAllowedZapValue; |
440 ASAN_POISON_MEMORY_REGION(page->payload(), page->payloadSize()); | 440 ASAN_POISON_MEMORY_REGION(page->payload(), page->payloadSize()); |
441 #endif | 441 #endif |
442 addToFreeList(page->payload(), page->payloadSize()); | 442 addToFreeList(page->payload(), page->payloadSize()); |
443 } | 443 } |
444 | 444 |
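Note: when the free-page pool is empty, allocatePage() above reserves a whole region of blinkPagesPerRegion page slots, commits and keeps the first slot for this thread, and hands every remaining slot to the shared free-page pool. A condensed sketch of that carve-and-pool step, with simplified types standing in for PageMemory/PageMemoryRegion:

    #include <cstddef>
    #include <vector>

    // Cut a freshly reserved region into page-sized slots: the caller keeps
    // slot 0, the rest are parked in a shared pool for later allocations.
    struct SlotSketch { std::size_t offsetInRegion; };

    SlotSketch carveRegion(std::size_t pagesPerRegion, std::size_t pageSize,
                           std::vector<SlotSketch>& freePool)
    {
        SlotSketch first{0}; // the slot this thread will use immediately
        for (std::size_t i = 1; i < pagesPerRegion; ++i)
            freePool.push_back(SlotSketch{i * pageSize}); // pool the remainder
        return first;
    }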
445 void NormalPageArena::freePage(NormalPage* page) | 445 void NormalPageArena::freePage(NormalPage* page) |
446 { | 446 { |
447 ThreadHeap::heapStats().decreaseAllocatedSpace(page->size()); | 447 Heap::heapStats().decreaseAllocatedSpace(page->size()); |
448 | 448 |
449 if (page->terminating()) { | 449 if (page->terminating()) { |
450 // The thread is shutting down and this page is being removed as a part | 450 // The thread is shutting down and this page is being removed as a part |
451 // of the thread local GC. In that case the object could be traced in | 451 // of the thread local GC. In that case the object could be traced in |
452 // the next global GC if there is a dangling pointer from a live thread | 452 // the next global GC if there is a dangling pointer from a live thread |
453 // heap to this dead thread heap. To guard against this, we put the | 453 // heap to this dead thread heap. To guard against this, we put the |
454 // page into the orphaned page pool and zap the page memory. This | 454 // page into the orphaned page pool and zap the page memory. This |
455 // ensures that tracing the dangling pointer in the next global GC just | 455 // ensures that tracing the dangling pointer in the next global GC just |
456 // crashes instead of causing use-after-frees. After the next global | 456 // crashes instead of causing use-after-frees. After the next global |
457 // GC, the orphaned pages are removed. | 457 // GC, the orphaned pages are removed. |
458 ThreadHeap::getOrphanedPagePool()->addOrphanedPage(arenaIndex(), page); | 458 Heap::getOrphanedPagePool()->addOrphanedPage(arenaIndex(), page); |
459 } else { | 459 } else { |
460 PageMemory* memory = page->storage(); | 460 PageMemory* memory = page->storage(); |
461 page->~NormalPage(); | 461 page->~NormalPage(); |
462 ThreadHeap::getFreePagePool()->addFreePage(arenaIndex(), memory); | 462 Heap::getFreePagePool()->addFreePage(arenaIndex(), memory); |
463 } | 463 } |
464 } | 464 } |
465 | 465 |
466 bool NormalPageArena::coalesce() | 466 bool NormalPageArena::coalesce() |
467 { | 467 { |
468 // Don't coalesce arenas if there are not enough promptly freed entries | 468 // Don't coalesce arenas if there are not enough promptly freed entries |
469 // to be coalesced. | 469 // to be coalesced. |
470 // | 470 // |
471 // FIXME: This threshold is determined just to optimize blink_perf | 471 // FIXME: This threshold is determined just to optimize blink_perf |
472 // benchmarks. Coalescing is very sensitive to the threshold and | 472 // benchmarks. Coalescing is very sensitive to the threshold and |
(...skipping 85 matching lines...)
558 } | 558 } |
559 | 559 |
560 bool NormalPageArena::expandObject(HeapObjectHeader* header, size_t newSize) | 560 bool NormalPageArena::expandObject(HeapObjectHeader* header, size_t newSize) |
561 { | 561 { |
562 // It's possible that Vector requests a smaller expanded size because | 562 // It's possible that Vector requests a smaller expanded size because |
563 // Vector::shrinkCapacity can set a capacity smaller than the actual payload | 563 // Vector::shrinkCapacity can set a capacity smaller than the actual payload |
564 // size. | 564 // size. |
565 ASSERT(header->checkHeader()); | 565 ASSERT(header->checkHeader()); |
566 if (header->payloadSize() >= newSize) | 566 if (header->payloadSize() >= newSize) |
567 return true; | 567 return true; |
568 size_t allocationSize = ThreadHeap::allocationSizeFromSize(newSize); | 568 size_t allocationSize = Heap::allocationSizeFromSize(newSize); |
569 ASSERT(allocationSize > header->size()); | 569 ASSERT(allocationSize > header->size()); |
570 size_t expandSize = allocationSize - header->size(); | 570 size_t expandSize = allocationSize - header->size(); |
571 if (isObjectAllocatedAtAllocationPoint(header) && expandSize <= m_remainingAllocationSize) { | 571 if (isObjectAllocatedAtAllocationPoint(header) && expandSize <= m_remainingAllocationSize) { |
572 m_currentAllocationPoint += expandSize; | 572 m_currentAllocationPoint += expandSize; |
573 ASSERT(m_remainingAllocationSize >= expandSize); | 573 ASSERT(m_remainingAllocationSize >= expandSize); |
574 setRemainingAllocationSize(m_remainingAllocationSize - expandSize); | 574 setRemainingAllocationSize(m_remainingAllocationSize - expandSize); |
575 // Unpoison the memory used for the object (payload). | 575 // Unpoison the memory used for the object (payload). |
576 SET_MEMORY_ACCESSIBLE(header->payloadEnd(), expandSize); | 576 SET_MEMORY_ACCESSIBLE(header->payloadEnd(), expandSize); |
577 header->setSize(allocationSize); | 577 header->setSize(allocationSize); |
578 ASSERT(findPageFromAddress(header->payloadEnd() - 1)); | 578 ASSERT(findPageFromAddress(header->payloadEnd() - 1)); |
579 return true; | 579 return true; |
580 } | 580 } |
581 return false; | 581 return false; |
582 } | 582 } |
583 | 583 |
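Note: expandObject() above only grows an object in place when it is the most recently bump-allocated object (its end coincides with the current allocation point) and the extra bytes still fit in the remaining allocation buffer; otherwise it returns false and the caller must reallocate. A minimal sketch of that check, with illustrative names and without the poisoning/accounting:

    #include <cstddef>

    // Bump-pointer in-place expansion: only the last allocated object can
    // absorb the bytes immediately following it.
    struct BumpArenaSketch {
        char* allocationPoint;   // next free byte in the current buffer
        std::size_t remaining;   // bytes left in the current buffer

        bool tryExpandInPlace(char* objectEnd, std::size_t extraBytes)
        {
            if (objectEnd != allocationPoint || extraBytes > remaining)
                return false;              // not the last object, or no room
            allocationPoint += extraBytes; // the object absorbs the gap
            remaining -= extraBytes;
            return true;
        }
    };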
584 bool NormalPageArena::shrinkObject(HeapObjectHeader* header, size_t newSize) | 584 bool NormalPageArena::shrinkObject(HeapObjectHeader* header, size_t newSize) |
585 { | 585 { |
586 ASSERT(header->checkHeader()); | 586 ASSERT(header->checkHeader()); |
587 ASSERT(header->payloadSize() > newSize); | 587 ASSERT(header->payloadSize() > newSize); |
588 size_t allocationSize = ThreadHeap::allocationSizeFromSize(newSize); | 588 size_t allocationSize = Heap::allocationSizeFromSize(newSize); |
589 ASSERT(header->size() > allocationSize); | 589 ASSERT(header->size() > allocationSize); |
590 size_t shrinkSize = header->size() - allocationSize; | 590 size_t shrinkSize = header->size() - allocationSize; |
591 if (isObjectAllocatedAtAllocationPoint(header)) { | 591 if (isObjectAllocatedAtAllocationPoint(header)) { |
592 m_currentAllocationPoint -= shrinkSize; | 592 m_currentAllocationPoint -= shrinkSize; |
593 setRemainingAllocationSize(m_remainingAllocationSize + shrinkSize); | 593 setRemainingAllocationSize(m_remainingAllocationSize + shrinkSize); |
594 SET_MEMORY_INACCESSIBLE(m_currentAllocationPoint, shrinkSize); | 594 SET_MEMORY_INACCESSIBLE(m_currentAllocationPoint, shrinkSize); |
595 header->setSize(allocationSize); | 595 header->setSize(allocationSize); |
596 return true; | 596 return true; |
597 } | 597 } |
598 ASSERT(shrinkSize >= sizeof(HeapObjectHeader)); | 598 ASSERT(shrinkSize >= sizeof(HeapObjectHeader)); |
(...skipping 190 matching lines...)
789 Address LargeObjectArena::doAllocateLargeObjectPage(size_t allocationSize, size_t gcInfoIndex) | 789 Address LargeObjectArena::doAllocateLargeObjectPage(size_t allocationSize, size_t gcInfoIndex) |
790 { | 790 { |
791 size_t largeObjectSize = LargeObjectPage::pageHeaderSize() + allocationSize; | 791 size_t largeObjectSize = LargeObjectPage::pageHeaderSize() + allocationSize; |
792 // If ASan is supported we add allocationGranularity bytes to the allocated | 792 // If ASan is supported we add allocationGranularity bytes to the allocated |
793 // space and poison that to detect overflows | 793 // space and poison that to detect overflows |
794 #if defined(ADDRESS_SANITIZER) | 794 #if defined(ADDRESS_SANITIZER) |
795 largeObjectSize += allocationGranularity; | 795 largeObjectSize += allocationGranularity; |
796 #endif | 796 #endif |
797 | 797 |
798 getThreadState()->shouldFlushHeapDoesNotContainCache(); | 798 getThreadState()->shouldFlushHeapDoesNotContainCache(); |
799 PageMemory* pageMemory = PageMemory::allocate(largeObjectSize, ThreadHeap::getRegionTree()); | 799 PageMemory* pageMemory = PageMemory::allocate(largeObjectSize, Heap::getRegionTree()); |
800 Address largeObjectAddress = pageMemory->writableStart(); | 800 Address largeObjectAddress = pageMemory->writableStart(); |
801 Address headerAddress = largeObjectAddress + LargeObjectPage::pageHeaderSize(); | 801 Address headerAddress = largeObjectAddress + LargeObjectPage::pageHeaderSize(); |
802 #if ENABLE(ASSERT) | 802 #if ENABLE(ASSERT) |
803 // Verify that the allocated PageMemory is expectedly zeroed. | 803 // Verify that the allocated PageMemory is expectedly zeroed. |
804 for (size_t i = 0; i < largeObjectSize; ++i) | 804 for (size_t i = 0; i < largeObjectSize; ++i) |
805 ASSERT(!largeObjectAddress[i]); | 805 ASSERT(!largeObjectAddress[i]); |
806 #endif | 806 #endif |
807 ASSERT(gcInfoIndex > 0); | 807 ASSERT(gcInfoIndex > 0); |
808 HeapObjectHeader* header = new (NotNull, headerAddress) HeapObjectHeader(largeObjectSizeInHeader, gcInfoIndex); | 808 HeapObjectHeader* header = new (NotNull, headerAddress) HeapObjectHeader(largeObjectSizeInHeader, gcInfoIndex); |
809 Address result = headerAddress + sizeof(*header); | 809 Address result = headerAddress + sizeof(*header); |
810 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); | 810 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); |
811 LargeObjectPage* largeObject = new (largeObjectAddress) LargeObjectPage(pageMemory, this, allocationSize); | 811 LargeObjectPage* largeObject = new (largeObjectAddress) LargeObjectPage(pageMemory, this, allocationSize); |
812 ASSERT(header->checkHeader()); | 812 ASSERT(header->checkHeader()); |
813 | 813 |
814 // Poison the object header and allocationGranularity bytes after the object | 814 // Poison the object header and allocationGranularity bytes after the object |
815 ASAN_POISON_MEMORY_REGION(header, sizeof(*header)); | 815 ASAN_POISON_MEMORY_REGION(header, sizeof(*header)); |
816 ASAN_POISON_MEMORY_REGION(largeObject->getAddress() + largeObject->size(), allocationGranularity); | 816 ASAN_POISON_MEMORY_REGION(largeObject->getAddress() + largeObject->size(), allocationGranularity); |
817 | 817 |
818 largeObject->link(&m_firstPage); | 818 largeObject->link(&m_firstPage); |
819 | 819 |
820 ThreadHeap::heapStats().increaseAllocatedSpace(largeObject->size()); | 820 Heap::heapStats().increaseAllocatedSpace(largeObject->size()); |
821 getThreadState()->increaseAllocatedObjectSize(largeObject->size()); | 821 getThreadState()->increaseAllocatedObjectSize(largeObject->size()); |
822 return result; | 822 return result; |
823 } | 823 } |
824 | 824 |
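Note: doAllocateLargeObjectPage() sizes the mapping as pageHeaderSize() + allocationSize (plus one allocationGranularity of ASan red zone), places the HeapObjectHeader right after the page header, and returns the address just past that header; allocationSize already accounts for the HeapObjectHeader. A rough offset sketch of that layout (names and arithmetic are illustrative):

    #include <cstddef>

    //   [ LargeObjectPage header | HeapObjectHeader | payload ... | red zone ]
    //   ^ writableStart()        ^ headerAddress    ^ returned Address
    struct LargeObjectLayoutSketch {
        std::size_t pageHeaderSize;  // LargeObjectPage::pageHeaderSize()
        std::size_t allocationSize;  // HeapObjectHeader + payload
        std::size_t headerSize;      // sizeof(HeapObjectHeader)
        std::size_t redZone;         // allocationGranularity under ASan, else 0

        std::size_t mappingSize() const { return pageHeaderSize + allocationSize + redZone; }
        std::size_t resultOffset() const { return pageHeaderSize + headerSize; }
    };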
825 void LargeObjectArena::freeLargeObjectPage(LargeObjectPage* object) | 825 void LargeObjectArena::freeLargeObjectPage(LargeObjectPage* object) |
826 { | 826 { |
827 ASAN_UNPOISON_MEMORY_REGION(object->payload(), object->payloadSize()); | 827 ASAN_UNPOISON_MEMORY_REGION(object->payload(), object->payloadSize()); |
828 object->heapObjectHeader()->finalize(object->payload(), object->payloadSize()); | 828 object->heapObjectHeader()->finalize(object->payload(), object->payloadSize()); |
829 ThreadHeap::heapStats().decreaseAllocatedSpace(object->size()); | 829 Heap::heapStats().decreaseAllocatedSpace(object->size()); |
830 | 830 |
831 // Unpoison the object header and allocationGranularity bytes after the | 831 // Unpoison the object header and allocationGranularity bytes after the |
832 // object before freeing. | 832 // object before freeing. |
833 ASAN_UNPOISON_MEMORY_REGION(object->heapObjectHeader(), sizeof(HeapObjectHeader)); | 833 ASAN_UNPOISON_MEMORY_REGION(object->heapObjectHeader(), sizeof(HeapObjectHeader)); |
834 ASAN_UNPOISON_MEMORY_REGION(object->getAddress() + object->size(), allocationGranularity); | 834 ASAN_UNPOISON_MEMORY_REGION(object->getAddress() + object->size(), allocationGranularity); |
835 | 835 |
836 if (object->terminating()) { | 836 if (object->terminating()) { |
837 ASSERT(ThreadState::current()->isTerminating()); | 837 ASSERT(ThreadState::current()->isTerminating()); |
838 // The thread is shutting down and this page is being removed as a part | 838 // The thread is shutting down and this page is being removed as a part |
839 // of the thread local GC. In that case the object could be traced in | 839 // of the thread local GC. In that case the object could be traced in |
840 // the next global GC if there is a dangling pointer from a live thread | 840 // the next global GC if there is a dangling pointer from a live thread |
841 // heap to this dead thread heap. To guard against this, we put the | 841 // heap to this dead thread heap. To guard against this, we put the |
842 // page into the orphaned page pool and zap the page memory. This | 842 // page into the orphaned page pool and zap the page memory. This |
843 // ensures that tracing the dangling pointer in the next global GC just | 843 // ensures that tracing the dangling pointer in the next global GC just |
844 // crashes instead of causing use-after-frees. After the next global | 844 // crashes instead of causing use-after-frees. After the next global |
845 // GC, the orphaned pages are removed. | 845 // GC, the orphaned pages are removed. |
846 ThreadHeap::getOrphanedPagePool()->addOrphanedPage(arenaIndex(), object); | 846 Heap::getOrphanedPagePool()->addOrphanedPage(arenaIndex(), object); |
847 } else { | 847 } else { |
848 ASSERT(!ThreadState::current()->isTerminating()); | 848 ASSERT(!ThreadState::current()->isTerminating()); |
849 PageMemory* memory = object->storage(); | 849 PageMemory* memory = object->storage(); |
850 object->~LargeObjectPage(); | 850 object->~LargeObjectPage(); |
851 delete memory; | 851 delete memory; |
852 } | 852 } |
853 } | 853 } |
854 | 854 |
855 Address LargeObjectArena::lazySweepPages(size_t allocationSize, size_t gcInfoIndex) | 855 Address LargeObjectArena::lazySweepPages(size_t allocationSize, size_t gcInfoIndex) |
856 { | 856 { |
(...skipping 454 matching lines...)
1311 if (objectFields[i] != 0) | 1311 if (objectFields[i] != 0) |
1312 return false; | 1312 return false; |
1313 } | 1313 } |
1314 return true; | 1314 return true; |
1315 } | 1315 } |
1316 #endif | 1316 #endif |
1317 | 1317 |
1318 static void markPointer(Visitor* visitor, HeapObjectHeader* header) | 1318 static void markPointer(Visitor* visitor, HeapObjectHeader* header) |
1319 { | 1319 { |
1320 ASSERT(header->checkHeader()); | 1320 ASSERT(header->checkHeader()); |
1321 const GCInfo* gcInfo = ThreadHeap::gcInfo(header->gcInfoIndex()); | 1321 const GCInfo* gcInfo = Heap::gcInfo(header->gcInfoIndex()); |
1322 if (gcInfo->hasVTable() && !vTableInitialized(header->payload())) { | 1322 if (gcInfo->hasVTable() && !vTableInitialized(header->payload())) { |
1323 // We hit this branch when a GC strikes before GarbageCollected<>'s | 1323 // We hit this branch when a GC strikes before GarbageCollected<>'s |
1324 // constructor runs. | 1324 // constructor runs. |
1325 // | 1325 // |
1326 // class A : public GarbageCollected<A> { virtual void f() = 0; }; | 1326 // class A : public GarbageCollected<A> { virtual void f() = 0; }; |
1327 // class B : public A { | 1327 // class B : public A { |
1328 // B() : A(foo()) { }; | 1328 // B() : A(foo()) { }; |
1329 // }; | 1329 // }; |
1330 // | 1330 // |
1331 // If foo() allocates something and triggers a GC, the vtable of A | 1331 // If foo() allocates something and triggers a GC, the vtable of A |
(...skipping 229 matching lines...)
1561 | 1561 |
1562 m_hasEntries = true; | 1562 m_hasEntries = true; |
1563 size_t index = hash(address); | 1563 size_t index = hash(address); |
1564 ASSERT(!(index & 1)); | 1564 ASSERT(!(index & 1)); |
1565 Address cachePage = roundToBlinkPageStart(address); | 1565 Address cachePage = roundToBlinkPageStart(address); |
1566 m_entries[index + 1] = m_entries[index]; | 1566 m_entries[index + 1] = m_entries[index]; |
1567 m_entries[index] = cachePage; | 1567 m_entries[index] = cachePage; |
1568 } | 1568 } |
1569 | 1569 |
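Note: the addEntry() snippet above implements a small two-way cache: each bucket is a pair of adjacent slots at an even index, the previous occupant of the first slot is demoted into the second, and the new page start takes the first slot. A compact sketch of the same insertion policy, with an illustrative capacity and hash in place of Blink's:

    #include <cstddef>

    constexpr std::size_t kNumEntries = 128; // must be even: 64 two-way buckets
    static void* g_entries[kNumEntries];

    static std::size_t bucketIndex(void* address)
    {
        std::size_t h = reinterpret_cast<std::size_t>(address) >> 17; // page-granular
        return (h % (kNumEntries / 2)) * 2; // always an even slot index
    }

    void addEntrySketch(void* pageStart)
    {
        std::size_t index = bucketIndex(pageStart);
        g_entries[index + 1] = g_entries[index]; // demote the previous entry
        g_entries[index] = pageStart;            // newest entry takes slot 0
    }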
1570 } // namespace blink | 1570 } // namespace blink |