Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 108 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 119 ASSERT(!m_firstUnsweptPage); | 119 ASSERT(!m_firstUnsweptPage); |
| 120 } | 120 } |
| 121 | 121 |
| 122 void BaseHeap::cleanupPages() | 122 void BaseHeap::cleanupPages() |
| 123 { | 123 { |
| 124 clearFreeLists(); | 124 clearFreeLists(); |
| 125 | 125 |
| 126 ASSERT(!m_firstUnsweptPage); | 126 ASSERT(!m_firstUnsweptPage); |
| 127 // Add the BaseHeap's pages to the orphanedPagePool. | 127 // Add the BaseHeap's pages to the orphanedPagePool. |
| 128 for (BasePage* page = m_firstPage; page; page = page->next()) { | 128 for (BasePage* page = m_firstPage; page; page = page->next()) { |
| 129 Heap::decreaseAllocatedSpace(page->size()); | 129 threadState()->gcGroup()->heapStats().decreaseAllocatedSpace(page->size( )); |
| 130 Heap::orphanedPagePool()->addOrphanedPage(heapIndex(), page); | 130 threadState()->gcGroup()->orphanedPagePool()->addOrphanedPage(heapIndex( ), page); |
| 131 } | 131 } |
| 132 m_firstPage = nullptr; | 132 m_firstPage = nullptr; |
| 133 } | 133 } |
| 134 | 134 |
| 135 void BaseHeap::takeSnapshot(const String& dumpBaseName, ThreadState::GCSnapshotI nfo& info) | 135 void BaseHeap::takeSnapshot(const String& dumpBaseName, ThreadState::GCSnapshotI nfo& info) |
| 136 { | 136 { |
| 137 // |dumpBaseName| at this point is "blink_gc/thread_X/heaps/HeapName" | 137 // |dumpBaseName| at this point is "blink_gc/thread_X/heaps/HeapName" |
| 138 WebMemoryAllocatorDump* allocatorDump = BlinkGCMemoryDumpProvider::instance( )->createMemoryAllocatorDumpForCurrentGC(dumpBaseName); | 138 WebMemoryAllocatorDump* allocatorDump = BlinkGCMemoryDumpProvider::instance( )->createMemoryAllocatorDumpForCurrentGC(dumpBaseName); |
| 139 size_t pageIndex = 0; | 139 size_t pageIndex = 0; |
| 140 size_t heapTotalFreeSize = 0; | 140 size_t heapTotalFreeSize = 0; |
| (...skipping 153 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 294 // lazySweepPages(). This check prevents the sweeping from being executed | 294 // lazySweepPages(). This check prevents the sweeping from being executed |
| 295 // recursively. | 295 // recursively. |
| 296 if (threadState()->sweepForbidden()) | 296 if (threadState()->sweepForbidden()) |
| 297 return nullptr; | 297 return nullptr; |
| 298 | 298 |
| 299 TRACE_EVENT0("blink_gc", "BaseHeap::lazySweepPages"); | 299 TRACE_EVENT0("blink_gc", "BaseHeap::lazySweepPages"); |
| 300 ThreadState::SweepForbiddenScope sweepForbidden(threadState()); | 300 ThreadState::SweepForbiddenScope sweepForbidden(threadState()); |
| 301 ScriptForbiddenIfMainThreadScope scriptForbidden; | 301 ScriptForbiddenIfMainThreadScope scriptForbidden; |
| 302 | 302 |
| 303 double startTime = WTF::currentTimeMS(); | 303 double startTime = WTF::currentTimeMS(); |
| 304 Address result = lazySweepPages(allocationSize, gcInfoIndex); | 304 Address result = lazySweepPages(allocationSize, gcInfoIndex, threadState()-> gcGroup()); |
| 305 threadState()->accumulateSweepingTime(WTF::currentTimeMS() - startTime); | 305 threadState()->accumulateSweepingTime(WTF::currentTimeMS() - startTime); |
| 306 Heap::reportMemoryUsageForTracing(); | 306 Heap::reportMemoryUsageForTracing(); |
| 307 | 307 |
| 308 return result; | 308 return result; |
| 309 } | 309 } |
| 310 | 310 |
| 311 void BaseHeap::sweepUnsweptPage() | 311 void BaseHeap::sweepUnsweptPage() |
| 312 { | 312 { |
| 313 BasePage* page = m_firstUnsweptPage; | 313 BasePage* page = m_firstUnsweptPage; |
| 314 if (page->isEmpty()) { | 314 if (page->isEmpty()) { |
| (...skipping 97 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 412 if (m_freeList.takeSnapshot(dumpName)) { | 412 if (m_freeList.takeSnapshot(dumpName)) { |
| 413 WebMemoryAllocatorDump* bucketsDump = BlinkGCMemoryDumpProvider::instanc e()->createMemoryAllocatorDumpForCurrentGC(dumpName + "/buckets"); | 413 WebMemoryAllocatorDump* bucketsDump = BlinkGCMemoryDumpProvider::instanc e()->createMemoryAllocatorDumpForCurrentGC(dumpName + "/buckets"); |
| 414 WebMemoryAllocatorDump* pagesDump = BlinkGCMemoryDumpProvider::instance( )->createMemoryAllocatorDumpForCurrentGC(dumpName + "/pages"); | 414 WebMemoryAllocatorDump* pagesDump = BlinkGCMemoryDumpProvider::instance( )->createMemoryAllocatorDumpForCurrentGC(dumpName + "/pages"); |
| 415 BlinkGCMemoryDumpProvider::instance()->currentProcessMemoryDump()->addOw nershipEdge(pagesDump->guid(), bucketsDump->guid()); | 415 BlinkGCMemoryDumpProvider::instance()->currentProcessMemoryDump()->addOw nershipEdge(pagesDump->guid(), bucketsDump->guid()); |
| 416 } | 416 } |
| 417 } | 417 } |
| 418 | 418 |
| 419 void NormalPageHeap::allocatePage() | 419 void NormalPageHeap::allocatePage() |
| 420 { | 420 { |
| 421 threadState()->shouldFlushHeapDoesNotContainCache(); | 421 threadState()->shouldFlushHeapDoesNotContainCache(); |
| 422 PageMemory* pageMemory = Heap::freePagePool()->takeFreePage(heapIndex()); | 422 PageMemory* pageMemory = threadState()->gcGroup()->freePagePool()->takeFreeP age(heapIndex()); |
| 423 | 423 |
| 424 if (!pageMemory) { | 424 if (!pageMemory) { |
| 425 // Allocate a memory region for blinkPagesPerRegion pages that | 425 // Allocate a memory region for blinkPagesPerRegion pages that |
| 426 // will each have the following layout. | 426 // will each have the following layout. |
| 427 // | 427 // |
| 428 // [ guard os page | ... payload ... | guard os page ] | 428 // [ guard os page | ... payload ... | guard os page ] |
| 429 // ^---{ aligned to blink page size } | 429 // ^---{ aligned to blink page size } |
| 430 PageMemoryRegion* region = PageMemoryRegion::allocateNormalPages(); | 430 PageMemoryRegion* region = PageMemoryRegion::allocateNormalPages(threadS tate()->gcGroup()); |
| 431 | 431 |
| 432 // Setup the PageMemory object for each of the pages in the region. | 432 // Setup the PageMemory object for each of the pages in the region. |
| 433 for (size_t i = 0; i < blinkPagesPerRegion; ++i) { | 433 for (size_t i = 0; i < blinkPagesPerRegion; ++i) { |
| 434 PageMemory* memory = PageMemory::setupPageMemoryInRegion(region, i * blinkPageSize, blinkPagePayloadSize()); | 434 PageMemory* memory = PageMemory::setupPageMemoryInRegion(region, i * blinkPageSize, blinkPagePayloadSize()); |
| 435 // Take the first possible page ensuring that this thread actually | 435 // Take the first possible page ensuring that this thread actually |
| 436 // gets a page and add the rest to the page pool. | 436 // gets a page and add the rest to the page pool. |
| 437 if (!pageMemory) { | 437 if (!pageMemory) { |
| 438 bool result = memory->commit(); | 438 bool result = memory->commit(); |
| 439 // If you hit the ASSERT, it will mean that you're hitting | 439 // If you hit the ASSERT, it will mean that you're hitting |
| 440 // the limit of the number of mmapped regions OS can support | 440 // the limit of the number of mmapped regions OS can support |
| 441 // (e.g., /proc/sys/vm/max_map_count in Linux). | 441 // (e.g., /proc/sys/vm/max_map_count in Linux). |
| 442 RELEASE_ASSERT(result); | 442 RELEASE_ASSERT(result); |
| 443 pageMemory = memory; | 443 pageMemory = memory; |
| 444 } else { | 444 } else { |
| 445 Heap::freePagePool()->addFreePage(heapIndex(), memory); | 445 threadState()->gcGroup()->freePagePool()->addFreePage(heapIndex( ), memory); |
| 446 } | 446 } |
| 447 } | 447 } |
| 448 } | 448 } |
| 449 | 449 |
| 450 NormalPage* page = new (pageMemory->writableStart()) NormalPage(pageMemory, this); | 450 NormalPage* page = new (pageMemory->writableStart()) NormalPage(pageMemory, this); |
| 451 page->link(&m_firstPage); | 451 page->link(&m_firstPage); |
| 452 | 452 |
| 453 Heap::increaseAllocatedSpace(page->size()); | 453 threadState()->gcGroup()->heapStats().increaseAllocatedSpace(page->size()); |
| 454 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) | 454 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) |
| 455 // Allow the following addToFreeList() to add the newly allocated memory | 455 // Allow the following addToFreeList() to add the newly allocated memory |
| 456 // to the free list. | 456 // to the free list. |
| 457 ASAN_UNPOISON_MEMORY_REGION(page->payload(), page->payloadSize()); | 457 ASAN_UNPOISON_MEMORY_REGION(page->payload(), page->payloadSize()); |
| 458 Address address = page->payload(); | 458 Address address = page->payload(); |
| 459 for (size_t i = 0; i < page->payloadSize(); i++) | 459 for (size_t i = 0; i < page->payloadSize(); i++) |
| 460 address[i] = reuseAllowedZapValue; | 460 address[i] = reuseAllowedZapValue; |
| 461 ASAN_POISON_MEMORY_REGION(page->payload(), page->payloadSize()); | 461 ASAN_POISON_MEMORY_REGION(page->payload(), page->payloadSize()); |
| 462 #endif | 462 #endif |
| 463 addToFreeList(page->payload(), page->payloadSize()); | 463 addToFreeList(page->payload(), page->payloadSize()); |
| 464 } | 464 } |
| 465 | 465 |
| 466 void NormalPageHeap::freePage(NormalPage* page) | 466 void NormalPageHeap::freePage(NormalPage* page) |
| 467 { | 467 { |
| 468 Heap::decreaseAllocatedSpace(page->size()); | 468 threadState()->gcGroup()->heapStats().decreaseAllocatedSpace(page->size()); |
| 469 | 469 |
| 470 if (page->terminating()) { | 470 if (page->terminating()) { |
| 471 // The thread is shutting down and this page is being removed as a part | 471 // The thread is shutting down and this page is being removed as a part |
| 472 // of the thread local GC. In that case the object could be traced in | 472 // of the thread local GC. In that case the object could be traced in |
| 473 // the next global GC if there is a dangling pointer from a live thread | 473 // the next global GC if there is a dangling pointer from a live thread |
| 474 // heap to this dead thread heap. To guard against this, we put the | 474 // heap to this dead thread heap. To guard against this, we put the |
| 475 // page into the orphaned page pool and zap the page memory. This | 475 // page into the orphaned page pool and zap the page memory. This |
| 476 // ensures that tracing the dangling pointer in the next global GC just | 476 // ensures that tracing the dangling pointer in the next global GC just |
| 477 // crashes instead of causing use-after-frees. After the next global | 477 // crashes instead of causing use-after-frees. After the next global |
| 478 // GC, the orphaned pages are removed. | 478 // GC, the orphaned pages are removed. |
| 479 Heap::orphanedPagePool()->addOrphanedPage(heapIndex(), page); | 479 threadState()->gcGroup()->orphanedPagePool()->addOrphanedPage(heapIndex( ), page); |
| 480 } else { | 480 } else { |
| 481 PageMemory* memory = page->storage(); | 481 PageMemory* memory = page->storage(); |
| 482 page->~NormalPage(); | 482 page->~NormalPage(); |
| 483 Heap::freePagePool()->addFreePage(heapIndex(), memory); | 483 threadState()->gcGroup()->freePagePool()->addFreePage(heapIndex(), memor y); |
| 484 } | 484 } |
| 485 } | 485 } |
| 486 | 486 |
| 487 bool NormalPageHeap::coalesce() | 487 bool NormalPageHeap::coalesce() |
| 488 { | 488 { |
| 489 // Don't coalesce heaps if there are not enough promptly freed entries | 489 // Don't coalesce heaps if there are not enough promptly freed entries |
| 490 // to be coalesced. | 490 // to be coalesced. |
| 491 // | 491 // |
| 492 // FIXME: This threshold is determined just to optimize blink_perf | 492 // FIXME: This threshold is determined just to optimize blink_perf |
| 493 // benchmarks. Coalescing is very sensitive to the threashold and | 493 // benchmarks. Coalescing is very sensitive to the threashold and |
| (...skipping 44 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 538 if (startOfGap != headerAddress) | 538 if (startOfGap != headerAddress) |
| 539 addToFreeList(startOfGap, headerAddress - startOfGap); | 539 addToFreeList(startOfGap, headerAddress - startOfGap); |
| 540 | 540 |
| 541 headerAddress += size; | 541 headerAddress += size; |
| 542 startOfGap = headerAddress; | 542 startOfGap = headerAddress; |
| 543 } | 543 } |
| 544 | 544 |
| 545 if (startOfGap != page->payloadEnd()) | 545 if (startOfGap != page->payloadEnd()) |
| 546 addToFreeList(startOfGap, page->payloadEnd() - startOfGap); | 546 addToFreeList(startOfGap, page->payloadEnd() - startOfGap); |
| 547 } | 547 } |
| 548 Heap::decreaseAllocatedObjectSize(freedSize); | 548 threadState()->gcGroup()->heapStats().decreaseAllocatedObjectSize(freedSize) ; |
| 549 ASSERT(m_promptlyFreedSize == freedSize); | 549 ASSERT(m_promptlyFreedSize == freedSize); |
| 550 m_promptlyFreedSize = 0; | 550 m_promptlyFreedSize = 0; |
| 551 return true; | 551 return true; |
| 552 } | 552 } |
| 553 | 553 |
| 554 void NormalPageHeap::promptlyFreeObject(HeapObjectHeader* header) | 554 void NormalPageHeap::promptlyFreeObject(HeapObjectHeader* header) |
| 555 { | 555 { |
| 556 ASSERT(!threadState()->sweepForbidden()); | 556 ASSERT(!threadState()->sweepForbidden()); |
| 557 ASSERT(header->checkHeader()); | 557 ASSERT(header->checkHeader()); |
| 558 Address address = reinterpret_cast<Address>(header); | 558 Address address = reinterpret_cast<Address>(header); |
| (...skipping 62 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 621 Address shrinkAddress = header->payloadEnd() - shrinkSize; | 621 Address shrinkAddress = header->payloadEnd() - shrinkSize; |
| 622 HeapObjectHeader* freedHeader = new (NotNull, shrinkAddress) HeapObjectHeade r(shrinkSize, header->gcInfoIndex()); | 622 HeapObjectHeader* freedHeader = new (NotNull, shrinkAddress) HeapObjectHeade r(shrinkSize, header->gcInfoIndex()); |
| 623 freedHeader->markPromptlyFreed(); | 623 freedHeader->markPromptlyFreed(); |
| 624 ASSERT(pageFromObject(reinterpret_cast<Address>(header)) == findPageFromAddr ess(reinterpret_cast<Address>(header))); | 624 ASSERT(pageFromObject(reinterpret_cast<Address>(header)) == findPageFromAddr ess(reinterpret_cast<Address>(header))); |
| 625 m_promptlyFreedSize += shrinkSize; | 625 m_promptlyFreedSize += shrinkSize; |
| 626 header->setSize(allocationSize); | 626 header->setSize(allocationSize); |
| 627 SET_MEMORY_INACCESSIBLE(shrinkAddress + sizeof(HeapObjectHeader), shrinkSize - sizeof(HeapObjectHeader)); | 627 SET_MEMORY_INACCESSIBLE(shrinkAddress + sizeof(HeapObjectHeader), shrinkSize - sizeof(HeapObjectHeader)); |
| 628 return false; | 628 return false; |
| 629 } | 629 } |
| 630 | 630 |
| 631 Address NormalPageHeap::lazySweepPages(size_t allocationSize, size_t gcInfoIndex ) | 631 Address NormalPageHeap::lazySweepPages(size_t allocationSize, size_t gcInfoIndex , GCGroup*) |
|
haraken
2016/01/28 15:52:49
The GCGroup parameter looks unnecessary.
keishi
2016/02/29 06:02:33
Done.
| |
| 632 { | 632 { |
| 633 ASSERT(!hasCurrentAllocationArea()); | 633 ASSERT(!hasCurrentAllocationArea()); |
| 634 Address result = nullptr; | 634 Address result = nullptr; |
| 635 while (m_firstUnsweptPage) { | 635 while (m_firstUnsweptPage) { |
| 636 BasePage* page = m_firstUnsweptPage; | 636 BasePage* page = m_firstUnsweptPage; |
| 637 if (page->isEmpty()) { | 637 if (page->isEmpty()) { |
| 638 page->unlink(&m_firstUnsweptPage); | 638 page->unlink(&m_firstUnsweptPage); |
| 639 page->removeFromHeap(); | 639 page->removeFromHeap(); |
| 640 } else { | 640 } else { |
| 641 // Sweep a page and move the page from m_firstUnsweptPages to | 641 // Sweep a page and move the page from m_firstUnsweptPages to |
| (...skipping 14 matching lines...) Expand all Loading... | |
| 656 } | 656 } |
| 657 | 657 |
| 658 void NormalPageHeap::setRemainingAllocationSize(size_t newRemainingAllocationSiz e) | 658 void NormalPageHeap::setRemainingAllocationSize(size_t newRemainingAllocationSiz e) |
| 659 { | 659 { |
| 660 m_remainingAllocationSize = newRemainingAllocationSize; | 660 m_remainingAllocationSize = newRemainingAllocationSize; |
| 661 | 661 |
| 662 // Sync recorded allocated-object size: | 662 // Sync recorded allocated-object size: |
| 663 // - if previous alloc checkpoint is larger, allocation size has increased. | 663 // - if previous alloc checkpoint is larger, allocation size has increased. |
| 664 // - if smaller, a net reduction in size since last call to updateRemaining AllocationSize(). | 664 // - if smaller, a net reduction in size since last call to updateRemaining AllocationSize(). |
| 665 if (m_lastRemainingAllocationSize > m_remainingAllocationSize) | 665 if (m_lastRemainingAllocationSize > m_remainingAllocationSize) |
| 666 Heap::increaseAllocatedObjectSize(m_lastRemainingAllocationSize - m_rema iningAllocationSize); | 666 threadState()->gcGroup()->heapStats().increaseAllocatedObjectSize(m_last RemainingAllocationSize - m_remainingAllocationSize); |
| 667 else if (m_lastRemainingAllocationSize != m_remainingAllocationSize) | 667 else if (m_lastRemainingAllocationSize != m_remainingAllocationSize) |
| 668 Heap::decreaseAllocatedObjectSize(m_remainingAllocationSize - m_lastRema iningAllocationSize); | 668 threadState()->gcGroup()->heapStats().decreaseAllocatedObjectSize(m_rema iningAllocationSize - m_lastRemainingAllocationSize); |
| 669 m_lastRemainingAllocationSize = m_remainingAllocationSize; | 669 m_lastRemainingAllocationSize = m_remainingAllocationSize; |
| 670 } | 670 } |
| 671 | 671 |
| 672 void NormalPageHeap::updateRemainingAllocationSize() | 672 void NormalPageHeap::updateRemainingAllocationSize() |
| 673 { | 673 { |
| 674 if (m_lastRemainingAllocationSize > remainingAllocationSize()) { | 674 if (m_lastRemainingAllocationSize > remainingAllocationSize()) { |
| 675 Heap::increaseAllocatedObjectSize(m_lastRemainingAllocationSize - remain ingAllocationSize()); | 675 threadState()->gcGroup()->heapStats().increaseAllocatedObjectSize(m_last RemainingAllocationSize - remainingAllocationSize()); |
| 676 m_lastRemainingAllocationSize = remainingAllocationSize(); | 676 m_lastRemainingAllocationSize = remainingAllocationSize(); |
| 677 } | 677 } |
| 678 ASSERT(m_lastRemainingAllocationSize == remainingAllocationSize()); | 678 ASSERT(m_lastRemainingAllocationSize == remainingAllocationSize()); |
| 679 } | 679 } |
| 680 | 680 |
| 681 void NormalPageHeap::setAllocationPoint(Address point, size_t size) | 681 void NormalPageHeap::setAllocationPoint(Address point, size_t size) |
| 682 { | 682 { |
| 683 #if ENABLE(ASSERT) | 683 #if ENABLE(ASSERT) |
| 684 if (point) { | 684 if (point) { |
| 685 ASSERT(size); | 685 ASSERT(size); |
| (...skipping 13 matching lines...) Expand all Loading... | |
| 699 Address NormalPageHeap::outOfLineAllocate(size_t allocationSize, size_t gcInfoIn dex) | 699 Address NormalPageHeap::outOfLineAllocate(size_t allocationSize, size_t gcInfoIn dex) |
| 700 { | 700 { |
| 701 ASSERT(allocationSize > remainingAllocationSize()); | 701 ASSERT(allocationSize > remainingAllocationSize()); |
| 702 ASSERT(allocationSize >= allocationGranularity); | 702 ASSERT(allocationSize >= allocationGranularity); |
| 703 | 703 |
| 704 // 1. If this allocation is big enough, allocate a large object. | 704 // 1. If this allocation is big enough, allocate a large object. |
| 705 if (allocationSize >= largeObjectSizeThreshold) { | 705 if (allocationSize >= largeObjectSizeThreshold) { |
| 706 // TODO(sof): support eagerly finalized large objects, if ever needed. | 706 // TODO(sof): support eagerly finalized large objects, if ever needed. |
| 707 RELEASE_ASSERT(heapIndex() != BlinkGC::EagerSweepHeapIndex); | 707 RELEASE_ASSERT(heapIndex() != BlinkGC::EagerSweepHeapIndex); |
| 708 LargeObjectHeap* largeObjectHeap = static_cast<LargeObjectHeap*>(threadS tate()->heap(BlinkGC::LargeObjectHeapIndex)); | 708 LargeObjectHeap* largeObjectHeap = static_cast<LargeObjectHeap*>(threadS tate()->heap(BlinkGC::LargeObjectHeapIndex)); |
| 709 Address largeObject = largeObjectHeap->allocateLargeObjectPage(allocatio nSize, gcInfoIndex); | 709 Address largeObject = largeObjectHeap->allocateLargeObjectPage(allocatio nSize, gcInfoIndex, threadState()->gcGroup()); |
| 710 ASAN_MARK_LARGE_VECTOR_CONTAINER(this, largeObject); | 710 ASAN_MARK_LARGE_VECTOR_CONTAINER(this, largeObject); |
| 711 return largeObject; | 711 return largeObject; |
| 712 } | 712 } |
| 713 | 713 |
| 714 // 2. Try to allocate from a free list. | 714 // 2. Try to allocate from a free list. |
| 715 updateRemainingAllocationSize(); | 715 updateRemainingAllocationSize(); |
| 716 Address result = allocateFromFreeList(allocationSize, gcInfoIndex); | 716 Address result = allocateFromFreeList(allocationSize, gcInfoIndex); |
| 717 if (result) | 717 if (result) |
| 718 return result; | 718 return result; |
| 719 | 719 |
| (...skipping 58 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 778 } | 778 } |
| 779 m_freeList.m_biggestFreeListIndex = index; | 779 m_freeList.m_biggestFreeListIndex = index; |
| 780 return nullptr; | 780 return nullptr; |
| 781 } | 781 } |
| 782 | 782 |
| 783 LargeObjectHeap::LargeObjectHeap(ThreadState* state, int index) | 783 LargeObjectHeap::LargeObjectHeap(ThreadState* state, int index) |
| 784 : BaseHeap(state, index) | 784 : BaseHeap(state, index) |
| 785 { | 785 { |
| 786 } | 786 } |
| 787 | 787 |
| 788 Address LargeObjectHeap::allocateLargeObjectPage(size_t allocationSize, size_t g cInfoIndex) | 788 Address LargeObjectHeap::allocateLargeObjectPage(size_t allocationSize, size_t g cInfoIndex, GCGroup* gcGroup) |
|
haraken
2016/01/28 15:52:50
Remove the GCGroup parameter.
keishi
2016/02/29 06:02:33
Done.
| |
| 789 { | 789 { |
| 790 // Caller already added space for object header and rounded up to allocation | 790 // Caller already added space for object header and rounded up to allocation |
| 791 // alignment | 791 // alignment |
| 792 ASSERT(!(allocationSize & allocationMask)); | 792 ASSERT(!(allocationSize & allocationMask)); |
| 793 | 793 |
| 794 // 1. Try to sweep large objects more than allocationSize bytes | 794 // 1. Try to sweep large objects more than allocationSize bytes |
| 795 // before allocating a new large object. | 795 // before allocating a new large object. |
| 796 Address result = lazySweep(allocationSize, gcInfoIndex); | 796 Address result = lazySweep(allocationSize, gcInfoIndex); |
| 797 if (result) | 797 if (result) |
| 798 return result; | 798 return result; |
| 799 | 799 |
| 800 // 2. If we have failed in sweeping allocationSize bytes, | 800 // 2. If we have failed in sweeping allocationSize bytes, |
| 801 // we complete sweeping before allocating this large object. | 801 // we complete sweeping before allocating this large object. |
| 802 threadState()->completeSweep(); | 802 threadState()->completeSweep(); |
| 803 | 803 |
| 804 // 3. Check if we should trigger a GC. | 804 // 3. Check if we should trigger a GC. |
| 805 threadState()->scheduleGCIfNeeded(); | 805 threadState()->scheduleGCIfNeeded(); |
| 806 | 806 |
| 807 return doAllocateLargeObjectPage(allocationSize, gcInfoIndex); | 807 return doAllocateLargeObjectPage(allocationSize, gcInfoIndex, gcGroup); |
| 808 } | 808 } |
| 809 | 809 |
| 810 Address LargeObjectHeap::doAllocateLargeObjectPage(size_t allocationSize, size_t gcInfoIndex) | 810 Address LargeObjectHeap::doAllocateLargeObjectPage(size_t allocationSize, size_t gcInfoIndex, GCGroup* gcGroup) |
|
haraken
2016/01/28 15:52:49
Remove the GCGroup parameter.
keishi
2016/02/29 06:02:33
Done.
| |
| 811 { | 811 { |
| 812 size_t largeObjectSize = LargeObjectPage::pageHeaderSize() + allocationSize; | 812 size_t largeObjectSize = LargeObjectPage::pageHeaderSize() + allocationSize; |
| 813 // If ASan is supported we add allocationGranularity bytes to the allocated | 813 // If ASan is supported we add allocationGranularity bytes to the allocated |
| 814 // space and poison that to detect overflows | 814 // space and poison that to detect overflows |
| 815 #if defined(ADDRESS_SANITIZER) | 815 #if defined(ADDRESS_SANITIZER) |
| 816 largeObjectSize += allocationGranularity; | 816 largeObjectSize += allocationGranularity; |
| 817 #endif | 817 #endif |
| 818 | 818 |
| 819 threadState()->shouldFlushHeapDoesNotContainCache(); | 819 threadState()->shouldFlushHeapDoesNotContainCache(); |
| 820 PageMemory* pageMemory = PageMemory::allocate(largeObjectSize); | 820 PageMemory* pageMemory = PageMemory::allocate(largeObjectSize, gcGroup); |
|
haraken
2016/01/28 15:52:49
And use threadState()->gcGroup().
keishi
2016/02/29 06:02:33
Done.
| |
| 821 Address largeObjectAddress = pageMemory->writableStart(); | 821 Address largeObjectAddress = pageMemory->writableStart(); |
| 822 Address headerAddress = largeObjectAddress + LargeObjectPage::pageHeaderSize (); | 822 Address headerAddress = largeObjectAddress + LargeObjectPage::pageHeaderSize (); |
| 823 #if ENABLE(ASSERT) | 823 #if ENABLE(ASSERT) |
| 824 // Verify that the allocated PageMemory is expectedly zeroed. | 824 // Verify that the allocated PageMemory is expectedly zeroed. |
| 825 for (size_t i = 0; i < largeObjectSize; ++i) | 825 for (size_t i = 0; i < largeObjectSize; ++i) |
| 826 ASSERT(!largeObjectAddress[i]); | 826 ASSERT(!largeObjectAddress[i]); |
| 827 #endif | 827 #endif |
| 828 ASSERT(gcInfoIndex > 0); | 828 ASSERT(gcInfoIndex > 0); |
| 829 HeapObjectHeader* header = new (NotNull, headerAddress) HeapObjectHeader(lar geObjectSizeInHeader, gcInfoIndex); | 829 HeapObjectHeader* header = new (NotNull, headerAddress) HeapObjectHeader(lar geObjectSizeInHeader, gcInfoIndex); |
| 830 Address result = headerAddress + sizeof(*header); | 830 Address result = headerAddress + sizeof(*header); |
| 831 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); | 831 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); |
| 832 LargeObjectPage* largeObject = new (largeObjectAddress) LargeObjectPage(page Memory, this, allocationSize); | 832 LargeObjectPage* largeObject = new (largeObjectAddress) LargeObjectPage(page Memory, this, allocationSize); |
| 833 ASSERT(header->checkHeader()); | 833 ASSERT(header->checkHeader()); |
| 834 | 834 |
| 835 // Poison the object header and allocationGranularity bytes after the object | 835 // Poison the object header and allocationGranularity bytes after the object |
| 836 ASAN_POISON_MEMORY_REGION(header, sizeof(*header)); | 836 ASAN_POISON_MEMORY_REGION(header, sizeof(*header)); |
| 837 ASAN_POISON_MEMORY_REGION(largeObject->address() + largeObject->size(), allo cationGranularity); | 837 ASAN_POISON_MEMORY_REGION(largeObject->address() + largeObject->size(), allo cationGranularity); |
| 838 | 838 |
| 839 largeObject->link(&m_firstPage); | 839 largeObject->link(&m_firstPage); |
| 840 | 840 |
| 841 Heap::increaseAllocatedSpace(largeObject->size()); | 841 threadState()->gcGroup()->heapStats().increaseAllocatedSpace(largeObject->si ze()); |
| 842 Heap::increaseAllocatedObjectSize(largeObject->size()); | 842 threadState()->gcGroup()->heapStats().increaseAllocatedObjectSize(largeObjec t->size()); |
| 843 return result; | 843 return result; |
| 844 } | 844 } |
| 845 | 845 |
| 846 void LargeObjectHeap::freeLargeObjectPage(LargeObjectPage* object) | 846 void LargeObjectHeap::freeLargeObjectPage(LargeObjectPage* object) |
| 847 { | 847 { |
| 848 ASAN_UNPOISON_MEMORY_REGION(object->payload(), object->payloadSize()); | 848 ASAN_UNPOISON_MEMORY_REGION(object->payload(), object->payloadSize()); |
| 849 object->heapObjectHeader()->finalize(object->payload(), object->payloadSize( )); | 849 object->heapObjectHeader()->finalize(object->payload(), object->payloadSize( )); |
| 850 Heap::decreaseAllocatedSpace(object->size()); | 850 threadState()->gcGroup()->heapStats().decreaseAllocatedSpace(object->size()) ; |
| 851 | 851 |
| 852 // Unpoison the object header and allocationGranularity bytes after the | 852 // Unpoison the object header and allocationGranularity bytes after the |
| 853 // object before freeing. | 853 // object before freeing. |
| 854 ASAN_UNPOISON_MEMORY_REGION(object->heapObjectHeader(), sizeof(HeapObjectHea der)); | 854 ASAN_UNPOISON_MEMORY_REGION(object->heapObjectHeader(), sizeof(HeapObjectHea der)); |
| 855 ASAN_UNPOISON_MEMORY_REGION(object->address() + object->size(), allocationGr anularity); | 855 ASAN_UNPOISON_MEMORY_REGION(object->address() + object->size(), allocationGr anularity); |
| 856 | 856 |
| 857 if (object->terminating()) { | 857 if (object->terminating()) { |
| 858 ASSERT(ThreadState::current()->isTerminating()); | 858 ASSERT(ThreadState::current()->isTerminating()); |
| 859 // The thread is shutting down and this page is being removed as a part | 859 // The thread is shutting down and this page is being removed as a part |
| 860 // of the thread local GC. In that case the object could be traced in | 860 // of the thread local GC. In that case the object could be traced in |
| 861 // the next global GC if there is a dangling pointer from a live thread | 861 // the next global GC if there is a dangling pointer from a live thread |
| 862 // heap to this dead thread heap. To guard against this, we put the | 862 // heap to this dead thread heap. To guard against this, we put the |
| 863 // page into the orphaned page pool and zap the page memory. This | 863 // page into the orphaned page pool and zap the page memory. This |
| 864 // ensures that tracing the dangling pointer in the next global GC just | 864 // ensures that tracing the dangling pointer in the next global GC just |
| 865 // crashes instead of causing use-after-frees. After the next global | 865 // crashes instead of causing use-after-frees. After the next global |
| 866 // GC, the orphaned pages are removed. | 866 // GC, the orphaned pages are removed. |
| 867 Heap::orphanedPagePool()->addOrphanedPage(heapIndex(), object); | 867 threadState()->gcGroup()->orphanedPagePool()->addOrphanedPage(heapIndex( ), object); |
| 868 } else { | 868 } else { |
| 869 ASSERT(!ThreadState::current()->isTerminating()); | 869 ASSERT(!ThreadState::current()->isTerminating()); |
| 870 PageMemory* memory = object->storage(); | 870 PageMemory* memory = object->storage(); |
| 871 object->~LargeObjectPage(); | 871 object->~LargeObjectPage(); |
| 872 delete memory; | 872 delete memory; |
| 873 } | 873 } |
| 874 } | 874 } |
| 875 | 875 |
| 876 Address LargeObjectHeap::lazySweepPages(size_t allocationSize, size_t gcInfoInde x) | 876 Address LargeObjectHeap::lazySweepPages(size_t allocationSize, size_t gcInfoInde x, GCGroup* gcGroup) |
|
haraken
2016/01/28 15:52:49
Remove the GCGroup parameter.
keishi
2016/02/29 06:02:33
Done.
| |
| 877 { | 877 { |
| 878 Address result = nullptr; | 878 Address result = nullptr; |
| 879 size_t sweptSize = 0; | 879 size_t sweptSize = 0; |
| 880 while (m_firstUnsweptPage) { | 880 while (m_firstUnsweptPage) { |
| 881 BasePage* page = m_firstUnsweptPage; | 881 BasePage* page = m_firstUnsweptPage; |
| 882 if (page->isEmpty()) { | 882 if (page->isEmpty()) { |
| 883 sweptSize += static_cast<LargeObjectPage*>(page)->payloadSize() + si zeof(HeapObjectHeader); | 883 sweptSize += static_cast<LargeObjectPage*>(page)->payloadSize() + si zeof(HeapObjectHeader); |
| 884 page->unlink(&m_firstUnsweptPage); | 884 page->unlink(&m_firstUnsweptPage); |
| 885 page->removeFromHeap(); | 885 page->removeFromHeap(); |
| 886 // For LargeObjectPage, stop lazy sweeping once we have swept | 886 // For LargeObjectPage, stop lazy sweeping once we have swept |
| 887 // more than allocationSize bytes. | 887 // more than allocationSize bytes. |
| 888 if (sweptSize >= allocationSize) { | 888 if (sweptSize >= allocationSize) { |
| 889 result = doAllocateLargeObjectPage(allocationSize, gcInfoIndex); | 889 result = doAllocateLargeObjectPage(allocationSize, gcInfoIndex, gcGroup); |
| 890 ASSERT(result); | 890 ASSERT(result); |
| 891 break; | 891 break; |
| 892 } | 892 } |
| 893 } else { | 893 } else { |
| 894 // Sweep a page and move the page from m_firstUnsweptPages to | 894 // Sweep a page and move the page from m_firstUnsweptPages to |
| 895 // m_firstPages. | 895 // m_firstPages. |
| 896 page->sweep(); | 896 page->sweep(); |
| 897 page->unlink(&m_firstUnsweptPage); | 897 page->unlink(&m_firstUnsweptPage); |
| 898 page->link(&m_firstPage); | 898 page->link(&m_firstPage); |
| 899 page->markAsSwept(); | 899 page->markAsSwept(); |
| (...skipping 250 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1150 heapForNormalPage()->addToFreeList(startOfGap, headerAddress - startOfGap); | 1150 heapForNormalPage()->addToFreeList(startOfGap, headerAddress - startOfGap); |
| 1151 header->unmark(); | 1151 header->unmark(); |
| 1152 headerAddress += header->size(); | 1152 headerAddress += header->size(); |
| 1153 markedObjectSize += header->size(); | 1153 markedObjectSize += header->size(); |
| 1154 startOfGap = headerAddress; | 1154 startOfGap = headerAddress; |
| 1155 } | 1155 } |
| 1156 if (startOfGap != payloadEnd()) | 1156 if (startOfGap != payloadEnd()) |
| 1157 heapForNormalPage()->addToFreeList(startOfGap, payloadEnd() - startOfGap); | 1157 heapForNormalPage()->addToFreeList(startOfGap, payloadEnd() - startOfGap); |
| 1158 | 1158 |
| 1159 if (markedObjectSize) | 1159 if (markedObjectSize) |
| 1160 Heap::increaseMarkedObjectSize(markedObjectSize); | 1160 ThreadState::current()->gcGroup()->heapStats().increaseMarkedObjectSize(markedObjectSize); |
|
haraken
2016/01/28 15:52:50
heap()->threadState()
keishi
2016/02/29 06:02:33
Done.
| |
| 1161 } | 1161 } |
| 1162 | 1162 |
| 1163 void NormalPage::makeConsistentForGC() | 1163 void NormalPage::makeConsistentForGC() |
| 1164 { | 1164 { |
| 1165 size_t markedObjectSize = 0; | 1165 size_t markedObjectSize = 0; |
| 1166 for (Address headerAddress = payload(); headerAddress < payloadEnd();) { | 1166 for (Address headerAddress = payload(); headerAddress < payloadEnd();) { |
| 1167 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress); | 1167 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress); |
| 1168 ASSERT(header->size() < blinkPagePayloadSize()); | 1168 ASSERT(header->size() < blinkPagePayloadSize()); |
| 1169 // Check if a free list entry first since we cannot call | 1169 // Check if a free list entry first since we cannot call |
| 1170 // isMarked on a free list entry. | 1170 // isMarked on a free list entry. |
| 1171 if (header->isFree()) { | 1171 if (header->isFree()) { |
| 1172 headerAddress += header->size(); | 1172 headerAddress += header->size(); |
| 1173 continue; | 1173 continue; |
| 1174 } | 1174 } |
| 1175 ASSERT(header->checkHeader()); | 1175 ASSERT(header->checkHeader()); |
| 1176 if (header->isMarked()) { | 1176 if (header->isMarked()) { |
| 1177 header->unmark(); | 1177 header->unmark(); |
| 1178 markedObjectSize += header->size(); | 1178 markedObjectSize += header->size(); |
| 1179 } else { | 1179 } else { |
| 1180 header->markDead(); | 1180 header->markDead(); |
| 1181 } | 1181 } |
| 1182 headerAddress += header->size(); | 1182 headerAddress += header->size(); |
| 1183 } | 1183 } |
| 1184 if (markedObjectSize) | 1184 if (markedObjectSize) |
| 1185 Heap::increaseMarkedObjectSize(markedObjectSize); | 1185 ThreadState::current()->gcGroup()->heapStats().increaseMarkedObjectSize(markedObjectSize); |
|
haraken
2016/01/28 15:52:49
heap()->threadState()
keishi
2016/02/29 06:02:33
Done.
| |
| 1186 } | 1186 } |
| 1187 | 1187 |
| 1188 void NormalPage::makeConsistentForMutator() | 1188 void NormalPage::makeConsistentForMutator() |
| 1189 { | 1189 { |
| 1190 Address startOfGap = payload(); | 1190 Address startOfGap = payload(); |
| 1191 for (Address headerAddress = payload(); headerAddress < payloadEnd();) { | 1191 for (Address headerAddress = payload(); headerAddress < payloadEnd();) { |
| 1192 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress); | 1192 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress); |
| 1193 size_t size = header->size(); | 1193 size_t size = header->size(); |
| 1194 ASSERT(size < blinkPagePayloadSize()); | 1194 ASSERT(size < blinkPagePayloadSize()); |
| 1195 if (header->isPromptlyFreed()) | 1195 if (header->isPromptlyFreed()) |
| (...skipping 246 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1442 } | 1442 } |
| 1443 | 1443 |
| 1444 void LargeObjectPage::removeFromHeap() | 1444 void LargeObjectPage::removeFromHeap() |
| 1445 { | 1445 { |
| 1446 static_cast<LargeObjectHeap*>(heap())->freeLargeObjectPage(this); | 1446 static_cast<LargeObjectHeap*>(heap())->freeLargeObjectPage(this); |
| 1447 } | 1447 } |
| 1448 | 1448 |
| 1449 void LargeObjectPage::sweep() | 1449 void LargeObjectPage::sweep() |
| 1450 { | 1450 { |
| 1451 heapObjectHeader()->unmark(); | 1451 heapObjectHeader()->unmark(); |
| 1452 Heap::increaseMarkedObjectSize(size()); | 1452 ThreadState::current()->gcGroup()->heapStats().increaseMarkedObjectSize(size()); |
|
haraken
2016/01/28 15:52:49
heap()->threadState()
keishi
2016/02/29 06:02:33
Done.
| |
| 1453 } | 1453 } |
| 1454 | 1454 |
| 1455 void LargeObjectPage::makeConsistentForGC() | 1455 void LargeObjectPage::makeConsistentForGC() |
| 1456 { | 1456 { |
| 1457 HeapObjectHeader* header = heapObjectHeader(); | 1457 HeapObjectHeader* header = heapObjectHeader(); |
| 1458 if (header->isMarked()) { | 1458 if (header->isMarked()) { |
| 1459 header->unmark(); | 1459 header->unmark(); |
| 1460 Heap::increaseMarkedObjectSize(size()); | 1460 ThreadState::current()->gcGroup()->heapStats().increaseMarkedObjectSize(size()); |
|
haraken
2016/01/28 15:52:49
heap()->threadState()
keishi
2016/02/29 06:02:33
Done.
| |
| 1461 } else { | 1461 } else { |
| 1462 header->markDead(); | 1462 header->markDead(); |
| 1463 } | 1463 } |
| 1464 } | 1464 } |
| 1465 | 1465 |
| 1466 void LargeObjectPage::makeConsistentForMutator() | 1466 void LargeObjectPage::makeConsistentForMutator() |
| 1467 { | 1467 { |
| 1468 HeapObjectHeader* header = heapObjectHeader(); | 1468 HeapObjectHeader* header = heapObjectHeader(); |
| 1469 if (header->isMarked()) | 1469 if (header->isMarked()) |
| 1470 header->unmark(); | 1470 header->unmark(); |
| (...skipping 102 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1573 | 1573 |
| 1574 m_hasEntries = true; | 1574 m_hasEntries = true; |
| 1575 size_t index = hash(address); | 1575 size_t index = hash(address); |
| 1576 ASSERT(!(index & 1)); | 1576 ASSERT(!(index & 1)); |
| 1577 Address cachePage = roundToBlinkPageStart(address); | 1577 Address cachePage = roundToBlinkPageStart(address); |
| 1578 m_entries[index + 1] = m_entries[index]; | 1578 m_entries[index + 1] = m_entries[index]; |
| 1579 m_entries[index] = cachePage; | 1579 m_entries[index] = cachePage; |
| 1580 } | 1580 } |
| 1581 | 1581 |
| 1582 } // namespace blink | 1582 } // namespace blink |
| OLD | NEW |