Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(32)

Side by Side Diff: third_party/WebKit/Source/platform/heap/HeapPage.cpp

Issue 1477023003: Refactor the Heap into ThreadHeap to prepare for per thread heaps Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 5 years ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 /* 1 /*
2 * Copyright (C) 2013 Google Inc. All rights reserved. 2 * Copyright (C) 2013 Google Inc. All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are 5 * modification, are permitted provided that the following conditions are
6 * met: 6 * met:
7 * 7 *
8 * * Redistributions of source code must retain the above copyright 8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer. 9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above 10 * * Redistributions in binary form must reproduce the above
(...skipping 109 matching lines...) Expand 10 before | Expand all | Expand 10 after
120 ASSERT(!m_firstUnsweptPage); 120 ASSERT(!m_firstUnsweptPage);
121 } 121 }
122 122
123 void BaseHeap::cleanupPages() 123 void BaseHeap::cleanupPages()
124 { 124 {
125 clearFreeLists(); 125 clearFreeLists();
126 126
127 ASSERT(!m_firstUnsweptPage); 127 ASSERT(!m_firstUnsweptPage);
128 // Add the BaseHeap's pages to the orphanedPagePool. 128 // Add the BaseHeap's pages to the orphanedPagePool.
129 for (BasePage* page = m_firstPage; page; page = page->next()) { 129 for (BasePage* page = m_firstPage; page; page = page->next()) {
130 Heap::decreaseAllocatedSpace(page->size()); 130 ThreadState::current()->decreaseAllocatedSpace(page->size());
131 Heap::orphanedPagePool()->addOrphanedPage(heapIndex(), page); 131 ThreadState::current()->orphanedPagePool()->addOrphanedPage(heapIndex(), page);
132 } 132 }
133 m_firstPage = nullptr; 133 m_firstPage = nullptr;
134 } 134 }
135 135
136 void BaseHeap::takeSnapshot(const String& dumpBaseName, ThreadState::GCSnapshotI nfo& info) 136 void BaseHeap::takeSnapshot(const String& dumpBaseName, ThreadState::GCSnapshotI nfo& info)
137 { 137 {
138 // |dumpBaseName| at this point is "blink_gc/thread_X/heaps/HeapName" 138 // |dumpBaseName| at this point is "blink_gc/thread_X/heaps/HeapName"
139 WebMemoryAllocatorDump* allocatorDump = BlinkGCMemoryDumpProvider::instance( )->createMemoryAllocatorDumpForCurrentGC(dumpBaseName); 139 WebMemoryAllocatorDump* allocatorDump = BlinkGCMemoryDumpProvider::instance( )->createMemoryAllocatorDumpForCurrentGC(dumpBaseName);
140 size_t pageIndex = 0; 140 size_t pageIndex = 0;
141 size_t heapTotalFreeSize = 0; 141 size_t heapTotalFreeSize = 0;
(...skipping 157 matching lines...) Expand 10 before | Expand all | Expand 10 after
299 299
300 if (threadState()->isMainThread()) 300 if (threadState()->isMainThread())
301 ScriptForbiddenScope::enter(); 301 ScriptForbiddenScope::enter();
302 302
303 Address result = lazySweepPages(allocationSize, gcInfoIndex); 303 Address result = lazySweepPages(allocationSize, gcInfoIndex);
304 304
305 if (threadState()->isMainThread()) 305 if (threadState()->isMainThread())
306 ScriptForbiddenScope::exit(); 306 ScriptForbiddenScope::exit();
307 307
308 threadState()->accumulateSweepingTime(WTF::currentTimeMS() - startTime); 308 threadState()->accumulateSweepingTime(WTF::currentTimeMS() - startTime);
309 Heap::reportMemoryUsageForTracing(); 309 threadState()->reportMemoryUsageForTracing();
310 310
311 return result; 311 return result;
312 } 312 }
313 313
314 void BaseHeap::sweepUnsweptPage() 314 void BaseHeap::sweepUnsweptPage()
315 { 315 {
316 BasePage* page = m_firstUnsweptPage; 316 BasePage* page = m_firstUnsweptPage;
317 if (page->isEmpty()) { 317 if (page->isEmpty()) {
318 page->unlink(&m_firstUnsweptPage); 318 page->unlink(&m_firstUnsweptPage);
319 page->removeFromHeap(); 319 page->removeFromHeap();
(...skipping 17 matching lines...) Expand all
337 RELEASE_ASSERT(threadState()->isSweepingInProgress()); 337 RELEASE_ASSERT(threadState()->isSweepingInProgress());
338 ASSERT(threadState()->sweepForbidden()); 338 ASSERT(threadState()->sweepForbidden());
339 ASSERT(!threadState()->isMainThread() || ScriptForbiddenScope::isScriptForbi dden()); 339 ASSERT(!threadState()->isMainThread() || ScriptForbiddenScope::isScriptForbi dden());
340 340
341 int pageCount = 1; 341 int pageCount = 1;
342 while (m_firstUnsweptPage) { 342 while (m_firstUnsweptPage) {
343 sweepUnsweptPage(); 343 sweepUnsweptPage();
344 if (pageCount % deadlineCheckInterval == 0) { 344 if (pageCount % deadlineCheckInterval == 0) {
345 if (deadlineSeconds <= Platform::current()->monotonicallyIncreasingT imeSeconds()) { 345 if (deadlineSeconds <= Platform::current()->monotonicallyIncreasingT imeSeconds()) {
346 // Deadline has come. 346 // Deadline has come.
347 Heap::reportMemoryUsageForTracing(); 347 threadState()->reportMemoryUsageForTracing();
348 return !m_firstUnsweptPage; 348 return !m_firstUnsweptPage;
349 } 349 }
350 } 350 }
351 pageCount++; 351 pageCount++;
352 } 352 }
353 Heap::reportMemoryUsageForTracing(); 353 threadState()->reportMemoryUsageForTracing();
354 return true; 354 return true;
355 } 355 }
356 356
357 void BaseHeap::completeSweep() 357 void BaseHeap::completeSweep()
358 { 358 {
359 RELEASE_ASSERT(threadState()->isSweepingInProgress()); 359 RELEASE_ASSERT(threadState()->isSweepingInProgress());
360 ASSERT(threadState()->sweepForbidden()); 360 ASSERT(threadState()->sweepForbidden());
361 ASSERT(!threadState()->isMainThread() || ScriptForbiddenScope::isScriptForbi dden()); 361 ASSERT(!threadState()->isMainThread() || ScriptForbiddenScope::isScriptForbi dden());
362 362
363 while (m_firstUnsweptPage) { 363 while (m_firstUnsweptPage) {
364 sweepUnsweptPage(); 364 sweepUnsweptPage();
365 } 365 }
366 Heap::reportMemoryUsageForTracing(); 366 threadState()->reportMemoryUsageForTracing();
367 } 367 }
368 368
369 NormalPageHeap::NormalPageHeap(ThreadState* state, int index) 369 NormalPageHeap::NormalPageHeap(ThreadState* state, int index)
370 : BaseHeap(state, index) 370 : BaseHeap(state, index)
371 , m_currentAllocationPoint(nullptr) 371 , m_currentAllocationPoint(nullptr)
372 , m_remainingAllocationSize(0) 372 , m_remainingAllocationSize(0)
373 , m_lastRemainingAllocationSize(0) 373 , m_lastRemainingAllocationSize(0)
374 , m_promptlyFreedSize(0) 374 , m_promptlyFreedSize(0)
375 { 375 {
376 clearFreeLists(); 376 clearFreeLists();
(...skipping 38 matching lines...) Expand 10 before | Expand all | Expand 10 after
415 if (m_freeList.takeSnapshot(dumpName)) { 415 if (m_freeList.takeSnapshot(dumpName)) {
416 WebMemoryAllocatorDump* bucketsDump = BlinkGCMemoryDumpProvider::instanc e()->createMemoryAllocatorDumpForCurrentGC(dumpName + "/buckets"); 416 WebMemoryAllocatorDump* bucketsDump = BlinkGCMemoryDumpProvider::instanc e()->createMemoryAllocatorDumpForCurrentGC(dumpName + "/buckets");
417 WebMemoryAllocatorDump* pagesDump = BlinkGCMemoryDumpProvider::instance( )->createMemoryAllocatorDumpForCurrentGC(dumpName + "/pages"); 417 WebMemoryAllocatorDump* pagesDump = BlinkGCMemoryDumpProvider::instance( )->createMemoryAllocatorDumpForCurrentGC(dumpName + "/pages");
418 BlinkGCMemoryDumpProvider::instance()->currentProcessMemoryDump()->addOw nershipEdge(pagesDump->guid(), bucketsDump->guid()); 418 BlinkGCMemoryDumpProvider::instance()->currentProcessMemoryDump()->addOw nershipEdge(pagesDump->guid(), bucketsDump->guid());
419 } 419 }
420 } 420 }
421 421
422 void NormalPageHeap::allocatePage() 422 void NormalPageHeap::allocatePage()
423 { 423 {
424 threadState()->shouldFlushHeapDoesNotContainCache(); 424 threadState()->shouldFlushHeapDoesNotContainCache();
425 PageMemory* pageMemory = Heap::freePagePool()->takeFreePage(heapIndex()); 425 PageMemory* pageMemory = threadState()->freePagePool()->takeFreePage(heapInd ex());
426 426
427 if (!pageMemory) { 427 if (!pageMemory) {
428 // Allocate a memory region for blinkPagesPerRegion pages that 428 // Allocate a memory region for blinkPagesPerRegion pages that
429 // will each have the following layout. 429 // will each have the following layout.
430 // 430 //
431 // [ guard os page | ... payload ... | guard os page ] 431 // [ guard os page | ... payload ... | guard os page ]
432 // ^---{ aligned to blink page size } 432 // ^---{ aligned to blink page size }
433 PageMemoryRegion* region = PageMemoryRegion::allocateNormalPages(); 433 PageMemoryRegion* region = PageMemoryRegion::allocateNormalPages();
434 434
435 // Setup the PageMemory object for each of the pages in the region. 435 // Setup the PageMemory object for each of the pages in the region.
436 for (size_t i = 0; i < blinkPagesPerRegion; ++i) { 436 for (size_t i = 0; i < blinkPagesPerRegion; ++i) {
437 PageMemory* memory = PageMemory::setupPageMemoryInRegion(region, i * blinkPageSize, blinkPagePayloadSize()); 437 PageMemory* memory = PageMemory::setupPageMemoryInRegion(region, i * blinkPageSize, blinkPagePayloadSize());
438 // Take the first possible page ensuring that this thread actually 438 // Take the first possible page ensuring that this thread actually
439 // gets a page and add the rest to the page pool. 439 // gets a page and add the rest to the page pool.
440 if (!pageMemory) { 440 if (!pageMemory) {
441 bool result = memory->commit(); 441 bool result = memory->commit();
442 // If you hit the ASSERT, it will mean that you're hitting 442 // If you hit the ASSERT, it will mean that you're hitting
443 // the limit of the number of mmapped regions OS can support 443 // the limit of the number of mmapped regions OS can support
444 // (e.g., /proc/sys/vm/max_map_count in Linux). 444 // (e.g., /proc/sys/vm/max_map_count in Linux).
445 RELEASE_ASSERT(result); 445 RELEASE_ASSERT(result);
446 pageMemory = memory; 446 pageMemory = memory;
447 } else { 447 } else {
448 Heap::freePagePool()->addFreePage(heapIndex(), memory); 448 threadState()->freePagePool()->addFreePage(heapIndex(), memory);
449 } 449 }
450 } 450 }
451 } 451 }
452 452
453 NormalPage* page = new (pageMemory->writableStart()) NormalPage(pageMemory, this); 453 NormalPage* page = new (pageMemory->writableStart()) NormalPage(pageMemory, this);
454 page->link(&m_firstPage); 454 page->link(&m_firstPage);
455 455
456 Heap::increaseAllocatedSpace(page->size()); 456 threadState()->increaseAllocatedSpace(page->size());
457 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) 457 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER)
458 // Allow the following addToFreeList() to add the newly allocated memory 458 // Allow the following addToFreeList() to add the newly allocated memory
459 // to the free list. 459 // to the free list.
460 ASAN_UNPOISON_MEMORY_REGION(page->payload(), page->payloadSize()); 460 ASAN_UNPOISON_MEMORY_REGION(page->payload(), page->payloadSize());
461 Address address = page->payload(); 461 Address address = page->payload();
462 for (size_t i = 0; i < page->payloadSize(); i++) 462 for (size_t i = 0; i < page->payloadSize(); i++)
463 address[i] = reuseAllowedZapValue; 463 address[i] = reuseAllowedZapValue;
464 ASAN_POISON_MEMORY_REGION(page->payload(), page->payloadSize()); 464 ASAN_POISON_MEMORY_REGION(page->payload(), page->payloadSize());
465 #endif 465 #endif
466 addToFreeList(page->payload(), page->payloadSize()); 466 addToFreeList(page->payload(), page->payloadSize());
467 } 467 }
468 468
469 void NormalPageHeap::freePage(NormalPage* page) 469 void NormalPageHeap::freePage(NormalPage* page)
470 { 470 {
471 Heap::decreaseAllocatedSpace(page->size()); 471 ThreadState::current()->decreaseAllocatedSpace(page->size());
472 472
473 if (page->terminating()) { 473 if (page->terminating()) {
474 // The thread is shutting down and this page is being removed as a part 474 // The thread is shutting down and this page is being removed as a part
475 // of the thread local GC. In that case the object could be traced in 475 // of the thread local GC. In that case the object could be traced in
476 // the next global GC if there is a dangling pointer from a live thread 476 // the next global GC if there is a dangling pointer from a live thread
477 // heap to this dead thread heap. To guard against this, we put the 477 // heap to this dead thread heap. To guard against this, we put the
478 // page into the orphaned page pool and zap the page memory. This 478 // page into the orphaned page pool and zap the page memory. This
479 // ensures that tracing the dangling pointer in the next global GC just 479 // ensures that tracing the dangling pointer in the next global GC just
480 // crashes instead of causing use-after-frees. After the next global 480 // crashes instead of causing use-after-frees. After the next global
481 // GC, the orphaned pages are removed. 481 // GC, the orphaned pages are removed.
482 Heap::orphanedPagePool()->addOrphanedPage(heapIndex(), page); 482 ThreadState::current()->orphanedPagePool()->addOrphanedPage(heapIndex(), page);
483 } else { 483 } else {
484 PageMemory* memory = page->storage(); 484 PageMemory* memory = page->storage();
485 page->~NormalPage(); 485 page->~NormalPage();
486 Heap::freePagePool()->addFreePage(heapIndex(), memory); 486 threadState()->freePagePool()->addFreePage(heapIndex(), memory);
487 } 487 }
488 } 488 }
489 489
490 bool NormalPageHeap::coalesce() 490 bool NormalPageHeap::coalesce()
491 { 491 {
492 // Don't coalesce heaps if there are not enough promptly freed entries 492 // Don't coalesce heaps if there are not enough promptly freed entries
493 // to be coalesced. 493 // to be coalesced.
494 // 494 //
495 // FIXME: This threshold is determined just to optimize blink_perf 495 // FIXME: This threshold is determined just to optimize blink_perf
 496 // benchmarks. Coalescing is very sensitive to the threshold and 496 // benchmarks. Coalescing is very sensitive to the threshold and
(...skipping 45 matching lines...) Expand 10 before | Expand all | Expand 10 after
542 if (startOfGap != headerAddress) 542 if (startOfGap != headerAddress)
543 addToFreeList(startOfGap, headerAddress - startOfGap); 543 addToFreeList(startOfGap, headerAddress - startOfGap);
544 544
545 headerAddress += size; 545 headerAddress += size;
546 startOfGap = headerAddress; 546 startOfGap = headerAddress;
547 } 547 }
548 548
549 if (startOfGap != page->payloadEnd()) 549 if (startOfGap != page->payloadEnd())
550 addToFreeList(startOfGap, page->payloadEnd() - startOfGap); 550 addToFreeList(startOfGap, page->payloadEnd() - startOfGap);
551 } 551 }
552 Heap::decreaseAllocatedObjectSize(freedSize); 552 threadState()->decreaseAllocatedObjectSize(freedSize);
553 ASSERT(m_promptlyFreedSize == freedSize); 553 ASSERT(m_promptlyFreedSize == freedSize);
554 m_promptlyFreedSize = 0; 554 m_promptlyFreedSize = 0;
555 return true; 555 return true;
556 } 556 }
557 557
558 void NormalPageHeap::promptlyFreeObject(HeapObjectHeader* header) 558 void NormalPageHeap::promptlyFreeObject(HeapObjectHeader* header)
559 { 559 {
560 ASSERT(!threadState()->sweepForbidden()); 560 ASSERT(!threadState()->sweepForbidden());
561 ASSERT(header->checkHeader()); 561 ASSERT(header->checkHeader());
562 Address address = reinterpret_cast<Address>(header); 562 Address address = reinterpret_cast<Address>(header);
(...skipping 97 matching lines...) Expand 10 before | Expand all | Expand 10 after
660 } 660 }
661 661
662 void NormalPageHeap::setRemainingAllocationSize(size_t newRemainingAllocationSiz e) 662 void NormalPageHeap::setRemainingAllocationSize(size_t newRemainingAllocationSiz e)
663 { 663 {
664 m_remainingAllocationSize = newRemainingAllocationSize; 664 m_remainingAllocationSize = newRemainingAllocationSize;
665 665
666 // Sync recorded allocated-object size: 666 // Sync recorded allocated-object size:
667 // - if previous alloc checkpoint is larger, allocation size has increased. 667 // - if previous alloc checkpoint is larger, allocation size has increased.
668 // - if smaller, a net reduction in size since last call to updateRemaining AllocationSize(). 668 // - if smaller, a net reduction in size since last call to updateRemaining AllocationSize().
669 if (m_lastRemainingAllocationSize > m_remainingAllocationSize) 669 if (m_lastRemainingAllocationSize > m_remainingAllocationSize)
670 Heap::increaseAllocatedObjectSize(m_lastRemainingAllocationSize - m_rema iningAllocationSize); 670 threadState()->increaseAllocatedObjectSize(m_lastRemainingAllocationSize - m_remainingAllocationSize);
671 else if (m_lastRemainingAllocationSize != m_remainingAllocationSize) 671 else if (m_lastRemainingAllocationSize != m_remainingAllocationSize)
672 Heap::decreaseAllocatedObjectSize(m_remainingAllocationSize - m_lastRema iningAllocationSize); 672 threadState()->decreaseAllocatedObjectSize(m_remainingAllocationSize - m _lastRemainingAllocationSize);
673 m_lastRemainingAllocationSize = m_remainingAllocationSize; 673 m_lastRemainingAllocationSize = m_remainingAllocationSize;
674 } 674 }
675 675
676 void NormalPageHeap::updateRemainingAllocationSize() 676 void NormalPageHeap::updateRemainingAllocationSize()
677 { 677 {
678 if (m_lastRemainingAllocationSize > remainingAllocationSize()) { 678 if (m_lastRemainingAllocationSize > remainingAllocationSize()) {
679 Heap::increaseAllocatedObjectSize(m_lastRemainingAllocationSize - remain ingAllocationSize()); 679 threadState()->increaseAllocatedObjectSize(m_lastRemainingAllocationSize - remainingAllocationSize());
680 m_lastRemainingAllocationSize = remainingAllocationSize(); 680 m_lastRemainingAllocationSize = remainingAllocationSize();
681 } 681 }
682 ASSERT(m_lastRemainingAllocationSize == remainingAllocationSize()); 682 ASSERT(m_lastRemainingAllocationSize == remainingAllocationSize());
683 } 683 }
684 684
685 void NormalPageHeap::setAllocationPoint(Address point, size_t size) 685 void NormalPageHeap::setAllocationPoint(Address point, size_t size)
686 { 686 {
687 #if ENABLE(ASSERT) 687 #if ENABLE(ASSERT)
688 if (point) { 688 if (point) {
689 ASSERT(size); 689 ASSERT(size);
(...skipping 145 matching lines...) Expand 10 before | Expand all | Expand 10 after
835 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); 835 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask));
836 LargeObjectPage* largeObject = new (largeObjectAddress) LargeObjectPage(page Memory, this, allocationSize); 836 LargeObjectPage* largeObject = new (largeObjectAddress) LargeObjectPage(page Memory, this, allocationSize);
837 ASSERT(header->checkHeader()); 837 ASSERT(header->checkHeader());
838 838
839 // Poison the object header and allocationGranularity bytes after the object 839 // Poison the object header and allocationGranularity bytes after the object
840 ASAN_POISON_MEMORY_REGION(header, sizeof(*header)); 840 ASAN_POISON_MEMORY_REGION(header, sizeof(*header));
841 ASAN_POISON_MEMORY_REGION(largeObject->address() + largeObject->size(), allo cationGranularity); 841 ASAN_POISON_MEMORY_REGION(largeObject->address() + largeObject->size(), allo cationGranularity);
842 842
843 largeObject->link(&m_firstPage); 843 largeObject->link(&m_firstPage);
844 844
845 Heap::increaseAllocatedSpace(largeObject->size()); 845 threadState()->increaseAllocatedSpace(largeObject->size());
846 Heap::increaseAllocatedObjectSize(largeObject->size()); 846 threadState()->increaseAllocatedObjectSize(largeObject->size());
847 return result; 847 return result;
848 } 848 }
849 849
850 void LargeObjectHeap::freeLargeObjectPage(LargeObjectPage* object) 850 void LargeObjectHeap::freeLargeObjectPage(LargeObjectPage* object)
851 { 851 {
852 ASAN_UNPOISON_MEMORY_REGION(object->payload(), object->payloadSize()); 852 ASAN_UNPOISON_MEMORY_REGION(object->payload(), object->payloadSize());
853 object->heapObjectHeader()->finalize(object->payload(), object->payloadSize( )); 853 object->heapObjectHeader()->finalize(object->payload(), object->payloadSize( ));
854 Heap::decreaseAllocatedSpace(object->size()); 854 ThreadState::current()->decreaseAllocatedSpace(object->size());
855 855
856 // Unpoison the object header and allocationGranularity bytes after the 856 // Unpoison the object header and allocationGranularity bytes after the
857 // object before freeing. 857 // object before freeing.
858 ASAN_UNPOISON_MEMORY_REGION(object->heapObjectHeader(), sizeof(HeapObjectHea der)); 858 ASAN_UNPOISON_MEMORY_REGION(object->heapObjectHeader(), sizeof(HeapObjectHea der));
859 ASAN_UNPOISON_MEMORY_REGION(object->address() + object->size(), allocationGr anularity); 859 ASAN_UNPOISON_MEMORY_REGION(object->address() + object->size(), allocationGr anularity);
860 860
861 if (object->terminating()) { 861 if (object->terminating()) {
862 ASSERT(ThreadState::current()->isTerminating()); 862 ASSERT(ThreadState::current()->isTerminating());
863 // The thread is shutting down and this page is being removed as a part 863 // The thread is shutting down and this page is being removed as a part
864 // of the thread local GC. In that case the object could be traced in 864 // of the thread local GC. In that case the object could be traced in
865 // the next global GC if there is a dangling pointer from a live thread 865 // the next global GC if there is a dangling pointer from a live thread
866 // heap to this dead thread heap. To guard against this, we put the 866 // heap to this dead thread heap. To guard against this, we put the
867 // page into the orphaned page pool and zap the page memory. This 867 // page into the orphaned page pool and zap the page memory. This
868 // ensures that tracing the dangling pointer in the next global GC just 868 // ensures that tracing the dangling pointer in the next global GC just
869 // crashes instead of causing use-after-frees. After the next global 869 // crashes instead of causing use-after-frees. After the next global
870 // GC, the orphaned pages are removed. 870 // GC, the orphaned pages are removed.
871 Heap::orphanedPagePool()->addOrphanedPage(heapIndex(), object); 871 ThreadState::current()->orphanedPagePool()->addOrphanedPage(heapIndex(), object);
872 } else { 872 } else {
873 ASSERT(!ThreadState::current()->isTerminating()); 873 ASSERT(!ThreadState::current()->isTerminating());
874 PageMemory* memory = object->storage(); 874 PageMemory* memory = object->storage();
875 object->~LargeObjectPage(); 875 object->~LargeObjectPage();
876 delete memory; 876 delete memory;
877 } 877 }
878 } 878 }
879 879
880 Address LargeObjectHeap::lazySweepPages(size_t allocationSize, size_t gcInfoInde x) 880 Address LargeObjectHeap::lazySweepPages(size_t allocationSize, size_t gcInfoInde x)
881 { 881 {
(...skipping 274 matching lines...) Expand 10 before | Expand all | Expand 10 after
1156 heapForNormalPage()->addToFreeList(startOfGap, headerAddress - start OfGap); 1156 heapForNormalPage()->addToFreeList(startOfGap, headerAddress - start OfGap);
1157 header->unmark(); 1157 header->unmark();
1158 headerAddress += header->size(); 1158 headerAddress += header->size();
1159 markedObjectSize += header->size(); 1159 markedObjectSize += header->size();
1160 startOfGap = headerAddress; 1160 startOfGap = headerAddress;
1161 } 1161 }
1162 if (startOfGap != payloadEnd()) 1162 if (startOfGap != payloadEnd())
1163 heapForNormalPage()->addToFreeList(startOfGap, payloadEnd() - startOfGap ); 1163 heapForNormalPage()->addToFreeList(startOfGap, payloadEnd() - startOfGap );
1164 1164
1165 if (markedObjectSize) 1165 if (markedObjectSize)
1166 Heap::increaseMarkedObjectSize(markedObjectSize); 1166 ThreadState::current()->increaseMarkedObjectSize(markedObjectSize);
1167 } 1167 }
1168 1168
1169 void NormalPage::makeConsistentForGC() 1169 void NormalPage::makeConsistentForGC()
1170 { 1170 {
1171 size_t markedObjectSize = 0; 1171 size_t markedObjectSize = 0;
1172 for (Address headerAddress = payload(); headerAddress < payloadEnd();) { 1172 for (Address headerAddress = payload(); headerAddress < payloadEnd();) {
1173 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAdd ress); 1173 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAdd ress);
1174 ASSERT(header->size() < blinkPagePayloadSize()); 1174 ASSERT(header->size() < blinkPagePayloadSize());
 1175 // Check first if this is a free list entry since we cannot call 1175 // Check first if this is a free list entry since we cannot call
1176 // isMarked on a free list entry. 1176 // isMarked on a free list entry.
1177 if (header->isFree()) { 1177 if (header->isFree()) {
1178 headerAddress += header->size(); 1178 headerAddress += header->size();
1179 continue; 1179 continue;
1180 } 1180 }
1181 ASSERT(header->checkHeader()); 1181 ASSERT(header->checkHeader());
1182 if (header->isMarked()) { 1182 if (header->isMarked()) {
1183 header->unmark(); 1183 header->unmark();
1184 markedObjectSize += header->size(); 1184 markedObjectSize += header->size();
1185 } else { 1185 } else {
1186 header->markDead(); 1186 header->markDead();
1187 } 1187 }
1188 headerAddress += header->size(); 1188 headerAddress += header->size();
1189 } 1189 }
1190 if (markedObjectSize) 1190 if (markedObjectSize)
1191 Heap::increaseMarkedObjectSize(markedObjectSize); 1191 ThreadState::current()->increaseMarkedObjectSize(markedObjectSize);
1192 } 1192 }
1193 1193
1194 void NormalPage::makeConsistentForMutator() 1194 void NormalPage::makeConsistentForMutator()
1195 { 1195 {
1196 Address startOfGap = payload(); 1196 Address startOfGap = payload();
1197 for (Address headerAddress = payload(); headerAddress < payloadEnd();) { 1197 for (Address headerAddress = payload(); headerAddress < payloadEnd();) {
1198 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAdd ress); 1198 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAdd ress);
1199 size_t size = header->size(); 1199 size_t size = header->size();
1200 ASSERT(size < blinkPagePayloadSize()); 1200 ASSERT(size < blinkPagePayloadSize());
1201 if (header->isPromptlyFreed()) 1201 if (header->isPromptlyFreed())
(...skipping 251 matching lines...) Expand 10 before | Expand all | Expand 10 after
1453 } 1453 }
1454 1454
1455 void LargeObjectPage::removeFromHeap() 1455 void LargeObjectPage::removeFromHeap()
1456 { 1456 {
1457 static_cast<LargeObjectHeap*>(heap())->freeLargeObjectPage(this); 1457 static_cast<LargeObjectHeap*>(heap())->freeLargeObjectPage(this);
1458 } 1458 }
1459 1459
1460 void LargeObjectPage::sweep() 1460 void LargeObjectPage::sweep()
1461 { 1461 {
1462 heapObjectHeader()->unmark(); 1462 heapObjectHeader()->unmark();
1463 Heap::increaseMarkedObjectSize(size()); 1463 ThreadState::current()->increaseMarkedObjectSize(size());
haraken 2015/11/30 02:54:42 ThreadState::current() needs to look up thread-loc
1464 } 1464 }
1465 1465
1466 void LargeObjectPage::makeConsistentForGC() 1466 void LargeObjectPage::makeConsistentForGC()
1467 { 1467 {
1468 HeapObjectHeader* header = heapObjectHeader(); 1468 HeapObjectHeader* header = heapObjectHeader();
1469 if (header->isMarked()) { 1469 if (header->isMarked()) {
1470 header->unmark(); 1470 header->unmark();
1471 Heap::increaseMarkedObjectSize(size()); 1471 ThreadState::current()->increaseMarkedObjectSize(size());
1472 } else { 1472 } else {
1473 header->markDead(); 1473 header->markDead();
1474 } 1474 }
1475 } 1475 }
1476 1476
1477 void LargeObjectPage::makeConsistentForMutator() 1477 void LargeObjectPage::makeConsistentForMutator()
1478 { 1478 {
1479 HeapObjectHeader* header = heapObjectHeader(); 1479 HeapObjectHeader* header = heapObjectHeader();
1480 if (header->isMarked()) 1480 if (header->isMarked())
1481 header->unmark(); 1481 header->unmark();
(...skipping 102 matching lines...) Expand 10 before | Expand all | Expand 10 after
1584 1584
1585 m_hasEntries = true; 1585 m_hasEntries = true;
1586 size_t index = hash(address); 1586 size_t index = hash(address);
1587 ASSERT(!(index & 1)); 1587 ASSERT(!(index & 1));
1588 Address cachePage = roundToBlinkPageStart(address); 1588 Address cachePage = roundToBlinkPageStart(address);
1589 m_entries[index + 1] = m_entries[index]; 1589 m_entries[index + 1] = m_entries[index];
1590 m_entries[index] = cachePage; 1590 m_entries[index] = cachePage;
1591 } 1591 }
1592 1592
1593 } // namespace blink 1593 } // namespace blink
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698