Unified Diff: src/heap/spaces.cc

Issue 2504993002: [heap] Minor MC: Add evacuation phase (Closed)
Patch Set: Rebase (created 4 years, 1 month ago)
// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/spaces.h"

#include <utility>

#include "src/base/bits.h"
#include "src/base/platform/platform.h"
(...skipping 1208 matching lines...)
    while ((p = collector->sweeper().GetSweptPageSafe(this)) != nullptr) {
      // Only during compaction can pages actually change ownership. This is
      // safe because there is no other competing action on the page links
      // during compaction.
      if (is_local() && (p->owner() != this)) {
        base::LockGuard<base::Mutex> guard(
            reinterpret_cast<PagedSpace*>(p->owner())->mutex());
        p->Unlink();
        p->set_owner(this);
        p->InsertAfter(anchor_.prev_page());
+     } else {
+       CHECK_EQ(this, p->owner());
+       // Regular refill on the main thread.
+       if (p->available_in_free_list() < kPageReuseThreshold) {
+         // Relink categories with only a little memory left to just before
+         // the anchor, so allocation prefers pages with more free space.
+         p->Unlink();
+         p->InsertAfter(anchor()->prev_page());
+       }
      }
      added += RelinkFreeListCategories(p);
      added += p->wasted_memory();
      if (is_local() && (added > kCompactionMemoryWanted)) break;
    }
  }
  accounting_stats_.IncreaseCapacity(added);
}

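For orientation: the new else branch above moves pages whose free list holds less than kPageReuseThreshold bytes to the back of the page list, so that later consumers scanning from the front (such as RemovePageSafe() below) find pages with plenty of free memory first. A minimal standalone sketch of that ordering policy, with Page and the threshold value as illustrative stand-ins rather than V8's real types:

#include <cstddef>
#include <cstdio>
#include <list>

// Hypothetical stand-in for v8::internal::Page.
struct Page {
  size_t available_in_free_list;  // bytes still usable on this page
};

constexpr size_t kPageReuseThreshold = 4 * 1024;  // assumed value

// Move pages with little free memory to the back of the list, so that
// consumers scanning from the front find reusable pages first.
void RelinkNearlyFullPages(std::list<Page*>* pages) {
  size_t n = pages->size();  // visit each original page exactly once
  auto it = pages->begin();
  for (size_t i = 0; i < n; ++i) {
    auto cur = it++;
    if ((*cur)->available_in_free_list < kPageReuseThreshold) {
      // Equivalent of p->Unlink(); p->InsertAfter(anchor()->prev_page());
      pages->splice(pages->end(), *pages, cur);
    }
  }
}

int main() {
  Page a{1024}, b{64 * 1024}, c{2048};
  std::list<Page*> pages = {&a, &b, &c};
  RelinkNearlyFullPages(&pages);
  for (Page* p : pages)
    std::printf("%zu\n", p->available_in_free_list);  // 65536, 1024, 2048
}

std::list::splice mirrors the Unlink()/InsertAfter() pair: the node is moved in place without copying and without invalidating iterators to other pages.
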
void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
(...skipping 60 matching lines...)
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    Address cur = obj->address();
    Address next = cur + obj->Size();
    if ((cur <= addr) && (addr < next)) return obj;
  }

  UNREACHABLE();
  return Smi::kZero;
}

+Page* PagedSpace::RemovePageSafe() {
+  base::LockGuard<base::Mutex> guard(mutex());
+
+  if (anchor()->next_page() == anchor() ||
+      anchor()->next_page()->available_in_free_list() < kPageReuseThreshold)
+    return nullptr;
+
+  Page* page = anchor()->next_page();
+  AccountUncommitted(page->size());
+  accounting_stats_.DeallocateBytes(page->LiveBytesFromFreeList());
+  accounting_stats_.DecreaseCapacity(page->size());
+  page->Unlink();
+  UnlinkFreeListCategories(page);
+  return page;
+}
+
+void PagedSpace::AddPage(Page* page) {
+  AccountCommitted(page->size());
+  accounting_stats_.IncreaseCapacity(page->size());
+  accounting_stats_.AllocateBytes(page->LiveBytesFromFreeList());
+  page->set_owner(this);
+  RelinkFreeListCategories(page);
+  page->InsertAfter(anchor()->prev_page());
+}

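RemovePageSafe() and AddPage() form a hand-off pair: the source space detaches a page together with its capacity and allocated-bytes accounting under its mutex, and the destination takes both over. A standalone sketch of that invariant, with toy types in place of V8's (the availability check approximates available_in_free_list()):

#include <cstddef>
#include <deque>
#include <mutex>

struct Page {
  size_t size;        // committed bytes on this page
  size_t live_bytes;  // bytes still occupied by live objects
};

class Space {
 public:
  // Detach the front page if it is worth reusing; nullptr otherwise.
  Page* RemovePageSafe(size_t reuse_threshold) {
    std::lock_guard<std::mutex> guard(mutex_);
    if (pages_.empty() ||
        pages_.front()->size - pages_.front()->live_bytes < reuse_threshold) {
      return nullptr;
    }
    Page* page = pages_.front();
    pages_.pop_front();
    capacity_ -= page->size;        // accounting leaves with the page
    allocated_ -= page->live_bytes;
    return page;
  }

  // Attach a page and take over its accounting.
  void AddPage(Page* page) {
    std::lock_guard<std::mutex> guard(mutex_);
    capacity_ += page->size;
    allocated_ += page->live_bytes;
    pages_.push_back(page);
  }

 private:
  std::mutex mutex_;
  std::deque<Page*> pages_;
  size_t capacity_ = 0;
  size_t allocated_ = 0;
};

int main() {
  Page page{512 * 1024, 64 * 1024};
  Space old_space, compaction_space;
  old_space.AddPage(&page);
  if (Page* stolen = old_space.RemovePageSafe(/*reuse_threshold=*/4096)) {
    compaction_space.AddPage(stolen);  // accounting moved with the page
  }
}

SlowAllocateRaw() below uses exactly this pair: a compaction space calls heap()->old_space()->RemovePageSafe() and, on success, AddPage() on itself before retrying the free-list allocation.
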
void PagedSpace::ShrinkImmortalImmovablePages() {
  DCHECK(!heap()->deserialization_complete());
  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
  EmptyAllocationInfo();
  ResetFreeList();

  for (Page* page : *this) {
    DCHECK(page->IsFlagSet(Page::NEVER_EVACUATE));
    size_t unused = page->ShrinkToHighWaterMark();
    accounting_stats_.DecreaseCapacity(static_cast<intptr_t>(unused));
(...skipping 120 matching lines...)
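The loop above charges each page's ShrinkToHighWaterMark() result against the space's capacity. A toy model of that accounting, assuming only that a page can release everything past its high water mark (types and sizes are illustrative, not V8's):

#include <cstdio>

struct Page {
  int area_end;
  int high_water_mark;  // highest address ever allocated on this page
  // Release the tail of the page; returns how many bytes became unused.
  int ShrinkToHighWaterMark() {
    int unused = area_end - high_water_mark;
    area_end = high_water_mark;
    return unused;
  }
};

int main() {
  Page pages[] = {{4096, 1000}, {4096, 4096}};
  int capacity = 2 * 4096;
  for (Page& p : pages) capacity -= p.ShrinkToHighWaterMark();
  std::printf("capacity after shrink: %d\n", capacity);  // 1000 + 4096
}
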

#ifdef DEBUG
void PagedSpace::Print() {}
#endif

#ifdef VERIFY_HEAP
void PagedSpace::Verify(ObjectVisitor* visitor) {
  bool allocation_pointer_found_in_space =
      (allocation_info_.top() == allocation_info_.limit());
  for (Page* page : *this) {
+   if (page->IsFlagSet(Page::CANNOT_BE_VERIFIED)) continue;
    CHECK(page->owner() == this);
    if (page == Page::FromAllocationAreaAddress(allocation_info_.top())) {
      allocation_pointer_found_in_space = true;
    }
    CHECK(page->SweepingDone());
    HeapObjectIterator it(page);
    Address end_of_previous_object = page->area_start();
    Address top = page->area_end();
    int black_size = 0;
    for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
(...skipping 381 matching lines...)
  // allocation pointer.
  Address current = to_space_.first_page()->area_start();
  CHECK_EQ(current, to_space_.space_start());

  while (current != top()) {
    if (!Page::IsAlignedToPageSize(current)) {
      // The allocation pointer should not be in the middle of an object.
      CHECK(!Page::FromAllocationAreaAddress(current)->ContainsLimit(top()) ||
            current < top());

+     if (Page::FromAddress(current)->IsFlagSet(Page::CANNOT_BE_VERIFIED)) {
+       Page* page = Page::FromAddress(current);
+       if (page->next_page()->is_anchor()) {
+         current = top();
+       } else {
+         current = Page::FromAddress(current)->area_end();
+       }
+       continue;
+     }
+
      HeapObject* object = HeapObject::FromAddress(current);

      // The first word should be a map, and we expect all map pointers to
      // be in map space.
      Map* map = object->map();
      CHECK(map->IsMap());
      CHECK(heap()->map_space()->Contains(map));

      // The object should not be code or a map.
      CHECK(!object->IsMap());
(...skipping 968 matching lines...)
    return free_list_.Allocate(size_in_bytes);
  }
  return nullptr;
}

HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
  DCHECK_GE(size_in_bytes, 0);
  const int kMaxPagesToSweep = 1;

  // Allocation in this space has failed.
-
  MarkCompactCollector* collector = heap()->mark_compact_collector();
  // Sweeping is still in progress.
  if (collector->sweeping_in_progress()) {
    // First try to refill the free-list; concurrent sweeper threads
    // may have freed some objects in the meantime.
    RefillFreeList();

    // Retry the free-list allocation.
    HeapObject* object =
        free_list_.Allocate(static_cast<size_t>(size_in_bytes));
    if (object != NULL) return object;

    // If sweeping is still in progress, try to sweep pages on the main
    // thread.
    int max_freed = collector->sweeper().ParallelSweepSpace(
        identity(), size_in_bytes, kMaxPagesToSweep);
    RefillFreeList();
    if (max_freed >= size_in_bytes) {
      object = free_list_.Allocate(static_cast<size_t>(size_in_bytes));
      if (object != nullptr) return object;
    }
+ } else if (is_local()) {
+   // Sweeping is not in progress and we are on a {CompactionSpace}.
+   Page* page = heap()->old_space()->RemovePageSafe();
+   if (page != nullptr) {
+     PrintF("Reusing page: available free list memory: %" PRIuS "\n",
+            page->available_in_free_list());
+     AddPage(page);
+     HeapObject* object =
+         free_list_.Allocate(static_cast<size_t>(size_in_bytes));
+     if (object != nullptr) return object;
+   }
  }

  if (heap()->ShouldExpandOldGenerationOnAllocationFailure() && Expand()) {
    DCHECK((CountTotalPages() > 1) ||
           (static_cast<size_t>(size_in_bytes) <= free_list_.Available()));
    return free_list_.Allocate(static_cast<size_t>(size_in_bytes));
  }

  // If sweeper threads are active, wait for them at that point and steal
  // elements from their free-lists. Allocation may still fail there, which
(...skipping 378 matching lines...)
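Taken together, the slow path now tries escalating fallbacks, with the page-stealing branch new in this patch: refill from concurrent sweepers, sweep on the main thread, reuse a mostly-free old-space page (compaction spaces only), and finally expand. A compilable toy model of just that ordering (all numbers and types are stand-ins, not V8's):

#include <cstdio>

struct ToySpace {
  int free_list_bytes;       // memory currently on the free list
  bool sweeping_in_progress; // concurrent sweeper active?
  bool is_compaction_space;  // local evacuation space?

  bool TryAllocate(int bytes) {
    if (free_list_bytes < bytes) return false;
    free_list_bytes -= bytes;
    return true;
  }

  bool SlowAllocate(int bytes) {
    if (sweeping_in_progress) {
      free_list_bytes += 8;    // 1. refill from concurrent sweepers
      if (TryAllocate(bytes)) return true;
      free_list_bytes += 8;    // 2. sweep a page on the main thread
      if (TryAllocate(bytes)) return true;
    } else if (is_compaction_space) {
      free_list_bytes += 512;  // 3. steal a reusable page from old space
      if (TryAllocate(bytes)) return true;
    }
    free_list_bytes += 512;    // 4. expand with a fresh page
    return TryAllocate(bytes);
  }
};

int main() {
  ToySpace space{0, /*sweeping=*/false, /*compaction=*/true};
  std::printf("allocated: %d\n", space.SlowAllocate(64));  // prints 1
}

Reusing an old-space page (step 3) lets a compaction space make progress without committing fresh memory, which is why it is tried before Expand().
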
    object->ShortPrint();
    PrintF("\n");
  }
  printf(" --------------------------------------\n");
  printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
}

#endif  // DEBUG
}  // namespace internal
}  // namespace v8