Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(1)

Side by Side Diff: src/heap/spaces.cc

Issue 2311963002: [heap,snapshot] Replace first page size from snapshots with page trimming (Closed)
Patch Set: Remove debugging print Created 4 years, 3 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « src/heap/spaces.h ('k') | src/isolate.cc » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2011 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "src/heap/spaces.h" 5 #include "src/heap/spaces.h"
6 6
7 #include <utility> 7 #include <utility>
8 8
9 #include "src/base/bits.h" 9 #include "src/base/bits.h"
10 #include "src/base/platform/platform.h" 10 #include "src/base/platform/platform.h"
(...skipping 598 matching lines...) Expand 10 before | Expand all | Expand 10 after
609 609
610 void MemoryChunk::Unlink() { 610 void MemoryChunk::Unlink() {
611 MemoryChunk* next_element = next_chunk(); 611 MemoryChunk* next_element = next_chunk();
612 MemoryChunk* prev_element = prev_chunk(); 612 MemoryChunk* prev_element = prev_chunk();
613 next_element->set_prev_chunk(prev_element); 613 next_element->set_prev_chunk(prev_element);
614 prev_element->set_next_chunk(next_element); 614 prev_element->set_next_chunk(next_element);
615 set_prev_chunk(NULL); 615 set_prev_chunk(NULL);
616 set_next_chunk(NULL); 616 set_next_chunk(NULL);
617 } 617 }
618 618
// Uncommits |bytes_to_shrink| bytes from the high end of |chunk|'s usable
// area. Only area_end_ is moved down; the chunk's overall size/reservation is
// left untouched (memory is uncommitted, not released back). |bytes_to_shrink|
// must be a non-zero multiple of the OS commit page size. For executable
// chunks, a guard is re-established at the new area end.
619 void MemoryAllocator::ShrinkChunk(MemoryChunk* chunk, size_t bytes_to_shrink) {
620 DCHECK_GE(bytes_to_shrink, static_cast<size_t>(base::OS::CommitPageSize()));
621 DCHECK_EQ(0, bytes_to_shrink % base::OS::CommitPageSize());
622 Address free_start = chunk->area_end_ - bytes_to_shrink;
623 // Don't adjust the size of the page. The area is just uncommitted but not
624 // released.
625 chunk->area_end_ -= bytes_to_shrink;
626 UncommitBlock(free_start, bytes_to_shrink);
627 if (chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
// Re-guard at the new area_end_: through the reservation when one exists,
// otherwise directly via the OS.
628 if (chunk->reservation_.IsReserved())
629 chunk->reservation_.Guard(chunk->area_end_);
630 else
631 base::OS::Guard(chunk->area_end_, base::OS::CommitPageSize());
632 }
633 }
619 634
620 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size, 635 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
621 intptr_t commit_area_size, 636 intptr_t commit_area_size,
622 Executability executable, 637 Executability executable,
623 Space* owner) { 638 Space* owner) {
624 DCHECK(commit_area_size <= reserve_area_size); 639 DCHECK(commit_area_size <= reserve_area_size);
625 640
626 size_t chunk_size; 641 size_t chunk_size;
627 Heap* heap = isolate_->heap(); 642 Heap* heap = isolate_->heap();
628 Address base = NULL; 643 Address base = NULL;
(...skipping 127 matching lines...) Expand 10 before | Expand all | Expand 10 after
756 return MemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end, 771 return MemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end,
757 executable, owner, &reservation); 772 executable, owner, &reservation);
758 } 773 }
759 774
760 775
761 void Page::ResetFreeListStatistics() { 776 void Page::ResetFreeListStatistics() {
762 wasted_memory_ = 0; 777 wasted_memory_ = 0;
763 available_in_free_list_ = 0; 778 available_in_free_list_ = 0;
764 } 779 }
765 780
// Uncommits the unused tail of this page above the high water mark (memory
// never allocated into since the mark was taken). Returns the number of bytes
// uncommitted, or 0 when the page cannot be shrunk (mark at area_end, or the
// trailing filler is not a FreeSpace).
781 size_t Page::ShrinkToHighWaterMark() {
782 // Shrink pages to high water mark. The water mark points either to a filler
783 // or the area_end.
784 HeapObject* filler = HeapObject::FromAddress(HighWaterMark());
785 if (filler->address() == area_end()) return 0;
786 CHECK(filler->IsFiller());
// Only a FreeSpace filler is large enough to shrink behind; smaller
// one/two-word fillers are left alone.
787 if (!filler->IsFreeSpace()) return 0;
788
789 #ifdef DEBUG
790 // Check that the filler is indeed the last filler on the page.
791 HeapObjectIterator it(this);
792 HeapObject* filler2 = nullptr;
793 for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
794 filler2 = HeapObject::FromAddress(obj->address() + obj->Size());
795 }
// NOTE(review): this debug-only early return makes DEBUG builds skip the
// shrink in cases where release builds would proceed (e.g. an iteration
// that yields no objects leaves filler2 == nullptr) — confirm the
// divergence is intended.
796 if (filler2 == nullptr || filler2->address() == area_end()) return 0;
797 DCHECK(filler2->IsFiller());
798 DCHECK_EQ(filler->address(), filler2->address());
799 #endif // DEBUG
800
// Keep at least FreeSpace::kSize bytes so a (shrunk) filler can still be
// written over the retained slack; round down to whole commit pages.
801 size_t unused = RoundDown(
802 static_cast<size_t>(area_end() - filler->address() - FreeSpace::kSize),
803 base::OS::CommitPageSize());
804 if (unused > 0) {
805 if (FLAG_trace_gc_verbose) {
806 PrintIsolate(heap()->isolate(), "Shrinking page %p: end %p -> %p\n",
807 reinterpret_cast<void*>(this),
808 reinterpret_cast<void*>(area_end()),
809 reinterpret_cast<void*>(area_end() - unused));
810 }
// Rewrite the filler to cover exactly the bytes that stay committed, then
// uncommit the unused tail via the memory allocator.
811 heap()->CreateFillerObjectAt(
812 filler->address(),
813 static_cast<int>(area_end() - filler->address() - unused),
814 ClearRecordedSlots::kNo);
815 heap()->memory_allocator()->ShrinkChunk(this, unused);
// Post-conditions: the filler still exists and now ends exactly at the
// (shrunk) area end.
816 CHECK(filler->IsFiller());
817 CHECK_EQ(filler->address() + filler->Size(), area_end());
818 }
819 return unused;
820 }
821
766 void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk, 822 void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk,
767 Address start_free) { 823 Address start_free) {
768 // We do not allow partial shrink for code. 824 // We do not allow partial shrink for code.
769 DCHECK(chunk->executable() == NOT_EXECUTABLE); 825 DCHECK(chunk->executable() == NOT_EXECUTABLE);
770 826
771 intptr_t size; 827 intptr_t size;
772 base::VirtualMemory* reservation = chunk->reserved_memory(); 828 base::VirtualMemory* reservation = chunk->reserved_memory();
773 DCHECK(reservation->IsReserved()); 829 DCHECK(reservation->IsReserved());
774 size = static_cast<intptr_t>(reservation->size()); 830 size = static_cast<intptr_t>(reservation->size());
775 831
(...skipping 451 matching lines...) Expand 10 before | Expand all | Expand 10 after
1227 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { 1283 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
1228 Address cur = obj->address(); 1284 Address cur = obj->address();
1229 Address next = cur + obj->Size(); 1285 Address next = cur + obj->Size();
1230 if ((cur <= addr) && (addr < next)) return obj; 1286 if ((cur <= addr) && (addr < next)) return obj;
1231 } 1287 }
1232 1288
1233 UNREACHABLE(); 1289 UNREACHABLE();
1234 return Smi::FromInt(0); 1290 return Smi::FromInt(0);
1235 } 1291 }
1236 1292
// Shrinks every page of this space to its high water mark, uncommitting the
// unused tails. Called during bootstrapping only (deserialization must not be
// complete), when all pages hold immortal immovable objects.
1293 void PagedSpace::ShrinkImmortalImmovablePages() {
1294 DCHECK(!heap()->deserialization_complete());
// Record the current allocation top as the high water mark, then drop the
// linear allocation area and free list so no allocation state points into
// memory about to be uncommitted.
1295 MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
1296 EmptyAllocationInfo();
1297 ResetFreeList();
1298
1299 for (Page* page : *this) {
// Pages created during bootstrapping are marked NEVER_EVACUATE.
1300 DCHECK(page->IsFlagSet(Page::NEVER_EVACUATE));
1301 size_t unused = page->ShrinkToHighWaterMark();
// Mirror the shrink in the space's accounting: capacity goes down and the
// bytes are tracked as uncommitted.
1302 accounting_stats_.DecreaseCapacity(static_cast<intptr_t>(unused));
1303 AccountUncommitted(unused);
1304 }
1305 }
1306
1237 bool PagedSpace::Expand() { 1307 bool PagedSpace::Expand() {
1238 int size = AreaSize(); 1308 const int size = AreaSize();
1239 if (snapshotable() && !HasPages()) {
1240 size = Snapshot::SizeOfFirstPage(heap()->isolate(), identity());
1241 }
1242 1309
1243 if (!heap()->CanExpandOldGeneration(size)) return false; 1310 if (!heap()->CanExpandOldGeneration(size)) return false;
1244 1311
1245 Page* p = heap()->memory_allocator()->AllocatePage(size, this, executable()); 1312 Page* p = heap()->memory_allocator()->AllocatePage(size, this, executable());
1246 if (p == nullptr) return false; 1313 if (p == nullptr) return false;
1247 1314
1248 AccountCommitted(static_cast<intptr_t>(p->size())); 1315 AccountCommitted(static_cast<intptr_t>(p->size()));
1249 1316
1250 // Pages created during bootstrapping may contain immortal immovable objects. 1317 // Pages created during bootstrapping may contain immortal immovable objects.
1251 if (!heap()->deserialization_complete()) p->MarkNeverEvacuate(); 1318 if (!heap()->deserialization_complete()) p->MarkNeverEvacuate();
(...skipping 76 matching lines...) Expand 10 before | Expand all | Expand 10 after
1328 SetTopAndLimit(NULL, NULL); 1395 SetTopAndLimit(NULL, NULL);
1329 Free(current_top, static_cast<int>(current_limit - current_top)); 1396 Free(current_top, static_cast<int>(current_limit - current_top));
1330 } 1397 }
1331 1398
1332 void PagedSpace::IncreaseCapacity(size_t bytes) { 1399 void PagedSpace::IncreaseCapacity(size_t bytes) {
1333 accounting_stats_.ExpandSpace(bytes); 1400 accounting_stats_.ExpandSpace(bytes);
1334 } 1401 }
1335 1402
1336 void PagedSpace::ReleasePage(Page* page) { 1403 void PagedSpace::ReleasePage(Page* page) {
1337 DCHECK_EQ(page->LiveBytes(), 0); 1404 DCHECK_EQ(page->LiveBytes(), 0);
1338 DCHECK_EQ(AreaSize(), page->area_size());
1339 DCHECK_EQ(page->owner(), this); 1405 DCHECK_EQ(page->owner(), this);
1340 1406
1341 free_list_.EvictFreeListItems(page); 1407 free_list_.EvictFreeListItems(page);
1342 DCHECK(!free_list_.ContainsPageFreeListItems(page)); 1408 DCHECK(!free_list_.ContainsPageFreeListItems(page));
1343 1409
1344 page->ReleaseBlackAreaEndMarkerMap(); 1410 page->ReleaseBlackAreaEndMarkerMap();
1345 1411
1346 if (Page::FromAllocationAreaAddress(allocation_info_.top()) == page) { 1412 if (Page::FromAllocationAreaAddress(allocation_info_.top()) == page) {
1347 allocation_info_.Reset(nullptr, nullptr); 1413 allocation_info_.Reset(nullptr, nullptr);
1348 } 1414 }
1349 1415
1350 // If page is still in a list, unlink it from that list. 1416 // If page is still in a list, unlink it from that list.
1351 if (page->next_chunk() != NULL) { 1417 if (page->next_chunk() != NULL) {
1352 DCHECK(page->prev_chunk() != NULL); 1418 DCHECK(page->prev_chunk() != NULL);
1353 page->Unlink(); 1419 page->Unlink();
1354 } 1420 }
1355 1421
1356 AccountUncommitted(static_cast<intptr_t>(page->size())); 1422 AccountUncommitted(static_cast<intptr_t>(page->size()));
1423 accounting_stats_.ShrinkSpace(page->area_size());
1357 heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page); 1424 heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
1358
1359 DCHECK(Capacity() > 0);
1360 accounting_stats_.ShrinkSpace(AreaSize());
1361 } 1425 }
1362 1426
1363 #ifdef DEBUG 1427 #ifdef DEBUG
1364 void PagedSpace::Print() {} 1428 void PagedSpace::Print() {}
1365 #endif 1429 #endif
1366 1430
1367 #ifdef VERIFY_HEAP 1431 #ifdef VERIFY_HEAP
1368 void PagedSpace::Verify(ObjectVisitor* visitor) { 1432 void PagedSpace::Verify(ObjectVisitor* visitor) {
1369 bool allocation_pointer_found_in_space = 1433 bool allocation_pointer_found_in_space =
1370 (allocation_info_.top() == allocation_info_.limit()); 1434 (allocation_info_.top() == allocation_info_.limit());
(...skipping 1794 matching lines...) Expand 10 before | Expand all | Expand 10 after
3165 object->ShortPrint(); 3229 object->ShortPrint();
3166 PrintF("\n"); 3230 PrintF("\n");
3167 } 3231 }
3168 printf(" --------------------------------------\n"); 3232 printf(" --------------------------------------\n");
3169 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); 3233 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
3170 } 3234 }
3171 3235
3172 #endif // DEBUG 3236 #endif // DEBUG
3173 } // namespace internal 3237 } // namespace internal
3174 } // namespace v8 3238 } // namespace v8
OLDNEW
« no previous file with comments | « src/heap/spaces.h ('k') | src/isolate.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698