Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(16)

Side by Side Diff: src/heap/spaces.cc

Issue 2278653003: Reland of "[heap] Switch to 500k pages" (Closed)
Patch Set: Rebase Created 4 years, 3 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « src/heap/spaces.h ('k') | src/isolate.cc » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2011 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "src/heap/spaces.h" 5 #include "src/heap/spaces.h"
6 6
7 #include <utility> 7 #include <utility>
8 8
9 #include "src/base/bits.h" 9 #include "src/base/bits.h"
10 #include "src/base/platform/platform.h" 10 #include "src/base/platform/platform.h"
(...skipping 599 matching lines...) Expand 10 before | Expand all | Expand 10 after
610 610
611 void MemoryChunk::Unlink() { 611 void MemoryChunk::Unlink() {
612 MemoryChunk* next_element = next_chunk(); 612 MemoryChunk* next_element = next_chunk();
613 MemoryChunk* prev_element = prev_chunk(); 613 MemoryChunk* prev_element = prev_chunk();
614 next_element->set_prev_chunk(prev_element); 614 next_element->set_prev_chunk(prev_element);
615 prev_element->set_next_chunk(next_element); 615 prev_element->set_next_chunk(next_element);
616 set_prev_chunk(NULL); 616 set_prev_chunk(NULL);
617 set_next_chunk(NULL); 617 set_next_chunk(NULL);
618 } 618 }
619 619
620 void MemoryAllocator::ShrinkChunk(MemoryChunk* chunk, size_t bytes_to_shrink) {
621 DCHECK_GE(bytes_to_shrink, static_cast<size_t>(base::OS::CommitPageSize()));
622 DCHECK_EQ(0, bytes_to_shrink % base::OS::CommitPageSize());
623 Address free_start = chunk->area_end_ - bytes_to_shrink;
624 // Don't adjust the size of the page. The area is just uncomitted but not
625 // released.
626 chunk->area_end_ -= bytes_to_shrink;
627 UncommitBlock(free_start, bytes_to_shrink);
628 if (chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
629 if (chunk->reservation_.IsReserved())
630 chunk->reservation_.Guard(chunk->area_end_);
631 else
632 base::OS::Guard(chunk->area_end_, base::OS::CommitPageSize());
633 }
634 }
620 635
621 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size, 636 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
622 intptr_t commit_area_size, 637 intptr_t commit_area_size,
623 Executability executable, 638 Executability executable,
624 Space* owner) { 639 Space* owner) {
625 DCHECK(commit_area_size <= reserve_area_size); 640 DCHECK(commit_area_size <= reserve_area_size);
626 641
627 size_t chunk_size; 642 size_t chunk_size;
628 Heap* heap = isolate_->heap(); 643 Heap* heap = isolate_->heap();
629 Address base = NULL; 644 Address base = NULL;
(...skipping 127 matching lines...) Expand 10 before | Expand all | Expand 10 after
757 return MemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end, 772 return MemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end,
758 executable, owner, &reservation); 773 executable, owner, &reservation);
759 } 774 }
760 775
761 776
762 void Page::ResetFreeListStatistics() { 777 void Page::ResetFreeListStatistics() {
763 wasted_memory_ = 0; 778 wasted_memory_ = 0;
764 available_in_free_list_ = 0; 779 available_in_free_list_ = 0;
765 } 780 }
766 781
// Uncommits the unused tail of this page above the high water mark and
// returns the number of bytes released (0 when nothing could be shrunk).
size_t Page::ShrinkToHighWaterMark() {
  // Shrink pages to high water mark. The water mark points either to a filler
  // or the area_end.
  HeapObject* filler = HeapObject::FromAddress(HighWaterMark());
  if (filler->address() == area_end()) return 0;
  CHECK(filler->IsFiller());
  // Only a FreeSpace filler is large enough to survive the shrink with a
  // valid header; bail out for one/two-word fillers.
  if (!filler->IsFreeSpace()) return 0;

#ifdef DEBUG
  // Check that the filler is indeed the last filler on the page.
  HeapObjectIterator it(this);
  HeapObject* filler2 = nullptr;
  for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
    filler2 = HeapObject::FromAddress(obj->address() + obj->Size());
  }
  // NOTE(review): this early return only exists in DEBUG builds; release
  // builds fall through unconditionally — confirm the divergence is intended.
  if (filler2 == nullptr || filler2->address() == area_end()) return 0;
  DCHECK(filler2->IsFiller());
  DCHECK_EQ(filler->address(), filler2->address());
#endif  // DEBUG

  // Keep FreeSpace::kSize bytes so the remaining filler is still a valid
  // FreeSpace object, and round down to whole commit pages.
  size_t unused = RoundDown(
      static_cast<size_t>(area_end() - filler->address() - FreeSpace::kSize),
      base::OS::CommitPageSize());
  if (unused > 0) {
    if (FLAG_trace_gc_verbose) {
      PrintIsolate(heap()->isolate(), "Shrinking page %p: end %p -> %p\n",
                   reinterpret_cast<void*>(this),
                   reinterpret_cast<void*>(area_end()),
                   reinterpret_cast<void*>(area_end() - unused));
    }
    // Re-create the filler over the (now smaller) unused area before the
    // chunk's area_end_ moves, then uncommit the trailing pages.
    heap()->CreateFillerObjectAt(
        filler->address(),
        static_cast<int>(area_end() - filler->address() - unused),
        ClearRecordedSlots::kNo);
    heap()->memory_allocator()->ShrinkChunk(this, unused);
    CHECK(filler->IsFiller());
    CHECK_EQ(filler->address() + filler->Size(), area_end());
  }
  return unused;
}
822
767 void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk, 823 void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk,
768 Address start_free) { 824 Address start_free) {
769 // We do not allow partial shrink for code. 825 // We do not allow partial shrink for code.
770 DCHECK(chunk->executable() == NOT_EXECUTABLE); 826 DCHECK(chunk->executable() == NOT_EXECUTABLE);
771 827
772 intptr_t size; 828 intptr_t size;
773 base::VirtualMemory* reservation = chunk->reserved_memory(); 829 base::VirtualMemory* reservation = chunk->reserved_memory();
774 DCHECK(reservation->IsReserved()); 830 DCHECK(reservation->IsReserved());
775 size = static_cast<intptr_t>(reservation->size()); 831 size = static_cast<intptr_t>(reservation->size());
776 832
(...skipping 451 matching lines...) Expand 10 before | Expand all | Expand 10 after
1228 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { 1284 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
1229 Address cur = obj->address(); 1285 Address cur = obj->address();
1230 Address next = cur + obj->Size(); 1286 Address next = cur + obj->Size();
1231 if ((cur <= addr) && (addr < next)) return obj; 1287 if ((cur <= addr) && (addr < next)) return obj;
1232 } 1288 }
1233 1289
1234 UNREACHABLE(); 1290 UNREACHABLE();
1235 return Smi::FromInt(0); 1291 return Smi::FromInt(0);
1236 } 1292 }
1237 1293
1294 void PagedSpace::ShrinkImmortalImmovablePages() {
1295 DCHECK(!heap()->deserialization_complete());
1296 MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
1297 EmptyAllocationInfo();
1298 ResetFreeList();
1299
1300 for (Page* page : *this) {
1301 DCHECK(page->IsFlagSet(Page::NEVER_EVACUATE));
1302 size_t unused = page->ShrinkToHighWaterMark();
1303 accounting_stats_.DecreaseCapacity(static_cast<intptr_t>(unused));
1304 AccountUncommitted(unused);
1305 }
1306 }
1307
1238 bool PagedSpace::Expand() { 1308 bool PagedSpace::Expand() {
1239 int size = AreaSize(); 1309 const int size = AreaSize();
1240 if (snapshotable() && !HasPages()) {
1241 size = Snapshot::SizeOfFirstPage(heap()->isolate(), identity());
1242 }
1243
1244 if (!heap()->CanExpandOldGeneration(size)) return false; 1310 if (!heap()->CanExpandOldGeneration(size)) return false;
1245
1246 Page* p = heap()->memory_allocator()->AllocatePage(size, this, executable()); 1311 Page* p = heap()->memory_allocator()->AllocatePage(size, this, executable());
1247 if (p == nullptr) return false; 1312 if (p == nullptr) return false;
1248
1249 AccountCommitted(static_cast<intptr_t>(p->size())); 1313 AccountCommitted(static_cast<intptr_t>(p->size()));
1250 1314
1251 // Pages created during bootstrapping may contain immortal immovable objects. 1315 // Pages created during bootstrapping may contain immortal immovable objects.
1252 if (!heap()->deserialization_complete()) p->MarkNeverEvacuate(); 1316 if (!heap()->deserialization_complete()) p->MarkNeverEvacuate();
1253 1317
1254 DCHECK(Capacity() <= heap()->MaxOldGenerationSize()); 1318 DCHECK(Capacity() <= heap()->MaxOldGenerationSize());
1255 1319
1256 p->InsertAfter(anchor_.prev_page()); 1320 p->InsertAfter(anchor_.prev_page());
1257 1321
1258 return true; 1322 return true;
(...skipping 70 matching lines...) Expand 10 before | Expand all | Expand 10 after
1329 SetTopAndLimit(NULL, NULL); 1393 SetTopAndLimit(NULL, NULL);
1330 Free(current_top, static_cast<int>(current_limit - current_top)); 1394 Free(current_top, static_cast<int>(current_limit - current_top));
1331 } 1395 }
1332 1396
1333 void PagedSpace::IncreaseCapacity(size_t bytes) { 1397 void PagedSpace::IncreaseCapacity(size_t bytes) {
1334 accounting_stats_.ExpandSpace(bytes); 1398 accounting_stats_.ExpandSpace(bytes);
1335 } 1399 }
1336 1400
// Returns an empty page to the memory allocator, fixing up the free list,
// the linear allocation area, the page list, and the accounting stats.
void PagedSpace::ReleasePage(Page* page) {
  DCHECK_EQ(page->LiveBytes(), 0);
  DCHECK_EQ(page->owner(), this);

  free_list_.EvictFreeListItems(page);
  DCHECK(!free_list_.ContainsPageFreeListItems(page));

  page->ReleaseBlackAreaEndMarkerMap();

  // Drop the linear allocation area if it lives on the page being released.
  if (Page::FromAllocationAreaAddress(allocation_info_.top()) == page) {
    allocation_info_.Reset(nullptr, nullptr);
  }

  // If page is still in a list, unlink it from that list.
  if (page->next_chunk() != NULL) {
    DCHECK(page->prev_chunk() != NULL);
    page->Unlink();
  }

  // Accounting uses the page's own sizes (pages may have been shrunk and so
  // differ from AreaSize()); adjust before handing the page to the allocator.
  AccountUncommitted(static_cast<intptr_t>(page->size()));
  accounting_stats_.ShrinkSpace(page->area_size());
  heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
}
1363 1424
1364 #ifdef DEBUG 1425 #ifdef DEBUG
1365 void PagedSpace::Print() {} 1426 void PagedSpace::Print() {}
1366 #endif 1427 #endif
1367 1428
1368 #ifdef VERIFY_HEAP 1429 #ifdef VERIFY_HEAP
1369 void PagedSpace::Verify(ObjectVisitor* visitor) { 1430 void PagedSpace::Verify(ObjectVisitor* visitor) {
1370 bool allocation_pointer_found_in_space = 1431 bool allocation_pointer_found_in_space =
1371 (allocation_info_.top() == allocation_info_.limit()); 1432 (allocation_info_.top() == allocation_info_.limit());
(...skipping 1794 matching lines...) Expand 10 before | Expand all | Expand 10 after
3166 object->ShortPrint(); 3227 object->ShortPrint();
3167 PrintF("\n"); 3228 PrintF("\n");
3168 } 3229 }
3169 printf(" --------------------------------------\n"); 3230 printf(" --------------------------------------\n");
3170 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); 3231 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
3171 } 3232 }
3172 3233
3173 #endif // DEBUG 3234 #endif // DEBUG
3174 } // namespace internal 3235 } // namespace internal
3175 } // namespace v8 3236 } // namespace v8
OLDNEW
« no previous file with comments | « src/heap/spaces.h ('k') | src/isolate.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698