OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/heap/spaces.h" | 5 #include "src/heap/spaces.h" |
6 | 6 |
7 #include <utility> | 7 #include <utility> |
8 | 8 |
9 #include "src/base/bits.h" | 9 #include "src/base/bits.h" |
10 #include "src/base/platform/platform.h" | 10 #include "src/base/platform/platform.h" |
(...skipping 599 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
610 | 610 |
611 void MemoryChunk::Unlink() { | 611 void MemoryChunk::Unlink() { |
612 MemoryChunk* next_element = next_chunk(); | 612 MemoryChunk* next_element = next_chunk(); |
613 MemoryChunk* prev_element = prev_chunk(); | 613 MemoryChunk* prev_element = prev_chunk(); |
614 next_element->set_prev_chunk(prev_element); | 614 next_element->set_prev_chunk(prev_element); |
615 prev_element->set_next_chunk(next_element); | 615 prev_element->set_next_chunk(next_element); |
616 set_prev_chunk(NULL); | 616 set_prev_chunk(NULL); |
617 set_next_chunk(NULL); | 617 set_next_chunk(NULL); |
618 } | 618 } |
619 | 619 |
620 void MemoryAllocator::ShrinkChunk(MemoryChunk* chunk, size_t bytes_to_shrink) { | |
621 DCHECK_GE(bytes_to_shrink, static_cast<size_t>(base::OS::CommitPageSize())); | |
622 DCHECK_EQ(0, bytes_to_shrink % base::OS::CommitPageSize()); | |
623 Address free_start = chunk->area_end_ - bytes_to_shrink; | |
624 // Don't adjust the size of the page. The area is just uncommitted but not | 
625 // released. | |
626 chunk->area_end_ -= bytes_to_shrink; | |
627 UncommitBlock(free_start, bytes_to_shrink); | |
628 if (chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) { | |
629 if (chunk->reservation_.IsReserved()) | |
630 chunk->reservation_.Guard(chunk->area_end_); | |
631 else | |
632 base::OS::Guard(chunk->area_end_, base::OS::CommitPageSize()); | |
633 } | |
634 } | |
635 | 620 |
636 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size, | 621 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size, |
637 intptr_t commit_area_size, | 622 intptr_t commit_area_size, |
638 Executability executable, | 623 Executability executable, |
639 Space* owner) { | 624 Space* owner) { |
640 DCHECK(commit_area_size <= reserve_area_size); | 625 DCHECK(commit_area_size <= reserve_area_size); |
641 | 626 |
642 size_t chunk_size; | 627 size_t chunk_size; |
643 Heap* heap = isolate_->heap(); | 628 Heap* heap = isolate_->heap(); |
644 Address base = NULL; | 629 Address base = NULL; |
(...skipping 127 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
772 return MemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end, | 757 return MemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end, |
773 executable, owner, &reservation); | 758 executable, owner, &reservation); |
774 } | 759 } |
775 | 760 |
776 | 761 |
777 void Page::ResetFreeListStatistics() { | 762 void Page::ResetFreeListStatistics() { |
778 wasted_memory_ = 0; | 763 wasted_memory_ = 0; |
779 available_in_free_list_ = 0; | 764 available_in_free_list_ = 0; |
780 } | 765 } |
781 | 766 |
782 size_t Page::ShrinkToHighWaterMark() { | |
783 // Shrink pages to high water mark. The water mark points either to a filler | |
784 // or the area_end. | |
785 HeapObject* filler = HeapObject::FromAddress(HighWaterMark()); | |
786 if (filler->address() == area_end()) return 0; | |
787 CHECK(filler->IsFiller()); | |
788 if (!filler->IsFreeSpace()) return 0; | |
789 | |
790 #ifdef DEBUG | |
791 // Check that the filler is indeed the last filler on the page. | 
792 HeapObjectIterator it(this); | |
793 HeapObject* filler2 = nullptr; | |
794 for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) { | |
795 filler2 = HeapObject::FromAddress(obj->address() + obj->Size()); | |
796 } | |
797 if (filler2 == nullptr || filler2->address() == area_end()) return 0; | |
798 DCHECK(filler2->IsFiller()); | |
799 DCHECK_EQ(filler->address(), filler2->address()); | |
800 #endif // DEBUG | |
801 | |
802 size_t unused = RoundDown( | |
803 static_cast<size_t>(area_end() - filler->address() - FreeSpace::kSize), | |
804 base::OS::CommitPageSize()); | |
805 if (unused > 0) { | |
806 if (FLAG_trace_gc_verbose) { | |
807 PrintIsolate(heap()->isolate(), "Shrinking page %p: end %p -> %p\n", | |
808 reinterpret_cast<void*>(this), | |
809 reinterpret_cast<void*>(area_end()), | |
810 reinterpret_cast<void*>(area_end() - unused)); | |
811 } | |
812 heap()->CreateFillerObjectAt( | |
813 filler->address(), | |
814 static_cast<int>(area_end() - filler->address() - unused), | |
815 ClearRecordedSlots::kNo); | |
816 heap()->memory_allocator()->ShrinkChunk(this, unused); | |
817 CHECK(filler->IsFiller()); | |
818 CHECK_EQ(filler->address() + filler->Size(), area_end()); | |
819 } | |
820 return unused; | |
821 } | |
822 | |
823 void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk, | 767 void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk, |
824 Address start_free) { | 768 Address start_free) { |
825 // We do not allow partial shrink for code. | 769 // We do not allow partial shrink for code. |
826 DCHECK(chunk->executable() == NOT_EXECUTABLE); | 770 DCHECK(chunk->executable() == NOT_EXECUTABLE); |
827 | 771 |
828 intptr_t size; | 772 intptr_t size; |
829 base::VirtualMemory* reservation = chunk->reserved_memory(); | 773 base::VirtualMemory* reservation = chunk->reserved_memory(); |
830 DCHECK(reservation->IsReserved()); | 774 DCHECK(reservation->IsReserved()); |
831 size = static_cast<intptr_t>(reservation->size()); | 775 size = static_cast<intptr_t>(reservation->size()); |
832 | 776 |
(...skipping 451 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1284 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { | 1228 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { |
1285 Address cur = obj->address(); | 1229 Address cur = obj->address(); |
1286 Address next = cur + obj->Size(); | 1230 Address next = cur + obj->Size(); |
1287 if ((cur <= addr) && (addr < next)) return obj; | 1231 if ((cur <= addr) && (addr < next)) return obj; |
1288 } | 1232 } |
1289 | 1233 |
1290 UNREACHABLE(); | 1234 UNREACHABLE(); |
1291 return Smi::FromInt(0); | 1235 return Smi::FromInt(0); |
1292 } | 1236 } |
1293 | 1237 |
1294 void PagedSpace::ShrinkImmortalImmovablePages() { | 1238 bool PagedSpace::Expand() { |
1295 DCHECK(!heap()->deserialization_complete()); | 1239 int size = AreaSize(); |
1296 MemoryChunk::UpdateHighWaterMark(allocation_info_.top()); | 1240 if (snapshotable() && !HasPages()) { |
1297 EmptyAllocationInfo(); | 1241 size = Snapshot::SizeOfFirstPage(heap()->isolate(), identity()); |
1298 ResetFreeList(); | 1242 } |
1299 | 1243 |
1300 for (Page* page : *this) { | 1244 if (!heap()->CanExpandOldGeneration(size)) return false; |
1301 DCHECK(page->IsFlagSet(Page::NEVER_EVACUATE)); | |
1302 size_t unused = page->ShrinkToHighWaterMark(); | |
1303 accounting_stats_.DecreaseCapacity(static_cast<intptr_t>(unused)); | |
1304 AccountUncommitted(unused); | |
1305 } | |
1306 } | |
1307 | 1245 |
1308 bool PagedSpace::Expand() { | |
1309 const int size = AreaSize(); | |
1310 if (!heap()->CanExpandOldGeneration(size)) return false; | |
1311 Page* p = heap()->memory_allocator()->AllocatePage(size, this, executable()); | 1246 Page* p = heap()->memory_allocator()->AllocatePage(size, this, executable()); |
1312 if (p == nullptr) return false; | 1247 if (p == nullptr) return false; |
| 1248 |
1313 AccountCommitted(static_cast<intptr_t>(p->size())); | 1249 AccountCommitted(static_cast<intptr_t>(p->size())); |
1314 | 1250 |
1315 // Pages created during bootstrapping may contain immortal immovable objects. | 1251 // Pages created during bootstrapping may contain immortal immovable objects. |
1316 if (!heap()->deserialization_complete()) p->MarkNeverEvacuate(); | 1252 if (!heap()->deserialization_complete()) p->MarkNeverEvacuate(); |
1317 | 1253 |
1318 DCHECK(Capacity() <= heap()->MaxOldGenerationSize()); | 1254 DCHECK(Capacity() <= heap()->MaxOldGenerationSize()); |
1319 | 1255 |
1320 p->InsertAfter(anchor_.prev_page()); | 1256 p->InsertAfter(anchor_.prev_page()); |
1321 | 1257 |
1322 return true; | 1258 return true; |
(...skipping 70 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1393 SetTopAndLimit(NULL, NULL); | 1329 SetTopAndLimit(NULL, NULL); |
1394 Free(current_top, static_cast<int>(current_limit - current_top)); | 1330 Free(current_top, static_cast<int>(current_limit - current_top)); |
1395 } | 1331 } |
1396 | 1332 |
1397 void PagedSpace::IncreaseCapacity(size_t bytes) { | 1333 void PagedSpace::IncreaseCapacity(size_t bytes) { |
1398 accounting_stats_.ExpandSpace(bytes); | 1334 accounting_stats_.ExpandSpace(bytes); |
1399 } | 1335 } |
1400 | 1336 |
1401 void PagedSpace::ReleasePage(Page* page) { | 1337 void PagedSpace::ReleasePage(Page* page) { |
1402 DCHECK_EQ(page->LiveBytes(), 0); | 1338 DCHECK_EQ(page->LiveBytes(), 0); |
| 1339 DCHECK_EQ(AreaSize(), page->area_size()); |
1403 DCHECK_EQ(page->owner(), this); | 1340 DCHECK_EQ(page->owner(), this); |
1404 | 1341 |
1405 free_list_.EvictFreeListItems(page); | 1342 free_list_.EvictFreeListItems(page); |
1406 DCHECK(!free_list_.ContainsPageFreeListItems(page)); | 1343 DCHECK(!free_list_.ContainsPageFreeListItems(page)); |
1407 | 1344 |
1408 page->ReleaseBlackAreaEndMarkerMap(); | 1345 page->ReleaseBlackAreaEndMarkerMap(); |
1409 | 1346 |
1410 if (Page::FromAllocationAreaAddress(allocation_info_.top()) == page) { | 1347 if (Page::FromAllocationAreaAddress(allocation_info_.top()) == page) { |
1411 allocation_info_.Reset(nullptr, nullptr); | 1348 allocation_info_.Reset(nullptr, nullptr); |
1412 } | 1349 } |
1413 | 1350 |
1414 // If page is still in a list, unlink it from that list. | 1351 // If page is still in a list, unlink it from that list. |
1415 if (page->next_chunk() != NULL) { | 1352 if (page->next_chunk() != NULL) { |
1416 DCHECK(page->prev_chunk() != NULL); | 1353 DCHECK(page->prev_chunk() != NULL); |
1417 page->Unlink(); | 1354 page->Unlink(); |
1418 } | 1355 } |
1419 | 1356 |
1420 AccountUncommitted(static_cast<intptr_t>(page->size())); | 1357 AccountUncommitted(static_cast<intptr_t>(page->size())); |
1421 accounting_stats_.ShrinkSpace(page->area_size()); | |
1422 heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page); | 1358 heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page); |
| 1359 |
| 1360 DCHECK(Capacity() > 0); |
| 1361 accounting_stats_.ShrinkSpace(AreaSize()); |
1423 } | 1362 } |
1424 | 1363 |
1425 #ifdef DEBUG | 1364 #ifdef DEBUG |
1426 void PagedSpace::Print() {} | 1365 void PagedSpace::Print() {} |
1427 #endif | 1366 #endif |
1428 | 1367 |
1429 #ifdef VERIFY_HEAP | 1368 #ifdef VERIFY_HEAP |
1430 void PagedSpace::Verify(ObjectVisitor* visitor) { | 1369 void PagedSpace::Verify(ObjectVisitor* visitor) { |
1431 bool allocation_pointer_found_in_space = | 1370 bool allocation_pointer_found_in_space = |
1432 (allocation_info_.top() == allocation_info_.limit()); | 1371 (allocation_info_.top() == allocation_info_.limit()); |
(...skipping 1794 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3227 object->ShortPrint(); | 3166 object->ShortPrint(); |
3228 PrintF("\n"); | 3167 PrintF("\n"); |
3229 } | 3168 } |
3230 printf(" --------------------------------------\n"); | 3169 printf(" --------------------------------------\n"); |
3231 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); | 3170 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); |
3232 } | 3171 } |
3233 | 3172 |
3234 #endif // DEBUG | 3173 #endif // DEBUG |
3235 } // namespace internal | 3174 } // namespace internal |
3236 } // namespace v8 | 3175 } // namespace v8 |
OLD | NEW |