OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/heap/spaces.h" | 5 #include "src/heap/spaces.h" |
6 | 6 |
7 #include "src/base/bits.h" | 7 #include "src/base/bits.h" |
8 #include "src/base/platform/platform.h" | 8 #include "src/base/platform/platform.h" |
9 #include "src/base/platform/semaphore.h" | 9 #include "src/base/platform/semaphore.h" |
10 #include "src/full-codegen/full-codegen.h" | 10 #include "src/full-codegen/full-codegen.h" |
(...skipping 578 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
589 | 589 |
590 void MemoryChunk::Unlink() { | 590 void MemoryChunk::Unlink() { |
591 MemoryChunk* next_element = next_chunk(); | 591 MemoryChunk* next_element = next_chunk(); |
592 MemoryChunk* prev_element = prev_chunk(); | 592 MemoryChunk* prev_element = prev_chunk(); |
593 next_element->set_prev_chunk(prev_element); | 593 next_element->set_prev_chunk(prev_element); |
594 prev_element->set_next_chunk(next_element); | 594 prev_element->set_next_chunk(next_element); |
595 set_prev_chunk(NULL); | 595 set_prev_chunk(NULL); |
596 set_next_chunk(NULL); | 596 set_next_chunk(NULL); |
597 } | 597 } |
598 | 598 |
599 void MemoryAllocator::ShrinkChunk(MemoryChunk* chunk, size_t bytes_to_shrink) { | |
600 DCHECK_GE(bytes_to_shrink, static_cast<size_t>(base::OS::CommitPageSize())); | |
601 DCHECK_EQ(0, bytes_to_shrink % base::OS::CommitPageSize()); | |
602 Address free_start = chunk->area_end_ - bytes_to_shrink; | |
603 // Don't adjust the size of the page. The area is just uncommitted but not | 
604 // released. | |
605 chunk->area_end_ -= bytes_to_shrink; | |
606 UncommitBlock(free_start, bytes_to_shrink); | |
607 if (chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) { | |
608 if (chunk->reservation_.IsReserved()) | |
609 chunk->reservation_.Guard(chunk->area_end_); | |
610 else | |
611 base::OS::Guard(chunk->area_end_, base::OS::CommitPageSize()); | |
612 } | |
613 } | |
614 | 599 |
615 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size, | 600 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size, |
616 intptr_t commit_area_size, | 601 intptr_t commit_area_size, |
617 Executability executable, | 602 Executability executable, |
618 Space* owner) { | 603 Space* owner) { |
619 DCHECK(commit_area_size <= reserve_area_size); | 604 DCHECK(commit_area_size <= reserve_area_size); |
620 | 605 |
621 size_t chunk_size; | 606 size_t chunk_size; |
622 Heap* heap = isolate_->heap(); | 607 Heap* heap = isolate_->heap(); |
623 Address base = NULL; | 608 Address base = NULL; |
(...skipping 127 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
751 return MemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end, | 736 return MemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end, |
752 executable, owner, &reservation); | 737 executable, owner, &reservation); |
753 } | 738 } |
754 | 739 |
755 | 740 |
756 void Page::ResetFreeListStatistics() { | 741 void Page::ResetFreeListStatistics() { |
757 wasted_memory_ = 0; | 742 wasted_memory_ = 0; |
758 available_in_free_list_ = 0; | 743 available_in_free_list_ = 0; |
759 } | 744 } |
760 | 745 |
761 size_t Page::ShrinkToHighWaterMark() { | |
762 // Shrink pages to high water mark. The water mark points either to a filler | |
763 // or the area_end. | |
764 HeapObject* filler = HeapObject::FromAddress(HighWaterMark()); | |
765 if (filler->address() == area_end()) return 0; | |
766 CHECK(filler->IsFiller()); | |
767 if (!filler->IsFreeSpace()) return 0; | |
768 | |
769 #ifdef DEBUG | |
770 // Check that the filler is indeed the last filler on the page. | 
771 HeapObjectIterator it(this); | |
772 HeapObject* filler2 = nullptr; | |
773 for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) { | |
774 filler2 = HeapObject::FromAddress(obj->address() + obj->Size()); | |
775 } | |
776 if (filler2 == nullptr || filler2->address() == area_end()) return 0; | |
777 DCHECK(filler2->IsFiller()); | |
778 DCHECK_EQ(filler->address(), filler2->address()); | |
779 #endif // DEBUG | |
780 | |
781 size_t unused = RoundDown( | |
782 static_cast<size_t>(area_end() - filler->address() - FreeSpace::kSize), | |
783 base::OS::CommitPageSize()); | |
784 if (unused > 0) { | |
785 if (FLAG_trace_gc_verbose) { | |
786 PrintIsolate(heap()->isolate(), "Shrinking page %p: end %p -> %p\n", | |
787 reinterpret_cast<void*>(this), | |
788 reinterpret_cast<void*>(area_end()), | |
789 reinterpret_cast<void*>(area_end() - unused)); | |
790 } | |
791 heap()->CreateFillerObjectAt( | |
792 filler->address(), | |
793 static_cast<int>(area_end() - filler->address() - unused), | |
794 ClearRecordedSlots::kNo); | |
795 heap()->memory_allocator()->ShrinkChunk(this, unused); | |
796 CHECK(filler->IsFiller()); | |
797 CHECK_EQ(filler->address() + filler->Size(), area_end()); | |
798 } | |
799 return unused; | |
800 } | |
801 | |
802 void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk, | 746 void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk, |
803 Address start_free) { | 747 Address start_free) { |
804 // We do not allow partial shrink for code. | 748 // We do not allow partial shrink for code. |
805 DCHECK(chunk->executable() == NOT_EXECUTABLE); | 749 DCHECK(chunk->executable() == NOT_EXECUTABLE); |
806 | 750 |
807 intptr_t size; | 751 intptr_t size; |
808 base::VirtualMemory* reservation = chunk->reserved_memory(); | 752 base::VirtualMemory* reservation = chunk->reserved_memory(); |
809 DCHECK(reservation->IsReserved()); | 753 DCHECK(reservation->IsReserved()); |
810 size = static_cast<intptr_t>(reservation->size()); | 754 size = static_cast<intptr_t>(reservation->size()); |
811 | 755 |
(...skipping 451 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1263 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { | 1207 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { |
1264 Address cur = obj->address(); | 1208 Address cur = obj->address(); |
1265 Address next = cur + obj->Size(); | 1209 Address next = cur + obj->Size(); |
1266 if ((cur <= addr) && (addr < next)) return obj; | 1210 if ((cur <= addr) && (addr < next)) return obj; |
1267 } | 1211 } |
1268 | 1212 |
1269 UNREACHABLE(); | 1213 UNREACHABLE(); |
1270 return Smi::FromInt(0); | 1214 return Smi::FromInt(0); |
1271 } | 1215 } |
1272 | 1216 |
1273 void PagedSpace::ShrinkImmortalImmovablePages() { | 1217 bool PagedSpace::Expand() { |
1274 DCHECK(!heap()->deserialization_complete()); | 1218 int size = AreaSize(); |
1275 MemoryChunk::UpdateHighWaterMark(allocation_info_.top()); | 1219 if (snapshotable() && !HasPages()) { |
1276 EmptyAllocationInfo(); | 1220 size = Snapshot::SizeOfFirstPage(heap()->isolate(), identity()); |
1277 ResetFreeList(); | 1221 } |
1278 | 1222 |
1279 for (Page* page : *this) { | 1223 if (!heap()->CanExpandOldGeneration(size)) return false; |
1280 DCHECK(page->IsFlagSet(Page::NEVER_EVACUATE)); | |
1281 size_t unused = page->ShrinkToHighWaterMark(); | |
1282 accounting_stats_.DecreaseCapacity(static_cast<intptr_t>(unused)); | |
1283 AccountUncommitted(unused); | |
1284 } | |
1285 } | |
1286 | 1224 |
1287 bool PagedSpace::Expand() { | |
1288 const int size = AreaSize(); | |
1289 if (!heap()->CanExpandOldGeneration(size)) return false; | |
1290 Page* p = heap()->memory_allocator()->AllocatePage(size, this, executable()); | 1225 Page* p = heap()->memory_allocator()->AllocatePage(size, this, executable()); |
1291 if (p == nullptr) return false; | 1226 if (p == nullptr) return false; |
| 1227 |
1292 AccountCommitted(static_cast<intptr_t>(p->size())); | 1228 AccountCommitted(static_cast<intptr_t>(p->size())); |
1293 | 1229 |
1294 // Pages created during bootstrapping may contain immortal immovable objects. | 1230 // Pages created during bootstrapping may contain immortal immovable objects. |
1295 if (!heap()->deserialization_complete()) p->MarkNeverEvacuate(); | 1231 if (!heap()->deserialization_complete()) p->MarkNeverEvacuate(); |
1296 | 1232 |
1297 DCHECK(Capacity() <= heap()->MaxOldGenerationSize()); | 1233 DCHECK(Capacity() <= heap()->MaxOldGenerationSize()); |
1298 | 1234 |
1299 p->InsertAfter(anchor_.prev_page()); | 1235 p->InsertAfter(anchor_.prev_page()); |
1300 | 1236 |
1301 return true; | 1237 return true; |
(...skipping 70 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1372 SetTopAndLimit(NULL, NULL); | 1308 SetTopAndLimit(NULL, NULL); |
1373 Free(current_top, static_cast<int>(current_limit - current_top)); | 1309 Free(current_top, static_cast<int>(current_limit - current_top)); |
1374 } | 1310 } |
1375 | 1311 |
1376 void PagedSpace::IncreaseCapacity(int size) { | 1312 void PagedSpace::IncreaseCapacity(int size) { |
1377 accounting_stats_.ExpandSpace(size); | 1313 accounting_stats_.ExpandSpace(size); |
1378 } | 1314 } |
1379 | 1315 |
1380 void PagedSpace::ReleasePage(Page* page) { | 1316 void PagedSpace::ReleasePage(Page* page) { |
1381 DCHECK_EQ(page->LiveBytes(), 0); | 1317 DCHECK_EQ(page->LiveBytes(), 0); |
| 1318 DCHECK_EQ(AreaSize(), page->area_size()); |
1382 DCHECK_EQ(page->owner(), this); | 1319 DCHECK_EQ(page->owner(), this); |
1383 | 1320 |
1384 free_list_.EvictFreeListItems(page); | 1321 free_list_.EvictFreeListItems(page); |
1385 DCHECK(!free_list_.ContainsPageFreeListItems(page)); | 1322 DCHECK(!free_list_.ContainsPageFreeListItems(page)); |
1386 | 1323 |
1387 page->ReleaseBlackAreaEndMarkerMap(); | 1324 page->ReleaseBlackAreaEndMarkerMap(); |
1388 | 1325 |
1389 if (Page::FromAllocationAreaAddress(allocation_info_.top()) == page) { | 1326 if (Page::FromAllocationAreaAddress(allocation_info_.top()) == page) { |
1390 allocation_info_.Reset(nullptr, nullptr); | 1327 allocation_info_.Reset(nullptr, nullptr); |
1391 } | 1328 } |
1392 | 1329 |
1393 // If page is still in a list, unlink it from that list. | 1330 // If page is still in a list, unlink it from that list. |
1394 if (page->next_chunk() != NULL) { | 1331 if (page->next_chunk() != NULL) { |
1395 DCHECK(page->prev_chunk() != NULL); | 1332 DCHECK(page->prev_chunk() != NULL); |
1396 page->Unlink(); | 1333 page->Unlink(); |
1397 } | 1334 } |
1398 | 1335 |
1399 AccountUncommitted(static_cast<intptr_t>(page->size())); | 1336 AccountUncommitted(static_cast<intptr_t>(page->size())); |
1400 accounting_stats_.ShrinkSpace(page->area_size()); | |
1401 heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page); | 1337 heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page); |
| 1338 |
| 1339 DCHECK(Capacity() > 0); |
| 1340 accounting_stats_.ShrinkSpace(AreaSize()); |
1402 } | 1341 } |
1403 | 1342 |
1404 #ifdef DEBUG | 1343 #ifdef DEBUG |
1405 void PagedSpace::Print() {} | 1344 void PagedSpace::Print() {} |
1406 #endif | 1345 #endif |
1407 | 1346 |
1408 #ifdef VERIFY_HEAP | 1347 #ifdef VERIFY_HEAP |
1409 void PagedSpace::Verify(ObjectVisitor* visitor) { | 1348 void PagedSpace::Verify(ObjectVisitor* visitor) { |
1410 bool allocation_pointer_found_in_space = | 1349 bool allocation_pointer_found_in_space = |
1411 (allocation_info_.top() == allocation_info_.limit()); | 1350 (allocation_info_.top() == allocation_info_.limit()); |
(...skipping 1794 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3206 object->ShortPrint(); | 3145 object->ShortPrint(); |
3207 PrintF("\n"); | 3146 PrintF("\n"); |
3208 } | 3147 } |
3209 printf(" --------------------------------------\n"); | 3148 printf(" --------------------------------------\n"); |
3210 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); | 3149 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); |
3211 } | 3150 } |
3212 | 3151 |
3213 #endif // DEBUG | 3152 #endif // DEBUG |
3214 } // namespace internal | 3153 } // namespace internal |
3215 } // namespace v8 | 3154 } // namespace v8 |
OLD | NEW |