OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/heap/spaces.h" | 5 #include "src/heap/spaces.h" |
6 | 6 |
7 #include "src/base/bits.h" | 7 #include "src/base/bits.h" |
8 #include "src/base/platform/platform.h" | 8 #include "src/base/platform/platform.h" |
9 #include "src/base/platform/semaphore.h" | 9 #include "src/base/platform/semaphore.h" |
10 #include "src/full-codegen/full-codegen.h" | 10 #include "src/full-codegen/full-codegen.h" |
(...skipping 578 matching lines...) Loading... |
589 | 589 |
590 void MemoryChunk::Unlink() { | 590 void MemoryChunk::Unlink() { |
591 MemoryChunk* next_element = next_chunk(); | 591 MemoryChunk* next_element = next_chunk(); |
592 MemoryChunk* prev_element = prev_chunk(); | 592 MemoryChunk* prev_element = prev_chunk(); |
593 next_element->set_prev_chunk(prev_element); | 593 next_element->set_prev_chunk(prev_element); |
594 prev_element->set_next_chunk(next_element); | 594 prev_element->set_next_chunk(next_element); |
595 set_prev_chunk(NULL); | 595 set_prev_chunk(NULL); |
596 set_next_chunk(NULL); | 596 set_next_chunk(NULL); |
597 } | 597 } |
598 | 598 |
// Uncommits |bytes_to_shrink| bytes from the end of |chunk|'s usable area.
// The memory is returned to the OS via UncommitBlock(), but the reservation
// and the chunk's size are left untouched; only |area_end_| is moved down.
// |bytes_to_shrink| must be a positive multiple of the OS commit page size.
void MemoryAllocator::ShrinkChunk(MemoryChunk* chunk, size_t bytes_to_shrink) {
  DCHECK_GE(bytes_to_shrink, static_cast<size_t>(base::OS::CommitPageSize()));
  DCHECK_EQ(0, bytes_to_shrink % base::OS::CommitPageSize());
  Address free_start = chunk->area_end_ - bytes_to_shrink;
  // Don't adjust the size of the page. The area is just uncommitted but not
  // released.
  chunk->area_end_ -= bytes_to_shrink;
  UncommitBlock(free_start, bytes_to_shrink);
  if (chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
    // Executable chunks keep a guard region after the usable area;
    // re-establish it at the new, lower area end.
    if (chunk->reservation_.IsReserved())
      chunk->reservation_.Guard(chunk->area_end_);
    else
      base::OS::Guard(chunk->area_end_, base::OS::CommitPageSize());
  }
}
599 | 614 |
600 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size, | 615 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size, |
601 intptr_t commit_area_size, | 616 intptr_t commit_area_size, |
602 Executability executable, | 617 Executability executable, |
603 Space* owner) { | 618 Space* owner) { |
604 DCHECK(commit_area_size <= reserve_area_size); | 619 DCHECK(commit_area_size <= reserve_area_size); |
605 | 620 |
606 size_t chunk_size; | 621 size_t chunk_size; |
607 Heap* heap = isolate_->heap(); | 622 Heap* heap = isolate_->heap(); |
608 Address base = NULL; | 623 Address base = NULL; |
(...skipping 127 matching lines...) Loading... |
736 return MemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end, | 751 return MemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end, |
737 executable, owner, &reservation); | 752 executable, owner, &reservation); |
738 } | 753 } |
739 | 754 |
740 | 755 |
741 void Page::ResetFreeListStatistics() { | 756 void Page::ResetFreeListStatistics() { |
742 wasted_memory_ = 0; | 757 wasted_memory_ = 0; |
743 available_in_free_list_ = 0; | 758 available_in_free_list_ = 0; |
744 } | 759 } |
745 | 760 |
// Shrinks the committed area of this page down to the high water mark (the
// highest address ever handed out for allocation on this page). Returns the
// number of bytes uncommitted, or 0 if the page could not be shrunk.
size_t Page::ShrinkToHighWaterMark() {
  // Shrink pages to high water mark. The water mark points either to a filler
  // or the area_end.
  HeapObject* filler = HeapObject::FromAddress(HighWaterMark());
  if (filler->address() == area_end()) return 0;
  CHECK(filler->IsFiller());
  // Only a FreeSpace filler can be shrunk; smaller filler kinds cannot be
  // re-written after the tail is carved off.
  if (!filler->IsFreeSpace()) return 0;

#ifdef DEBUG
  // Check that the filler is indeed the last filler on the page.
  HeapObjectIterator it(this);
  HeapObject* filler2 = nullptr;
  for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
    filler2 = HeapObject::FromAddress(obj->address() + obj->Size());
  }
  // NOTE(review): these early returns exist only in DEBUG builds, so debug
  // and release builds can return different values (and shrink differently)
  // here -- confirm this divergence is intended.
  if (filler2 == nullptr || filler2->address() == area_end()) return 0;
  DCHECK(filler2->IsFiller());
  DCHECK_EQ(filler->address(), filler2->address());
#endif  // DEBUG

  // Keep at least FreeSpace::kSize bytes committed so the remaining filler
  // can still be written as a FreeSpace object; round down to whole commit
  // pages since ShrinkChunk operates at commit-page granularity.
  size_t unused = RoundDown(
      static_cast<size_t>(area_end() - filler->address() - FreeSpace::kSize),
      base::OS::CommitPageSize());
  if (unused > 0) {
    if (FLAG_trace_gc_verbose) {
      PrintIsolate(heap()->isolate(), "Shrinking page %p: end %p -> %p\n",
                   reinterpret_cast<void*>(this),
                   reinterpret_cast<void*>(area_end()),
                   reinterpret_cast<void*>(area_end() - unused));
    }
    // First re-write the filler so it only covers the part that stays
    // committed...
    heap()->CreateFillerObjectAt(
        filler->address(),
        static_cast<int>(area_end() - filler->address() - unused),
        ClearRecordedSlots::kNo);
    // ...then uncommit the now-unused tail of the page.
    heap()->memory_allocator()->ShrinkChunk(this, unused);
    CHECK(filler->IsFiller());
    CHECK_EQ(filler->address() + filler->Size(), area_end());
  }
  return unused;
}
| 801 |
746 void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk, | 802 void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk, |
747 Address start_free) { | 803 Address start_free) { |
748 // We do not allow partial shrink for code. | 804 // We do not allow partial shrink for code. |
749 DCHECK(chunk->executable() == NOT_EXECUTABLE); | 805 DCHECK(chunk->executable() == NOT_EXECUTABLE); |
750 | 806 |
751 intptr_t size; | 807 intptr_t size; |
752 base::VirtualMemory* reservation = chunk->reserved_memory(); | 808 base::VirtualMemory* reservation = chunk->reserved_memory(); |
753 DCHECK(reservation->IsReserved()); | 809 DCHECK(reservation->IsReserved()); |
754 size = static_cast<intptr_t>(reservation->size()); | 810 size = static_cast<intptr_t>(reservation->size()); |
755 | 811 |
(...skipping 451 matching lines...) Loading... |
1207 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { | 1263 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { |
1208 Address cur = obj->address(); | 1264 Address cur = obj->address(); |
1209 Address next = cur + obj->Size(); | 1265 Address next = cur + obj->Size(); |
1210 if ((cur <= addr) && (addr < next)) return obj; | 1266 if ((cur <= addr) && (addr < next)) return obj; |
1211 } | 1267 } |
1212 | 1268 |
1213 UNREACHABLE(); | 1269 UNREACHABLE(); |
1214 return Smi::FromInt(0); | 1270 return Smi::FromInt(0); |
1215 } | 1271 } |
1216 | 1272 |
// Shrinks every page of this space to its high water mark and adjusts the
// space's accounting accordingly. Only called before deserialization is
// complete, when all pages hold immortal immovable objects (NEVER_EVACUATE).
void PagedSpace::ShrinkImmortalImmovablePages() {
  DCHECK(!heap()->deserialization_complete());
  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
  // Drop the linear allocation area and free list first so the high water
  // mark is final before any page is shrunk.
  EmptyAllocationInfo();
  ResetFreeList();

  for (Page* page : *this) {
    DCHECK(page->IsFlagSet(Page::NEVER_EVACUATE));
    size_t unused = page->ShrinkToHighWaterMark();
    // Mirror the uncommitted bytes in both capacity and committed-memory
    // accounting.
    accounting_stats_.DecreaseCapacity(static_cast<intptr_t>(unused));
    AccountUncommitted(unused);
  }
}
| 1286 |
// Allocates a new page for this space and links it at the end of the page
// list. Returns false if the old generation may not grow by AreaSize() bytes
// or if the page allocation itself fails.
bool PagedSpace::Expand() {
  const int size = AreaSize();
  if (!heap()->CanExpandOldGeneration(size)) return false;
  Page* p = heap()->memory_allocator()->AllocatePage(size, this, executable());
  if (p == nullptr) return false;
  // Account the full page size (including headers/guards), not just the
  // usable area.
  AccountCommitted(static_cast<intptr_t>(p->size()));

  // Pages created during bootstrapping may contain immortal immovable objects.
  if (!heap()->deserialization_complete()) p->MarkNeverEvacuate();

  DCHECK(Capacity() <= heap()->MaxOldGenerationSize());

  p->InsertAfter(anchor_.prev_page());

  return true;
(...skipping 70 matching lines...) Loading... |
1308 SetTopAndLimit(NULL, NULL); | 1372 SetTopAndLimit(NULL, NULL); |
1309 Free(current_top, static_cast<int>(current_limit - current_top)); | 1373 Free(current_top, static_cast<int>(current_limit - current_top)); |
1310 } | 1374 } |
1311 | 1375 |
// Grows this space's reported capacity by |size| bytes.
void PagedSpace::IncreaseCapacity(int size) {
  accounting_stats_.ExpandSpace(size);
}
1315 | 1379 |
// Removes an empty |page| from this space and hands it to the memory
// allocator for (queued) freeing. The page must contain no live objects and
// must be owned by this space.
void PagedSpace::ReleasePage(Page* page) {
  DCHECK_EQ(page->LiveBytes(), 0);
  DCHECK_EQ(page->owner(), this);

  free_list_.EvictFreeListItems(page);
  DCHECK(!free_list_.ContainsPageFreeListItems(page));

  page->ReleaseBlackAreaEndMarkerMap();

  // If the linear allocation area lives on this page, invalidate it.
  if (Page::FromAllocationAreaAddress(allocation_info_.top()) == page) {
    allocation_info_.Reset(nullptr, nullptr);
  }

  // If page is still in a list, unlink it from that list.
  if (page->next_chunk() != NULL) {
    DCHECK(page->prev_chunk() != NULL);
    page->Unlink();
  }

  // Shrink by the page's own area_size() rather than the space-wide
  // AreaSize(), since pages may have been shrunk to their high water mark.
  AccountUncommitted(static_cast<intptr_t>(page->size()));
  accounting_stats_.ShrinkSpace(page->area_size());
  heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
}
1342 | 1403 |
#ifdef DEBUG
// Debug-only printing hook; intentionally a no-op for paged spaces.
void PagedSpace::Print() {}
#endif
1346 | 1407 |
1347 #ifdef VERIFY_HEAP | 1408 #ifdef VERIFY_HEAP |
1348 void PagedSpace::Verify(ObjectVisitor* visitor) { | 1409 void PagedSpace::Verify(ObjectVisitor* visitor) { |
1349 bool allocation_pointer_found_in_space = | 1410 bool allocation_pointer_found_in_space = |
1350 (allocation_info_.top() == allocation_info_.limit()); | 1411 (allocation_info_.top() == allocation_info_.limit()); |
(...skipping 1794 matching lines...) Loading... |
3145 object->ShortPrint(); | 3206 object->ShortPrint(); |
3146 PrintF("\n"); | 3207 PrintF("\n"); |
3147 } | 3208 } |
3148 printf(" --------------------------------------\n"); | 3209 printf(" --------------------------------------\n"); |
3149 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); | 3210 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); |
3150 } | 3211 } |
3151 | 3212 |
3152 #endif // DEBUG | 3213 #endif // DEBUG |
3153 } // namespace internal | 3214 } // namespace internal |
3154 } // namespace v8 | 3215 } // namespace v8 |
OLD | NEW |