OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/heap/spaces.h" | 5 #include "src/heap/spaces.h" |
6 | 6 |
7 #include "src/base/bits.h" | 7 #include "src/base/bits.h" |
8 #include "src/base/platform/platform.h" | 8 #include "src/base/platform/platform.h" |
9 #include "src/base/platform/semaphore.h" | 9 #include "src/base/platform/semaphore.h" |
10 #include "src/full-codegen/full-codegen.h" | 10 #include "src/full-codegen/full-codegen.h" |
(...skipping 577 matching lines...)
588 | 588 |
589 void MemoryChunk::Unlink() { | 589 void MemoryChunk::Unlink() { |
590 MemoryChunk* next_element = next_chunk(); | 590 MemoryChunk* next_element = next_chunk(); |
591 MemoryChunk* prev_element = prev_chunk(); | 591 MemoryChunk* prev_element = prev_chunk(); |
592 next_element->set_prev_chunk(prev_element); | 592 next_element->set_prev_chunk(prev_element); |
593 prev_element->set_next_chunk(next_element); | 593 prev_element->set_next_chunk(next_element); |
594 set_prev_chunk(NULL); | 594 set_prev_chunk(NULL); |
595 set_next_chunk(NULL); | 595 set_next_chunk(NULL); |
596 } | 596 } |
597 | 597 |
| 598 void MemoryAllocator::ShrinkChunk(MemoryChunk* chunk, size_t bytes_to_shrink) { |
| 599 DCHECK_GE(bytes_to_shrink, static_cast<size_t>(base::OS::CommitPageSize())); |
| 600 DCHECK_EQ(0, bytes_to_shrink % base::OS::CommitPageSize()); |
| 601 Address free_start = chunk->area_end_ - bytes_to_shrink; |
| 602 // Don't adjust the page's size: the area is only uncommitted, not |
| 603 // released. |
| 604 chunk->area_end_ -= bytes_to_shrink; |
| 605 UncommitBlock(free_start, bytes_to_shrink); |
| 606 if (chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) { |
| 607 if (chunk->reservation_.IsReserved()) |
| 608 chunk->reservation_.Guard(chunk->area_end_); |
| 609 else |
| 610 base::OS::Guard(chunk->area_end_, base::OS::CommitPageSize()); |
| 611 } |
| 612 } |
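(Reviewer note, not part of the patch: a minimal standalone POSIX sketch of what
ShrinkChunk delegates to UncommitBlock()/Guard(). UncommitTail, region and
committed_size are illustrative names; the real code goes through
base::VirtualMemory / base::OS rather than raw mmap-family calls.)

#include <sys/mman.h>
#include <unistd.h>

#include <cassert>
#include <cstddef>

// Hypothetical helper: return the tail of a committed region to the OS while
// keeping the virtual reservation, and leave it inaccessible so stray accesses
// fault -- the role Guard() plays for executable chunks.
void UncommitTail(void* region, size_t committed_size, size_t bytes_to_shrink) {
  const size_t page_size = static_cast<size_t>(sysconf(_SC_PAGESIZE));
  assert(bytes_to_shrink % page_size == 0);
  assert(bytes_to_shrink <= committed_size);
  char* tail = static_cast<char*>(region) + committed_size - bytes_to_shrink;
  // Drop all access rights: the range stays reserved but becomes a guard area.
  mprotect(tail, bytes_to_shrink, PROT_NONE);
  // Let the kernel reclaim the physical pages backing the uncommitted range.
  madvise(tail, bytes_to_shrink, MADV_DONTNEED);
}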
598 | 613 |
599 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size, | 614 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size, |
600 intptr_t commit_area_size, | 615 intptr_t commit_area_size, |
601 Executability executable, | 616 Executability executable, |
602 Space* owner) { | 617 Space* owner) { |
603 DCHECK(commit_area_size <= reserve_area_size); | 618 DCHECK(commit_area_size <= reserve_area_size); |
604 | 619 |
605 size_t chunk_size; | 620 size_t chunk_size; |
606 Heap* heap = isolate_->heap(); | 621 Heap* heap = isolate_->heap(); |
607 Address base = NULL; | 622 Address base = NULL; |
(...skipping 598 matching lines...)
1206 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { | 1221 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { |
1207 Address cur = obj->address(); | 1222 Address cur = obj->address(); |
1208 Address next = cur + obj->Size(); | 1223 Address next = cur + obj->Size(); |
1209 if ((cur <= addr) && (addr < next)) return obj; | 1224 if ((cur <= addr) && (addr < next)) return obj; |
1210 } | 1225 } |
1211 | 1226 |
1212 UNREACHABLE(); | 1227 UNREACHABLE(); |
1213 return Smi::FromInt(0); | 1228 return Smi::FromInt(0); |
1214 } | 1229 } |
1215 | 1230 |
| 1231 void PagedSpace::ShrinkPagesToHighWaterMark() { |
| 1232 MemoryChunk::UpdateHighWaterMark(allocation_info_.top()); |
| 1233 EmptyAllocationInfo(); |
| 1234 ResetFreeList(); |
| 1235 |
| 1236 for (Page* page : *this) { |
| 1237 // There should be a filler object at the high water mark (see the CHECKs |
| 1238 // below). We keep this filler and shrink it so that heap iteration based |
| 1239 // on object size and area_end stays consistent. |
| 1240 size_t unused = |
| 1241 RoundDown(static_cast<size_t>(page->area_end() - page->HighWaterMark() - |
| 1242 FreeSpace::kSize), |
| 1243 base::OS::CommitPageSize()); |
| 1244 if (unused > 0) { |
| 1245 HeapObject* filler = HeapObject::FromAddress(page->HighWaterMark()); |
| 1246 CHECK(filler->IsFiller()); |
| 1247 heap()->CreateFillerObjectAt( |
| 1248 filler->address(), |
| 1249 static_cast<int>(page->area_end() - unused - filler->address()), |
| 1250 ClearRecordedSlots::kNo); |
| 1251 heap()->memory_allocator()->ShrinkChunk(page, unused); |
| 1252 CHECK(filler->IsFiller()); |
| 1253 CHECK_EQ(filler->address() + filler->Size(), page->area_end()); |
| 1254 accounting_stats_.DecreaseCapacity(unused); |
| 1255 AccountUncommitted(unused); |
| 1256 } |
| 1257 } |
| 1258 } |
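(Reviewer note, not part of the patch: a self-contained sketch of the "unused"
computation above. kCommitPageSize and kFillerMinSize are assumed illustrative
values standing in for base::OS::CommitPageSize() and FreeSpace::kSize.)

#include <cassert>
#include <cstddef>
#include <cstdint>

constexpr size_t kCommitPageSize = 4 * 1024;          // assumed for illustration
constexpr size_t kFillerMinSize = 3 * sizeof(void*);  // assumed for illustration

size_t RoundDownTo(size_t value, size_t alignment) {
  return value - value % alignment;
}

// Mirrors the computation in ShrinkPagesToHighWaterMark(): the shrinkable tail
// is everything past the high water mark, minus room for the filler object that
// must remain there, rounded down to whole commit pages.
size_t ShrinkableBytes(uintptr_t area_end, uintptr_t high_water_mark) {
  assert(high_water_mark + kFillerMinSize <= area_end);
  return RoundDownTo(area_end - high_water_mark - kFillerMinSize,
                     kCommitPageSize);
}

// Example: with a 512 KB area and the high water mark 112 KB in, the 400 KB tail
// minus the filler rounds down to 396 KB that can be handed to ShrinkChunk().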
| 1259 |
1216 bool PagedSpace::Expand() { | 1260 bool PagedSpace::Expand() { |
1217 int size = AreaSize(); | 1261 const int size = AreaSize(); |
1218 if (snapshotable() && !HasPages()) { | |
1219 size = Snapshot::SizeOfFirstPage(heap()->isolate(), identity()); | |
1220 } | |
1221 | |
1222 if (!heap()->CanExpandOldGeneration(size)) return false; | 1262 if (!heap()->CanExpandOldGeneration(size)) return false; |
1223 | |
1224 Page* p = heap()->memory_allocator()->AllocatePage(size, this, executable()); | 1263 Page* p = heap()->memory_allocator()->AllocatePage(size, this, executable()); |
1225 if (p == nullptr) return false; | 1264 if (p == nullptr) return false; |
1226 | |
1227 AccountCommitted(static_cast<intptr_t>(p->size())); | 1265 AccountCommitted(static_cast<intptr_t>(p->size())); |
1228 | 1266 |
1229 // Pages created during bootstrapping may contain immortal immovable objects. | 1267 // Pages created during bootstrapping may contain immortal immovable objects. |
1230 if (!heap()->deserialization_complete()) p->MarkNeverEvacuate(); | 1268 if (!heap()->deserialization_complete()) p->MarkNeverEvacuate(); |
1231 | 1269 |
1232 DCHECK(Capacity() <= heap()->MaxOldGenerationSize()); | 1270 DCHECK(Capacity() <= heap()->MaxOldGenerationSize()); |
1233 | 1271 |
1234 p->InsertAfter(anchor_.prev_page()); | 1272 p->InsertAfter(anchor_.prev_page()); |
1235 | 1273 |
1236 return true; | 1274 return true; |
(...skipping 60 matching lines...)
1297 } | 1335 } |
1298 Free(current_top, old_linear_size); | 1336 Free(current_top, old_linear_size); |
1299 } | 1337 } |
1300 | 1338 |
1301 void PagedSpace::IncreaseCapacity(int size) { | 1339 void PagedSpace::IncreaseCapacity(int size) { |
1302 accounting_stats_.ExpandSpace(size); | 1340 accounting_stats_.ExpandSpace(size); |
1303 } | 1341 } |
1304 | 1342 |
1305 void PagedSpace::ReleasePage(Page* page) { | 1343 void PagedSpace::ReleasePage(Page* page) { |
1306 DCHECK_EQ(page->LiveBytes(), 0); | 1344 DCHECK_EQ(page->LiveBytes(), 0); |
1307 DCHECK_EQ(AreaSize(), page->area_size()); | |
1308 DCHECK_EQ(page->owner(), this); | 1345 DCHECK_EQ(page->owner(), this); |
1309 | 1346 |
1310 free_list_.EvictFreeListItems(page); | 1347 free_list_.EvictFreeListItems(page); |
1311 DCHECK(!free_list_.ContainsPageFreeListItems(page)); | 1348 DCHECK(!free_list_.ContainsPageFreeListItems(page)); |
1312 | 1349 |
1313 if (Page::FromAllocationAreaAddress(allocation_info_.top()) == page) { | 1350 if (Page::FromAllocationAreaAddress(allocation_info_.top()) == page) { |
1314 allocation_info_.Reset(nullptr, nullptr); | 1351 allocation_info_.Reset(nullptr, nullptr); |
1315 } | 1352 } |
1316 | 1353 |
1317 // If page is still in a list, unlink it from that list. | 1354 // If page is still in a list, unlink it from that list. |
1318 if (page->next_chunk() != NULL) { | 1355 if (page->next_chunk() != NULL) { |
1319 DCHECK(page->prev_chunk() != NULL); | 1356 DCHECK(page->prev_chunk() != NULL); |
1320 page->Unlink(); | 1357 page->Unlink(); |
1321 } | 1358 } |
1322 | 1359 |
1323 AccountUncommitted(static_cast<intptr_t>(page->size())); | 1360 AccountUncommitted(static_cast<intptr_t>(page->size())); |
1324 heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page); | 1361 heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page); |
1325 | 1362 |
1326 DCHECK(Capacity() > 0); | 1363 DCHECK(Capacity() > 0); |
1327 accounting_stats_.ShrinkSpace(AreaSize()); | 1364 accounting_stats_.ShrinkSpace(page->area_size()); |
1328 } | 1365 } |
1329 | 1366 |
1330 #ifdef DEBUG | 1367 #ifdef DEBUG |
1331 void PagedSpace::Print() {} | 1368 void PagedSpace::Print() {} |
1332 #endif | 1369 #endif |
1333 | 1370 |
1334 #ifdef VERIFY_HEAP | 1371 #ifdef VERIFY_HEAP |
1335 void PagedSpace::Verify(ObjectVisitor* visitor) { | 1372 void PagedSpace::Verify(ObjectVisitor* visitor) { |
1336 bool allocation_pointer_found_in_space = | 1373 bool allocation_pointer_found_in_space = |
1337 (allocation_info_.top() == allocation_info_.limit()); | 1374 (allocation_info_.top() == allocation_info_.limit()); |
(...skipping 1794 matching lines...)
3132 object->ShortPrint(); | 3169 object->ShortPrint(); |
3133 PrintF("\n"); | 3170 PrintF("\n"); |
3134 } | 3171 } |
3135 printf(" --------------------------------------\n"); | 3172 printf(" --------------------------------------\n"); |
3136 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); | 3173 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); |
3137 } | 3174 } |
3138 | 3175 |
3139 #endif // DEBUG | 3176 #endif // DEBUG |
3140 } // namespace internal | 3177 } // namespace internal |
3141 } // namespace v8 | 3178 } // namespace v8 |