| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/heap/spaces.h" | 5 #include "src/heap/spaces.h" |
| 6 | 6 |
| 7 #include "src/base/bits.h" | 7 #include "src/base/bits.h" |
| 8 #include "src/base/platform/platform.h" | 8 #include "src/base/platform/platform.h" |
| 9 #include "src/base/platform/semaphore.h" | 9 #include "src/base/platform/semaphore.h" |
| 10 #include "src/full-codegen/full-codegen.h" | 10 #include "src/full-codegen/full-codegen.h" |
| (...skipping 577 matching lines...) | |
| 588 | 588 |
| 589 void MemoryChunk::Unlink() { | 589 void MemoryChunk::Unlink() { |
| 590 MemoryChunk* next_element = next_chunk(); | 590 MemoryChunk* next_element = next_chunk(); |
| 591 MemoryChunk* prev_element = prev_chunk(); | 591 MemoryChunk* prev_element = prev_chunk(); |
| 592 next_element->set_prev_chunk(prev_element); | 592 next_element->set_prev_chunk(prev_element); |
| 593 prev_element->set_next_chunk(next_element); | 593 prev_element->set_next_chunk(next_element); |
| 594 set_prev_chunk(NULL); | 594 set_prev_chunk(NULL); |
| 595 set_next_chunk(NULL); | 595 set_next_chunk(NULL); |
| 596 } | 596 } |
| 597 | 597 |
| 598 void MemoryAllocator::ShrinkChunk(MemoryChunk* chunk, size_t bytes_to_shrink) { | |
| 599 DCHECK_GE(bytes_to_shrink, static_cast<size_t>(base::OS::CommitPageSize())); | |
| 600 DCHECK_EQ(0, bytes_to_shrink % base::OS::CommitPageSize()); | |
| 601 Address free_start = chunk->area_end_ - bytes_to_shrink; | |
| 602 // Don't adjust the size of the page. The area is just uncommitted but not | |
| 603 // released. | |
| 604 chunk->area_end_ -= bytes_to_shrink; | |
| 605 UncommitBlock(free_start, bytes_to_shrink); | |
| 606 if (chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) { | |
| 607 if (chunk->reservation_.IsReserved()) | |
| 608 chunk->reservation_.Guard(chunk->area_end_); | |
| 609 else | |
| 610 base::OS::Guard(chunk->area_end_, base::OS::CommitPageSize()); | |
| 611 } | |
| 612 } | |
| 613 | 598 |
| 614 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size, | 599 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size, |
| 615 intptr_t commit_area_size, | 600 intptr_t commit_area_size, |
| 616 Executability executable, | 601 Executability executable, |
| 617 Space* owner) { | 602 Space* owner) { |
| 618 DCHECK(commit_area_size <= reserve_area_size); | 603 DCHECK(commit_area_size <= reserve_area_size); |
| 619 | 604 |
| 620 size_t chunk_size; | 605 size_t chunk_size; |
| 621 Heap* heap = isolate_->heap(); | 606 Heap* heap = isolate_->heap(); |
| 622 Address base = NULL; | 607 Address base = NULL; |
| (...skipping 598 matching lines...) | |
| 1221 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { | 1206 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { |
| 1222 Address cur = obj->address(); | 1207 Address cur = obj->address(); |
| 1223 Address next = cur + obj->Size(); | 1208 Address next = cur + obj->Size(); |
| 1224 if ((cur <= addr) && (addr < next)) return obj; | 1209 if ((cur <= addr) && (addr < next)) return obj; |
| 1225 } | 1210 } |
| 1226 | 1211 |
| 1227 UNREACHABLE(); | 1212 UNREACHABLE(); |
| 1228 return Smi::FromInt(0); | 1213 return Smi::FromInt(0); |
| 1229 } | 1214 } |
| 1230 | 1215 |
| 1231 void PagedSpace::ShrinkPagesToHighWaterMark() { | 1216 bool PagedSpace::Expand() { |
| 1232 MemoryChunk::UpdateHighWaterMark(allocation_info_.top()); | 1217 int size = AreaSize(); |
| 1233 EmptyAllocationInfo(); | 1218 if (snapshotable() && !HasPages()) { |
| 1234 ResetFreeList(); | 1219 size = Snapshot::SizeOfFirstPage(heap()->isolate(), identity()); |
| 1220 } |
| 1235 | 1221 |
| 1236 for (Page* page : *this) { | 1222 if (!heap()->CanExpandOldGeneration(size)) return false; |
| 1237 // Only shrink immortal immovable pages after deserialization. | |
| 1238 if (!page->IsFlagSet(Page::NEVER_EVACUATE)) continue; | |
| 1239 | 1223 |
| 1240 // In order to shrink the page, we need to find the last filler. Since | |
| 1241 // a GC could've happened we need to manually traverse the page to find | |
| 1242 // any free space at the end. | |
| 1243 HeapObjectIterator it(page); | |
| 1244 HeapObject* filler = nullptr; | |
| 1245 for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) { | |
| 1246 filler = HeapObject::FromAddress(obj->address() + obj->Size()); | |
| 1247 } | |
| 1248 if (filler == nullptr || filler->address() == page->area_end()) continue; | |
| 1249 CHECK(filler->IsFiller()); | |
| 1250 if (!filler->IsFreeSpace()) continue; | |
| 1251 | |
| 1252 size_t unused = | |
| 1253 RoundDown(static_cast<size_t>(page->area_end() - filler->address() - | |
| 1254 FreeSpace::kSize), | |
| 1255 base::OS::CommitPageSize()); | |
| 1256 if (unused > 0) { | |
| 1257 heap()->CreateFillerObjectAt( | |
| 1258 filler->address(), | |
| 1259 static_cast<int>(page->area_end() - filler->address() - unused), | |
| 1260 ClearRecordedSlots::kNo); | |
| 1261 heap()->memory_allocator()->ShrinkChunk(page, unused); | |
| 1262 CHECK(filler->IsFiller()); | |
| 1263 CHECK_EQ(filler->address() + filler->Size(), page->area_end()); | |
| 1264 accounting_stats_.DecreaseCapacity(unused); | |
| 1265 AccountUncommitted(unused); | |
| 1266 } | |
| 1267 } | |
| 1268 } | |
| 1269 | |
| 1270 bool PagedSpace::Expand() { | |
| 1271 const int size = AreaSize(); | |
| 1272 if (!heap()->CanExpandOldGeneration(size)) return false; | |
| 1273 Page* p = heap()->memory_allocator()->AllocatePage(size, this, executable()); | 1224 Page* p = heap()->memory_allocator()->AllocatePage(size, this, executable()); |
| 1274 if (p == nullptr) return false; | 1225 if (p == nullptr) return false; |
| 1226 |
| 1275 AccountCommitted(static_cast<intptr_t>(p->size())); | 1227 AccountCommitted(static_cast<intptr_t>(p->size())); |
| 1276 | 1228 |
| 1277 // Pages created during bootstrapping may contain immortal immovable objects. | 1229 // Pages created during bootstrapping may contain immortal immovable objects. |
| 1278 if (!heap()->deserialization_complete()) p->MarkNeverEvacuate(); | 1230 if (!heap()->deserialization_complete()) p->MarkNeverEvacuate(); |
| 1279 | 1231 |
| 1280 DCHECK(Capacity() <= heap()->MaxOldGenerationSize()); | 1232 DCHECK(Capacity() <= heap()->MaxOldGenerationSize()); |
| 1281 | 1233 |
| 1282 p->InsertAfter(anchor_.prev_page()); | 1234 p->InsertAfter(anchor_.prev_page()); |
| 1283 | 1235 |
| 1284 return true; | 1236 return true; |
| (...skipping 60 matching lines...) | |
| 1345 } | 1297 } |
| 1346 Free(current_top, old_linear_size); | 1298 Free(current_top, old_linear_size); |
| 1347 } | 1299 } |
| 1348 | 1300 |
| 1349 void PagedSpace::IncreaseCapacity(int size) { | 1301 void PagedSpace::IncreaseCapacity(int size) { |
| 1350 accounting_stats_.ExpandSpace(size); | 1302 accounting_stats_.ExpandSpace(size); |
| 1351 } | 1303 } |
| 1352 | 1304 |
| 1353 void PagedSpace::ReleasePage(Page* page) { | 1305 void PagedSpace::ReleasePage(Page* page) { |
| 1354 DCHECK_EQ(page->LiveBytes(), 0); | 1306 DCHECK_EQ(page->LiveBytes(), 0); |
| 1307 DCHECK_EQ(AreaSize(), page->area_size()); |
| 1355 DCHECK_EQ(page->owner(), this); | 1308 DCHECK_EQ(page->owner(), this); |
| 1356 | 1309 |
| 1357 free_list_.EvictFreeListItems(page); | 1310 free_list_.EvictFreeListItems(page); |
| 1358 DCHECK(!free_list_.ContainsPageFreeListItems(page)); | 1311 DCHECK(!free_list_.ContainsPageFreeListItems(page)); |
| 1359 | 1312 |
| 1360 if (Page::FromAllocationAreaAddress(allocation_info_.top()) == page) { | 1313 if (Page::FromAllocationAreaAddress(allocation_info_.top()) == page) { |
| 1361 allocation_info_.Reset(nullptr, nullptr); | 1314 allocation_info_.Reset(nullptr, nullptr); |
| 1362 } | 1315 } |
| 1363 | 1316 |
| 1364 // If page is still in a list, unlink it from that list. | 1317 // If page is still in a list, unlink it from that list. |
| 1365 if (page->next_chunk() != NULL) { | 1318 if (page->next_chunk() != NULL) { |
| 1366 DCHECK(page->prev_chunk() != NULL); | 1319 DCHECK(page->prev_chunk() != NULL); |
| 1367 page->Unlink(); | 1320 page->Unlink(); |
| 1368 } | 1321 } |
| 1369 | 1322 |
| 1370 AccountUncommitted(static_cast<intptr_t>(page->size())); | 1323 AccountUncommitted(static_cast<intptr_t>(page->size())); |
| 1371 heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page); | 1324 heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page); |
| 1372 | 1325 |
| 1373 DCHECK(Capacity() > 0); | 1326 DCHECK(Capacity() > 0); |
| 1374 accounting_stats_.ShrinkSpace(page->area_size()); | 1327 accounting_stats_.ShrinkSpace(AreaSize()); |
| 1375 } | 1328 } |
| 1376 | 1329 |
| 1377 #ifdef DEBUG | 1330 #ifdef DEBUG |
| 1378 void PagedSpace::Print() {} | 1331 void PagedSpace::Print() {} |
| 1379 #endif | 1332 #endif |
| 1380 | 1333 |
| 1381 #ifdef VERIFY_HEAP | 1334 #ifdef VERIFY_HEAP |
| 1382 void PagedSpace::Verify(ObjectVisitor* visitor) { | 1335 void PagedSpace::Verify(ObjectVisitor* visitor) { |
| 1383 bool allocation_pointer_found_in_space = | 1336 bool allocation_pointer_found_in_space = |
| 1384 (allocation_info_.top() == allocation_info_.limit()); | 1337 (allocation_info_.top() == allocation_info_.limit()); |
| (...skipping 1794 matching lines...) | |
| 3179 object->ShortPrint(); | 3132 object->ShortPrint(); |
| 3180 PrintF("\n"); | 3133 PrintF("\n"); |
| 3181 } | 3134 } |
| 3182 printf(" --------------------------------------\n"); | 3135 printf(" --------------------------------------\n"); |
| 3183 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); | 3136 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); |
| 3184 } | 3137 } |
| 3185 | 3138 |
| 3186 #endif // DEBUG | 3139 #endif // DEBUG |
| 3187 } // namespace internal | 3140 } // namespace internal |
| 3188 } // namespace v8 | 3141 } // namespace v8 |
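
Note on the code removed above (MemoryAllocator::ShrinkChunk and PagedSpace::ShrinkPagesToHighWaterMark): the shrink amount was derived from the last filler object on the page and rounded down to the OS commit page size so that the DCHECKs at the top of ShrinkChunk held. Below is a minimal, standalone sketch of that size computation only; the function and parameter names are illustrative (free_space_header_size stands in for FreeSpace::kSize) and this is not V8's actual API.

#include <cstddef>
#include <cstdint>

// Bytes at the end of a page that can be handed to a ShrinkChunk-style call,
// given the address of the trailing filler. Mirrors the removed
// RoundDown(area_end - filler - FreeSpace::kSize, CommitPageSize()) step.
size_t UnusedTailBytes(uintptr_t filler_address, uintptr_t area_end,
                       size_t commit_page_size,
                       size_t free_space_header_size) {
  // Keep room for the FreeSpace filler that must stay on the page.
  if (area_end <= filler_address + free_space_header_size) return 0;
  size_t shrinkable =
      static_cast<size_t>(area_end - filler_address) - free_space_header_size;
  // Round down to a whole number of OS commit pages, as required by the
  // DCHECKs in the removed ShrinkChunk.
  return shrinkable - (shrinkable % commit_page_size);
}

ShrinkChunk then uncommitted exactly that many bytes below area_end_ and, for executable chunks, re-established the guard region at the new area_end_; the page's recorded size was deliberately left unchanged.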