Chromium Code Reviews

Unified Diff: src/heap/spaces.cc

Issue 2232653003: Reland of "[heap] Switch to 500k pages" (Closed)
Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: s/ShrinkPagesToHighWaterMark/ShrinkImmortalImmovablePages/ Created 4 years, 4 months ago
// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/spaces.h"

#include "src/base/bits.h"
#include "src/base/platform/platform.h"
#include "src/base/platform/semaphore.h"
#include "src/full-codegen/full-codegen.h"
(...skipping 577 matching lines...)

void MemoryChunk::Unlink() {
  MemoryChunk* next_element = next_chunk();
  MemoryChunk* prev_element = prev_chunk();
  next_element->set_prev_chunk(prev_element);
  prev_element->set_next_chunk(next_element);
  set_prev_chunk(NULL);
  set_next_chunk(NULL);
}

+void MemoryAllocator::ShrinkChunk(MemoryChunk* chunk, size_t bytes_to_shrink) {
+  DCHECK_GE(bytes_to_shrink, static_cast<size_t>(base::OS::CommitPageSize()));
+  DCHECK_EQ(0, bytes_to_shrink % base::OS::CommitPageSize());
+  Address free_start = chunk->area_end_ - bytes_to_shrink;
+  // Don't adjust the size of the page. The area is just uncommitted but not
+  // released.
+  chunk->area_end_ -= bytes_to_shrink;
+  UncommitBlock(free_start, bytes_to_shrink);
+  if (chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
+    if (chunk->reservation_.IsReserved())
+      chunk->reservation_.Guard(chunk->area_end_);
+    else
+      base::OS::Guard(chunk->area_end_, base::OS::CommitPageSize());
+  }
+}

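ShrinkChunk expects bytes_to_shrink to already be a whole multiple of the
commit page size (see the DCHECKs above); the caller is responsible for
rounding down. A minimal standalone sketch of that rounding (the 4 KiB commit
page size and the helper name are assumptions for the example; real V8 queries
base::OS::CommitPageSize() at runtime):

    #include <cassert>
    #include <cstddef>

    // Assumed commit granularity for the example; platform-dependent in V8.
    constexpr size_t kCommitPageSize = 4096;

    // Mirrors RoundDown(bytes, base::OS::CommitPageSize()): round a byte
    // count down to a whole number of commit pages.
    constexpr size_t RoundDownToCommitPage(size_t bytes) {
      return bytes - (bytes % kCommitPageSize);
    }

    int main() {
      // Only whole pages can be uncommitted: 10000 trailing free bytes give
      // back 8192 bytes (two pages); the 1808-byte remainder stays committed.
      assert(RoundDownToCommitPage(10000) == 2 * kCommitPageSize);
      assert(RoundDownToCommitPage(4095) == 0);
      return 0;
    }
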
MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
                                            intptr_t commit_area_size,
                                            Executability executable,
                                            Space* owner) {
  DCHECK(commit_area_size <= reserve_area_size);

  size_t chunk_size;
  Heap* heap = isolate_->heap();
  Address base = NULL;
(...skipping 598 matching lines...)
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    Address cur = obj->address();
    Address next = cur + obj->Size();
    if ((cur <= addr) && (addr < next)) return obj;
  }

  UNREACHABLE();
  return Smi::FromInt(0);
}

+void PagedSpace::ShrinkImmortalImmovablePages() {
+  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
+  EmptyAllocationInfo();
+  ResetFreeList();
+
+  for (Page* page : *this) {
+    // Only shrink immortal immovable pages after deserialization.
+    if (!page->IsFlagSet(Page::NEVER_EVACUATE)) continue;
+
+    // In order to shrink the page, we need to find the last filler. Since
+    // a GC could've happened, we need to manually traverse the page to find
+    // any free space at the end.
+    HeapObjectIterator it(page);
+    HeapObject* filler = nullptr;
+    for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {

Hannes Payer (out of office) 2016/08/10 21:17:35:
  This seems to be pretty costly. Let's discuss this.

Michael Lippautz 2016/08/11 09:48:10:
  As discussed offline: Switch back to using the high water mark.

+      filler = HeapObject::FromAddress(obj->address() + obj->Size());
+    }
+    if (filler == nullptr || filler->address() == page->area_end()) continue;
+    CHECK(filler->IsFiller());
+    if (!filler->IsFreeSpace()) continue;
+
+    size_t unused =
+        RoundDown(static_cast<size_t>(page->area_end() - filler->address() -
+                                      FreeSpace::kSize),
+                  base::OS::CommitPageSize());
+    if (unused > 0) {
+      heap()->CreateFillerObjectAt(
+          filler->address(),
+          static_cast<int>(page->area_end() - filler->address() - unused),
+          ClearRecordedSlots::kNo);
+      heap()->memory_allocator()->ShrinkChunk(page, unused);
+      CHECK(filler->IsFiller());
+      CHECK_EQ(filler->address() + filler->Size(), page->area_end());
+      accounting_stats_.DecreaseCapacity(unused);
+      AccountUncommitted(unused);
+    }
+  }
+}
+
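The loop above keeps a minimal FreeSpace filler in front of the uncommitted
tail so the page stays iterable up to its new area_end. A standalone sketch of
that layout arithmetic (all names and sizes here, including the stand-in for
FreeSpace::kSize, are assumptions for illustration):

    #include <cassert>
    #include <cstddef>

    constexpr size_t kCommitPageSize = 4096;  // assumed commit granularity
    constexpr size_t kFreeSpaceSize = 24;     // stand-in for FreeSpace::kSize

    constexpr size_t RoundDownToCommitPage(size_t bytes) {
      return bytes - (bytes % kCommitPageSize);
    }

    int main() {
      // Hypothetical page whose last filler starts 20000 bytes before its
      // area_end.
      size_t area_end = 500 * 1024;
      size_t filler_start = area_end - 20000;

      // Uncommittable tail: the trailing free bytes, minus room for a minimal
      // FreeSpace object, rounded down to whole commit pages.
      size_t unused =
          RoundDownToCommitPage(area_end - filler_start - kFreeSpaceSize);
      assert(unused == 4 * kCommitPageSize);  // 19976 bytes -> 4 pages

      // The rewritten filler exactly covers the gap up to the new area_end,
      // matching the CHECK_EQ in the patch.
      size_t new_filler_size = area_end - filler_start - unused;
      size_t new_area_end = area_end - unused;
      assert(filler_start + new_filler_size == new_area_end);
      return 0;
    }
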
bool PagedSpace::Expand() {
-  int size = AreaSize();
-  if (snapshotable() && !HasPages()) {
-    size = Snapshot::SizeOfFirstPage(heap()->isolate(), identity());
-  }
-
+  const int size = AreaSize();
  if (!heap()->CanExpandOldGeneration(size)) return false;
-
  Page* p = heap()->memory_allocator()->AllocatePage(size, this, executable());
  if (p == nullptr) return false;
-
  AccountCommitted(static_cast<intptr_t>(p->size()));

  // Pages created during bootstrapping may contain immortal immovable objects.
  if (!heap()->deserialization_complete()) p->MarkNeverEvacuate();

  DCHECK(Capacity() <= heap()->MaxOldGenerationSize());

  p->InsertAfter(anchor_.prev_page());

  return true;
(...skipping 60 matching lines...)
  }
  Free(current_top, old_linear_size);
}

void PagedSpace::IncreaseCapacity(int size) {
  accounting_stats_.ExpandSpace(size);
}

void PagedSpace::ReleasePage(Page* page) {
  DCHECK_EQ(page->LiveBytes(), 0);
-  DCHECK_EQ(AreaSize(), page->area_size());
  DCHECK_EQ(page->owner(), this);

  free_list_.EvictFreeListItems(page);
  DCHECK(!free_list_.ContainsPageFreeListItems(page));

  if (Page::FromAllocationAreaAddress(allocation_info_.top()) == page) {
    allocation_info_.Reset(nullptr, nullptr);
  }

  // If page is still in a list, unlink it from that list.
  if (page->next_chunk() != NULL) {
    DCHECK(page->prev_chunk() != NULL);
    page->Unlink();
  }

  AccountUncommitted(static_cast<intptr_t>(page->size()));
+  accounting_stats_.ShrinkSpace(page->area_size());
  heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
-
-  DCHECK(Capacity() > 0);
-  accounting_stats_.ShrinkSpace(AreaSize());
}
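
Note the accounting change: ReleasePage now shrinks the stats by the page's
actual area_size() rather than the space-wide AreaSize(), because shrunk
immortal immovable pages no longer share a uniform area. A toy sketch of why
uniform accounting would break (names and sizes are illustrative only):

    #include <cassert>
    #include <cstddef>

    // Toy capacity accounting in the spirit of the space's stats object.
    struct Stats {
      size_t capacity = 0;
      void ExpandSpace(size_t n) { capacity += n; }
      void ShrinkSpace(size_t n) {
        assert(n <= capacity);  // subtracting a uniform AreaSize() would trip
        capacity -= n;
      }
    };

    int main() {
      const size_t kFullArea = 500 * 1024;    // assumed ~500k page area
      const size_t kShrunkArea = 100 * 1024;  // area left after shrinking

      Stats stats;
      stats.ExpandSpace(kFullArea);                // page allocated
      stats.ShrinkSpace(kFullArea - kShrunkArea);  // tail uncommitted
      stats.ShrinkSpace(kShrunkArea);              // page released
      assert(stats.capacity == 0);
      return 0;
    }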

#ifdef DEBUG
void PagedSpace::Print() {}
#endif

#ifdef VERIFY_HEAP
void PagedSpace::Verify(ObjectVisitor* visitor) {
  bool allocation_pointer_found_in_space =
      (allocation_info_.top() == allocation_info_.limit());
(...skipping 1794 matching lines...)
    object->ShortPrint();
    PrintF("\n");
  }
  printf(" --------------------------------------\n");
  printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
}

#endif  // DEBUG
}  // namespace internal
}  // namespace v8
