Chromium Code Reviews

Side by Side Diff: src/heap/spaces.cc

Issue 2232653003: Reland of "[heap] Switch to 500k pages" (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Switch back to using the high water mark. Also: don't sweep immortal immovable pages. Created 4 years, 4 months ago
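As a rough sketch of the idea in this patch set (the names, the 4 KiB commit granularity, and the helper below are illustrative assumptions, not the V8 code under review): pages holding immortal immovable objects are never swept, so after deserialization the high water mark sits just past the last live object, and everything between that mark and the page end can be uncommitted in commit-page-size steps while a small filler keeps the page iterable.

    // Minimal sketch under the assumptions above; not the CL's actual code.
    #include <cstddef>
    #include <cstdint>

    constexpr size_t kCommitPageSize = 4096;  // assumed OS commit granularity

    size_t RoundDownTo(size_t value, size_t granularity) {
      return value - (value % granularity);
    }

    // Bytes at the tail of a never-swept page that can be uncommitted when
    // |high_water_mark| marks the end of live objects and a filler of at
    // least |min_filler_size| bytes must remain.
    size_t ShrinkablePageTail(uintptr_t high_water_mark, uintptr_t area_end,
                              size_t min_filler_size) {
      if (high_water_mark + min_filler_size >= area_end) return 0;
      return RoundDownTo(
          static_cast<size_t>(area_end - high_water_mark) - min_filler_size,
          kCommitPageSize);
    }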
1 // Copyright 2011 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "src/heap/spaces.h" 5 #include "src/heap/spaces.h"
6 6
7 #include "src/base/bits.h" 7 #include "src/base/bits.h"
8 #include "src/base/platform/platform.h" 8 #include "src/base/platform/platform.h"
9 #include "src/base/platform/semaphore.h" 9 #include "src/base/platform/semaphore.h"
10 #include "src/full-codegen/full-codegen.h" 10 #include "src/full-codegen/full-codegen.h"
(...skipping 577 matching lines...)
588 588
589 void MemoryChunk::Unlink() { 589 void MemoryChunk::Unlink() {
590 MemoryChunk* next_element = next_chunk(); 590 MemoryChunk* next_element = next_chunk();
591 MemoryChunk* prev_element = prev_chunk(); 591 MemoryChunk* prev_element = prev_chunk();
592 next_element->set_prev_chunk(prev_element); 592 next_element->set_prev_chunk(prev_element);
593 prev_element->set_next_chunk(next_element); 593 prev_element->set_next_chunk(next_element);
594 set_prev_chunk(NULL); 594 set_prev_chunk(NULL);
595 set_next_chunk(NULL); 595 set_next_chunk(NULL);
596 } 596 }
597 597
598 void MemoryAllocator::ShrinkChunk(MemoryChunk* chunk, size_t bytes_to_shrink) {
599 DCHECK_GE(bytes_to_shrink, static_cast<size_t>(base::OS::CommitPageSize()));
600 DCHECK_EQ(0, bytes_to_shrink % base::OS::CommitPageSize());
601 Address free_start = chunk->area_end_ - bytes_to_shrink;
602   // Don't adjust the size of the page. The area is just uncommitted but not
603 // released.
604 chunk->area_end_ -= bytes_to_shrink;
605 UncommitBlock(free_start, bytes_to_shrink);
606 if (chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
607 if (chunk->reservation_.IsReserved())
608 chunk->reservation_.Guard(chunk->area_end_);
609 else
610 base::OS::Guard(chunk->area_end_, base::OS::CommitPageSize());
611 }
612 }
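ShrinkChunk expects a shrink amount that is already a non-zero multiple of the OS commit page size, as the DCHECKs above require; a hedged sketch of a conforming call site (free_bytes_at_end and memory_allocator are placeholders, while RoundDown and base::OS::CommitPageSize() appear elsewhere in this file):

    // Hypothetical call site; ShrinkChunk itself asserts the alignment.
    size_t unused = RoundDown(free_bytes_at_end, base::OS::CommitPageSize());
    if (unused > 0) {
      memory_allocator->ShrinkChunk(page, unused);
    }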
598 613
599 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size, 614 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
600 intptr_t commit_area_size, 615 intptr_t commit_area_size,
601 Executability executable, 616 Executability executable,
602 Space* owner) { 617 Space* owner) {
603 DCHECK(commit_area_size <= reserve_area_size); 618 DCHECK(commit_area_size <= reserve_area_size);
604 619
605 size_t chunk_size; 620 size_t chunk_size;
606 Heap* heap = isolate_->heap(); 621 Heap* heap = isolate_->heap();
607 Address base = NULL; 622 Address base = NULL;
(...skipping 598 matching lines...)
1206 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { 1221 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
1207 Address cur = obj->address(); 1222 Address cur = obj->address();
1208 Address next = cur + obj->Size(); 1223 Address next = cur + obj->Size();
1209 if ((cur <= addr) && (addr < next)) return obj; 1224 if ((cur <= addr) && (addr < next)) return obj;
1210 } 1225 }
1211 1226
1212 UNREACHABLE(); 1227 UNREACHABLE();
1213 return Smi::FromInt(0); 1228 return Smi::FromInt(0);
1214 } 1229 }
1215 1230
1231 void PagedSpace::ShrinkImmortalImmovablePages() {
1232 MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
1233 EmptyAllocationInfo();
1234 ResetFreeList();
1235
1236 for (Page* page : *this) {
1237 // Only shrink immortal immovable pages after deserialization.
1238 if (!page->IsFlagSet(Page::NEVER_EVACUATE) ||
1239 !page->IsFlagSet(Page::NEVER_SWEEP))
1240 continue;
1241
1242 // Shrink pages to high water mark. Since those pages are never swept, there
1243 // should be a filler exactly at the high water mark.
1244 HeapObject* filler = HeapObject::FromAddress(page->HighWaterMark());
1245 if (filler->address() == page->area_end()) continue;
1246 CHECK(filler->IsFiller());
1247 if (!filler->IsFreeSpace()) continue;
1248
1249 #ifdef DEBUG
Michael Lippautz 2016/08/11 09:48:12 Debug verification useful?
1250   // Check that the filler is indeed the last filler on the page.
1251 HeapObjectIterator it(page);
1252 HeapObject* filler2 = nullptr;
1253 for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
1254 filler2 = HeapObject::FromAddress(obj->address() + obj->Size());
1255 }
1256 if (filler2 == nullptr || filler2->address() == page->area_end()) continue;
1257 DCHECK(filler2->IsFiller());
1258 DCHECK_EQ(filler->address(), filler2->address());
1259 #endif // DEBUG
1260
1261 size_t unused =
1262 RoundDown(static_cast<size_t>(page->area_end() - filler->address() -
1263 FreeSpace::kSize),
1264 base::OS::CommitPageSize());
1265 if (unused > 0) {
1266 if (FLAG_trace_gc_verbose) {
1267 PrintIsolate(heap()->isolate(), "Shrinking page %p: end %p -> %p\n",
1268 reinterpret_cast<void*>(page),
1269 reinterpret_cast<void*>(page->area_end()),
1270 reinterpret_cast<void*>(page->area_end() - unused));
1271 }
1272 heap()->CreateFillerObjectAt(
1273 filler->address(),
1274 static_cast<int>(page->area_end() - filler->address() - unused),
1275 ClearRecordedSlots::kNo);
1276 heap()->memory_allocator()->ShrinkChunk(page, unused);
1277 CHECK(filler->IsFiller());
1278 CHECK_EQ(filler->address() + filler->Size(), page->area_end());
1279 if (heap()->gc_count() > 0) {
1280 // Since a GC already happened, the page area has been accounted for
1281 // as capacity *and* allocated bytes.
1282 accounting_stats_.ShrinkSpace(static_cast<int>(unused));
1283 } else {
1284 // No GC happened, the page area has only been accounted for as
1285 // capacity.
1286 accounting_stats_.DecreaseCapacity(static_cast<intptr_t>(unused));
1287 }
1288 AccountUncommitted(unused);
1289 }
1290 }
1291 }
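As a worked example of the RoundDown computation above, with assumed numbers (4 KiB commit pages, a 24-byte FreeSpace header) that are not taken from the CL:

    // area_end - filler->address() = 300000 bytes of dead space
    // unused = RoundDown(300000 - 24, 4096) = 73 * 4096 = 299008 bytes
    // The filler is re-created to cover 300000 - 299008 = 992 bytes, so the
    // page still ends with a valid filler after 299008 bytes are uncommitted.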
1292
1216 bool PagedSpace::Expand() { 1293 bool PagedSpace::Expand() {
1217 int size = AreaSize(); 1294 const int size = AreaSize();
1218 if (snapshotable() && !HasPages()) {
1219 size = Snapshot::SizeOfFirstPage(heap()->isolate(), identity());
1220 }
1221
1222 if (!heap()->CanExpandOldGeneration(size)) return false; 1295 if (!heap()->CanExpandOldGeneration(size)) return false;
1223
1224 Page* p = heap()->memory_allocator()->AllocatePage(size, this, executable()); 1296 Page* p = heap()->memory_allocator()->AllocatePage(size, this, executable());
1225 if (p == nullptr) return false; 1297 if (p == nullptr) return false;
1226
1227 AccountCommitted(static_cast<intptr_t>(p->size())); 1298 AccountCommitted(static_cast<intptr_t>(p->size()));
1228 1299
1229 // Pages created during bootstrapping may contain immortal immovable objects. 1300 // Pages created during bootstrapping may contain immortal immovable objects.
1230 if (!heap()->deserialization_complete()) p->MarkNeverEvacuate(); 1301 if (!heap()->deserialization_complete()) {
1302 p->MarkNeverEvacuate();
1303 p->SetFlag(Page::NEVER_SWEEP);
1304 }
1231 1305
1232 DCHECK(Capacity() <= heap()->MaxOldGenerationSize()); 1306 DCHECK(Capacity() <= heap()->MaxOldGenerationSize());
1233 1307
1234 p->InsertAfter(anchor_.prev_page()); 1308 p->InsertAfter(anchor_.prev_page());
1235 1309
1236 return true; 1310 return true;
1237 } 1311 }
1238 1312
1239 1313
1240 int PagedSpace::CountTotalPages() { 1314 int PagedSpace::CountTotalPages() {
(...skipping 56 matching lines...)
1297 } 1371 }
1298 Free(current_top, old_linear_size); 1372 Free(current_top, old_linear_size);
1299 } 1373 }
1300 1374
1301 void PagedSpace::IncreaseCapacity(int size) { 1375 void PagedSpace::IncreaseCapacity(int size) {
1302 accounting_stats_.ExpandSpace(size); 1376 accounting_stats_.ExpandSpace(size);
1303 } 1377 }
1304 1378
1305 void PagedSpace::ReleasePage(Page* page) { 1379 void PagedSpace::ReleasePage(Page* page) {
1306 DCHECK_EQ(page->LiveBytes(), 0); 1380 DCHECK_EQ(page->LiveBytes(), 0);
1307 DCHECK_EQ(AreaSize(), page->area_size());
1308 DCHECK_EQ(page->owner(), this); 1381 DCHECK_EQ(page->owner(), this);
1309 1382
1310 free_list_.EvictFreeListItems(page); 1383 free_list_.EvictFreeListItems(page);
1311 DCHECK(!free_list_.ContainsPageFreeListItems(page)); 1384 DCHECK(!free_list_.ContainsPageFreeListItems(page));
1312 1385
1313 if (Page::FromAllocationAreaAddress(allocation_info_.top()) == page) { 1386 if (Page::FromAllocationAreaAddress(allocation_info_.top()) == page) {
1314 allocation_info_.Reset(nullptr, nullptr); 1387 allocation_info_.Reset(nullptr, nullptr);
1315 } 1388 }
1316 1389
1317 // If page is still in a list, unlink it from that list. 1390 // If page is still in a list, unlink it from that list.
1318 if (page->next_chunk() != NULL) { 1391 if (page->next_chunk() != NULL) {
1319 DCHECK(page->prev_chunk() != NULL); 1392 DCHECK(page->prev_chunk() != NULL);
1320 page->Unlink(); 1393 page->Unlink();
1321 } 1394 }
1322 1395
1323 AccountUncommitted(static_cast<intptr_t>(page->size())); 1396 AccountUncommitted(static_cast<intptr_t>(page->size()));
1397 accounting_stats_.ShrinkSpace(page->area_size());
1324 heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page); 1398 heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
1325
1326 DCHECK(Capacity() > 0);
1327 accounting_stats_.ShrinkSpace(AreaSize());
1328 } 1399 }
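Using page->area_size() rather than the space-wide AreaSize() in the accounting above matters once pages can be shrunk; a brief illustration with assumed numbers:

    // Assumed numbers, not from the CL: a bootstrap page shrunk by 299008
    // bytes during ShrinkImmortalImmovablePages() now contributes that much
    // less capacity, so releasing it must shrink the stats by
    // page->area_size(), not by the default AreaSize().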
1329 1400
1330 #ifdef DEBUG 1401 #ifdef DEBUG
1331 void PagedSpace::Print() {} 1402 void PagedSpace::Print() {}
1332 #endif 1403 #endif
1333 1404
1334 #ifdef VERIFY_HEAP 1405 #ifdef VERIFY_HEAP
1335 void PagedSpace::Verify(ObjectVisitor* visitor) { 1406 void PagedSpace::Verify(ObjectVisitor* visitor) {
1336 bool allocation_pointer_found_in_space = 1407 bool allocation_pointer_found_in_space =
1337 (allocation_info_.top() == allocation_info_.limit()); 1408 (allocation_info_.top() == allocation_info_.limit());
(...skipping 1794 matching lines...)
3132 object->ShortPrint(); 3203 object->ShortPrint();
3133 PrintF("\n"); 3204 PrintF("\n");
3134 } 3205 }
3135 printf(" --------------------------------------\n"); 3206 printf(" --------------------------------------\n");
3136 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); 3207 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
3137 } 3208 }
3138 3209
3139 #endif // DEBUG 3210 #endif // DEBUG
3140 } // namespace internal 3211 } // namespace internal
3141 } // namespace v8 3212 } // namespace v8
