Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(1)

Side by Side Diff: src/heap/spaces.cc

Issue 2406363002: [heap] Use size_t in free list and evacuation candidate selection. (Closed)
Patch Set: fix typo Created 4 years, 2 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « src/heap/spaces.h ('k') | src/heap/spaces-inl.h » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2011 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "src/heap/spaces.h" 5 #include "src/heap/spaces.h"
6 6
7 #include <utility> 7 #include <utility>
8 8
9 #include "src/base/bits.h" 9 #include "src/base/bits.h"
10 #include "src/base/platform/platform.h" 10 #include "src/base/platform/platform.h"
(...skipping 758 matching lines...) Expand 10 before | Expand all | Expand 10 after
769 return MemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end, 769 return MemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end,
770 executable, owner, &reservation); 770 executable, owner, &reservation);
771 } 771 }
772 772
773 773
774 void Page::ResetFreeListStatistics() { 774 void Page::ResetFreeListStatistics() {
775 wasted_memory_ = 0; 775 wasted_memory_ = 0;
776 available_in_free_list_ = 0; 776 available_in_free_list_ = 0;
777 } 777 }
778 778
779 size_t Page::AvailableInFreeList() {
780 size_t sum = 0;
781 ForAllFreeListCategories([this, &sum](FreeListCategory* category) {
782 sum += category->available();
783 });
784 return sum;
785 }
786
779 size_t Page::ShrinkToHighWaterMark() { 787 size_t Page::ShrinkToHighWaterMark() {
780 // Shrink pages to high water mark. The water mark points either to a filler 788 // Shrink pages to high water mark. The water mark points either to a filler
781 // or the area_end. 789 // or the area_end.
782 HeapObject* filler = HeapObject::FromAddress(HighWaterMark()); 790 HeapObject* filler = HeapObject::FromAddress(HighWaterMark());
783 if (filler->address() == area_end()) return 0; 791 if (filler->address() == area_end()) return 0;
784 CHECK(filler->IsFiller()); 792 CHECK(filler->IsFiller());
785 if (!filler->IsFreeSpace()) return 0; 793 if (!filler->IsFreeSpace()) return 0;
786 794
787 #ifdef DEBUG 795 #ifdef DEBUG
788 // Check that the filler is indeed the last filler on the page. 796
(...skipping 449 matching lines...) Expand 10 before | Expand all | Expand 10 after
1238 for (auto it = other->begin(); it != other->end();) { 1246 for (auto it = other->begin(); it != other->end();) {
1239 Page* p = *(it++); 1247 Page* p = *(it++);
1240 1248
1241 // Relinking requires the category to be unlinked. 1249 // Relinking requires the category to be unlinked.
1242 other->UnlinkFreeListCategories(p); 1250 other->UnlinkFreeListCategories(p);
1243 1251
1244 p->Unlink(); 1252 p->Unlink();
1245 p->set_owner(this); 1253 p->set_owner(this);
1246 p->InsertAfter(anchor_.prev_page()); 1254 p->InsertAfter(anchor_.prev_page());
1247 RelinkFreeListCategories(p); 1255 RelinkFreeListCategories(p);
1256 DCHECK_EQ(p->AvailableInFreeList(), p->available_in_free_list());
1248 } 1257 }
1249 } 1258 }
1250 1259
1251 1260
1252 size_t PagedSpace::CommittedPhysicalMemory() { 1261 size_t PagedSpace::CommittedPhysicalMemory() {
1253 if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory(); 1262 if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
1254 MemoryChunk::UpdateHighWaterMark(allocation_info_.top()); 1263 MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
1255 size_t size = 0; 1264 size_t size = 0;
1256 for (Page* page : *this) { 1265 for (Page* page : *this) {
1257 size += page->CommittedPhysicalMemory(); 1266 size += page->CommittedPhysicalMemory();
(...skipping 124 matching lines...) Expand 10 before | Expand all | Expand 10 after
1382 1391
1383 // Clear the bits in the unused black area. 1392 // Clear the bits in the unused black area.
1384 if (current_top != current_limit) { 1393 if (current_top != current_limit) {
1385 page->markbits()->ClearRange(page->AddressToMarkbitIndex(current_top), 1394 page->markbits()->ClearRange(page->AddressToMarkbitIndex(current_top),
1386 page->AddressToMarkbitIndex(current_limit)); 1395 page->AddressToMarkbitIndex(current_limit));
1387 page->IncrementLiveBytes(-static_cast<int>(current_limit - current_top)); 1396 page->IncrementLiveBytes(-static_cast<int>(current_limit - current_top));
1388 } 1397 }
1389 } 1398 }
1390 1399
1391 SetTopAndLimit(NULL, NULL); 1400 SetTopAndLimit(NULL, NULL);
1392 Free(current_top, static_cast<int>(current_limit - current_top)); 1401 DCHECK_GE(current_limit, current_top);
1402 Free(current_top, current_limit - current_top);
1393 } 1403 }
1394 1404
1395 void PagedSpace::IncreaseCapacity(size_t bytes) { 1405 void PagedSpace::IncreaseCapacity(size_t bytes) {
1396 accounting_stats_.ExpandSpace(bytes); 1406 accounting_stats_.ExpandSpace(bytes);
1397 } 1407 }
1398 1408
1399 void PagedSpace::ReleasePage(Page* page) { 1409 void PagedSpace::ReleasePage(Page* page) {
1400 DCHECK_EQ(page->LiveBytes(), 0); 1410 DCHECK_EQ(page->LiveBytes(), 0);
1401 DCHECK_EQ(page->owner(), this); 1411 DCHECK_EQ(page->owner(), this);
1402 1412
(...skipping 189 matching lines...) Expand 10 before | Expand all | Expand 10 after
1592 current_page = 1602 current_page =
1593 heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>( 1603 heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
1594 Page::kAllocatableMemory, this, executable()); 1604 Page::kAllocatableMemory, this, executable());
1595 if (current_page == nullptr) return false; 1605 if (current_page == nullptr) return false;
1596 DCHECK_NOT_NULL(current_page); 1606 DCHECK_NOT_NULL(current_page);
1597 current_page->InsertAfter(anchor()); 1607 current_page->InsertAfter(anchor());
1598 current_page->ClearLiveness(); 1608 current_page->ClearLiveness();
1599 current_page->SetFlags(anchor()->prev_page()->GetFlags(), 1609 current_page->SetFlags(anchor()->prev_page()->GetFlags(),
1600 Page::kCopyAllFlags); 1610 Page::kCopyAllFlags);
1601 heap()->CreateFillerObjectAt(current_page->area_start(), 1611 heap()->CreateFillerObjectAt(current_page->area_start(),
1602 current_page->area_size(), 1612 static_cast<int>(current_page->area_size()),
1603 ClearRecordedSlots::kNo); 1613 ClearRecordedSlots::kNo);
1604 } 1614 }
1605 } 1615 }
1606 return true; 1616 return true;
1607 } 1617 }
1608 1618
1609 void LocalAllocationBuffer::Close() { 1619 void LocalAllocationBuffer::Close() {
1610 if (IsValid()) { 1620 if (IsValid()) {
1611 heap_->CreateFillerObjectAt( 1621 heap_->CreateFillerObjectAt(
1612 allocation_info_.top(), 1622 allocation_info_.top(),
(...skipping 718 matching lines...) Expand 10 before | Expand all | Expand 10 after
2331 // Free lists for old object spaces implementation 2341 // Free lists for old object spaces implementation
2332 2342
2333 2343
2334 void FreeListCategory::Reset() { 2344 void FreeListCategory::Reset() {
2335 set_top(nullptr); 2345 set_top(nullptr);
2336 set_prev(nullptr); 2346 set_prev(nullptr);
2337 set_next(nullptr); 2347 set_next(nullptr);
2338 available_ = 0; 2348 available_ = 0;
2339 } 2349 }
2340 2350
2341 FreeSpace* FreeListCategory::PickNodeFromList(int* node_size) { 2351 FreeSpace* FreeListCategory::PickNodeFromList(size_t* node_size) {
2342 DCHECK(page()->CanAllocate()); 2352 DCHECK(page()->CanAllocate());
2343 2353
2344 FreeSpace* node = top(); 2354 FreeSpace* node = top();
2345 if (node == nullptr) return nullptr; 2355 if (node == nullptr) return nullptr;
2346 set_top(node->next()); 2356 set_top(node->next());
2347 *node_size = node->Size(); 2357 *node_size = node->Size();
2348 available_ -= *node_size; 2358 available_ -= *node_size;
2349 return node; 2359 return node;
2350 } 2360 }
2351 2361
2352 FreeSpace* FreeListCategory::TryPickNodeFromList(int minimum_size, 2362 FreeSpace* FreeListCategory::TryPickNodeFromList(size_t minimum_size,
2353 int* node_size) { 2363 size_t* node_size) {
2354 DCHECK(page()->CanAllocate()); 2364 DCHECK(page()->CanAllocate());
2355 2365
2356 FreeSpace* node = PickNodeFromList(node_size); 2366 FreeSpace* node = PickNodeFromList(node_size);
2357 if ((node != nullptr) && (*node_size < minimum_size)) { 2367 if ((node != nullptr) && (*node_size < minimum_size)) {
2358 Free(node, *node_size, kLinkCategory); 2368 Free(node, *node_size, kLinkCategory);
2359 *node_size = 0; 2369 *node_size = 0;
2360 return nullptr; 2370 return nullptr;
2361 } 2371 }
2362 return node; 2372 return node;
2363 } 2373 }
2364 2374
2365 FreeSpace* FreeListCategory::SearchForNodeInList(int minimum_size, 2375 FreeSpace* FreeListCategory::SearchForNodeInList(size_t minimum_size,
2366 int* node_size) { 2376 size_t* node_size) {
2367 DCHECK(page()->CanAllocate()); 2377 DCHECK(page()->CanAllocate());
2368 2378
2369 FreeSpace* prev_non_evac_node = nullptr; 2379 FreeSpace* prev_non_evac_node = nullptr;
2370 for (FreeSpace* cur_node = top(); cur_node != nullptr; 2380 for (FreeSpace* cur_node = top(); cur_node != nullptr;
2371 cur_node = cur_node->next()) { 2381 cur_node = cur_node->next()) {
2372 int size = cur_node->size(); 2382 size_t size = cur_node->size();
2373 if (size >= minimum_size) { 2383 if (size >= minimum_size) {
2384 DCHECK_GE(available_, size);
2374 available_ -= size; 2385 available_ -= size;
2375 if (cur_node == top()) { 2386 if (cur_node == top()) {
2376 set_top(cur_node->next()); 2387 set_top(cur_node->next());
2377 } 2388 }
2378 if (prev_non_evac_node != nullptr) { 2389 if (prev_non_evac_node != nullptr) {
2379 prev_non_evac_node->set_next(cur_node->next()); 2390 prev_non_evac_node->set_next(cur_node->next());
2380 } 2391 }
2381 *node_size = size; 2392 *node_size = size;
2382 return cur_node; 2393 return cur_node;
2383 } 2394 }
2384 2395
2385 prev_non_evac_node = cur_node; 2396 prev_non_evac_node = cur_node;
2386 } 2397 }
2387 return nullptr; 2398 return nullptr;
2388 } 2399 }
2389 2400
2390 bool FreeListCategory::Free(FreeSpace* free_space, int size_in_bytes, 2401 bool FreeListCategory::Free(FreeSpace* free_space, size_t size_in_bytes,
2391 FreeMode mode) { 2402 FreeMode mode) {
2392 if (!page()->CanAllocate()) return false; 2403 if (!page()->CanAllocate()) return false;
2393 2404
2394 free_space->set_next(top()); 2405 free_space->set_next(top());
2395 set_top(free_space); 2406 set_top(free_space);
2396 available_ += size_in_bytes; 2407 available_ += size_in_bytes;
2397 if ((mode == kLinkCategory) && (prev() == nullptr) && (next() == nullptr)) { 2408 if ((mode == kLinkCategory) && (prev() == nullptr) && (next() == nullptr)) {
2398 owner()->AddCategory(this); 2409 owner()->AddCategory(this);
2399 } 2410 }
2400 return true; 2411 return true;
(...skipping 12 matching lines...) Expand all
2413 n = n->next(); 2424 n = n->next();
2414 } 2425 }
2415 } 2426 }
2416 2427
2417 void FreeListCategory::Relink() { 2428 void FreeListCategory::Relink() {
2418 DCHECK(!is_linked()); 2429 DCHECK(!is_linked());
2419 owner()->AddCategory(this); 2430 owner()->AddCategory(this);
2420 } 2431 }
2421 2432
2422 void FreeListCategory::Invalidate() { 2433 void FreeListCategory::Invalidate() {
2423 page()->add_available_in_free_list(-available()); 2434 page()->remove_available_in_free_list(available());
2424 Reset(); 2435 Reset();
2425 type_ = kInvalidCategory; 2436 type_ = kInvalidCategory;
2426 } 2437 }
2427 2438
2428 FreeList::FreeList(PagedSpace* owner) : owner_(owner), wasted_bytes_(0) { 2439 FreeList::FreeList(PagedSpace* owner) : owner_(owner), wasted_bytes_(0) {
2429 for (int i = kFirstCategory; i < kNumberOfCategories; i++) { 2440 for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
2430 categories_[i] = nullptr; 2441 categories_[i] = nullptr;
2431 } 2442 }
2432 Reset(); 2443 Reset();
2433 } 2444 }
2434 2445
2435 2446
2436 void FreeList::Reset() { 2447 void FreeList::Reset() {
2437 ForAllFreeListCategories( 2448 ForAllFreeListCategories(
2438 [](FreeListCategory* category) { category->Reset(); }); 2449 [](FreeListCategory* category) { category->Reset(); });
2439 for (int i = kFirstCategory; i < kNumberOfCategories; i++) { 2450 for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
2440 categories_[i] = nullptr; 2451 categories_[i] = nullptr;
2441 } 2452 }
2442 ResetStats(); 2453 ResetStats();
2443 } 2454 }
2444 2455
2445 int FreeList::Free(Address start, int size_in_bytes, FreeMode mode) { 2456 size_t FreeList::Free(Address start, size_t size_in_bytes, FreeMode mode) {
2446 if (size_in_bytes == 0) return 0; 2457 if (size_in_bytes == 0) return 0;
2447 2458
2448 owner()->heap()->CreateFillerObjectAt(start, size_in_bytes, 2459 owner()->heap()->CreateFillerObjectAt(start, static_cast<int>(size_in_bytes),
2449 ClearRecordedSlots::kNo); 2460 ClearRecordedSlots::kNo);
2450 2461
2451 Page* page = Page::FromAddress(start); 2462 Page* page = Page::FromAddress(start);
2452 2463
2453 // Blocks have to be a minimum size to hold free list items. 2464 // Blocks have to be a minimum size to hold free list items.
2454 if (size_in_bytes < kMinBlockSize) { 2465 if (size_in_bytes < kMinBlockSize) {
2455 page->add_wasted_memory(size_in_bytes); 2466 page->add_wasted_memory(size_in_bytes);
2456 wasted_bytes_.Increment(size_in_bytes); 2467 wasted_bytes_.Increment(size_in_bytes);
2457 return size_in_bytes; 2468 return size_in_bytes;
2458 } 2469 }
2459 2470
2460 FreeSpace* free_space = FreeSpace::cast(HeapObject::FromAddress(start)); 2471 FreeSpace* free_space = FreeSpace::cast(HeapObject::FromAddress(start));
2461 // Insert other blocks at the head of a free list of the appropriate 2472 // Insert other blocks at the head of a free list of the appropriate
2462 // magnitude. 2473 // magnitude.
2463 FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes); 2474 FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
2464 if (page->free_list_category(type)->Free(free_space, size_in_bytes, mode)) { 2475 if (page->free_list_category(type)->Free(free_space, size_in_bytes, mode)) {
2465 page->add_available_in_free_list(size_in_bytes); 2476 page->add_available_in_free_list(size_in_bytes);
2466 } 2477 }
2478 DCHECK_EQ(page->AvailableInFreeList(), page->available_in_free_list());
2467 return 0; 2479 return 0;
2468 } 2480 }
2469 2481
2470 FreeSpace* FreeList::FindNodeIn(FreeListCategoryType type, int* node_size) { 2482 FreeSpace* FreeList::FindNodeIn(FreeListCategoryType type, size_t* node_size) {
2471 FreeListCategoryIterator it(this, type); 2483 FreeListCategoryIterator it(this, type);
2472 FreeSpace* node = nullptr; 2484 FreeSpace* node = nullptr;
2473 while (it.HasNext()) { 2485 while (it.HasNext()) {
2474 FreeListCategory* current = it.Next(); 2486 FreeListCategory* current = it.Next();
2475 node = current->PickNodeFromList(node_size); 2487 node = current->PickNodeFromList(node_size);
2476 if (node != nullptr) { 2488 if (node != nullptr) {
2477 Page::FromAddress(node->address()) 2489 Page::FromAddress(node->address())
2478 ->add_available_in_free_list(-(*node_size)); 2490 ->remove_available_in_free_list(*node_size);
2479 DCHECK(IsVeryLong() || Available() == SumFreeLists()); 2491 DCHECK(IsVeryLong() || Available() == SumFreeLists());
2480 return node; 2492 return node;
2481 } 2493 }
2482 RemoveCategory(current); 2494 RemoveCategory(current);
2483 } 2495 }
2484 return node; 2496 return node;
2485 } 2497 }
2486 2498
2487 FreeSpace* FreeList::TryFindNodeIn(FreeListCategoryType type, int* node_size, 2499 FreeSpace* FreeList::TryFindNodeIn(FreeListCategoryType type, size_t* node_size,
2488 int minimum_size) { 2500 size_t minimum_size) {
2489 if (categories_[type] == nullptr) return nullptr; 2501 if (categories_[type] == nullptr) return nullptr;
2490 FreeSpace* node = 2502 FreeSpace* node =
2491 categories_[type]->TryPickNodeFromList(minimum_size, node_size); 2503 categories_[type]->TryPickNodeFromList(minimum_size, node_size);
2492 if (node != nullptr) { 2504 if (node != nullptr) {
2493 Page::FromAddress(node->address()) 2505 Page::FromAddress(node->address())
2494 ->add_available_in_free_list(-(*node_size)); 2506 ->remove_available_in_free_list(*node_size);
2495 DCHECK(IsVeryLong() || Available() == SumFreeLists()); 2507 DCHECK(IsVeryLong() || Available() == SumFreeLists());
2496 } 2508 }
2497 return node; 2509 return node;
2498 } 2510 }
2499 2511
2500 FreeSpace* FreeList::SearchForNodeInList(FreeListCategoryType type, 2512 FreeSpace* FreeList::SearchForNodeInList(FreeListCategoryType type,
2501 int* node_size, int minimum_size) { 2513 size_t* node_size,
2514 size_t minimum_size) {
2502 FreeListCategoryIterator it(this, type); 2515 FreeListCategoryIterator it(this, type);
2503 FreeSpace* node = nullptr; 2516 FreeSpace* node = nullptr;
2504 while (it.HasNext()) { 2517 while (it.HasNext()) {
2505 FreeListCategory* current = it.Next(); 2518 FreeListCategory* current = it.Next();
2506 node = current->SearchForNodeInList(minimum_size, node_size); 2519 node = current->SearchForNodeInList(minimum_size, node_size);
2507 if (node != nullptr) { 2520 if (node != nullptr) {
2508 Page::FromAddress(node->address()) 2521 Page::FromAddress(node->address())
2509 ->add_available_in_free_list(-(*node_size)); 2522 ->remove_available_in_free_list(*node_size);
2510 DCHECK(IsVeryLong() || Available() == SumFreeLists()); 2523 DCHECK(IsVeryLong() || Available() == SumFreeLists());
2511 return node; 2524 return node;
2512 } 2525 }
2513 if (current->is_empty()) { 2526 if (current->is_empty()) {
2514 RemoveCategory(current); 2527 RemoveCategory(current);
2515 } 2528 }
2516 } 2529 }
2517 return node; 2530 return node;
2518 } 2531 }
2519 2532
2520 FreeSpace* FreeList::FindNodeFor(int size_in_bytes, int* node_size) { 2533 FreeSpace* FreeList::FindNodeFor(size_t size_in_bytes, size_t* node_size) {
2521 FreeSpace* node = nullptr; 2534 FreeSpace* node = nullptr;
2522 2535
2523 // First try the allocation fast path: try to allocate the minimum element 2536 // First try the allocation fast path: try to allocate the minimum element
2524 // size of a free list category. This operation is constant time. 2537 // size of a free list category. This operation is constant time.
2525 FreeListCategoryType type = 2538 FreeListCategoryType type =
2526 SelectFastAllocationFreeListCategoryType(size_in_bytes); 2539 SelectFastAllocationFreeListCategoryType(size_in_bytes);
2527 for (int i = type; i < kHuge; i++) { 2540 for (int i = type; i < kHuge; i++) {
2528 node = FindNodeIn(static_cast<FreeListCategoryType>(i), node_size); 2541 node = FindNodeIn(static_cast<FreeListCategoryType>(i), node_size);
2529 if (node != nullptr) return node; 2542 if (node != nullptr) return node;
2530 } 2543 }
(...skipping 16 matching lines...) Expand all
2547 node = TryFindNodeIn(type, node_size, size_in_bytes); 2560 node = TryFindNodeIn(type, node_size, size_in_bytes);
2548 2561
2549 DCHECK(IsVeryLong() || Available() == SumFreeLists()); 2562 DCHECK(IsVeryLong() || Available() == SumFreeLists());
2550 return node; 2563 return node;
2551 } 2564 }
2552 2565
2553 // Allocation on the old space free list. If it succeeds then a new linear 2566 // Allocation on the old space free list. If it succeeds then a new linear
2554 // allocation space has been set up with the top and limit of the space. If 2567 // allocation space has been set up with the top and limit of the space. If
2555 // the allocation fails then NULL is returned, and the caller can perform a GC 2568 // the allocation fails then NULL is returned, and the caller can perform a GC
2556 // or allocate a new page before retrying. 2569 // or allocate a new page before retrying.
2557 HeapObject* FreeList::Allocate(int size_in_bytes) { 2570 HeapObject* FreeList::Allocate(size_t size_in_bytes) {
2558 DCHECK(0 < size_in_bytes);
2559 DCHECK(size_in_bytes <= kMaxBlockSize); 2571 DCHECK(size_in_bytes <= kMaxBlockSize);
2560 DCHECK(IsAligned(size_in_bytes, kPointerSize)); 2572 DCHECK(IsAligned(size_in_bytes, kPointerSize));
2561 // Don't free list allocate if there is linear space available. 2573 // Don't free list allocate if there is linear space available.
2562 DCHECK(owner_->limit() - owner_->top() < size_in_bytes); 2574 DCHECK_LT(static_cast<size_t>(owner_->limit() - owner_->top()),
2575 size_in_bytes);
2563 2576
2564 // Mark the old linear allocation area with a free space map so it can be 2577 // Mark the old linear allocation area with a free space map so it can be
2565 // skipped when scanning the heap. This also puts it back in the free list 2578 // skipped when scanning the heap. This also puts it back in the free list
2566 // if it is big enough. 2579 // if it is big enough.
2567 owner_->EmptyAllocationInfo(); 2580 owner_->EmptyAllocationInfo();
2568 2581
2569 owner_->heap()->StartIncrementalMarkingIfAllocationLimitIsReached( 2582 owner_->heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
2570 Heap::kNoGCFlags, kNoGCCallbackFlags); 2583 Heap::kNoGCFlags, kNoGCCallbackFlags);
2571 2584
2572 int new_node_size = 0; 2585 size_t new_node_size = 0;
2573 FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size); 2586 FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size);
2574 if (new_node == nullptr) return nullptr; 2587 if (new_node == nullptr) return nullptr;
2575 2588
2576 int bytes_left = new_node_size - size_in_bytes; 2589 DCHECK_GE(new_node_size, size_in_bytes);
2577 DCHECK(bytes_left >= 0); 2590 size_t bytes_left = new_node_size - size_in_bytes;
2578 2591
2579 #ifdef DEBUG 2592 #ifdef DEBUG
2580 for (int i = 0; i < size_in_bytes / kPointerSize; i++) { 2593 for (size_t i = 0; i < size_in_bytes / kPointerSize; i++) {
2581 reinterpret_cast<Object**>(new_node->address())[i] = 2594 reinterpret_cast<Object**>(new_node->address())[i] =
2582 Smi::FromInt(kCodeZapValue); 2595 Smi::FromInt(kCodeZapValue);
2583 } 2596 }
2584 #endif 2597 #endif
2585 2598
2586 // The old-space-step might have finished sweeping and restarted marking. 2599 // The old-space-step might have finished sweeping and restarted marking.
2587 // Verify that it did not turn the page of the new node into an evacuation 2600 // Verify that it did not turn the page of the new node into an evacuation
2588 // candidate. 2601 // candidate.
2589 DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node)); 2602 DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
2590 2603
2591 const int kThreshold = IncrementalMarking::kAllocatedThreshold; 2604 const size_t kThreshold = IncrementalMarking::kAllocatedThreshold;
2592 2605
2593 // Memory in the linear allocation area is counted as allocated. We may free 2606 // Memory in the linear allocation area is counted as allocated. We may free
2594 // a little of this again immediately - see below. 2607 // a little of this again immediately - see below.
2595 owner_->Allocate(new_node_size); 2608 owner_->Allocate(static_cast<int>(new_node_size));
2596 2609
2597 if (owner_->heap()->inline_allocation_disabled()) { 2610 if (owner_->heap()->inline_allocation_disabled()) {
2598 // Keep the linear allocation area empty if requested to do so, just 2611 // Keep the linear allocation area empty if requested to do so, just
2599 // return area back to the free list instead. 2612 // return area back to the free list instead.
2600 owner_->Free(new_node->address() + size_in_bytes, bytes_left); 2613 owner_->Free(new_node->address() + size_in_bytes, bytes_left);
2601 owner_->SetAllocationInfo(new_node->address() + size_in_bytes, 2614 owner_->SetAllocationInfo(new_node->address() + size_in_bytes,
2602 new_node->address() + size_in_bytes); 2615 new_node->address() + size_in_bytes);
2603 } else if (bytes_left > kThreshold && 2616 } else if (bytes_left > kThreshold &&
2604 owner_->heap()->incremental_marking()->IsMarkingIncomplete() && 2617 owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
2605 FLAG_incremental_marking) { 2618 FLAG_incremental_marking) {
2606 int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold); 2619 size_t linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
2607 // We don't want to give too large linear areas to the allocator while 2620 // We don't want to give too large linear areas to the allocator while
2608 // incremental marking is going on, because we won't check again whether 2621 // incremental marking is going on, because we won't check again whether
2609 // we want to do another increment until the linear area is used up. 2622 // we want to do another increment until the linear area is used up.
2623 DCHECK_GE(new_node_size, size_in_bytes + linear_size);
2610 owner_->Free(new_node->address() + size_in_bytes + linear_size, 2624 owner_->Free(new_node->address() + size_in_bytes + linear_size,
2611 new_node_size - size_in_bytes - linear_size); 2625 new_node_size - size_in_bytes - linear_size);
2612 owner_->SetAllocationInfo( 2626 owner_->SetAllocationInfo(
2613 new_node->address() + size_in_bytes, 2627 new_node->address() + size_in_bytes,
2614 new_node->address() + size_in_bytes + linear_size); 2628 new_node->address() + size_in_bytes + linear_size);
2615 } else { 2629 } else {
2616 DCHECK(bytes_left >= 0);
2617 // Normally we give the rest of the node to the allocator as its new 2630 // Normally we give the rest of the node to the allocator as its new
2618 // linear allocation area. 2631 // linear allocation area.
2619 owner_->SetAllocationInfo(new_node->address() + size_in_bytes, 2632 owner_->SetAllocationInfo(new_node->address() + size_in_bytes,
2620 new_node->address() + new_node_size); 2633 new_node->address() + new_node_size);
2621 } 2634 }
2622 2635
2623 return new_node; 2636 return new_node;
2624 } 2637 }
2625 2638
2626 intptr_t FreeList::EvictFreeListItems(Page* page) { 2639 size_t FreeList::EvictFreeListItems(Page* page) {
2627 intptr_t sum = 0; 2640 size_t sum = 0;
2628 page->ForAllFreeListCategories( 2641 page->ForAllFreeListCategories(
2629 [this, &sum, page](FreeListCategory* category) { 2642 [this, &sum, page](FreeListCategory* category) {
2630 DCHECK_EQ(this, category->owner()); 2643 DCHECK_EQ(this, category->owner());
2631 sum += category->available(); 2644 sum += category->available();
2632 RemoveCategory(category); 2645 RemoveCategory(category);
2633 category->Invalidate(); 2646 category->Invalidate();
2634 }); 2647 });
2635 return sum; 2648 return sum;
2636 } 2649 }
2637 2650
(...skipping 53 matching lines...) Expand 10 before | Expand all | Expand 10 after
2691 static_cast<void*>(categories_[type]), type); 2704 static_cast<void*>(categories_[type]), type);
2692 while (it.HasNext()) { 2705 while (it.HasNext()) {
2693 FreeListCategory* current = it.Next(); 2706 FreeListCategory* current = it.Next();
2694 PrintF("%p -> ", static_cast<void*>(current)); 2707 PrintF("%p -> ", static_cast<void*>(current));
2695 } 2708 }
2696 PrintF("null\n"); 2709 PrintF("null\n");
2697 } 2710 }
2698 2711
2699 2712
2700 #ifdef DEBUG 2713 #ifdef DEBUG
2701 intptr_t FreeListCategory::SumFreeList() { 2714 size_t FreeListCategory::SumFreeList() {
2702 intptr_t sum = 0; 2715 size_t sum = 0;
2703 FreeSpace* cur = top(); 2716 FreeSpace* cur = top();
2704 while (cur != NULL) { 2717 while (cur != NULL) {
2705 DCHECK(cur->map() == cur->GetHeap()->root(Heap::kFreeSpaceMapRootIndex)); 2718 DCHECK(cur->map() == cur->GetHeap()->root(Heap::kFreeSpaceMapRootIndex));
2706 sum += cur->nobarrier_size(); 2719 sum += cur->nobarrier_size();
2707 cur = cur->next(); 2720 cur = cur->next();
2708 } 2721 }
2709 return sum; 2722 return sum;
2710 } 2723 }
2711 2724
2712 int FreeListCategory::FreeListLength() { 2725 int FreeListCategory::FreeListLength() {
(...skipping 16 matching lines...) Expand all
2729 if (len >= FreeListCategory::kVeryLongFreeList) return true; 2742 if (len >= FreeListCategory::kVeryLongFreeList) return true;
2730 } 2743 }
2731 } 2744 }
2732 return false; 2745 return false;
2733 } 2746 }
2734 2747
2735 2748
2736 // This can take a very long time because it is linear in the number of entries 2749 // This can take a very long time because it is linear in the number of entries
2737 // on the free list, so it should not be called if FreeListLength returns 2750 // on the free list, so it should not be called if FreeListLength returns
2738 // kVeryLongFreeList. 2751 // kVeryLongFreeList.
2739 intptr_t FreeList::SumFreeLists() { 2752 size_t FreeList::SumFreeLists() {
2740 intptr_t sum = 0; 2753 size_t sum = 0;
2741 ForAllFreeListCategories( 2754 ForAllFreeListCategories(
2742 [&sum](FreeListCategory* category) { sum += category->SumFreeList(); }); 2755 [&sum](FreeListCategory* category) { sum += category->SumFreeList(); });
2743 return sum; 2756 return sum;
2744 } 2757 }
2745 #endif 2758 #endif
2746 2759
2747 2760
2748 // ----------------------------------------------------------------------------- 2761 // -----------------------------------------------------------------------------
2749 // OldSpace implementation 2762 // OldSpace implementation
2750 2763
(...skipping 18 matching lines...) Expand all
2769 2782
2770 // After we have booted, we have created a map which represents free space 2783 // After we have booted, we have created a map which represents free space
2771 // on the heap. If there was already a free list then the elements on it 2784 // on the heap. If there was already a free list then the elements on it
2772 // were created with the wrong FreeSpaceMap (normally NULL), so we need to 2785 // were created with the wrong FreeSpaceMap (normally NULL), so we need to
2773 // fix them. 2786 // fix them.
2774 void PagedSpace::RepairFreeListsAfterDeserialization() { 2787 void PagedSpace::RepairFreeListsAfterDeserialization() {
2775 free_list_.RepairLists(heap()); 2788 free_list_.RepairLists(heap());
2776 // Each page may have a small free space that is not tracked by a free list. 2789 // Each page may have a small free space that is not tracked by a free list.
2777 // Update the maps for those free space objects. 2790 // Update the maps for those free space objects.
2778 for (Page* page : *this) { 2791 for (Page* page : *this) {
2779 int size = static_cast<int>(page->wasted_memory()); 2792 size_t size = page->wasted_memory();
2780 if (size == 0) continue; 2793 if (size == 0) continue;
2794 DCHECK_GE(static_cast<size_t>(Page::kPageSize), size);
2781 Address address = page->OffsetToAddress(Page::kPageSize - size); 2795 Address address = page->OffsetToAddress(Page::kPageSize - size);
2782 heap()->CreateFillerObjectAt(address, size, ClearRecordedSlots::kNo); 2796 heap()->CreateFillerObjectAt(address, static_cast<int>(size),
2797 ClearRecordedSlots::kNo);
2783 } 2798 }
2784 } 2799 }
2785 2800
2786 2801
2787 void PagedSpace::EvictEvacuationCandidatesFromLinearAllocationArea() { 2802 void PagedSpace::EvictEvacuationCandidatesFromLinearAllocationArea() {
2788 if (allocation_info_.top() >= allocation_info_.limit()) return; 2803 if (allocation_info_.top() >= allocation_info_.limit()) return;
2789 2804
2790 if (!Page::FromAllocationAreaAddress(allocation_info_.top())->CanAllocate()) { 2805 if (!Page::FromAllocationAreaAddress(allocation_info_.top())->CanAllocate()) {
2791 // Create filler object to keep page iterable if it was iterable. 2806 // Create filler object to keep page iterable if it was iterable.
2792 int remaining = 2807 int remaining =
(...skipping 21 matching lines...) Expand all
2814 2829
2815 HeapObject* CompactionSpace::SweepAndRetryAllocation(int size_in_bytes) { 2830 HeapObject* CompactionSpace::SweepAndRetryAllocation(int size_in_bytes) {
2816 MarkCompactCollector* collector = heap()->mark_compact_collector(); 2831 MarkCompactCollector* collector = heap()->mark_compact_collector();
2817 if (collector->sweeping_in_progress()) { 2832 if (collector->sweeping_in_progress()) {
2818 collector->SweepAndRefill(this); 2833 collector->SweepAndRefill(this);
2819 return free_list_.Allocate(size_in_bytes); 2834 return free_list_.Allocate(size_in_bytes);
2820 } 2835 }
2821 return nullptr; 2836 return nullptr;
2822 } 2837 }
2823 2838
2824
2825 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) { 2839 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
2826 const int kMaxPagesToSweep = 1; 2840 const int kMaxPagesToSweep = 1;
2827 2841
2828 // Allocation in this space has failed. 2842 // Allocation in this space has failed.
2829 2843
2830 MarkCompactCollector* collector = heap()->mark_compact_collector(); 2844 MarkCompactCollector* collector = heap()->mark_compact_collector();
2831 // Sweeping is still in progress. 2845 // Sweeping is still in progress.
2832 if (collector->sweeping_in_progress()) { 2846 if (collector->sweeping_in_progress()) {
2833 // First try to refill the free-list, concurrent sweeper threads 2847 // First try to refill the free-list, concurrent sweeper threads
2834 // may have freed some objects in the meantime. 2848 // may have freed some objects in the meantime.
2835 RefillFreeList(); 2849 RefillFreeList();
2836 2850
2837 // Retry the free list allocation. 2851 // Retry the free list allocation.
2838 HeapObject* object = free_list_.Allocate(size_in_bytes); 2852 HeapObject* object =
2853 free_list_.Allocate(static_cast<size_t>(size_in_bytes));
2839 if (object != NULL) return object; 2854 if (object != NULL) return object;
2840 2855
2841 // If sweeping is still in progress try to sweep pages on the main thread. 2856 // If sweeping is still in progress try to sweep pages on the main thread.
2842 int max_freed = collector->sweeper().ParallelSweepSpace( 2857 int max_freed = collector->sweeper().ParallelSweepSpace(
2843 identity(), size_in_bytes, kMaxPagesToSweep); 2858 identity(), size_in_bytes, kMaxPagesToSweep);
2844 RefillFreeList(); 2859 RefillFreeList();
2845 if (max_freed >= size_in_bytes) { 2860 if (max_freed >= size_in_bytes) {
2846 object = free_list_.Allocate(size_in_bytes); 2861 object = free_list_.Allocate(static_cast<size_t>(size_in_bytes));
2847 if (object != nullptr) return object; 2862 if (object != nullptr) return object;
2848 } 2863 }
2849 } 2864 }
2850 2865
2851 if (heap()->ShouldExpandOldGenerationOnAllocationFailure() && Expand()) { 2866 if (heap()->ShouldExpandOldGenerationOnAllocationFailure() && Expand()) {
2852 DCHECK((CountTotalPages() > 1) || 2867 DCHECK((CountTotalPages() > 1) ||
2853 (size_in_bytes <= free_list_.Available())); 2868 (size_in_bytes <= free_list_.Available()));
2854 return free_list_.Allocate(size_in_bytes); 2869 return free_list_.Allocate(static_cast<size_t>(size_in_bytes));
2855 } 2870 }
2856 2871
2857 // If sweeper threads are active, wait for them at that point and steal 2872 // If sweeper threads are active, wait for them at that point and steal
2858 // elements form their free-lists. Allocation may still fail their which 2873 // elements form their free-lists. Allocation may still fail their which
2859 // would indicate that there is not enough memory for the given allocation. 2874 // would indicate that there is not enough memory for the given allocation.
2860 return SweepAndRetryAllocation(size_in_bytes); 2875 return SweepAndRetryAllocation(size_in_bytes);
2861 } 2876 }
2862 2877
2863 #ifdef DEBUG 2878 #ifdef DEBUG
2864 void PagedSpace::ReportStatistics() { 2879 void PagedSpace::ReportStatistics() {
(...skipping 99 matching lines...) Expand 10 before | Expand all | Expand 10 after
2964 Executability executable) { 2979 Executability executable) {
2965 // Check if we want to force a GC before growing the old space further. 2980 // Check if we want to force a GC before growing the old space further.
2966 // If so, fail the allocation. 2981 // If so, fail the allocation.
2967 if (!heap()->CanExpandOldGeneration(object_size)) { 2982 if (!heap()->CanExpandOldGeneration(object_size)) {
2968 return AllocationResult::Retry(identity()); 2983 return AllocationResult::Retry(identity());
2969 } 2984 }
2970 2985
2971 LargePage* page = heap()->memory_allocator()->AllocateLargePage( 2986 LargePage* page = heap()->memory_allocator()->AllocateLargePage(
2972 object_size, this, executable); 2987 object_size, this, executable);
2973 if (page == NULL) return AllocationResult::Retry(identity()); 2988 if (page == NULL) return AllocationResult::Retry(identity());
2974 DCHECK(page->area_size() >= object_size); 2989 DCHECK_GE(page->area_size(), static_cast<size_t>(object_size));
2975 2990
2976 size_ += static_cast<int>(page->size()); 2991 size_ += static_cast<int>(page->size());
2977 AccountCommitted(page->size()); 2992 AccountCommitted(page->size());
2978 objects_size_ += object_size; 2993 objects_size_ += object_size;
2979 page_count_++; 2994 page_count_++;
2980 page->set_next_page(first_page_); 2995 page->set_next_page(first_page_);
2981 first_page_ = page; 2996 first_page_ = page;
2982 2997
2983 InsertChunkMapEntries(page); 2998 InsertChunkMapEntries(page);
2984 2999
(...skipping 245 matching lines...) Expand 10 before | Expand all | Expand 10 after
3230 object->ShortPrint(); 3245 object->ShortPrint();
3231 PrintF("\n"); 3246 PrintF("\n");
3232 } 3247 }
3233 printf(" --------------------------------------\n"); 3248 printf(" --------------------------------------\n");
3234 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); 3249 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
3235 } 3250 }
3236 3251
3237 #endif // DEBUG 3252 #endif // DEBUG
3238 } // namespace internal 3253 } // namespace internal
3239 } // namespace v8 3254 } // namespace v8
OLDNEW
« no previous file with comments | « src/heap/spaces.h ('k') | src/heap/spaces-inl.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698