Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(898)

Side by Side Diff: src/heap/spaces.cc

Issue 1853783002: [heap] Non-contiguous young generation (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Another round of cleanup/reorganization/adding comments. Created 4 years, 8 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright 2011 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "src/heap/spaces.h" 5 #include "src/heap/spaces.h"
6 6
7 #include "src/base/bits.h" 7 #include "src/base/bits.h"
8 #include "src/base/platform/platform.h" 8 #include "src/base/platform/platform.h"
9 #include "src/full-codegen/full-codegen.h" 9 #include "src/full-codegen/full-codegen.h"
10 #include "src/heap/slot-set.h" 10 #include "src/heap/slot-set.h"
(...skipping 298 matching lines...) Expand 10 before | Expand all | Expand 10 after
309 309
310 size_ = 0; 310 size_ = 0;
311 size_executable_ = 0; 311 size_executable_ = 0;
312 312
313 return true; 313 return true;
314 } 314 }
315 315
316 316
317 void MemoryAllocator::TearDown() { 317 void MemoryAllocator::TearDown() {
318 // Check that spaces were torn down before MemoryAllocator. 318 // Check that spaces were torn down before MemoryAllocator.
319 DCHECK(size_.Value() == 0); 319 DCHECK_EQ(size_.Value(), 0);
320 // TODO(gc) this will be true again when we fix FreeMemory. 320 // TODO(gc) this will be true again when we fix FreeMemory.
321 // DCHECK(size_executable_ == 0); 321 // DCHECK(size_executable_ == 0);
322 capacity_ = 0; 322 capacity_ = 0;
323 capacity_executable_ = 0; 323 capacity_executable_ = 0;
324 } 324 }
325 325
326
327 bool MemoryAllocator::CommitMemory(Address base, size_t size, 326 bool MemoryAllocator::CommitMemory(Address base, size_t size,
328 Executability executable) { 327 Executability executable) {
329 if (!base::VirtualMemory::CommitRegion(base, size, 328 if (!base::VirtualMemory::CommitRegion(base, size,
330 executable == EXECUTABLE)) { 329 executable == EXECUTABLE)) {
331 return false; 330 return false;
332 } 331 }
333 UpdateAllocatedSpaceLimits(base, base + size); 332 UpdateAllocatedSpaceLimits(base, base + size);
334 return true; 333 return true;
335 } 334 }
336 335
337 336
338 void MemoryAllocator::FreeNewSpaceMemory(Address addr,
339 base::VirtualMemory* reservation,
340 Executability executable) {
341 LOG(isolate_, DeleteEvent("NewSpace", addr));
342
343 DCHECK(reservation->IsReserved());
344 const intptr_t size = static_cast<intptr_t>(reservation->size());
345 DCHECK(size_.Value() >= size);
346 size_.Increment(-size);
347 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
348 FreeMemory(reservation, NOT_EXECUTABLE);
349 }
350
351
352 void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation, 337 void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation,
353 Executability executable) { 338 Executability executable) {
354 // TODO(gc) make code_range part of memory allocator? 339 // TODO(gc) make code_range part of memory allocator?
355 // Code which is part of the code-range does not have its own VirtualMemory. 340 // Code which is part of the code-range does not have its own VirtualMemory.
356 DCHECK(isolate_->code_range() == NULL || 341 DCHECK(isolate_->code_range() == NULL ||
357 !isolate_->code_range()->contains( 342 !isolate_->code_range()->contains(
358 static_cast<Address>(reservation->address()))); 343 static_cast<Address>(reservation->address())));
359 DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL || 344 DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL ||
360 !isolate_->code_range()->valid() || 345 !isolate_->code_range()->valid() ||
361 reservation->size() <= Page::kPageSize); 346 reservation->size() <= Page::kPageSize);
(...skipping 64 matching lines...) Expand 10 before | Expand all | Expand 10 after
426 return base; 411 return base;
427 } 412 }
428 413
429 414
430 void Page::InitializeAsAnchor(PagedSpace* owner) { 415 void Page::InitializeAsAnchor(PagedSpace* owner) {
431 set_owner(owner); 416 set_owner(owner);
432 set_prev_page(this); 417 set_prev_page(this);
433 set_next_page(this); 418 set_next_page(this);
434 } 419 }
435 420
436
437 NewSpacePage* NewSpacePage::Initialize(Heap* heap, Address start,
438 SemiSpace* semi_space) {
439 Address area_start = start + NewSpacePage::kObjectStartOffset;
440 Address area_end = start + Page::kPageSize;
441
442 MemoryChunk* chunk =
443 MemoryChunk::Initialize(heap, start, Page::kPageSize, area_start,
444 area_end, NOT_EXECUTABLE, semi_space, nullptr);
445 bool in_to_space = (semi_space->id() != kFromSpace);
446 chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
447 : MemoryChunk::IN_FROM_SPACE);
448 DCHECK(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
449 : MemoryChunk::IN_TO_SPACE));
450 NewSpacePage* page = static_cast<NewSpacePage*>(chunk);
451 heap->incremental_marking()->SetNewSpacePageFlags(page);
452 return page;
453 }
454
455
456 void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) { 421 void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) {
457 set_owner(semi_space); 422 set_owner(semi_space);
458 set_next_chunk(this); 423 set_next_chunk(this);
459 set_prev_chunk(this); 424 set_prev_chunk(this);
460 // Flags marks this invalid page as not being in new-space. 425 // Flags marks this invalid page as not being in new-space.
461 // All real new-space pages will be in new-space. 426 // All real new-space pages will be in new-space.
462 SetFlags(0, ~0); 427 SetFlags(0, ~0);
463 } 428 }
464 429
465 MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size, 430 MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
(...skipping 242 matching lines...) Expand 10 before | Expand all | Expand 10 after
708 return MemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end, 673 return MemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end,
709 executable, owner, &reservation); 674 executable, owner, &reservation);
710 } 675 }
711 676
712 677
713 void Page::ResetFreeListStatistics() { 678 void Page::ResetFreeListStatistics() {
714 wasted_memory_ = 0; 679 wasted_memory_ = 0;
715 available_in_free_list_ = 0; 680 available_in_free_list_ = 0;
716 } 681 }
717 682
718
719 Page* MemoryAllocator::AllocatePage(intptr_t size, PagedSpace* owner,
720 Executability executable) {
721 MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
722 if (chunk == NULL) return NULL;
723 return Page::Initialize(isolate_->heap(), chunk, executable, owner);
724 }
725
726
727 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size, 683 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
728 Space* owner, 684 Space* owner,
729 Executability executable) { 685 Executability executable) {
730 MemoryChunk* chunk = 686 MemoryChunk* chunk =
731 AllocateChunk(object_size, object_size, executable, owner); 687 AllocateChunk(object_size, object_size, executable, owner);
732 if (chunk == NULL) return NULL; 688 if (chunk == NULL) return NULL;
733 if (executable && chunk->size() > LargePage::kMaxCodePageSize) { 689 if (executable && chunk->size() > LargePage::kMaxCodePageSize) {
734 STATIC_ASSERT(LargePage::kMaxCodePageSize <= TypedSlotSet::kMaxOffset); 690 STATIC_ASSERT(LargePage::kMaxCodePageSize <= TypedSlotSet::kMaxOffset);
735 FATAL("Code page is too large."); 691 FATAL("Code page is too large.");
736 } 692 }
(...skipping 44 matching lines...) Expand 10 before | Expand all | Expand 10 after
781 FreeMemory(chunk->address(), chunk->size(), chunk->executable()); 737 FreeMemory(chunk->address(), chunk->size(), chunk->executable());
782 } 738 }
783 } 739 }
784 740
785 741
786 void MemoryAllocator::Free(MemoryChunk* chunk) { 742 void MemoryAllocator::Free(MemoryChunk* chunk) {
787 PreFreeMemory(chunk); 743 PreFreeMemory(chunk);
788 PerformFreeMemory(chunk); 744 PerformFreeMemory(chunk);
789 } 745 }
790 746
747 template <typename SpaceType>
748 MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner,
749 Executability executable) {
750 if (chunk_pool_.is_empty()) return nullptr;
751 MemoryChunk* chunk = chunk_pool_.RemoveLast();
752 const intptr_t chunk_size = MemoryChunk::kPageSize;
753 const Address start = reinterpret_cast<Address>(chunk);
754 const Address area_start = start + MemoryChunk::kObjectStartOffset;
755 const Address area_end = start + chunk_size;
756 CommitBlock(reinterpret_cast<Address>(chunk), chunk_size, executable);
757 base::VirtualMemory reservation(start, chunk_size);
758 MemoryChunk::Initialize(isolate_->heap(), start, chunk_size, area_start,
759 area_end, NOT_EXECUTABLE, owner, &reservation);
ulan 2016/04/05 08:46:25 s/NOT_EXECUTABLE/executable?
Michael Lippautz 2016/04/05 09:42:34 Done.
760 if (chunk->executable() == EXECUTABLE) {
761 size_executable_.Increment(chunk_size);
762 }
763 size_.Increment(chunk_size);
764 return chunk;
765 }
766
767 void MemoryAllocator::FreePooled(MemoryChunk* chunk) {
768 chunk_pool_.Add(chunk);
ulan 2016/04/05 08:46:25 Where does a pooled page actually get released?
Michael Lippautz 2016/04/05 09:42:34 This is now done in MemoryAllocator::TearDown. I
769 intptr_t chunk_size = static_cast<intptr_t>(chunk->size());
770 if (chunk->executable() == EXECUTABLE) {
771 size_executable_.Increment(-chunk_size);
772 }
773 size_.Increment(-chunk_size);
774 UncommitBlock(reinterpret_cast<Address>(chunk), MemoryChunk::kPageSize);
775 }
791 776
792 bool MemoryAllocator::CommitBlock(Address start, size_t size, 777 bool MemoryAllocator::CommitBlock(Address start, size_t size,
793 Executability executable) { 778 Executability executable) {
794 if (!CommitMemory(start, size, executable)) return false; 779 if (!CommitMemory(start, size, executable)) return false;
795 780
796 if (Heap::ShouldZapGarbage()) { 781 if (Heap::ShouldZapGarbage()) {
797 ZapBlock(start, size); 782 ZapBlock(start, size);
798 } 783 }
799 784
800 isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size)); 785 isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
(...skipping 351 matching lines...) Expand 10 before | Expand all | Expand 10 after
1152 1137
1153 1138
1154 bool PagedSpace::Expand() { 1139 bool PagedSpace::Expand() {
1155 intptr_t size = AreaSize(); 1140 intptr_t size = AreaSize();
1156 if (snapshotable() && !HasPages()) { 1141 if (snapshotable() && !HasPages()) {
1157 size = Snapshot::SizeOfFirstPage(heap()->isolate(), identity()); 1142 size = Snapshot::SizeOfFirstPage(heap()->isolate(), identity());
1158 } 1143 }
1159 1144
1160 if (!CanExpand(size)) return false; 1145 if (!CanExpand(size)) return false;
1161 1146
1162 Page* p = heap()->isolate()->memory_allocator()->AllocatePage(size, this, 1147 Page* p = heap()->isolate()->memory_allocator()->AllocatePage<Page>(
1163 executable()); 1148 size, this, executable());
1164 if (p == NULL) return false; 1149 if (p == NULL) return false;
1165 1150
1166 AccountCommitted(static_cast<intptr_t>(p->size())); 1151 AccountCommitted(static_cast<intptr_t>(p->size()));
1167 1152
1168 // Pages created during bootstrapping may contain immortal immovable objects. 1153 // Pages created during bootstrapping may contain immortal immovable objects.
1169 if (!heap()->deserialization_complete()) p->MarkNeverEvacuate(); 1154 if (!heap()->deserialization_complete()) p->MarkNeverEvacuate();
1170 1155
1171 // When incremental marking was activated, old generation pages are allocated 1156 // When incremental marking was activated, old generation pages are allocated
1172 // black. 1157 // black.
1173 if (heap()->incremental_marking()->black_allocation()) { 1158 if (heap()->incremental_marking()->black_allocation()) {
(...skipping 109 matching lines...) Expand 10 before | Expand all | Expand 10 after
1283 } 1268 }
1284 CHECK_LE(black_size, page->LiveBytes()); 1269 CHECK_LE(black_size, page->LiveBytes());
1285 } 1270 }
1286 CHECK(allocation_pointer_found_in_space); 1271 CHECK(allocation_pointer_found_in_space);
1287 } 1272 }
1288 #endif // VERIFY_HEAP 1273 #endif // VERIFY_HEAP
1289 1274
1290 // ----------------------------------------------------------------------------- 1275 // -----------------------------------------------------------------------------
1291 // NewSpace implementation 1276 // NewSpace implementation
1292 1277
1293 1278 bool NewSpace::SetUp(int initial_semispace_capacity,
1294 bool NewSpace::SetUp(int reserved_semispace_capacity,
1295 int maximum_semispace_capacity) { 1279 int maximum_semispace_capacity) {
1296 // Set up new space based on the preallocated memory block defined by
1297 // start and size. The provided space is divided into two semi-spaces.
1298 // To support fast containment testing in the new space, the size of
1299 // this chunk must be a power of two and it must be aligned to its size.
1300 int initial_semispace_capacity = heap()->InitialSemiSpaceSize();
1301
1302 size_t size = 2 * reserved_semispace_capacity;
1303 Address base = heap()->isolate()->memory_allocator()->ReserveAlignedMemory(
1304 size, size, &reservation_);
1305 if (base == NULL) return false;
1306
1307 chunk_base_ = base;
1308 chunk_size_ = static_cast<uintptr_t>(size);
1309 LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_));
1310
1311 DCHECK(initial_semispace_capacity <= maximum_semispace_capacity); 1280 DCHECK(initial_semispace_capacity <= maximum_semispace_capacity);
1312 DCHECK(base::bits::IsPowerOfTwo32(maximum_semispace_capacity)); 1281 DCHECK(base::bits::IsPowerOfTwo32(maximum_semispace_capacity));
1313 1282
1283 to_space_.SetUp(initial_semispace_capacity, maximum_semispace_capacity);
1284 from_space_.SetUp(initial_semispace_capacity, maximum_semispace_capacity);
1285 if (!to_space_.Commit()) {
1286 return false;
1287 }
1288 DCHECK(!from_space_.is_committed()); // No need to use memory yet.
1289 ResetAllocationInfo();
1290
1314 // Allocate and set up the histogram arrays if necessary. 1291 // Allocate and set up the histogram arrays if necessary.
1315 allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1); 1292 allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
1316 promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1); 1293 promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
1317
1318 #define SET_NAME(name) \ 1294 #define SET_NAME(name) \
1319 allocated_histogram_[name].set_name(#name); \ 1295 allocated_histogram_[name].set_name(#name); \
1320 promoted_histogram_[name].set_name(#name); 1296 promoted_histogram_[name].set_name(#name);
1321 INSTANCE_TYPE_LIST(SET_NAME) 1297 INSTANCE_TYPE_LIST(SET_NAME)
1322 #undef SET_NAME 1298 #undef SET_NAME
1323 1299
1324 DCHECK(reserved_semispace_capacity == heap()->ReservedSemiSpaceSize());
1325 DCHECK(static_cast<intptr_t>(chunk_size_) >=
1326 2 * heap()->ReservedSemiSpaceSize());
1327 DCHECK(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0));
1328
1329 to_space_.SetUp(chunk_base_, initial_semispace_capacity,
1330 maximum_semispace_capacity);
1331 from_space_.SetUp(chunk_base_ + reserved_semispace_capacity,
1332 initial_semispace_capacity, maximum_semispace_capacity);
1333 if (!to_space_.Commit()) {
1334 return false;
1335 }
1336 DCHECK(!from_space_.is_committed()); // No need to use memory yet.
1337
1338 ResetAllocationInfo();
1339
1340 return true; 1300 return true;
1341 } 1301 }
1342 1302
1343 1303
1344 void NewSpace::TearDown() { 1304 void NewSpace::TearDown() {
1345 if (allocated_histogram_) { 1305 if (allocated_histogram_) {
1346 DeleteArray(allocated_histogram_); 1306 DeleteArray(allocated_histogram_);
1347 allocated_histogram_ = NULL; 1307 allocated_histogram_ = NULL;
1348 } 1308 }
1349 if (promoted_histogram_) { 1309 if (promoted_histogram_) {
1350 DeleteArray(promoted_histogram_); 1310 DeleteArray(promoted_histogram_);
1351 promoted_histogram_ = NULL; 1311 promoted_histogram_ = NULL;
1352 } 1312 }
1353 1313
1354 allocation_info_.Reset(nullptr, nullptr); 1314 allocation_info_.Reset(nullptr, nullptr);
1355 1315
1356 to_space_.TearDown(); 1316 to_space_.TearDown();
1357 from_space_.TearDown(); 1317 from_space_.TearDown();
1358
1359 heap()->isolate()->memory_allocator()->FreeNewSpaceMemory(
1360 chunk_base_, &reservation_, NOT_EXECUTABLE);
1361
1362 chunk_base_ = NULL;
1363 chunk_size_ = 0;
1364 } 1318 }
1365 1319
1366 1320
1367 void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); } 1321 void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); }
1368 1322
1369 1323
1370 void NewSpace::Grow() { 1324 void NewSpace::Grow() {
1371 // Double the semispace size but only up to maximum capacity. 1325 // Double the semispace size but only up to maximum capacity.
1372 DCHECK(TotalCapacity() < MaximumCapacity()); 1326 DCHECK(TotalCapacity() < MaximumCapacity());
1373 int new_capacity = 1327 int new_capacity =
(...skipping 296 matching lines...) Expand 10 before | Expand all | Expand 10 after
1670 CHECK_EQ(from_space_.id(), kFromSpace); 1624 CHECK_EQ(from_space_.id(), kFromSpace);
1671 CHECK_EQ(to_space_.id(), kToSpace); 1625 CHECK_EQ(to_space_.id(), kToSpace);
1672 from_space_.Verify(); 1626 from_space_.Verify();
1673 to_space_.Verify(); 1627 to_space_.Verify();
1674 } 1628 }
1675 #endif 1629 #endif
1676 1630
1677 // ----------------------------------------------------------------------------- 1631 // -----------------------------------------------------------------------------
1678 // SemiSpace implementation 1632 // SemiSpace implementation
1679 1633
1680 void SemiSpace::SetUp(Address start, int initial_capacity, 1634 void SemiSpace::SetUp(int initial_capacity, int maximum_capacity) {
1681 int maximum_capacity) {
1682 DCHECK_GE(maximum_capacity, Page::kPageSize); 1635 DCHECK_GE(maximum_capacity, Page::kPageSize);
1683 minimum_capacity_ = RoundDown(initial_capacity, Page::kPageSize); 1636 minimum_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
1684 current_capacity_ = minimum_capacity_; 1637 current_capacity_ = minimum_capacity_;
1685 maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize); 1638 maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
1686 committed_ = false; 1639 committed_ = false;
1687 start_ = start;
1688 age_mark_ = start_ + NewSpacePage::kObjectStartOffset;
1689 } 1640 }
1690 1641
1691 1642
1692 void SemiSpace::TearDown() { 1643 void SemiSpace::TearDown() {
1693 start_ = nullptr; 1644 // Properly uncommit memory to keep the allocator counters in sync.
1694 current_capacity_ = 0; 1645 if (is_committed()) Uncommit();
1646 current_capacity_ = maximum_capacity_ = 0;
1695 } 1647 }
1696 1648
1697 1649
1698 bool SemiSpace::Commit() { 1650 bool SemiSpace::Commit() {
1699 DCHECK(!is_committed()); 1651 DCHECK(!is_committed());
1700 if (!heap()->isolate()->memory_allocator()->CommitBlock(
1701 start_, current_capacity_, executable())) {
1702 return false;
1703 }
1704 AccountCommitted(current_capacity_);
1705
1706 NewSpacePage* current = anchor(); 1652 NewSpacePage* current = anchor();
1707 const int num_pages = current_capacity_ / Page::kPageSize; 1653 const int num_pages = current_capacity_ / Page::kPageSize;
1708 for (int i = 0; i < num_pages; i++) { 1654 for (int i = 0; i < num_pages; i++) {
1709 NewSpacePage* new_page = 1655 NewSpacePage* new_page =
1710 NewSpacePage::Initialize(heap(), start_ + i * Page::kPageSize, this); 1656 heap()
1657 ->isolate()
1658 ->memory_allocator()
1659 ->AllocatePage<NewSpacePage, MemoryAllocator::kPooled>(
1660 NewSpacePage::kAllocatableMemory, this, executable());
ulan 2016/04/05 08:46:25 s/executable()/NOT_EXECUTABLE? or fix SemiSpace::
Michael Lippautz 2016/04/05 09:42:34 Fixed SemiSpace::GrowTo.
1711 new_page->InsertAfter(current); 1661 new_page->InsertAfter(current);
1712 current = new_page; 1662 current = new_page;
1713 } 1663 }
1714 Reset(); 1664 Reset();
1715 1665 AccountCommitted(current_capacity_);
1716 set_current_capacity(current_capacity_); 1666 if (age_mark_ == nullptr) {
1667 age_mark_ = first_page()->area_start();
1668 }
1717 committed_ = true; 1669 committed_ = true;
1718 return true; 1670 return true;
1719 } 1671 }
1720 1672
1721 1673
1722 bool SemiSpace::Uncommit() { 1674 bool SemiSpace::Uncommit() {
1723 DCHECK(is_committed()); 1675 DCHECK(is_committed());
1724 Address start = start_ + maximum_capacity_ - current_capacity_; 1676 NewSpacePageIterator it(this);
1725 if (!heap()->isolate()->memory_allocator()->UncommitBlock( 1677 while (it.has_next()) {
1726 start, current_capacity_)) { 1678 heap()->isolate()->memory_allocator()->FreePooled(it.next());
1727 return false;
1728 } 1679 }
1729 AccountUncommitted(current_capacity_);
1730
1731 anchor()->set_next_page(anchor()); 1680 anchor()->set_next_page(anchor());
1732 anchor()->set_prev_page(anchor()); 1681 anchor()->set_prev_page(anchor());
1733 1682 AccountUncommitted(current_capacity_);
1734 committed_ = false; 1683 committed_ = false;
1735 return true; 1684 return true;
1736 } 1685 }
1737 1686
1738 1687
1739 size_t SemiSpace::CommittedPhysicalMemory() { 1688 size_t SemiSpace::CommittedPhysicalMemory() {
1740 if (!is_committed()) return 0; 1689 if (!is_committed()) return 0;
1741 size_t size = 0; 1690 size_t size = 0;
1742 NewSpacePageIterator it(this); 1691 NewSpacePageIterator it(this);
1743 while (it.has_next()) { 1692 while (it.has_next()) {
1744 size += it.next()->CommittedPhysicalMemory(); 1693 size += it.next()->CommittedPhysicalMemory();
1745 } 1694 }
1746 return size; 1695 return size;
1747 } 1696 }
1748 1697
1749 1698
1750 bool SemiSpace::GrowTo(int new_capacity) { 1699 bool SemiSpace::GrowTo(int new_capacity) {
1751 if (!is_committed()) { 1700 if (!is_committed()) {
1752 if (!Commit()) return false; 1701 if (!Commit()) return false;
1753 } 1702 }
1754 DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0); 1703 DCHECK_EQ(new_capacity & NewSpacePage::kPageAlignmentMask, 0);
1755 DCHECK_LE(new_capacity, maximum_capacity_); 1704 DCHECK_LE(new_capacity, maximum_capacity_);
1756 DCHECK_GT(new_capacity, current_capacity_); 1705 DCHECK_GT(new_capacity, current_capacity_);
1757 int pages_before = current_capacity_ / Page::kPageSize; 1706 const int delta = new_capacity - current_capacity_;
1758 int pages_after = new_capacity / Page::kPageSize;
1759
1760 size_t delta = new_capacity - current_capacity_;
1761
1762 DCHECK(IsAligned(delta, base::OS::AllocateAlignment())); 1707 DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
1763 if (!heap()->isolate()->memory_allocator()->CommitBlock( 1708 int delta_pages = delta / NewSpacePage::kPageSize;
1764 start_ + current_capacity_, delta, executable())) {
1765 return false;
1766 }
1767 AccountCommitted(static_cast<intptr_t>(delta));
1768 set_current_capacity(new_capacity);
1769 NewSpacePage* last_page = anchor()->prev_page(); 1709 NewSpacePage* last_page = anchor()->prev_page();
1770 DCHECK_NE(last_page, anchor()); 1710 DCHECK_NE(last_page, anchor());
1771 for (int i = pages_before; i < pages_after; i++) { 1711 while (delta_pages > 0) {
1772 Address page_address = start_ + i * Page::kPageSize;
1773 NewSpacePage* new_page = 1712 NewSpacePage* new_page =
1774 NewSpacePage::Initialize(heap(), page_address, this); 1713 heap()
1714 ->isolate()
1715 ->memory_allocator()
1716 ->AllocatePage<NewSpacePage, MemoryAllocator::kPooled>(
1717 NewSpacePage::kAllocatableMemory, this, NOT_EXECUTABLE);
1775 new_page->InsertAfter(last_page); 1718 new_page->InsertAfter(last_page);
1776 Bitmap::Clear(new_page); 1719 Bitmap::Clear(new_page);
1777 // Duplicate the flags that was set on the old page. 1720 // Duplicate the flags that was set on the old page.
1778 new_page->SetFlags(last_page->GetFlags(), 1721 new_page->SetFlags(last_page->GetFlags(),
1779 NewSpacePage::kCopyOnFlipFlagsMask); 1722 NewSpacePage::kCopyOnFlipFlagsMask);
1780 last_page = new_page; 1723 last_page = new_page;
1724 delta_pages--;
1781 } 1725 }
1726 AccountCommitted(static_cast<intptr_t>(delta));
1727 current_capacity_ = new_capacity;
1782 return true; 1728 return true;
1783 } 1729 }
1784 1730
1785 1731
1786 bool SemiSpace::ShrinkTo(int new_capacity) { 1732 bool SemiSpace::ShrinkTo(int new_capacity) {
1787 DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0); 1733 DCHECK_EQ(new_capacity & NewSpacePage::kPageAlignmentMask, 0);
1788 DCHECK_GE(new_capacity, minimum_capacity_); 1734 DCHECK_GE(new_capacity, minimum_capacity_);
1789 DCHECK_LT(new_capacity, current_capacity_); 1735 DCHECK_LT(new_capacity, current_capacity_);
1790 if (is_committed()) { 1736 if (is_committed()) {
1791 size_t delta = current_capacity_ - new_capacity; 1737 const int delta = current_capacity_ - new_capacity;
1792 DCHECK(IsAligned(delta, base::OS::AllocateAlignment())); 1738 DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
1793 1739 int delta_pages = delta / NewSpacePage::kPageSize;
1794 MemoryAllocator* allocator = heap()->isolate()->memory_allocator(); 1740 NewSpacePage* new_last_page;
1795 if (!allocator->UncommitBlock(start_ + new_capacity, delta)) { 1741 NewSpacePage* last_page;
1796 return false; 1742 while (delta_pages > 0) {
1743 last_page = anchor()->prev_page();
1744 new_last_page = last_page->prev_page();
1745 new_last_page->set_next_page(anchor());
1746 anchor()->set_prev_page(new_last_page);
1747 heap()->isolate()->memory_allocator()->FreePooled(last_page);
1748 delta_pages--;
1797 } 1749 }
1798 AccountUncommitted(static_cast<intptr_t>(delta)); 1750 AccountUncommitted(static_cast<intptr_t>(delta));
1799
1800 int pages_after = new_capacity / Page::kPageSize;
1801 NewSpacePage* new_last_page =
1802 NewSpacePage::FromAddress(start_ + (pages_after - 1) * Page::kPageSize);
1803 new_last_page->set_next_page(anchor());
1804 anchor()->set_prev_page(new_last_page);
1805 DCHECK((current_page_ >= first_page()) && (current_page_ <= new_last_page));
1806 } 1751 }
1807 1752 current_capacity_ = new_capacity;
1808 set_current_capacity(new_capacity);
1809
1810 return true; 1753 return true;
1811 } 1754 }
1812 1755
1813 void SemiSpace::FixPagesFlags(intptr_t flags, intptr_t mask) { 1756 void SemiSpace::FixPagesFlags(intptr_t flags, intptr_t mask) {
1814 anchor_.set_owner(this); 1757 anchor_.set_owner(this);
1815 // Fixup back-pointers to anchor. Address of anchor changes when we swap. 1758 // Fixup back-pointers to anchor. Address of anchor changes when we swap.
1816 anchor_.prev_page()->set_next_page(&anchor_); 1759 anchor_.prev_page()->set_next_page(&anchor_);
1817 anchor_.next_page()->set_prev_page(&anchor_); 1760 anchor_.next_page()->set_prev_page(&anchor_);
1818 1761
1819 NewSpacePageIterator it(this); 1762 NewSpacePageIterator it(this);
(...skipping 26 matching lines...) Expand all
1846 // We won't be swapping semispaces without data in them. 1789 // We won't be swapping semispaces without data in them.
1847 DCHECK_NE(from->anchor_.next_page(), &from->anchor_); 1790 DCHECK_NE(from->anchor_.next_page(), &from->anchor_);
1848 DCHECK_NE(to->anchor_.next_page(), &to->anchor_); 1791 DCHECK_NE(to->anchor_.next_page(), &to->anchor_);
1849 1792
1850 intptr_t saved_to_space_flags = to->current_page()->GetFlags(); 1793 intptr_t saved_to_space_flags = to->current_page()->GetFlags();
1851 1794
1852 // We swap all properties but id_. 1795 // We swap all properties but id_.
1853 std::swap(from->current_capacity_, to->current_capacity_); 1796 std::swap(from->current_capacity_, to->current_capacity_);
1854 std::swap(from->maximum_capacity_, to->maximum_capacity_); 1797 std::swap(from->maximum_capacity_, to->maximum_capacity_);
1855 std::swap(from->minimum_capacity_, to->minimum_capacity_); 1798 std::swap(from->minimum_capacity_, to->minimum_capacity_);
1856 std::swap(from->start_, to->start_);
1857 std::swap(from->age_mark_, to->age_mark_); 1799 std::swap(from->age_mark_, to->age_mark_);
1858 std::swap(from->committed_, to->committed_); 1800 std::swap(from->committed_, to->committed_);
1859 std::swap(from->anchor_, to->anchor_); 1801 std::swap(from->anchor_, to->anchor_);
1860 std::swap(from->current_page_, to->current_page_); 1802 std::swap(from->current_page_, to->current_page_);
1861 1803
1862 to->FixPagesFlags(saved_to_space_flags, NewSpacePage::kCopyOnFlipFlagsMask); 1804 to->FixPagesFlags(saved_to_space_flags, NewSpacePage::kCopyOnFlipFlagsMask);
1863 from->FixPagesFlags(0, 0); 1805 from->FixPagesFlags(0, 0);
1864 } 1806 }
1865 1807
1866 1808
(...skipping 1295 matching lines...) Expand 10 before | Expand all | Expand 10 after
3162 object->ShortPrint(); 3104 object->ShortPrint();
3163 PrintF("\n"); 3105 PrintF("\n");
3164 } 3106 }
3165 printf(" --------------------------------------\n"); 3107 printf(" --------------------------------------\n");
3166 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); 3108 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
3167 } 3109 }
3168 3110
3169 #endif // DEBUG 3111 #endif // DEBUG
3170 } // namespace internal 3112 } // namespace internal
3171 } // namespace v8 3113 } // namespace v8
OLDNEW
« src/heap/spaces.h ('K') | « src/heap/spaces.h ('k') | src/heap/spaces-inl.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698