| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/heap/spaces.h" | 5 #include "src/heap/spaces.h" |
| 6 | 6 |
| 7 #include "src/base/bits.h" | 7 #include "src/base/bits.h" |
| 8 #include "src/base/platform/platform.h" | 8 #include "src/base/platform/platform.h" |
| 9 #include "src/full-codegen/full-codegen.h" | 9 #include "src/full-codegen/full-codegen.h" |
| 10 #include "src/heap/slot-set.h" | 10 #include "src/heap/slot-set.h" |
| (...skipping 297 matching lines...) |
| 308 DCHECK_GE(capacity_, capacity_executable_); | 308 DCHECK_GE(capacity_, capacity_executable_); |
| 309 | 309 |
| 310 size_ = 0; | 310 size_ = 0; |
| 311 size_executable_ = 0; | 311 size_executable_ = 0; |
| 312 | 312 |
| 313 return true; | 313 return true; |
| 314 } | 314 } |
| 315 | 315 |
| 316 | 316 |
| 317 void MemoryAllocator::TearDown() { | 317 void MemoryAllocator::TearDown() { |
| 318 for (MemoryChunk* chunk : chunk_pool_) { |
| 319 FreeMemory(reinterpret_cast<Address>(chunk), MemoryChunk::kPageSize, |
| 320 NOT_EXECUTABLE); |
| 321 } |
| 318 // Check that spaces were torn down before MemoryAllocator. | 322 // Check that spaces were torn down before MemoryAllocator. |
| 319 DCHECK(size_.Value() == 0); | 323 DCHECK_EQ(size_.Value(), 0); |
| 320 // TODO(gc) this will be true again when we fix FreeMemory. | 324 // TODO(gc) this will be true again when we fix FreeMemory. |
| 321 // DCHECK(size_executable_ == 0); | 325 // DCHECK(size_executable_ == 0); |
| 322 capacity_ = 0; | 326 capacity_ = 0; |
| 323 capacity_executable_ = 0; | 327 capacity_executable_ = 0; |
| 324 } | 328 } |
| 325 | 329 |
| 326 | |
| 327 bool MemoryAllocator::CommitMemory(Address base, size_t size, | 330 bool MemoryAllocator::CommitMemory(Address base, size_t size, |
| 328 Executability executable) { | 331 Executability executable) { |
| 329 if (!base::VirtualMemory::CommitRegion(base, size, | 332 if (!base::VirtualMemory::CommitRegion(base, size, |
| 330 executable == EXECUTABLE)) { | 333 executable == EXECUTABLE)) { |
| 331 return false; | 334 return false; |
| 332 } | 335 } |
| 333 UpdateAllocatedSpaceLimits(base, base + size); | 336 UpdateAllocatedSpaceLimits(base, base + size); |
| 334 return true; | 337 return true; |
| 335 } | 338 } |
| 336 | 339 |
| 337 | 340 |
| 338 void MemoryAllocator::FreeNewSpaceMemory(Address addr, | |
| 339 base::VirtualMemory* reservation, | |
| 340 Executability executable) { | |
| 341 LOG(isolate_, DeleteEvent("NewSpace", addr)); | |
| 342 | |
| 343 DCHECK(reservation->IsReserved()); | |
| 344 const intptr_t size = static_cast<intptr_t>(reservation->size()); | |
| 345 DCHECK(size_.Value() >= size); | |
| 346 size_.Increment(-size); | |
| 347 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size)); | |
| 348 FreeMemory(reservation, NOT_EXECUTABLE); | |
| 349 } | |
| 350 | |
| 351 | |
| 352 void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation, | 341 void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation, |
| 353 Executability executable) { | 342 Executability executable) { |
| 354 // TODO(gc) make code_range part of memory allocator? | 343 // TODO(gc) make code_range part of memory allocator? |
| 355 // Code which is part of the code-range does not have its own VirtualMemory. | 344 // Code which is part of the code-range does not have its own VirtualMemory. |
| 356 DCHECK(isolate_->code_range() == NULL || | 345 DCHECK(isolate_->code_range() == NULL || |
| 357 !isolate_->code_range()->contains( | 346 !isolate_->code_range()->contains( |
| 358 static_cast<Address>(reservation->address()))); | 347 static_cast<Address>(reservation->address()))); |
| 359 DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL || | 348 DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL || |
| 360 !isolate_->code_range()->valid() || | 349 !isolate_->code_range()->valid() || |
| 361 reservation->size() <= Page::kPageSize); | 350 reservation->size() <= Page::kPageSize); |
| (...skipping 64 matching lines...) |
| 426 return base; | 415 return base; |
| 427 } | 416 } |
| 428 | 417 |
| 429 | 418 |
| 430 void Page::InitializeAsAnchor(PagedSpace* owner) { | 419 void Page::InitializeAsAnchor(PagedSpace* owner) { |
| 431 set_owner(owner); | 420 set_owner(owner); |
| 432 set_prev_page(this); | 421 set_prev_page(this); |
| 433 set_next_page(this); | 422 set_next_page(this); |
| 434 } | 423 } |
| 435 | 424 |
| 436 | |
| 437 NewSpacePage* NewSpacePage::Initialize(Heap* heap, Address start, | |
| 438 SemiSpace* semi_space) { | |
| 439 Address area_start = start + NewSpacePage::kObjectStartOffset; | |
| 440 Address area_end = start + Page::kPageSize; | |
| 441 | |
| 442 MemoryChunk* chunk = | |
| 443 MemoryChunk::Initialize(heap, start, Page::kPageSize, area_start, | |
| 444 area_end, NOT_EXECUTABLE, semi_space, nullptr); | |
| 445 bool in_to_space = (semi_space->id() != kFromSpace); | |
| 446 chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE | |
| 447 : MemoryChunk::IN_FROM_SPACE); | |
| 448 DCHECK(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE | |
| 449 : MemoryChunk::IN_TO_SPACE)); | |
| 450 NewSpacePage* page = static_cast<NewSpacePage*>(chunk); | |
| 451 heap->incremental_marking()->SetNewSpacePageFlags(page); | |
| 452 return page; | |
| 453 } | |
| 454 | |
| 455 | |
| 456 void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) { | 425 void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) { |
| 457 set_owner(semi_space); | 426 set_owner(semi_space); |
| 458 set_next_chunk(this); | 427 set_next_chunk(this); |
| 459 set_prev_chunk(this); | 428 set_prev_chunk(this); |
| 460 // Flags marks this invalid page as not being in new-space. | 429 // Flags marks this invalid page as not being in new-space. |
| 461 // All real new-space pages will be in new-space. | 430 // All real new-space pages will be in new-space. |
| 462 SetFlags(0, ~0); | 431 SetFlags(0, ~0); |
| 463 } | 432 } |
| 464 | 433 |
| 465 MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size, | 434 MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size, |
| (...skipping 242 matching lines...) |
| 708 return MemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end, | 677 return MemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end, |
| 709 executable, owner, &reservation); | 678 executable, owner, &reservation); |
| 710 } | 679 } |
| 711 | 680 |
| 712 | 681 |
| 713 void Page::ResetFreeListStatistics() { | 682 void Page::ResetFreeListStatistics() { |
| 714 wasted_memory_ = 0; | 683 wasted_memory_ = 0; |
| 715 available_in_free_list_ = 0; | 684 available_in_free_list_ = 0; |
| 716 } | 685 } |
| 717 | 686 |
| 718 | |
| 719 Page* MemoryAllocator::AllocatePage(intptr_t size, PagedSpace* owner, | |
| 720 Executability executable) { | |
| 721 MemoryChunk* chunk = AllocateChunk(size, size, executable, owner); | |
| 722 if (chunk == NULL) return NULL; | |
| 723 return Page::Initialize(isolate_->heap(), chunk, executable, owner); | |
| 724 } | |
| 725 | |
| 726 | |
| 727 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size, | 687 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size, |
| 728 Space* owner, | 688 Space* owner, |
| 729 Executability executable) { | 689 Executability executable) { |
| 730 MemoryChunk* chunk = | 690 MemoryChunk* chunk = |
| 731 AllocateChunk(object_size, object_size, executable, owner); | 691 AllocateChunk(object_size, object_size, executable, owner); |
| 732 if (chunk == NULL) return NULL; | 692 if (chunk == NULL) return NULL; |
| 733 if (executable && chunk->size() > LargePage::kMaxCodePageSize) { | 693 if (executable && chunk->size() > LargePage::kMaxCodePageSize) { |
| 734 STATIC_ASSERT(LargePage::kMaxCodePageSize <= TypedSlotSet::kMaxOffset); | 694 STATIC_ASSERT(LargePage::kMaxCodePageSize <= TypedSlotSet::kMaxOffset); |
| 735 FATAL("Code page is too large."); | 695 FATAL("Code page is too large."); |
| 736 } | 696 } |
| (...skipping 38 matching lines...) |
| 775 chunk->ReleaseAllocatedMemory(); | 735 chunk->ReleaseAllocatedMemory(); |
| 776 | 736 |
| 777 base::VirtualMemory* reservation = chunk->reserved_memory(); | 737 base::VirtualMemory* reservation = chunk->reserved_memory(); |
| 778 if (reservation->IsReserved()) { | 738 if (reservation->IsReserved()) { |
| 779 FreeMemory(reservation, chunk->executable()); | 739 FreeMemory(reservation, chunk->executable()); |
| 780 } else { | 740 } else { |
| 781 FreeMemory(chunk->address(), chunk->size(), chunk->executable()); | 741 FreeMemory(chunk->address(), chunk->size(), chunk->executable()); |
| 782 } | 742 } |
| 783 } | 743 } |
| 784 | 744 |
| 785 | 745 template <MemoryAllocator::AllocationMode mode> |
| 786 void MemoryAllocator::Free(MemoryChunk* chunk) { | 746 void MemoryAllocator::Free(MemoryChunk* chunk) { |
| 787 PreFreeMemory(chunk); | 747 if (mode == kRegular) { |
| 788 PerformFreeMemory(chunk); | 748 PreFreeMemory(chunk); |
| 749 PerformFreeMemory(chunk); |
| 750 } else { |
| 751 DCHECK_EQ(mode, kPooled); |
| 752 FreePooled(chunk); |
| 753 } |
| 789 } | 754 } |
| 790 | 755 |
| 756 template void MemoryAllocator::Free<MemoryAllocator::kRegular>( |
| 757 MemoryChunk* chunk); |
| 758 |
| 759 template void MemoryAllocator::Free<MemoryAllocator::kPooled>( |
| 760 MemoryChunk* chunk); |
| 761 |
| 762 template <typename PageType, MemoryAllocator::AllocationMode mode, |
| 763 typename SpaceType> |
| 764 PageType* MemoryAllocator::AllocatePage(intptr_t size, SpaceType* owner, |
| 765 Executability executable) { |
| 766 MemoryChunk* chunk = nullptr; |
| 767 if (mode == kPooled) { |
| 768 DCHECK_EQ(size, static_cast<intptr_t>(MemoryChunk::kAllocatableMemory)); |
| 769 DCHECK_EQ(executable, NOT_EXECUTABLE); |
| 770 chunk = AllocatePagePooled(owner); |
| 771 } |
| 772 if (chunk == nullptr) { |
| 773 chunk = AllocateChunk(size, size, executable, owner); |
| 774 } |
| 775 if (chunk == nullptr) return nullptr; |
| 776 return PageType::Initialize(isolate_->heap(), chunk, executable, owner); |
| 777 } |
| 778 |
| 779 template Page* MemoryAllocator::AllocatePage<Page, MemoryAllocator::kRegular, |
| 780 PagedSpace>(intptr_t, PagedSpace*, |
| 781 Executability); |
| 782 |
| 783 template NewSpacePage* MemoryAllocator::AllocatePage< |
| 784 NewSpacePage, MemoryAllocator::kPooled, SemiSpace>(intptr_t, SemiSpace*, |
| 785 Executability); |
| 786 |
| 787 template <typename SpaceType> |
| 788 MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) { |
| 789 if (chunk_pool_.is_empty()) return nullptr; |
| 790 const int size = MemoryChunk::kPageSize; |
| 791 MemoryChunk* chunk = chunk_pool_.RemoveLast(); |
| 792 const Address start = reinterpret_cast<Address>(chunk); |
| 793 const Address area_start = start + MemoryChunk::kObjectStartOffset; |
| 794 const Address area_end = start + size; |
| 795 CommitBlock(reinterpret_cast<Address>(chunk), size, NOT_EXECUTABLE); |
| 796 base::VirtualMemory reservation(start, size); |
| 797 MemoryChunk::Initialize(isolate_->heap(), start, size, area_start, area_end, |
| 798 NOT_EXECUTABLE, owner, &reservation); |
| 799 size_.Increment(size); |
| 800 return chunk; |
| 801 } |
| 802 |
| 803 void MemoryAllocator::FreePooled(MemoryChunk* chunk) { |
| 804 DCHECK_EQ(chunk->size(), static_cast<size_t>(MemoryChunk::kPageSize)); |
| 805 DCHECK_EQ(chunk->executable(), NOT_EXECUTABLE); |
| 806 chunk_pool_.Add(chunk); |
| 807 intptr_t chunk_size = static_cast<intptr_t>(chunk->size()); |
| 808 if (chunk->executable() == EXECUTABLE) { |
| 809 size_executable_.Increment(-chunk_size); |
| 810 } |
| 811 size_.Increment(-chunk_size); |
| 812 UncommitBlock(reinterpret_cast<Address>(chunk), MemoryChunk::kPageSize); |
| 813 } |
| 791 | 814 |
| 792 bool MemoryAllocator::CommitBlock(Address start, size_t size, | 815 bool MemoryAllocator::CommitBlock(Address start, size_t size, |
| 793 Executability executable) { | 816 Executability executable) { |
| 794 if (!CommitMemory(start, size, executable)) return false; | 817 if (!CommitMemory(start, size, executable)) return false; |
| 795 | 818 |
| 796 if (Heap::ShouldZapGarbage()) { | 819 if (Heap::ShouldZapGarbage()) { |
| 797 ZapBlock(start, size); | 820 ZapBlock(start, size); |
| 798 } | 821 } |
| 799 | 822 |
| 800 isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size)); | 823 isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size)); |
| (...skipping 351 matching lines...) |
| 1152 | 1175 |
| 1153 | 1176 |
| 1154 bool PagedSpace::Expand() { | 1177 bool PagedSpace::Expand() { |
| 1155 intptr_t size = AreaSize(); | 1178 intptr_t size = AreaSize(); |
| 1156 if (snapshotable() && !HasPages()) { | 1179 if (snapshotable() && !HasPages()) { |
| 1157 size = Snapshot::SizeOfFirstPage(heap()->isolate(), identity()); | 1180 size = Snapshot::SizeOfFirstPage(heap()->isolate(), identity()); |
| 1158 } | 1181 } |
| 1159 | 1182 |
| 1160 if (!CanExpand(size)) return false; | 1183 if (!CanExpand(size)) return false; |
| 1161 | 1184 |
| 1162 Page* p = heap()->isolate()->memory_allocator()->AllocatePage(size, this, | 1185 Page* p = heap()->isolate()->memory_allocator()->AllocatePage<Page>( |
| 1163 executable()); | 1186 size, this, executable()); |
| 1164 if (p == NULL) return false; | 1187 if (p == NULL) return false; |
| 1165 | 1188 |
| 1166 AccountCommitted(static_cast<intptr_t>(p->size())); | 1189 AccountCommitted(static_cast<intptr_t>(p->size())); |
| 1167 | 1190 |
| 1168 // Pages created during bootstrapping may contain immortal immovable objects. | 1191 // Pages created during bootstrapping may contain immortal immovable objects. |
| 1169 if (!heap()->deserialization_complete()) p->MarkNeverEvacuate(); | 1192 if (!heap()->deserialization_complete()) p->MarkNeverEvacuate(); |
| 1170 | 1193 |
| 1171 // When incremental marking was activated, old generation pages are allocated | 1194 // When incremental marking was activated, old generation pages are allocated |
| 1172 // black. | 1195 // black. |
| 1173 if (heap()->incremental_marking()->black_allocation()) { | 1196 if (heap()->incremental_marking()->black_allocation()) { |
| (...skipping 109 matching lines...) |
| 1283 } | 1306 } |
| 1284 CHECK_LE(black_size, page->LiveBytes()); | 1307 CHECK_LE(black_size, page->LiveBytes()); |
| 1285 } | 1308 } |
| 1286 CHECK(allocation_pointer_found_in_space); | 1309 CHECK(allocation_pointer_found_in_space); |
| 1287 } | 1310 } |
| 1288 #endif // VERIFY_HEAP | 1311 #endif // VERIFY_HEAP |
| 1289 | 1312 |
| 1290 // ----------------------------------------------------------------------------- | 1313 // ----------------------------------------------------------------------------- |
| 1291 // NewSpace implementation | 1314 // NewSpace implementation |
| 1292 | 1315 |
| 1293 | 1316 bool NewSpace::SetUp(int initial_semispace_capacity, |
| 1294 bool NewSpace::SetUp(int reserved_semispace_capacity, | |
| 1295 int maximum_semispace_capacity) { | 1317 int maximum_semispace_capacity) { |
| 1296 // Set up new space based on the preallocated memory block defined by | |
| 1297 // start and size. The provided space is divided into two semi-spaces. | |
| 1298 // To support fast containment testing in the new space, the size of | |
| 1299 // this chunk must be a power of two and it must be aligned to its size. | |
| 1300 int initial_semispace_capacity = heap()->InitialSemiSpaceSize(); | |
| 1301 | |
| 1302 size_t size = 2 * reserved_semispace_capacity; | |
| 1303 Address base = heap()->isolate()->memory_allocator()->ReserveAlignedMemory( | |
| 1304 size, size, &reservation_); | |
| 1305 if (base == NULL) return false; | |
| 1306 | |
| 1307 chunk_base_ = base; | |
| 1308 chunk_size_ = static_cast<uintptr_t>(size); | |
| 1309 LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_)); | |
| 1310 | |
| 1311 DCHECK(initial_semispace_capacity <= maximum_semispace_capacity); | 1318 DCHECK(initial_semispace_capacity <= maximum_semispace_capacity); |
| 1312 DCHECK(base::bits::IsPowerOfTwo32(maximum_semispace_capacity)); | 1319 DCHECK(base::bits::IsPowerOfTwo32(maximum_semispace_capacity)); |
| 1313 | 1320 |
| 1321 to_space_.SetUp(initial_semispace_capacity, maximum_semispace_capacity); |
| 1322 from_space_.SetUp(initial_semispace_capacity, maximum_semispace_capacity); |
| 1323 if (!to_space_.Commit()) { |
| 1324 return false; |
| 1325 } |
| 1326 DCHECK(!from_space_.is_committed()); // No need to use memory yet. |
| 1327 ResetAllocationInfo(); |
| 1328 |
| 1314 // Allocate and set up the histogram arrays if necessary. | 1329 // Allocate and set up the histogram arrays if necessary. |
| 1315 allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1); | 1330 allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1); |
| 1316 promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1); | 1331 promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1); |
| 1317 | |
| 1318 #define SET_NAME(name) \ | 1332 #define SET_NAME(name) \ |
| 1319 allocated_histogram_[name].set_name(#name); \ | 1333 allocated_histogram_[name].set_name(#name); \ |
| 1320 promoted_histogram_[name].set_name(#name); | 1334 promoted_histogram_[name].set_name(#name); |
| 1321 INSTANCE_TYPE_LIST(SET_NAME) | 1335 INSTANCE_TYPE_LIST(SET_NAME) |
| 1322 #undef SET_NAME | 1336 #undef SET_NAME |
| 1323 | 1337 |
| 1324 DCHECK(reserved_semispace_capacity == heap()->ReservedSemiSpaceSize()); | |
| 1325 DCHECK(static_cast<intptr_t>(chunk_size_) >= | |
| 1326 2 * heap()->ReservedSemiSpaceSize()); | |
| 1327 DCHECK(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0)); | |
| 1328 | |
| 1329 to_space_.SetUp(chunk_base_, initial_semispace_capacity, | |
| 1330 maximum_semispace_capacity); | |
| 1331 from_space_.SetUp(chunk_base_ + reserved_semispace_capacity, | |
| 1332 initial_semispace_capacity, maximum_semispace_capacity); | |
| 1333 if (!to_space_.Commit()) { | |
| 1334 return false; | |
| 1335 } | |
| 1336 DCHECK(!from_space_.is_committed()); // No need to use memory yet. | |
| 1337 | |
| 1338 ResetAllocationInfo(); | |
| 1339 | |
| 1340 return true; | 1338 return true; |
| 1341 } | 1339 } |
| 1342 | 1340 |
| 1343 | 1341 |
| 1344 void NewSpace::TearDown() { | 1342 void NewSpace::TearDown() { |
| 1345 if (allocated_histogram_) { | 1343 if (allocated_histogram_) { |
| 1346 DeleteArray(allocated_histogram_); | 1344 DeleteArray(allocated_histogram_); |
| 1347 allocated_histogram_ = NULL; | 1345 allocated_histogram_ = NULL; |
| 1348 } | 1346 } |
| 1349 if (promoted_histogram_) { | 1347 if (promoted_histogram_) { |
| 1350 DeleteArray(promoted_histogram_); | 1348 DeleteArray(promoted_histogram_); |
| 1351 promoted_histogram_ = NULL; | 1349 promoted_histogram_ = NULL; |
| 1352 } | 1350 } |
| 1353 | 1351 |
| 1354 allocation_info_.Reset(nullptr, nullptr); | 1352 allocation_info_.Reset(nullptr, nullptr); |
| 1355 | 1353 |
| 1356 to_space_.TearDown(); | 1354 to_space_.TearDown(); |
| 1357 from_space_.TearDown(); | 1355 from_space_.TearDown(); |
| 1358 | |
| 1359 heap()->isolate()->memory_allocator()->FreeNewSpaceMemory( | |
| 1360 chunk_base_, &reservation_, NOT_EXECUTABLE); | |
| 1361 | |
| 1362 chunk_base_ = NULL; | |
| 1363 chunk_size_ = 0; | |
| 1364 } | 1356 } |
| 1365 | 1357 |
| 1366 | 1358 |
| 1367 void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); } | 1359 void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); } |
| 1368 | 1360 |
| 1369 | 1361 |
| 1370 void NewSpace::Grow() { | 1362 void NewSpace::Grow() { |
| 1371 // Double the semispace size but only up to maximum capacity. | 1363 // Double the semispace size but only up to maximum capacity. |
| 1372 DCHECK(TotalCapacity() < MaximumCapacity()); | 1364 DCHECK(TotalCapacity() < MaximumCapacity()); |
| 1373 int new_capacity = | 1365 int new_capacity = |
| (...skipping 296 matching lines...) |
| 1670 CHECK_EQ(from_space_.id(), kFromSpace); | 1662 CHECK_EQ(from_space_.id(), kFromSpace); |
| 1671 CHECK_EQ(to_space_.id(), kToSpace); | 1663 CHECK_EQ(to_space_.id(), kToSpace); |
| 1672 from_space_.Verify(); | 1664 from_space_.Verify(); |
| 1673 to_space_.Verify(); | 1665 to_space_.Verify(); |
| 1674 } | 1666 } |
| 1675 #endif | 1667 #endif |
| 1676 | 1668 |
| 1677 // ----------------------------------------------------------------------------- | 1669 // ----------------------------------------------------------------------------- |
| 1678 // SemiSpace implementation | 1670 // SemiSpace implementation |
| 1679 | 1671 |
| 1680 void SemiSpace::SetUp(Address start, int initial_capacity, | 1672 void SemiSpace::SetUp(int initial_capacity, int maximum_capacity) { |
| 1681 int maximum_capacity) { | |
| 1682 DCHECK_GE(maximum_capacity, Page::kPageSize); | 1673 DCHECK_GE(maximum_capacity, Page::kPageSize); |
| 1683 minimum_capacity_ = RoundDown(initial_capacity, Page::kPageSize); | 1674 minimum_capacity_ = RoundDown(initial_capacity, Page::kPageSize); |
| 1684 current_capacity_ = minimum_capacity_; | 1675 current_capacity_ = minimum_capacity_; |
| 1685 maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize); | 1676 maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize); |
| 1686 committed_ = false; | 1677 committed_ = false; |
| 1687 start_ = start; | |
| 1688 age_mark_ = start_ + NewSpacePage::kObjectStartOffset; | |
| 1689 } | 1678 } |
| 1690 | 1679 |
| 1691 | 1680 |
| 1692 void SemiSpace::TearDown() { | 1681 void SemiSpace::TearDown() { |
| 1693 start_ = nullptr; | 1682 // Properly uncommit memory to keep the allocator counters in sync. |
| 1694 current_capacity_ = 0; | 1683 if (is_committed()) Uncommit(); |
| 1684 current_capacity_ = maximum_capacity_ = 0; |
| 1695 } | 1685 } |
| 1696 | 1686 |
| 1697 | 1687 |
| 1698 bool SemiSpace::Commit() { | 1688 bool SemiSpace::Commit() { |
| 1699 DCHECK(!is_committed()); | 1689 DCHECK(!is_committed()); |
| 1700 if (!heap()->isolate()->memory_allocator()->CommitBlock( | |
| 1701 start_, current_capacity_, executable())) { | |
| 1702 return false; | |
| 1703 } | |
| 1704 AccountCommitted(current_capacity_); | |
| 1705 | |
| 1706 NewSpacePage* current = anchor(); | 1690 NewSpacePage* current = anchor(); |
| 1707 const int num_pages = current_capacity_ / Page::kPageSize; | 1691 const int num_pages = current_capacity_ / Page::kPageSize; |
| 1708 for (int i = 0; i < num_pages; i++) { | 1692 for (int i = 0; i < num_pages; i++) { |
| 1709 NewSpacePage* new_page = | 1693 NewSpacePage* new_page = |
| 1710 NewSpacePage::Initialize(heap(), start_ + i * Page::kPageSize, this); | 1694 heap() |
| 1695 ->isolate() |
| 1696 ->memory_allocator() |
| 1697 ->AllocatePage<NewSpacePage, MemoryAllocator::kPooled>( |
| 1698 NewSpacePage::kAllocatableMemory, this, executable()); |
| 1711 new_page->InsertAfter(current); | 1699 new_page->InsertAfter(current); |
| 1712 current = new_page; | 1700 current = new_page; |
| 1713 } | 1701 } |
| 1714 Reset(); | 1702 Reset(); |
| 1715 | 1703 AccountCommitted(current_capacity_); |
| 1716 set_current_capacity(current_capacity_); | 1704 if (age_mark_ == nullptr) { |
| 1705 age_mark_ = first_page()->area_start(); |
| 1706 } |
| 1717 committed_ = true; | 1707 committed_ = true; |
| 1718 return true; | 1708 return true; |
| 1719 } | 1709 } |
| 1720 | 1710 |
| 1721 | 1711 |
| 1722 bool SemiSpace::Uncommit() { | 1712 bool SemiSpace::Uncommit() { |
| 1723 DCHECK(is_committed()); | 1713 DCHECK(is_committed()); |
| 1724 Address start = start_ + maximum_capacity_ - current_capacity_; | 1714 NewSpacePageIterator it(this); |
| 1725 if (!heap()->isolate()->memory_allocator()->UncommitBlock( | 1715 while (it.has_next()) { |
| 1726 start, current_capacity_)) { | 1716 heap()->isolate()->memory_allocator()->Free<MemoryAllocator::kPooled>( |
| 1727 return false; | 1717 it.next()); |
| 1728 } | 1718 } |
| 1729 AccountUncommitted(current_capacity_); | |
| 1730 | |
| 1731 anchor()->set_next_page(anchor()); | 1719 anchor()->set_next_page(anchor()); |
| 1732 anchor()->set_prev_page(anchor()); | 1720 anchor()->set_prev_page(anchor()); |
| 1733 | 1721 AccountUncommitted(current_capacity_); |
| 1734 committed_ = false; | 1722 committed_ = false; |
| 1735 return true; | 1723 return true; |
| 1736 } | 1724 } |
| 1737 | 1725 |
| 1738 | 1726 |
| 1739 size_t SemiSpace::CommittedPhysicalMemory() { | 1727 size_t SemiSpace::CommittedPhysicalMemory() { |
| 1740 if (!is_committed()) return 0; | 1728 if (!is_committed()) return 0; |
| 1741 size_t size = 0; | 1729 size_t size = 0; |
| 1742 NewSpacePageIterator it(this); | 1730 NewSpacePageIterator it(this); |
| 1743 while (it.has_next()) { | 1731 while (it.has_next()) { |
| 1744 size += it.next()->CommittedPhysicalMemory(); | 1732 size += it.next()->CommittedPhysicalMemory(); |
| 1745 } | 1733 } |
| 1746 return size; | 1734 return size; |
| 1747 } | 1735 } |
| 1748 | 1736 |
| 1749 | 1737 |
| 1750 bool SemiSpace::GrowTo(int new_capacity) { | 1738 bool SemiSpace::GrowTo(int new_capacity) { |
| 1751 if (!is_committed()) { | 1739 if (!is_committed()) { |
| 1752 if (!Commit()) return false; | 1740 if (!Commit()) return false; |
| 1753 } | 1741 } |
| 1754 DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0); | 1742 DCHECK_EQ(new_capacity & NewSpacePage::kPageAlignmentMask, 0); |
| 1755 DCHECK_LE(new_capacity, maximum_capacity_); | 1743 DCHECK_LE(new_capacity, maximum_capacity_); |
| 1756 DCHECK_GT(new_capacity, current_capacity_); | 1744 DCHECK_GT(new_capacity, current_capacity_); |
| 1757 int pages_before = current_capacity_ / Page::kPageSize; | 1745 const int delta = new_capacity - current_capacity_; |
| 1758 int pages_after = new_capacity / Page::kPageSize; | |
| 1759 | |
| 1760 size_t delta = new_capacity - current_capacity_; | |
| 1761 | |
| 1762 DCHECK(IsAligned(delta, base::OS::AllocateAlignment())); | 1746 DCHECK(IsAligned(delta, base::OS::AllocateAlignment())); |
| 1763 if (!heap()->isolate()->memory_allocator()->CommitBlock( | 1747 int delta_pages = delta / NewSpacePage::kPageSize; |
| 1764 start_ + current_capacity_, delta, executable())) { | |
| 1765 return false; | |
| 1766 } | |
| 1767 AccountCommitted(static_cast<intptr_t>(delta)); | |
| 1768 set_current_capacity(new_capacity); | |
| 1769 NewSpacePage* last_page = anchor()->prev_page(); | 1748 NewSpacePage* last_page = anchor()->prev_page(); |
| 1770 DCHECK_NE(last_page, anchor()); | 1749 DCHECK_NE(last_page, anchor()); |
| 1771 for (int i = pages_before; i < pages_after; i++) { | 1750 while (delta_pages > 0) { |
| 1772 Address page_address = start_ + i * Page::kPageSize; | |
| 1773 NewSpacePage* new_page = | 1751 NewSpacePage* new_page = |
| 1774 NewSpacePage::Initialize(heap(), page_address, this); | 1752 heap() |
| 1753 ->isolate() |
| 1754 ->memory_allocator() |
| 1755 ->AllocatePage<NewSpacePage, MemoryAllocator::kPooled>( |
| 1756 NewSpacePage::kAllocatableMemory, this, executable()); |
| 1775 new_page->InsertAfter(last_page); | 1757 new_page->InsertAfter(last_page); |
| 1776 Bitmap::Clear(new_page); | 1758 Bitmap::Clear(new_page); |
| 1777 // Duplicate the flags that was set on the old page. | 1759 // Duplicate the flags that was set on the old page. |
| 1778 new_page->SetFlags(last_page->GetFlags(), | 1760 new_page->SetFlags(last_page->GetFlags(), |
| 1779 NewSpacePage::kCopyOnFlipFlagsMask); | 1761 NewSpacePage::kCopyOnFlipFlagsMask); |
| 1780 last_page = new_page; | 1762 last_page = new_page; |
| 1763 delta_pages--; |
| 1781 } | 1764 } |
| 1765 AccountCommitted(static_cast<intptr_t>(delta)); |
| 1766 current_capacity_ = new_capacity; |
| 1782 return true; | 1767 return true; |
| 1783 } | 1768 } |
| 1784 | 1769 |
| 1785 | 1770 |
| 1786 bool SemiSpace::ShrinkTo(int new_capacity) { | 1771 bool SemiSpace::ShrinkTo(int new_capacity) { |
| 1787 DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0); | 1772 DCHECK_EQ(new_capacity & NewSpacePage::kPageAlignmentMask, 0); |
| 1788 DCHECK_GE(new_capacity, minimum_capacity_); | 1773 DCHECK_GE(new_capacity, minimum_capacity_); |
| 1789 DCHECK_LT(new_capacity, current_capacity_); | 1774 DCHECK_LT(new_capacity, current_capacity_); |
| 1790 if (is_committed()) { | 1775 if (is_committed()) { |
| 1791 size_t delta = current_capacity_ - new_capacity; | 1776 const int delta = current_capacity_ - new_capacity; |
| 1792 DCHECK(IsAligned(delta, base::OS::AllocateAlignment())); | 1777 DCHECK(IsAligned(delta, base::OS::AllocateAlignment())); |
| 1793 | 1778 int delta_pages = delta / NewSpacePage::kPageSize; |
| 1794 MemoryAllocator* allocator = heap()->isolate()->memory_allocator(); | 1779 NewSpacePage* new_last_page; |
| 1795 if (!allocator->UncommitBlock(start_ + new_capacity, delta)) { | 1780 NewSpacePage* last_page; |
| 1796 return false; | 1781 while (delta_pages > 0) { |
| 1782 last_page = anchor()->prev_page(); |
| 1783 new_last_page = last_page->prev_page(); |
| 1784 new_last_page->set_next_page(anchor()); |
| 1785 anchor()->set_prev_page(new_last_page); |
| 1786 heap()->isolate()->memory_allocator()->Free<MemoryAllocator::kPooled>( |
| 1787 last_page); |
| 1788 delta_pages--; |
| 1797 } | 1789 } |
| 1798 AccountUncommitted(static_cast<intptr_t>(delta)); | 1790 AccountUncommitted(static_cast<intptr_t>(delta)); |
| 1799 | |
| 1800 int pages_after = new_capacity / Page::kPageSize; | |
| 1801 NewSpacePage* new_last_page = | |
| 1802 NewSpacePage::FromAddress(start_ + (pages_after - 1) * Page::kPageSize); | |
| 1803 new_last_page->set_next_page(anchor()); | |
| 1804 anchor()->set_prev_page(new_last_page); | |
| 1805 DCHECK((current_page_ >= first_page()) && (current_page_ <= new_last_page)); | |
| 1806 } | 1791 } |
| 1807 | 1792 current_capacity_ = new_capacity; |
| 1808 set_current_capacity(new_capacity); | |
| 1809 | |
| 1810 return true; | 1793 return true; |
| 1811 } | 1794 } |
| 1812 | 1795 |
| 1813 void SemiSpace::FixPagesFlags(intptr_t flags, intptr_t mask) { | 1796 void SemiSpace::FixPagesFlags(intptr_t flags, intptr_t mask) { |
| 1814 anchor_.set_owner(this); | 1797 anchor_.set_owner(this); |
| 1815 // Fixup back-pointers to anchor. Address of anchor changes when we swap. | 1798 // Fixup back-pointers to anchor. Address of anchor changes when we swap. |
| 1816 anchor_.prev_page()->set_next_page(&anchor_); | 1799 anchor_.prev_page()->set_next_page(&anchor_); |
| 1817 anchor_.next_page()->set_prev_page(&anchor_); | 1800 anchor_.next_page()->set_prev_page(&anchor_); |
| 1818 | 1801 |
| 1819 NewSpacePageIterator it(this); | 1802 NewSpacePageIterator it(this); |
| (...skipping 26 matching lines...) |
| 1846 // We won't be swapping semispaces without data in them. | 1829 // We won't be swapping semispaces without data in them. |
| 1847 DCHECK_NE(from->anchor_.next_page(), &from->anchor_); | 1830 DCHECK_NE(from->anchor_.next_page(), &from->anchor_); |
| 1848 DCHECK_NE(to->anchor_.next_page(), &to->anchor_); | 1831 DCHECK_NE(to->anchor_.next_page(), &to->anchor_); |
| 1849 | 1832 |
| 1850 intptr_t saved_to_space_flags = to->current_page()->GetFlags(); | 1833 intptr_t saved_to_space_flags = to->current_page()->GetFlags(); |
| 1851 | 1834 |
| 1852 // We swap all properties but id_. | 1835 // We swap all properties but id_. |
| 1853 std::swap(from->current_capacity_, to->current_capacity_); | 1836 std::swap(from->current_capacity_, to->current_capacity_); |
| 1854 std::swap(from->maximum_capacity_, to->maximum_capacity_); | 1837 std::swap(from->maximum_capacity_, to->maximum_capacity_); |
| 1855 std::swap(from->minimum_capacity_, to->minimum_capacity_); | 1838 std::swap(from->minimum_capacity_, to->minimum_capacity_); |
| 1856 std::swap(from->start_, to->start_); | |
| 1857 std::swap(from->age_mark_, to->age_mark_); | 1839 std::swap(from->age_mark_, to->age_mark_); |
| 1858 std::swap(from->committed_, to->committed_); | 1840 std::swap(from->committed_, to->committed_); |
| 1859 std::swap(from->anchor_, to->anchor_); | 1841 std::swap(from->anchor_, to->anchor_); |
| 1860 std::swap(from->current_page_, to->current_page_); | 1842 std::swap(from->current_page_, to->current_page_); |
| 1861 | 1843 |
| 1862 to->FixPagesFlags(saved_to_space_flags, NewSpacePage::kCopyOnFlipFlagsMask); | 1844 to->FixPagesFlags(saved_to_space_flags, NewSpacePage::kCopyOnFlipFlagsMask); |
| 1863 from->FixPagesFlags(0, 0); | 1845 from->FixPagesFlags(0, 0); |
| 1864 } | 1846 } |
| 1865 | 1847 |
| 1866 | 1848 |
| (...skipping 1295 matching lines...) |
| 3162 object->ShortPrint(); | 3144 object->ShortPrint(); |
| 3163 PrintF("\n"); | 3145 PrintF("\n"); |
| 3164 } | 3146 } |
| 3165 printf(" --------------------------------------\n"); | 3147 printf(" --------------------------------------\n"); |
| 3166 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); | 3148 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); |
| 3167 } | 3149 } |
| 3168 | 3150 |
| 3169 #endif // DEBUG | 3151 #endif // DEBUG |
| 3170 } // namespace internal | 3152 } // namespace internal |
| 3171 } // namespace v8 | 3153 } // namespace v8 |
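
For readers skimming the new templated MemoryAllocator API introduced in this patch, the following is a minimal, illustrative sketch of how a pooled new-space page is obtained and returned. It is not part of the patch: the helper function name is hypothetical, a valid Heap* and SemiSpace* are assumed to be in scope, error handling is elided, and it simply mirrors what the patched SemiSpace::Commit() and Uncommit() already do.

```cpp
// Sketch only (not part of the patch): allocate one pooled new-space page and
// hand it back to the allocator's chunk pool.
bool AllocateAndReleasePooledPage(Heap* heap, SemiSpace* semi_space) {
  MemoryAllocator* allocator = heap->isolate()->memory_allocator();

  // Pooled pages must be exactly one page of NOT_EXECUTABLE memory; the
  // allocator reuses a chunk from chunk_pool_ when one is available and only
  // falls back to AllocateChunk() when the pool is empty.
  NewSpacePage* page =
      allocator->AllocatePage<NewSpacePage, MemoryAllocator::kPooled>(
          NewSpacePage::kAllocatableMemory, semi_space, NOT_EXECUTABLE);
  if (page == nullptr) return false;

  // ... use the page ...

  // Free<kPooled> routes to FreePooled(): the block is uncommitted but the
  // chunk stays in chunk_pool_ for cheap reuse.
  allocator->Free<MemoryAllocator::kPooled>(page);
  return true;
}
```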