Chromium Code Reviews

Unified Diff: src/heap/spaces.cc

Issue 1929503002: Reland of "[heap] Uncommit pooled pages concurrently" (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Fixed pooling Created 4 years, 7 months ago
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "src/heap/spaces.h"

 #include "src/base/bits.h"
 #include "src/base/platform/platform.h"
+#include "src/base/platform/semaphore.h"
 #include "src/full-codegen/full-codegen.h"
 #include "src/heap/slot-set.h"
 #include "src/macro-assembler.h"
 #include "src/msan.h"
 #include "src/snapshot/snapshot.h"
+#include "src/v8.h"

 namespace v8 {
 namespace internal {


 // ----------------------------------------------------------------------------
 // HeapObjectIterator

 HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
   // You can't actually iterate over the anchor page. It is not a real page,
(...skipping 272 matching lines...)
 //

 MemoryAllocator::MemoryAllocator(Isolate* isolate)
     : isolate_(isolate),
       code_range_(nullptr),
       capacity_(0),
       capacity_executable_(0),
       size_(0),
       size_executable_(0),
       lowest_ever_allocated_(reinterpret_cast<void*>(-1)),
-      highest_ever_allocated_(reinterpret_cast<void*>(0)) {}
+      highest_ever_allocated_(reinterpret_cast<void*>(0)),
+      unmapper_(this) {}

 bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable,
                             intptr_t code_range_size) {
   capacity_ = RoundUp(capacity, Page::kPageSize);
   capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
   DCHECK_GE(capacity_, capacity_executable_);

   size_ = 0;
   size_executable_ = 0;

   code_range_ = new CodeRange(isolate_);
   if (!code_range_->SetUp(static_cast<size_t>(code_range_size))) return false;

   return true;
 }


 void MemoryAllocator::TearDown() {
-  for (MemoryChunk* chunk : chunk_pool_) {
+  unmapper()->WaitUntilCompleted();
+
+  MemoryChunk* chunk = nullptr;
+  while ((chunk = unmapper()->TryGetPooledMemoryChunkSafe()) != nullptr) {
     FreeMemory(reinterpret_cast<Address>(chunk), MemoryChunk::kPageSize,
                NOT_EXECUTABLE);
   }
+
   // Check that spaces were torn down before MemoryAllocator.
   DCHECK_EQ(size_.Value(), 0);
   // TODO(gc) this will be true again when we fix FreeMemory.
   // DCHECK(size_executable_ == 0);
   capacity_ = 0;
   capacity_executable_ = 0;

   if (last_chunk_.IsReserved()) {
     last_chunk_.Release();
   }

   delete code_range_;
   code_range_ = nullptr;
 }
+class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public v8::Task {
+ public:
+  explicit UnmapFreeMemoryTask(Unmapper* unmapper) : unmapper_(unmapper) {}
+
+ private:
+  // v8::Task overrides.
+  void Run() override {
+    unmapper_->PerformFreeMemoryOnQueuedChunks();
+    unmapper_->pending_unmapping_tasks_semaphore_.Signal();
+  }
+
+  Unmapper* unmapper_;
+  DISALLOW_COPY_AND_ASSIGN(UnmapFreeMemoryTask);
+};
+
+void MemoryAllocator::Unmapper::FreeQueuedChunks() {
+  if (FLAG_concurrent_sweeping) {
+    V8::GetCurrentPlatform()->CallOnBackgroundThread(
+        new UnmapFreeMemoryTask(this), v8::Platform::kShortRunningTask);
+    concurrent_unmapping_tasks_active_++;
+  } else {
+    PerformFreeMemoryOnQueuedChunks();
+  }
+}
+
+bool MemoryAllocator::Unmapper::WaitUntilCompleted() {
+  bool waited = false;
+  while (concurrent_unmapping_tasks_active_ > 0) {
+    pending_unmapping_tasks_semaphore_.Wait();
+    concurrent_unmapping_tasks_active_--;
+    waited = true;
+  }
+  return waited;
+}
+
+void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
+  MemoryChunk* chunk = nullptr;
+  // Regular chunks.
+  while ((chunk = GetMemoryChunkSafe<kRegular>()) != nullptr) {
+    bool pooled = chunk->IsFlagSet(MemoryChunk::POOLED);
+    allocator_->PerformFreeMemory(chunk);
+    if (pooled) AddMemoryChunkSafe<kPooled>(chunk);
+  }
+  // Non-regular chunks.
+  while ((chunk = GetMemoryChunkSafe<kNonRegular>()) != nullptr) {
+    allocator_->PerformFreeMemory(chunk);
+  }
+}
+
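Aside: the Unmapper added above is a small producer/consumer. Chunks are queued on the main thread via AddMemoryChunkSafe(), FreeQueuedChunks() posts a background task that drains the queues, and WaitUntilCompleted() joins by waiting on the semaphore once per posted task; the task counter itself is only touched on the main thread, so it needs no lock. Below is a minimal, self-contained sketch of the same handshake using only the C++ standard library in place of V8's platform/task API. All names in it are illustrative, not V8's.

#include <condition_variable>
#include <deque>
#include <mutex>
#include <thread>
#include <vector>

// Tiny counting semaphore (stand-in for base::Semaphore).
class Semaphore {
 public:
  void Signal() {
    std::lock_guard<std::mutex> lock(mutex_);
    ++count_;
    cv_.notify_one();
  }
  void Wait() {
    std::unique_lock<std::mutex> lock(mutex_);
    cv_.wait(lock, [this] { return count_ > 0; });
    --count_;
  }

 private:
  std::mutex mutex_;
  std::condition_variable cv_;
  int count_ = 0;
};

class Unmapper {
 public:
  // Main thread: enqueue work under the queue lock.
  void AddChunkSafe(int chunk) {
    std::lock_guard<std::mutex> lock(mutex_);
    queue_.push_back(chunk);
  }
  // Analogous to FreeQueuedChunks(): hand the queue to a background task
  // and remember that one more task is in flight.
  void FreeQueuedChunks() {
    threads_.emplace_back([this] {
      PerformFreeMemoryOnQueuedChunks();
      pending_tasks_semaphore_.Signal();
    });
    ++active_tasks_;
  }
  // Analogous to WaitUntilCompleted(): wait once per posted task.
  void WaitUntilCompleted() {
    while (active_tasks_ > 0) {
      pending_tasks_semaphore_.Wait();
      --active_tasks_;
    }
    for (auto& t : threads_) t.join();
    threads_.clear();
  }

 private:
  void PerformFreeMemoryOnQueuedChunks() {
    for (;;) {
      int chunk;
      {
        std::lock_guard<std::mutex> lock(mutex_);
        if (queue_.empty()) return;
        chunk = queue_.front();
        queue_.pop_front();
      }
      (void)chunk;  // Here V8 would uncommit/free the chunk's memory.
    }
  }

  std::mutex mutex_;
  std::deque<int> queue_;
  std::vector<std::thread> threads_;
  Semaphore pending_tasks_semaphore_;
  int active_tasks_ = 0;  // Main-thread-only, like the V8 counter.
};

int main() {
  Unmapper u;
  for (int i = 0; i < 8; i++) u.AddChunkSafe(i);
  u.FreeQueuedChunks();    // kick one background task
  u.WaitUntilCompleted();  // join before teardown, as in TearDown()
}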
 bool MemoryAllocator::CommitMemory(Address base, size_t size,
                                    Executability executable) {
   if (!base::VirtualMemory::CommitRegion(base, size,
                                          executable == EXECUTABLE)) {
     return false;
   }
   UpdateAllocatedSpaceLimits(base, base + size);
   return true;
 }

(...skipping 379 matching lines...)

   chunk->SetFlag(MemoryChunk::PRE_FREED);
 }


 void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
   DCHECK(chunk->IsFlagSet(MemoryChunk::PRE_FREED));
   chunk->ReleaseAllocatedMemory();

   base::VirtualMemory* reservation = chunk->reserved_memory();
-  if (reservation->IsReserved()) {
-    FreeMemory(reservation, chunk->executable());
+  if (chunk->IsFlagSet(MemoryChunk::POOLED)) {
+    UncommitBlock(reinterpret_cast<Address>(chunk), MemoryChunk::kPageSize);
   } else {
-    FreeMemory(chunk->address(), chunk->size(), chunk->executable());
+    if (reservation->IsReserved()) {
+      FreeMemory(reservation, chunk->executable());
+    } else {
+      FreeMemory(chunk->address(), chunk->size(), chunk->executable());
+    }
   }
 }

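Aside: the POOLED branch above only uncommits the page's memory while keeping its address range reserved, so the chunk can later be reused cheaply by AllocatePagePooled(); the other branch releases the mapping entirely. V8 does this through base::VirtualMemory; as a rough POSIX/Linux analogy only (not V8's implementation):

#include <cstddef>
#include <sys/mman.h>

// "Uncommit": drop the backing pages but keep the address range mapped,
// so it can be committed again later without re-reserving it.
bool UncommitBlock(void* base, size_t size) {
  return madvise(base, size, MADV_DONTNEED) == 0;
}

// Full free: return the whole reservation to the OS.
bool FreeBlock(void* base, size_t size) {
  return munmap(base, size) == 0;
}

int main() {
  const size_t size = 1 << 20;
  void* base = mmap(nullptr, size, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  UncommitBlock(base, size);  // pooled path: keep `base` reserved
  FreeBlock(base, size);      // full teardown releases it
}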
-template <MemoryAllocator::AllocationMode mode>
+template <MemoryAllocator::FreeMode mode>
 void MemoryAllocator::Free(MemoryChunk* chunk) {
-  if (mode == kRegular) {
-    PreFreeMemory(chunk);
-    PerformFreeMemory(chunk);
-  } else {
-    DCHECK_EQ(mode, kPooled);
-    FreePooled(chunk);
+  switch (mode) {
+    case kFull:
+      PreFreeMemory(chunk);
+      PerformFreeMemory(chunk);
+      break;
+    case kPooledAndQueue:
+      DCHECK_EQ(chunk->size(), static_cast<size_t>(MemoryChunk::kPageSize));
+      DCHECK_EQ(chunk->executable(), NOT_EXECUTABLE);
+      chunk->SetFlag(MemoryChunk::POOLED);
+    // Fall through to kPreFreeAndQueue.
+    case kPreFreeAndQueue:
+      PreFreeMemory(chunk);
+      // The chunks added to this queue will be freed by a concurrent thread.
+      unmapper()->AddMemoryChunkSafe(chunk);
+      break;
+    default:
+      UNREACHABLE();
   }
 }

-template void MemoryAllocator::Free<MemoryAllocator::kRegular>(
+template void MemoryAllocator::Free<MemoryAllocator::kFull>(MemoryChunk* chunk);
+
+template void MemoryAllocator::Free<MemoryAllocator::kPreFreeAndQueue>(
     MemoryChunk* chunk);

-template void MemoryAllocator::Free<MemoryAllocator::kPooled>(
+template void MemoryAllocator::Free<MemoryAllocator::kPooledAndQueue>(
     MemoryChunk* chunk);

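Aside: because the mode is a template parameter, each explicit instantiation of Free compiles down to a single arm of the switch, and the fall-through from kPooledAndQueue into kPreFreeAndQueue is deliberate. A standalone sketch of the idiom (illustrative names and printouts, not V8's API):

#include <cstdio>

enum FreeMode { kFull, kPreFreeAndQueue, kPooledAndQueue };

template <FreeMode mode>
void Free(const char* chunk_name) {
  switch (mode) {
    case kFull:
      // Synchronous path: bookkeeping and unmapping on the calling thread.
      std::printf("%s: freed synchronously\n", chunk_name);
      break;
    case kPooledAndQueue:
      // Mark for pooling, then fall through to the queueing path, exactly
      // as the V8 code above falls through to kPreFreeAndQueue.
      std::printf("%s: marked POOLED\n", chunk_name);
      [[fallthrough]];
    case kPreFreeAndQueue:
      // Queue the chunk; a background task performs the actual free.
      std::printf("%s: queued for concurrent free\n", chunk_name);
      break;
  }
}

int main() {
  Free<kFull>("large page");         // e.g. LargeObjectSpace::TearDown
  Free<kPreFreeAndQueue>("page");    // e.g. PagedSpace::ReleasePage
  Free<kPooledAndQueue>("ns page");  // e.g. SemiSpace::Uncommit
}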
 template <MemoryAllocator::AllocationMode alloc_mode, typename SpaceType>
 Page* MemoryAllocator::AllocatePage(intptr_t size, SpaceType* owner,
                                     Executability executable) {
   MemoryChunk* chunk = nullptr;
   if (alloc_mode == kPooled) {
     DCHECK_EQ(size, static_cast<intptr_t>(MemoryChunk::kAllocatableMemory));
     DCHECK_EQ(executable, NOT_EXECUTABLE);
     chunk = AllocatePagePooled(owner);
(...skipping 18 matching lines...)
 LargePage* MemoryAllocator::AllocateLargePage(intptr_t size,
                                               LargeObjectSpace* owner,
                                               Executability executable) {
   MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
   if (chunk == nullptr) return nullptr;
   return LargePage::Initialize(isolate_->heap(), chunk, executable, owner);
 }

 template <typename SpaceType>
 MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) {
-  if (chunk_pool_.is_empty()) return nullptr;
+  MemoryChunk* chunk = unmapper()->TryGetPooledMemoryChunkSafe();
+  if (chunk == nullptr) return nullptr;
   const int size = MemoryChunk::kPageSize;
-  MemoryChunk* chunk = chunk_pool_.RemoveLast();
   const Address start = reinterpret_cast<Address>(chunk);
   const Address area_start = start + MemoryChunk::kObjectStartOffset;
   const Address area_end = start + size;
   CommitBlock(reinterpret_cast<Address>(chunk), size, NOT_EXECUTABLE);
   base::VirtualMemory reservation(start, size);
   MemoryChunk::Initialize(isolate_->heap(), start, size, area_start, area_end,
                           NOT_EXECUTABLE, owner, &reservation);
   size_.Increment(size);
   return chunk;
 }

-void MemoryAllocator::FreePooled(MemoryChunk* chunk) {
-  DCHECK_EQ(chunk->size(), static_cast<size_t>(MemoryChunk::kPageSize));
-  DCHECK_EQ(chunk->executable(), NOT_EXECUTABLE);
-  chunk_pool_.Add(chunk);
-  intptr_t chunk_size = static_cast<intptr_t>(chunk->size());
-  if (chunk->executable() == EXECUTABLE) {
-    size_executable_.Increment(-chunk_size);
-  }
-  size_.Increment(-chunk_size);
-  UncommitBlock(reinterpret_cast<Address>(chunk), MemoryChunk::kPageSize);
-}
-
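Aside: AllocatePagePooled() now pops pages from the Unmapper's pool instead of the allocator-owned chunk_pool_, and the old synchronous FreePooled() path is gone; pages enter the pool via the background PerformFreeMemoryOnQueuedChunks() instead. A minimal sketch of the reuse-or-allocate pattern, with illustrative names:

#include <cstdlib>
#include <mutex>
#include <vector>

class PagePool {
 public:
  // Return a cached page, or nullptr if the pool is empty (thread-safe,
  // like TryGetPooledMemoryChunkSafe()).
  void* TryGetPooled() {
    std::lock_guard<std::mutex> lock(mutex_);
    if (pool_.empty()) return nullptr;
    void* page = pool_.back();
    pool_.pop_back();
    return page;
  }
  void AddPooled(void* page) {
    std::lock_guard<std::mutex> lock(mutex_);
    pool_.push_back(page);
  }

 private:
  std::mutex mutex_;
  std::vector<void*> pool_;
};

void* AllocatePage(PagePool* pool, size_t page_size) {
  // Prefer a pooled page (cheap: it only needs recommitting); otherwise
  // fall back to a fresh allocation, as AllocatePage() does above.
  if (void* page = pool->TryGetPooled()) return page;
  return std::malloc(page_size);
}

int main() {
  PagePool pool;
  void* p = AllocatePage(&pool, 4096);  // fresh: pool is empty
  pool.AddPooled(p);                    // "free" into the pool
  void* q = AllocatePage(&pool, 4096);  // reused from the pool
  std::free(q);
}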
 bool MemoryAllocator::CommitBlock(Address start, size_t size,
                                   Executability executable) {
   if (!CommitMemory(start, size, executable)) return false;

   if (Heap::ShouldZapGarbage()) {
     ZapBlock(start, size);
   }

   isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
   return true;
(...skipping 122 matching lines...)
     vm->Uncommit(header, header_size);
   }
   return false;
 }


 // -----------------------------------------------------------------------------
 // MemoryChunk implementation

 void MemoryChunk::ReleaseAllocatedMemory() {
-  delete skip_list_;
-  skip_list_ = nullptr;
-  delete mutex_;
-  mutex_ = nullptr;
-  ReleaseOldToNewSlots();
-  ReleaseOldToOldSlots();
+  if (skip_list_ != nullptr) {
+    delete skip_list_;
+    skip_list_ = nullptr;
+  }
+  if (mutex_ != nullptr) {
+    delete mutex_;
+    mutex_ = nullptr;
+  }
+  if (old_to_new_slots_ != nullptr) ReleaseOldToNewSlots();
+  if (old_to_old_slots_ != nullptr) ReleaseOldToOldSlots();
 }

 static SlotSet* AllocateSlotSet(size_t size, Address page_start) {
   size_t pages = (size + Page::kPageSize - 1) / Page::kPageSize;
   DCHECK(pages > 0);
   SlotSet* slot_set = new SlotSet[pages];
   for (size_t i = 0; i < pages; i++) {
     slot_set[i].SetPageStart(page_start + i * Page::kPageSize);
   }
   return slot_set;
(...skipping 61 matching lines...)

 bool PagedSpace::SetUp() { return true; }


 bool PagedSpace::HasBeenSetUp() { return true; }


 void PagedSpace::TearDown() {
   PageIterator iterator(this);
   while (iterator.has_next()) {
-    heap()->memory_allocator()->Free(iterator.next());
+    heap()->memory_allocator()->Free<MemoryAllocator::kFull>(iterator.next());
   }
   anchor_.set_next_page(&anchor_);
   anchor_.set_prev_page(&anchor_);
   accounting_stats_.Clear();
 }

 void PagedSpace::RefillFreeList() {
   // Any PagedSpace might invoke RefillFreeList. We filter all but our old
   // generation spaces out.
   if (identity() != OLD_SPACE && identity() != CODE_SPACE &&
(...skipping 169 matching lines...)
     allocation_info_.Reset(nullptr, nullptr);
   }

   // If page is still in a list, unlink it from that list.
   if (page->next_chunk() != NULL) {
     DCHECK(page->prev_chunk() != NULL);
     page->Unlink();
   }

   AccountUncommitted(static_cast<intptr_t>(page->size()));
-  heap()->QueueMemoryChunkForFree(page);
+  heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);

   DCHECK(Capacity() > 0);
   accounting_stats_.ShrinkSpace(AreaSize());
 }

 #ifdef DEBUG
 void PagedSpace::Print() {}
 #endif

 #ifdef VERIFY_HEAP
(...skipping 441 matching lines...)
   }
   committed_ = true;
   return true;
 }


 bool SemiSpace::Uncommit() {
   DCHECK(is_committed());
   NewSpacePageIterator it(this);
   while (it.has_next()) {
-    heap()->memory_allocator()->Free<MemoryAllocator::kPooled>(it.next());
+    heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
+        it.next());
   }
   anchor()->set_next_page(anchor());
   anchor()->set_prev_page(anchor());
   AccountUncommitted(current_capacity_);
   committed_ = false;
+  heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
   return true;
 }


 size_t SemiSpace::CommittedPhysicalMemory() {
   if (!is_committed()) return 0;
   size_t size = 0;
   NewSpacePageIterator it(this);
   while (it.has_next()) {
     size += it.next()->CommittedPhysicalMemory();
(...skipping 54 matching lines...)
     const int delta = current_capacity_ - new_capacity;
     DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
     int delta_pages = delta / Page::kPageSize;
     Page* new_last_page;
     Page* last_page;
     while (delta_pages > 0) {
       last_page = anchor()->prev_page();
       new_last_page = last_page->prev_page();
       new_last_page->set_next_page(anchor());
       anchor()->set_prev_page(new_last_page);
-      heap()->memory_allocator()->Free<MemoryAllocator::kPooled>(last_page);
+      heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
+          last_page);
       delta_pages--;
     }
     AccountUncommitted(static_cast<intptr_t>(delta));
+    heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
   }
   current_capacity_ = new_capacity;
   return true;
 }

 void SemiSpace::FixPagesFlags(intptr_t flags, intptr_t mask) {
   anchor_.set_owner(this);
   anchor_.prev_page()->set_next_page(&anchor_);
   anchor_.next_page()->set_prev_page(&anchor_);

(...skipping 1070 matching lines...)

 void LargeObjectSpace::TearDown() {
   while (first_page_ != NULL) {
     LargePage* page = first_page_;
     first_page_ = first_page_->next_page();
     LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));

     ObjectSpace space = static_cast<ObjectSpace>(1 << identity());
     heap()->memory_allocator()->PerformAllocationCallback(
         space, kAllocationActionFree, page->size());
-    heap()->memory_allocator()->Free(page);
+    heap()->memory_allocator()->Free<MemoryAllocator::kFull>(page);
   }
   SetUp();
 }


 AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
                                                Executability executable) {
   // Check if we want to force a GC before growing the old space further.
   // If so, fail the allocation.
   if (!heap()->CanExpandOldGeneration(object_size)) {
(...skipping 122 matching lines...)
       // Use variable alignment to help pass length check (<= 80 characters)
       // of single line in tools/presubmit.py.
       const intptr_t alignment = MemoryChunk::kAlignment;
       uintptr_t base = reinterpret_cast<uintptr_t>(page) / alignment;
       uintptr_t limit = base + (page->size() - 1) / alignment;
       for (uintptr_t key = base; key <= limit; key++) {
         chunk_map_.Remove(reinterpret_cast<void*>(key),
                           static_cast<uint32_t>(key));
       }

-      heap()->QueueMemoryChunkForFree(page);
+      heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
     }
   }
 }


 bool LargeObjectSpace::Contains(HeapObject* object) {
   Address address = object->address();
   MemoryChunk* chunk = MemoryChunk::FromAddress(address);

   bool owned = (chunk->owner() == this);
(...skipping 110 matching lines...)
     object->ShortPrint();
     PrintF("\n");
   }
   printf(" --------------------------------------\n");
   printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
 }

 #endif  // DEBUG
 }  // namespace internal
 }  // namespace v8