Chromium Code Reviews

Unified Diff: src/heap/spaces.cc

Issue 1913083002: [heap] Uncommit pooled pages concurrently (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Addressed comments Created 4 years, 8 months ago
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "src/heap/spaces.h"

 #include "src/base/bits.h"
 #include "src/base/platform/platform.h"
+#include "src/base/platform/semaphore.h"
 #include "src/full-codegen/full-codegen.h"
 #include "src/heap/slot-set.h"
 #include "src/macro-assembler.h"
 #include "src/msan.h"
 #include "src/snapshot/snapshot.h"
+#include "src/v8.h"

 namespace v8 {
 namespace internal {


 // ----------------------------------------------------------------------------
 // HeapObjectIterator

 HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
   // You can't actually iterate over the anchor page. It is not a real page,
(...skipping 272 matching lines...)
 //

 MemoryAllocator::MemoryAllocator(Isolate* isolate)
     : isolate_(isolate),
       code_range_(nullptr),
       capacity_(0),
       capacity_executable_(0),
       size_(0),
       size_executable_(0),
       lowest_ever_allocated_(reinterpret_cast<void*>(-1)),
-      highest_ever_allocated_(reinterpret_cast<void*>(0)) {}
+      highest_ever_allocated_(reinterpret_cast<void*>(0)),
+      unmapper_(this) {}

 bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable,
                             intptr_t code_range_size) {
   capacity_ = RoundUp(capacity, Page::kPageSize);
   capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
   DCHECK_GE(capacity_, capacity_executable_);

   size_ = 0;
   size_executable_ = 0;

   code_range_ = new CodeRange(isolate_);
   if (!code_range_->SetUp(static_cast<size_t>(code_range_size))) return false;

   return true;
 }


 void MemoryAllocator::TearDown() {
-  for (MemoryChunk* chunk : chunk_pool_) {
+  unmapper()->WaitUntilCompleted();
+
+  MemoryChunk* chunk = nullptr;
+  while ((chunk = unmapper()->TryGetPooledMemoryChunkSafe()) != nullptr) {
     FreeMemory(reinterpret_cast<Address>(chunk), MemoryChunk::kPageSize,
                NOT_EXECUTABLE);
   }
+
   // Check that spaces were torn down before MemoryAllocator.
   DCHECK_EQ(size_.Value(), 0);
   // TODO(gc) this will be true again when we fix FreeMemory.
   // DCHECK(size_executable_ == 0);
   capacity_ = 0;
   capacity_executable_ = 0;

   if (last_chunk_.IsReserved()) {
     last_chunk_.Release();
   }

   delete code_range_;
   code_range_ = nullptr;
 }

+class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public v8::Task {
+ public:
+  explicit UnmapFreeMemoryTask(Unmapper* unmapper) : unmapper_(unmapper) {}
+
+ private:
+  // v8::Task overrides.
+  void Run() override {
+    unmapper_->PerformFreeMemoryOnQueuedChunks();
+    unmapper_->pending_unmapping_tasks_semaphore_.Signal();
+  }
+
+  Unmapper* unmapper_;
+  DISALLOW_COPY_AND_ASSIGN(UnmapFreeMemoryTask);
+};
+
+void MemoryAllocator::Unmapper::FreeQueuedChunks() {
+  if (FLAG_concurrent_sweeping) {
+    V8::GetCurrentPlatform()->CallOnBackgroundThread(
+        new UnmapFreeMemoryTask(this), v8::Platform::kShortRunningTask);
+    concurrent_unmapping_tasks_active_++;
+  } else {
+    PerformFreeMemoryOnQueuedChunks();
+  }
+}
+
+bool MemoryAllocator::Unmapper::WaitUntilCompleted() {
+  bool waited = false;
+  while (concurrent_unmapping_tasks_active_ > 0) {
+    pending_unmapping_tasks_semaphore_.Wait();
+    concurrent_unmapping_tasks_active_--;
+    waited = true;
+  }
+  return waited;
+}
+
+void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
+  MemoryChunk* chunk = nullptr;
+  // Regular chunks.
+  while ((chunk = GetMemoryChunkSafe<kRegular>()) != nullptr) {
+    bool pooled = chunk->IsFlagSet(MemoryChunk::POOLED);
+    allocator_->PerformFreeMemory(chunk);
+    if (pooled) AddMemoryChunkSafe<kPooled>(chunk);
+  }
+  // Non-regular chunks.
+  while ((chunk = GetMemoryChunkSafe<kNonRegular>()) != nullptr) {
+    allocator_->PerformFreeMemory(chunk);
+  }
+}
+
 bool MemoryAllocator::CommitMemory(Address base, size_t size,
                                    Executability executable) {
   if (!base::VirtualMemory::CommitRegion(base, size,
                                          executable == EXECUTABLE)) {
     return false;
   }
   UpdateAllocatedSpaceLimits(base, base + size);
   return true;
 }

(...skipping 387 matching lines...)
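The Unmapper added above coordinates concurrent freeing with a counter of in-flight tasks and a semaphore that each background task signals when it finishes: FreeQueuedChunks() posts a task and bumps the counter, while WaitUntilCompleted() waits on the semaphore once per outstanding task before decrementing it. A minimal standalone sketch of that pattern follows; it is illustrative only, using std::thread and a hand-rolled semaphore in place of V8's platform task runner and base::Semaphore, and none of the names below come from the patch.

// Illustrative only -- the shape of the Unmapper's task/semaphore handshake.
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <queue>
#include <thread>
#include <vector>

// Minimal counting semaphore, standing in for base::Semaphore.
class Semaphore {
 public:
  void Signal() {
    std::lock_guard<std::mutex> lock(mutex_);
    count_++;
    cv_.notify_one();
  }
  void Wait() {
    std::unique_lock<std::mutex> lock(mutex_);
    cv_.wait(lock, [this] { return count_ > 0; });
    count_--;
  }

 private:
  std::mutex mutex_;
  std::condition_variable cv_;
  int count_ = 0;
};

class ToyUnmapper {
 public:
  void AddChunkSafe(int chunk) {
    std::lock_guard<std::mutex> lock(mutex_);
    queue_.push(chunk);
  }

  // Like FreeQueuedChunks(): start one background "task" and remember it.
  void FreeQueuedChunks() {
    tasks_.emplace_back([this] {
      PerformFreeMemoryOnQueuedChunks();
      pending_tasks_semaphore_.Signal();  // task is done
    });
    tasks_active_++;
  }

  // Like WaitUntilCompleted(): wait once per task that was started.
  void WaitUntilCompleted() {
    while (tasks_active_ > 0) {
      pending_tasks_semaphore_.Wait();
      tasks_active_--;
    }
    for (std::thread& t : tasks_) t.join();
    tasks_.clear();
  }

 private:
  void PerformFreeMemoryOnQueuedChunks() {
    for (;;) {
      int chunk;
      {
        std::lock_guard<std::mutex> lock(mutex_);
        if (queue_.empty()) return;
        chunk = queue_.front();
        queue_.pop();
      }
      std::printf("freeing chunk %d on a background thread\n", chunk);
    }
  }

  std::mutex mutex_;
  std::queue<int> queue_;
  std::vector<std::thread> tasks_;
  Semaphore pending_tasks_semaphore_;
  int tasks_active_ = 0;
};

int main() {
  ToyUnmapper unmapper;
  for (int i = 0; i < 4; i++) unmapper.AddChunkSafe(i);
  unmapper.FreeQueuedChunks();    // e.g. at the end of SemiSpace::Uncommit()
  unmapper.WaitUntilCompleted();  // e.g. in MemoryAllocator::TearDown()
}

The property this sketch preserves is the one the patch relies on: the waiter only decrements the in-flight count after a matching Signal(), so TearDown() cannot race with a task that is still unmapping.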

   chunk->SetFlag(MemoryChunk::PRE_FREED);
 }


 void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
   DCHECK(chunk->IsFlagSet(MemoryChunk::PRE_FREED));
   chunk->ReleaseAllocatedMemory();

   base::VirtualMemory* reservation = chunk->reserved_memory();
-  if (reservation->IsReserved()) {
-    FreeMemory(reservation, chunk->executable());
+  if (chunk->IsFlagSet(MemoryChunk::POOLED)) {
+    UncommitBlock(reinterpret_cast<Address>(chunk), MemoryChunk::kPageSize);
   } else {
-    FreeMemory(chunk->address(), chunk->size(), chunk->executable());
+    if (reservation->IsReserved()) {
+      FreeMemory(reservation, chunk->executable());
+    } else {
+      FreeMemory(chunk->address(), chunk->size(), chunk->executable());
+    }
   }
 }

-template <MemoryAllocator::AllocationMode mode>
+template <MemoryAllocator::FreeMode mode>
 void MemoryAllocator::Free(MemoryChunk* chunk) {
-  if (mode == kRegular) {
-    PreFreeMemory(chunk);
-    PerformFreeMemory(chunk);
-  } else {
-    DCHECK_EQ(mode, kPooled);
-    FreePooled(chunk);
+  switch (mode) {
+    case kFull:
+      PreFreeMemory(chunk);
+      PerformFreeMemory(chunk);
+      break;
+    case kPooledAndQueue:
+      DCHECK_EQ(chunk->size(), static_cast<size_t>(MemoryChunk::kPageSize));
+      DCHECK_EQ(chunk->executable(), NOT_EXECUTABLE);
+      chunk->SetFlag(MemoryChunk::POOLED);
+      // Fall through to kPreFreeAndQueue.
+    case kPreFreeAndQueue:
+      PreFreeMemory(chunk);
+      // The chunks added to this queue will be freed by a concurrent thread.
+      unmapper()->AddMemoryChunkSafe(chunk);
+      break;
+    default:
+      UNREACHABLE();
   }
 }

-template void MemoryAllocator::Free<MemoryAllocator::kRegular>(
+template void MemoryAllocator::Free<MemoryAllocator::kFull>(MemoryChunk* chunk);
+
+template void MemoryAllocator::Free<MemoryAllocator::kPreFreeAndQueue>(
     MemoryChunk* chunk);

-template void MemoryAllocator::Free<MemoryAllocator::kPooled>(
+template void MemoryAllocator::Free<MemoryAllocator::kPooledAndQueue>(
     MemoryChunk* chunk);

 template <typename PageType, MemoryAllocator::AllocationMode mode,
           typename SpaceType>
 PageType* MemoryAllocator::AllocatePage(intptr_t size, SpaceType* owner,
                                         Executability executable) {
   MemoryChunk* chunk = nullptr;
   if (mode == kPooled) {
     DCHECK_EQ(size, static_cast<intptr_t>(MemoryChunk::kAllocatableMemory));
     DCHECK_EQ(executable, NOT_EXECUTABLE);
(...skipping 13 matching lines...)
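Free<mode>() above collapses the earlier Free()/FreePooled() pair into one compile-time switch, where kPooledAndQueue only tags the chunk as POOLED and then falls through to the shared kPreFreeAndQueue path that queues it for the background thread. A small self-contained sketch of that dispatch shape, with toy types rather than the V8 classes, might look like this:

// Illustrative only -- a compile-time free-mode switch with a fall-through,
// mirroring the shape of MemoryAllocator::Free<mode>(); toy types throughout.
#include <cstdio>

enum FreeMode { kFull, kPreFreeAndQueue, kPooledAndQueue };

struct Chunk {
  bool pooled = false;
};

template <FreeMode mode>
void Free(Chunk* chunk) {
  switch (mode) {
    case kFull:
      std::puts("pre-free and release immediately on this thread");
      break;
    case kPooledAndQueue:
      chunk->pooled = true;  // tag it: the queued free pools it, not unmaps it
      // Fall through to the shared queueing path.
    case kPreFreeAndQueue:
      std::printf("pre-free, then queue for a background thread (pooled=%d)\n",
                  static_cast<int>(chunk->pooled));
      break;
  }
}

int main() {
  Chunk a, b, c;
  Free<kFull>(&a);             // as PagedSpace::TearDown() now does
  Free<kPreFreeAndQueue>(&b);  // as PagedSpace::ReleasePage() now does
  Free<kPooledAndQueue>(&c);   // as SemiSpace::Uncommit() now does
}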
 template LargePage*
 MemoryAllocator::AllocatePage<LargePage, MemoryAllocator::kRegular, Space>(
     intptr_t, Space*, Executability);

 template NewSpacePage* MemoryAllocator::AllocatePage<
     NewSpacePage, MemoryAllocator::kPooled, SemiSpace>(intptr_t, SemiSpace*,
                                                        Executability);

 template <typename SpaceType>
 MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) {
-  if (chunk_pool_.is_empty()) return nullptr;
+  MemoryChunk* chunk = unmapper()->TryGetPooledMemoryChunkSafe();
+  if (chunk == nullptr) return nullptr;
   const int size = MemoryChunk::kPageSize;
-  MemoryChunk* chunk = chunk_pool_.RemoveLast();
   const Address start = reinterpret_cast<Address>(chunk);
   const Address area_start = start + MemoryChunk::kObjectStartOffset;
   const Address area_end = start + size;
   CommitBlock(reinterpret_cast<Address>(chunk), size, NOT_EXECUTABLE);
   base::VirtualMemory reservation(start, size);
   MemoryChunk::Initialize(isolate_->heap(), start, size, area_start, area_end,
                           NOT_EXECUTABLE, owner, &reservation);
   size_.Increment(size);
   return chunk;
 }

-void MemoryAllocator::FreePooled(MemoryChunk* chunk) {
-  DCHECK_EQ(chunk->size(), static_cast<size_t>(MemoryChunk::kPageSize));
-  DCHECK_EQ(chunk->executable(), NOT_EXECUTABLE);
-  chunk_pool_.Add(chunk);
-  intptr_t chunk_size = static_cast<intptr_t>(chunk->size());
-  if (chunk->executable() == EXECUTABLE) {
-    size_executable_.Increment(-chunk_size);
-  }
-  size_.Increment(-chunk_size);
-  UncommitBlock(reinterpret_cast<Address>(chunk), MemoryChunk::kPageSize);
-}
-
 bool MemoryAllocator::CommitBlock(Address start, size_t size,
                                   Executability executable) {
   if (!CommitMemory(start, size, executable)) return false;

   if (Heap::ShouldZapGarbage()) {
     ZapBlock(start, size);
   }

   isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
   return true;
(...skipping 122 matching lines...)
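AllocatePagePooled() above takes an already-reserved, uncommitted chunk out of the pool and recommits it with CommitBlock(), while the pooled free path uncommits the block but keeps the reservation. A rough POSIX-only sketch of that commit/uncommit cycle is shown below; the mmap/mprotect/madvise calls, the pool container, and the chunk-size constant are illustrative stand-ins, not what V8's base::VirtualMemory does internally.

// Illustrative only (POSIX): uncommit a pooled page but keep its reservation,
// then recommit the same range when the pool hands it back out.
#include <sys/mman.h>

#include <cstddef>
#include <cstdio>
#include <vector>

constexpr size_t kChunkSize = size_t{1} << 19;  // page-sized chunk, e.g. 512 KB

std::vector<void*> chunk_pool;

// Like the pooled branch of PerformFreeMemory(): drop the backing memory,
// keep the virtual address range, remember the chunk in the pool.
void FreePooled(void* chunk) {
  mprotect(chunk, kChunkSize, PROT_NONE);
  madvise(chunk, kChunkSize, MADV_DONTNEED);
  chunk_pool.push_back(chunk);
}

// Like AllocatePagePooled(): reuse a pooled reservation by recommitting it.
void* AllocatePagePooled() {
  if (chunk_pool.empty()) return nullptr;
  void* chunk = chunk_pool.back();
  chunk_pool.pop_back();
  mprotect(chunk, kChunkSize, PROT_READ | PROT_WRITE);
  return chunk;
}

int main() {
  void* page = mmap(nullptr, kChunkSize, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (page == MAP_FAILED) return 1;
  FreePooled(page);                     // uncommitted, but still reserved
  void* reused = AllocatePagePooled();  // same address range comes back
  std::printf("reused == page: %d\n", static_cast<int>(reused == page));
  munmap(reused, kChunkSize);
}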
     vm->Uncommit(header, header_size);
   }
   return false;
 }


 // -----------------------------------------------------------------------------
 // MemoryChunk implementation

 void MemoryChunk::ReleaseAllocatedMemory() {
-  delete skip_list_;
-  skip_list_ = nullptr;
-  delete mutex_;
-  mutex_ = nullptr;
-  ReleaseOldToNewSlots();
-  ReleaseOldToOldSlots();
+  if (skip_list_ != nullptr) {
+    delete skip_list_;
+    skip_list_ = nullptr;
+  }
+  if (mutex_ != nullptr) {
+    delete mutex_;
+    mutex_ = nullptr;
+  }
+  if (old_to_new_slots_ != nullptr) ReleaseOldToNewSlots();
+  if (old_to_old_slots_ != nullptr) ReleaseOldToOldSlots();
 }

 static SlotSet* AllocateSlotSet(size_t size, Address page_start) {
   size_t pages = (size + Page::kPageSize - 1) / Page::kPageSize;
   DCHECK(pages > 0);
   SlotSet* slot_set = new SlotSet[pages];
   for (size_t i = 0; i < pages; i++) {
     slot_set[i].SetPageStart(page_start + i * Page::kPageSize);
   }
   return slot_set;
(...skipping 63 matching lines...)

 bool PagedSpace::SetUp() { return true; }


 bool PagedSpace::HasBeenSetUp() { return true; }


 void PagedSpace::TearDown() {
   PageIterator iterator(this);
   while (iterator.has_next()) {
-    heap()->memory_allocator()->Free(iterator.next());
+    heap()->memory_allocator()->Free<MemoryAllocator::kFull>(iterator.next());
   }
   anchor_.set_next_page(&anchor_);
   anchor_.set_prev_page(&anchor_);
   accounting_stats_.Clear();
 }

 void PagedSpace::RefillFreeList() {
   // Any PagedSpace might invoke RefillFreeList. We filter all but our old
   // generation spaces out.
   if (identity() != OLD_SPACE && identity() != CODE_SPACE &&
(...skipping 170 matching lines...)
     allocation_info_.Reset(nullptr, nullptr);
   }

   // If page is still in a list, unlink it from that list.
   if (page->next_chunk() != NULL) {
     DCHECK(page->prev_chunk() != NULL);
     page->Unlink();
   }

   AccountUncommitted(static_cast<intptr_t>(page->size()));
-  heap()->QueueMemoryChunkForFree(page);
+  heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);

   DCHECK(Capacity() > 0);
   accounting_stats_.ShrinkSpace(AreaSize());
 }

 #ifdef DEBUG
 void PagedSpace::Print() {}
 #endif

 #ifdef VERIFY_HEAP
(...skipping 443 matching lines...)
   }
   committed_ = true;
   return true;
 }


 bool SemiSpace::Uncommit() {
   DCHECK(is_committed());
   NewSpacePageIterator it(this);
   while (it.has_next()) {
-    heap()->memory_allocator()->Free<MemoryAllocator::kPooled>(it.next());
+    heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
+        it.next());
   }
   anchor()->set_next_page(anchor());
   anchor()->set_prev_page(anchor());
   AccountUncommitted(current_capacity_);
   committed_ = false;
+  heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
   return true;
 }


 size_t SemiSpace::CommittedPhysicalMemory() {
   if (!is_committed()) return 0;
   size_t size = 0;
   NewSpacePageIterator it(this);
   while (it.has_next()) {
     size += it.next()->CommittedPhysicalMemory();
(...skipping 57 matching lines...)
     const int delta = current_capacity_ - new_capacity;
     DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
     int delta_pages = delta / NewSpacePage::kPageSize;
     NewSpacePage* new_last_page;
     NewSpacePage* last_page;
     while (delta_pages > 0) {
       last_page = anchor()->prev_page();
       new_last_page = last_page->prev_page();
       new_last_page->set_next_page(anchor());
       anchor()->set_prev_page(new_last_page);
-      heap()->memory_allocator()->Free<MemoryAllocator::kPooled>(last_page);
+      heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
+          last_page);
       delta_pages--;
     }
     AccountUncommitted(static_cast<intptr_t>(delta));
+    heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
   }
   current_capacity_ = new_capacity;
   return true;
 }

 void SemiSpace::FixPagesFlags(intptr_t flags, intptr_t mask) {
   anchor_.set_owner(this);
   // Fixup back-pointers to anchor. Address of anchor changes when we swap.
   anchor_.prev_page()->set_next_page(&anchor_);
   anchor_.next_page()->set_prev_page(&anchor_);
(...skipping 1072 matching lines...)

 void LargeObjectSpace::TearDown() {
   while (first_page_ != NULL) {
     LargePage* page = first_page_;
     first_page_ = first_page_->next_page();
     LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));

     ObjectSpace space = static_cast<ObjectSpace>(1 << identity());
     heap()->memory_allocator()->PerformAllocationCallback(
         space, kAllocationActionFree, page->size());
-    heap()->memory_allocator()->Free(page);
+    heap()->memory_allocator()->Free<MemoryAllocator::kFull>(page);
   }
   SetUp();
 }


 AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
                                                Executability executable) {
   // Check if we want to force a GC before growing the old space further.
   // If so, fail the allocation.
   if (!heap()->CanExpandOldGeneration(object_size)) {
(...skipping 122 matching lines...)
       // Use variable alignment to help pass length check (<= 80 characters)
       // of single line in tools/presubmit.py.
       const intptr_t alignment = MemoryChunk::kAlignment;
       uintptr_t base = reinterpret_cast<uintptr_t>(page) / alignment;
       uintptr_t limit = base + (page->size() - 1) / alignment;
       for (uintptr_t key = base; key <= limit; key++) {
         chunk_map_.Remove(reinterpret_cast<void*>(key),
                           static_cast<uint32_t>(key));
       }

-      heap()->QueueMemoryChunkForFree(page);
+      heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
     }
   }
 }


 bool LargeObjectSpace::Contains(HeapObject* object) {
   Address address = object->address();
   MemoryChunk* chunk = MemoryChunk::FromAddress(address);

   bool owned = (chunk->owner() == this);
(...skipping 110 matching lines...)
     object->ShortPrint();
     PrintF("\n");
   }
   printf(" --------------------------------------\n");
   printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
 }

 #endif  // DEBUG
 }  // namespace internal
 }  // namespace v8
