Chromium Code Reviews

Unified Diff: src/heap/spaces.cc

Issue 1306183003: Re-land "Concurrently unmap free pages." (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Created 5 years, 4 months ago
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "src/heap/spaces.h"

 #include "src/base/bits.h"
 #include "src/base/platform/platform.h"
 #include "src/full-codegen/full-codegen.h"
 #include "src/heap/mark-compact.h"
(...skipping 145 matching lines...)
 bool CodeRange::GetNextAllocationBlock(size_t requested) {
   for (current_allocation_block_index_++;
        current_allocation_block_index_ < allocation_list_.length();
        current_allocation_block_index_++) {
     if (requested <= allocation_list_[current_allocation_block_index_].size) {
       return true;  // Found a large enough allocation block.
     }
   }

-  // Sort and merge the free blocks on the free list and the allocation list.
-  free_list_.AddAll(allocation_list_);
-  allocation_list_.Clear();
-  free_list_.Sort(&CompareFreeBlockAddress);
-  for (int i = 0; i < free_list_.length();) {
-    FreeBlock merged = free_list_[i];
-    i++;
-    // Add adjacent free blocks to the current merged block.
-    while (i < free_list_.length() &&
-           free_list_[i].start == merged.start + merged.size) {
-      merged.size += free_list_[i].size;
-      i++;
-    }
-    if (merged.size > 0) {
-      allocation_list_.Add(merged);
-    }
-  }
-  free_list_.Clear();
+  {
+    base::LockGuard<base::Mutex> free_list_lock_guard(&free_list_mutex_);
+
+    // Sort and merge the free blocks on the free list and the allocation list.
+    free_list_.AddAll(allocation_list_);
+    allocation_list_.Clear();
+    free_list_.Sort(&CompareFreeBlockAddress);
+    for (int i = 0; i < free_list_.length();) {
+      FreeBlock merged = free_list_[i];
+      i++;
+      // Add adjacent free blocks to the current merged block.
+      while (i < free_list_.length() &&
+             free_list_[i].start == merged.start + merged.size) {
+        merged.size += free_list_[i].size;
+        i++;
+      }
+      if (merged.size > 0) {
+        allocation_list_.Add(merged);
+      }
+    }
+    free_list_.Clear();
+  }

   for (current_allocation_block_index_ = 0;
        current_allocation_block_index_ < allocation_list_.length();
        current_allocation_block_index_++) {
     if (requested <= allocation_list_[current_allocation_block_index_].size) {
       return true;  // Found a large enough allocation block.
     }
   }
   current_allocation_block_index_ = 0;
   // Code range is full or too fragmented.
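Note: the braces added above exist only to scope the lock. base::LockGuard acquires free_list_mutex_ in its constructor and releases it in its destructor, so the merge of free_list_ into allocation_list_ cannot race with a concurrent unmapper thread adding blocks through FreeRawMemory or ReleaseBlock. A minimal sketch of the same RAII pattern, using standard-library types as stand-ins for V8's base::Mutex and base::LockGuard:

#include <cstddef>
#include <mutex>
#include <vector>

struct FreeBlock {
  char* start;
  std::size_t size;
};

class CodeRangeSketch {
 public:
  // May be called from an unmapper thread: mutating the shared free list
  // requires the lock.
  void FreeRawMemory(char* address, std::size_t length) {
    std::lock_guard<std::mutex> guard(free_list_mutex_);
    free_list_.push_back(FreeBlock{address, length});
  }

  // Main thread: the inner braces bound the critical section, mirroring the
  // block scope introduced in the patch.
  void MergeFreeBlocks() {
    {
      std::lock_guard<std::mutex> guard(free_list_mutex_);
      // ... sort free_list_ and merge adjacent blocks into allocation_list_ ...
      free_list_.clear();
    }  // The guard's destructor releases the mutex here.
    // allocation_list_ is touched only by the main thread, so scanning it
    // afterwards needs no lock.
  }

 private:
  std::mutex free_list_mutex_;
  std::vector<FreeBlock> free_list_;        // shared with the unmapper thread
  std::vector<FreeBlock> allocation_list_;  // main-thread only
};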
(...skipping 28 matching lines...)
 }


 bool CodeRange::UncommitRawMemory(Address start, size_t length) {
   return code_range_->Uncommit(start, length);
 }


 void CodeRange::FreeRawMemory(Address address, size_t length) {
   DCHECK(IsAddressAligned(address, MemoryChunk::kAlignment));
+  base::LockGuard<base::Mutex> free_list_lock_guard(&free_list_mutex_);
   free_list_.Add(FreeBlock(address, length));
   code_range_->Uncommit(address, length);
 }


 void CodeRange::TearDown() {
   delete code_range_;  // Frees all memory in the virtual memory range.
   code_range_ = NULL;
+  base::LockGuard<base::Mutex> free_list_lock_guard(&free_list_mutex_);
   free_list_.Free();
   allocation_list_.Free();
 }


 bool CodeRange::ReserveBlock(const size_t requested_size, FreeBlock* block) {
   DCHECK(allocation_list_.length() == 0 ||
          current_allocation_block_index_ < allocation_list_.length());
   if (allocation_list_.length() == 0 ||
       requested_size > allocation_list_[current_allocation_block_index_].size) {
     // Find an allocation block large enough.
     if (!GetNextAllocationBlock(requested_size)) return false;
   }
   // Commit the requested memory at the start of the current allocation block.
   size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment);
   *block = allocation_list_[current_allocation_block_index_];
   // Don't leave a small free block, useless for a large object or chunk.
   if (aligned_requested < (block->size - Page::kPageSize)) {
     block->size = aligned_requested;
   }
   DCHECK(IsAddressAligned(block->start, MemoryChunk::kAlignment));
   allocation_list_[current_allocation_block_index_].start += block->size;
   allocation_list_[current_allocation_block_index_].size -= block->size;
   return true;
 }


-void CodeRange::ReleaseBlock(const FreeBlock* block) { free_list_.Add(*block); }
+void CodeRange::ReleaseBlock(const FreeBlock* block) {
+  base::LockGuard<base::Mutex> free_list_lock_guard(&free_list_mutex_);
+  free_list_.Add(*block);
+}


 void CodeRange::ReserveEmergencyBlock() {
   const size_t requested_size = MemoryAllocator::CodePageAreaSize();
   if (emergency_block_.size == 0) {
     ReserveBlock(requested_size, &emergency_block_);
   } else {
     DCHECK(emergency_block_.size >= requested_size);
   }
 }
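Note: taken together, the CodeRange changes above establish a simple discipline: free_list_ is the only structure shared with other threads, and every mutation of it (FreeRawMemory, ReleaseBlock, the merge in GetNextAllocationBlock, and the final TearDown) now happens under free_list_mutex_, while allocation_list_ and current_allocation_block_index_ remain main-thread-only. A self-contained, hedged demo of that producer/consumer shape, with plain integers standing in for code-range blocks:

#include <cstddef>
#include <cstdio>
#include <mutex>
#include <thread>
#include <vector>

int main() {
  std::mutex free_list_mutex;
  std::vector<int> free_list;        // shared: guarded by free_list_mutex
  std::vector<int> allocation_list;  // main-thread only

  // Stand-in for an unmapper thread returning freed blocks via
  // FreeRawMemory()/ReleaseBlock().
  std::thread unmapper([&] {
    for (int block = 0; block < 1000; block++) {
      std::lock_guard<std::mutex> guard(free_list_mutex);
      free_list.push_back(block);
    }
  });

  // Stand-in for the main thread's merge in GetNextAllocationBlock():
  // drain the shared list under the lock, then work lock-free.
  // (Busy-waiting is for demo purposes only.)
  std::size_t merged = 0;
  while (merged < 1000) {
    {
      std::lock_guard<std::mutex> guard(free_list_mutex);
      for (int block : free_list) allocation_list.push_back(block);
      free_list.clear();
    }
    merged = allocation_list.size();
  }

  unmapper.join();
  std::printf("merged %zu blocks\n", allocation_list.size());
  return 0;
}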
(...skipping 47 matching lines...)
                                     Executability executable) {
   if (!base::VirtualMemory::CommitRegion(base, size,
                                          executable == EXECUTABLE)) {
     return false;
   }
   UpdateAllocatedSpaceLimits(base, base + size);
   return true;
 }


+void MemoryAllocator::FreeNewSpaceMemory(Address addr,
+                                         base::VirtualMemory* reservation,
+                                         Executability executable) {
+  LOG(isolate_, DeleteEvent("NewSpace", addr));
+
+  DCHECK(reservation->IsReserved());
+  const size_t size = reservation->size();
+  DCHECK(size_ >= size);
+  size_ -= size;
+  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
+  FreeMemory(reservation, NOT_EXECUTABLE);
+}
+
+
 void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation,
                                  Executability executable) {
   // TODO(gc) make code_range part of memory allocator?
-  DCHECK(reservation->IsReserved());
-  size_t size = reservation->size();
-  DCHECK(size_ >= size);
-  size_ -= size;
-
-  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
-
-  if (executable == EXECUTABLE) {
-    DCHECK(size_executable_ >= size);
-    size_executable_ -= size;
-  }
   // Code which is part of the code-range does not have its own VirtualMemory.
   DCHECK(isolate_->code_range() == NULL ||
          !isolate_->code_range()->contains(
              static_cast<Address>(reservation->address())));
   DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL ||
-         !isolate_->code_range()->valid() || size <= Page::kPageSize);
+         !isolate_->code_range()->valid() ||
+         reservation->size() <= Page::kPageSize);

   reservation->Release();
 }


 void MemoryAllocator::FreeMemory(Address base, size_t size,
                                  Executability executable) {
   // TODO(gc) make code_range part of memory allocator?
-  DCHECK(size_ >= size);
-  size_ -= size;
-
-  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
-
-  if (executable == EXECUTABLE) {
-    DCHECK(size_executable_ >= size);
-    size_executable_ -= size;
-  }
   if (isolate_->code_range() != NULL &&
       isolate_->code_range()->contains(static_cast<Address>(base))) {
     DCHECK(executable == EXECUTABLE);
     isolate_->code_range()->FreeRawMemory(base, size);
   } else {
     DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL ||
            !isolate_->code_range()->valid());
     bool result = base::VirtualMemory::ReleaseRegion(base, size);
     USE(result);
     DCHECK(result);
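Note: after this hunk, neither FreeMemory overload touches size_, size_executable_, or the memory_allocated counter; the deleted bookkeeping reappears in PreFreeMemory below. The point of the split is that the counters should only be updated on the thread that owns them, while the actual release of the mapping has no such constraint and can later run off the main thread. A small sketch of that separation, with hypothetical names:

#include <cstddef>

// Hypothetical stand-ins; the real code uses base::VirtualMemory and
// per-isolate counters.
struct AllocatorSketch {
  std::size_t size_ = 0;             // owned by the main thread
  std::size_t size_executable_ = 0;  // owned by the main thread

  // Phase 1, main thread only: take the chunk off the books.
  void PreFree(std::size_t size, bool executable) {
    size_ -= size;
    if (executable) size_executable_ -= size;
  }

  // Phase 2, any thread: release the mapping; no shared counters touched.
  static void FreeMemory(void* base, std::size_t size) {
    // Stand-in for base::VirtualMemory::ReleaseRegion(base, size).
    (void)base;
    (void)size;
  }
};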
(...skipping 353 matching lines...)
 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
                                               Space* owner,
                                               Executability executable) {
   MemoryChunk* chunk =
       AllocateChunk(object_size, object_size, executable, owner);
   if (chunk == NULL) return NULL;
   return LargePage::Initialize(isolate_->heap(), chunk);
 }


-void MemoryAllocator::Free(MemoryChunk* chunk) {
+void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
+  DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
   LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
   if (chunk->owner() != NULL) {
     ObjectSpace space =
         static_cast<ObjectSpace>(1 << chunk->owner()->identity());
     PerformAllocationCallback(space, kAllocationActionFree, chunk->size());
   }

   isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
                                          chunk->IsEvacuationCandidate());

-  delete chunk->slots_buffer();
-  delete chunk->skip_list();
-  delete chunk->mutex();
+  size_t size;
+  base::VirtualMemory* reservation = chunk->reserved_memory();
+  if (reservation->IsReserved()) {
+    size = reservation->size();
+  } else {
+    size = chunk->size();
+  }
+  DCHECK(size_ >= size);
+  size_ -= size;
+  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
+
+  if (chunk->executable() == EXECUTABLE) {
+    DCHECK(size_executable_ >= size);
+    size_executable_ -= size;
+  }
+
+  chunk->SetFlag(MemoryChunk::PRE_FREED);
+}
+
+
+void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
+  DCHECK(chunk->IsFlagSet(MemoryChunk::PRE_FREED));
+  chunk->ReleaseAllocatedMemory();

   base::VirtualMemory* reservation = chunk->reserved_memory();
   if (reservation->IsReserved()) {
     FreeMemory(reservation, chunk->executable());
   } else {
     FreeMemory(chunk->address(), chunk->size(), chunk->executable());
   }
 }


+void MemoryAllocator::Free(MemoryChunk* chunk) {
+  PreFreeMemory(chunk);
+  PerformFreeMemory(chunk);
+}
+
+
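Note: this is the core of the CL. Free() is split into PreFreeMemory(), which does the logging, allocation callbacks, and counter updates that must stay on the main thread, and PerformFreeMemory(), which releases the chunk's helpers and its mapping and is therefore safe to run elsewhere. The PRE_FREED flag enforces the protocol: each chunk must be pre-freed exactly once before it is actually freed. A hedged, self-contained sketch of how a concurrent unmapper might drive the two phases (V8 schedules this through its own task machinery, not std::thread):

#include <cassert>
#include <cstddef>
#include <thread>
#include <vector>

struct Chunk {
  std::size_t size;
  bool pre_freed;  // stand-in for the PRE_FREED chunk flag
};

struct AllocatorSketch {
  std::size_t size_ = 0;  // main-thread bookkeeping

  // Phase 1: main thread only (counters, logging, callbacks).
  void PreFreeMemory(Chunk* chunk) {
    assert(!chunk->pre_freed);  // stand-in for the DCHECK above
    size_ -= chunk->size;
    chunk->pre_freed = true;
  }

  // Phase 2: may run on a background thread (the actual unmapping).
  static void PerformFreeMemory(Chunk* chunk) {
    assert(chunk->pre_freed);
    delete chunk;  // stand-in for releasing the virtual memory
  }
};

int main() {
  AllocatorSketch allocator;
  allocator.size_ = 3 * 4096;

  std::vector<Chunk*> dead_pages;
  for (int i = 0; i < 3; i++) dead_pages.push_back(new Chunk{4096, false});

  // Main thread: pre-free everything first...
  for (Chunk* chunk : dead_pages) allocator.PreFreeMemory(chunk);

  // ...then hand the chunks to a concurrent unmapper.
  std::thread unmapper([&] {
    for (Chunk* chunk : dead_pages) AllocatorSketch::PerformFreeMemory(chunk);
  });
  unmapper.join();
  return 0;
}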
 bool MemoryAllocator::CommitBlock(Address start, size_t size,
                                   Executability executable) {
   if (!CommitMemory(start, size, executable)) return false;

   if (Heap::ShouldZapGarbage()) {
     ZapBlock(start, size);
   }

   isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
   return true;
(...skipping 132 matching lines...)
 void MemoryChunk::IncrementLiveBytesFromMutator(HeapObject* object, int by) {
   MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
   if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) {
     static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by);
   }
   chunk->IncrementLiveBytes(by);
 }


+void MemoryChunk::ReleaseAllocatedMemory() {
+  delete slots_buffer_;
+  delete skip_list_;
+  delete mutex_;
+}
+
+
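Note: ReleaseAllocatedMemory() gives the chunk ownership of tearing down its own heap-allocated helpers (the slots buffer, skip list, and mutex that Free() used to delete directly), so PerformFreeMemory() can release them without knowing what they are. A minimal sketch of the ownership move, with stand-in helper types:

// Stand-ins for the chunk's helper objects.
struct SlotsBuffer {};
struct SkipList {};
struct ChunkMutex {};

struct MemoryChunkSketch {
  SlotsBuffer* slots_buffer_ = nullptr;
  SkipList* skip_list_ = nullptr;
  ChunkMutex* mutex_ = nullptr;

  // Called exactly once, from PerformFreeMemory(), after PRE_FREED is set.
  // Nulling the pointers here is defensive and not in the original patch.
  void ReleaseAllocatedMemory() {
    delete slots_buffer_;
    slots_buffer_ = nullptr;
    delete skip_list_;
    skip_list_ = nullptr;
    delete mutex_;
    mutex_ = nullptr;
  }
};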
 // -----------------------------------------------------------------------------
 // PagedSpace implementation

 STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::NEW_SPACE) ==
               ObjectSpace::kObjectSpaceNewSpace);
 STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::OLD_SPACE) ==
               ObjectSpace::kObjectSpaceOldSpace);
 STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::CODE_SPACE) ==
               ObjectSpace::kObjectSpaceCodeSpace);
 STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::MAP_SPACE) ==
(...skipping 346 matching lines...)
     promoted_histogram_ = NULL;
   }

   start_ = NULL;
   allocation_info_.set_top(NULL);
   allocation_info_.set_limit(NULL);

   to_space_.TearDown();
   from_space_.TearDown();

-  LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_));
-
-  DCHECK(reservation_.IsReserved());
-  heap()->isolate()->memory_allocator()->FreeMemory(&reservation_,
-                                                    NOT_EXECUTABLE);
+  heap()->isolate()->memory_allocator()->FreeNewSpaceMemory(
+      chunk_base_, &reservation_, NOT_EXECUTABLE);
+
   chunk_base_ = NULL;
   chunk_size_ = 0;
 }
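Note: NewSpace tear-down is inherently main-thread work, so instead of inlining the logging, accounting, and release here, the patch funnels them through the new FreeNewSpaceMemory() helper shown earlier; the DeleteEvent log line and the IsReserved() check move with it. A short sketch of the resulting call shape (names as in the patch, bodies reduced to stand-ins):

#include <cassert>
#include <cstddef>
#include <cstdio>

struct Reservation {
  void* address = nullptr;
  std::size_t size = 0;
  bool reserved = false;
  bool IsReserved() const { return reserved; }
  void Release() { reserved = false; }
};

struct AllocatorSketch {
  std::size_t size_ = 0;

  void FreeNewSpaceMemory(void* addr, Reservation* reservation) {
    std::printf("DeleteEvent: NewSpace at %p\n", addr);  // LOG(...) stand-in
    assert(reservation->IsReserved());
    size_ -= reservation->size;  // accounting stays on the main thread
    reservation->Release();      // FreeMemory() stand-in: release only
  }
};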

 void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); }


 void NewSpace::Grow() {
   // Double the semispace size but only up to maximum capacity.
(...skipping 1811 matching lines...)
     object->ShortPrint();
     PrintF("\n");
   }
   printf(" --------------------------------------\n");
   printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
 }

 #endif  // DEBUG
 }  // namespace internal
 }  // namespace v8