Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/heap/spaces.h" | 5 #include "src/heap/spaces.h" |
| 6 | 6 |
| 7 #include "src/base/bits.h" | 7 #include "src/base/bits.h" |
| 8 #include "src/base/platform/platform.h" | 8 #include "src/base/platform/platform.h" |
| 9 #include "src/full-codegen/full-codegen.h" | 9 #include "src/full-codegen/full-codegen.h" |
| 10 #include "src/heap/mark-compact.h" | 10 #include "src/heap/mark-compact.h" |
| (...skipping 59 matching lines...) | |
| 70 return true; | 70 return true; |
| 71 } | 71 } |
| 72 | 72 |
| 73 | 73 |
| 74 // ----------------------------------------------------------------------------- | 74 // ----------------------------------------------------------------------------- |
| 75 // CodeRange | 75 // CodeRange |
| 76 | 76 |
| 77 | 77 |
| 78 CodeRange::CodeRange(Isolate* isolate) | 78 CodeRange::CodeRange(Isolate* isolate) |
| 79 : isolate_(isolate), | 79 : isolate_(isolate), |
| 80 code_range_(NULL), | 80 code_range_(NULL), |
> **Michael Lippautz** (2015/08/24 13:43:12): add `free_list_mutex_(),`
>
> **Hannes Payer (out of office)** (2015/08/24 14:58:13): Why? The default constructor is called in this case.
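Hannes's point is standard C++: a class-type member omitted from the constructor's initializer list is still default-constructed before the body runs, so an explicit `free_list_mutex_()` entry changes nothing. A minimal standalone sketch of that rule (`FakeMutex` and `CodeRangeSketch` are invented names, not V8 code):

```cpp
// Sketch of default member construction; not part of V8.
class FakeMutex {
 public:
  FakeMutex() {}  // Runs for any member omitted from an initializer list.
};

class CodeRangeSketch {
 public:
  // free_list_mutex_ is deliberately absent below; its default
  // constructor is still invoked before the constructor body executes.
  explicit CodeRangeSketch(int isolate) : isolate_(isolate) {}

 private:
  int isolate_;
  FakeMutex free_list_mutex_;
};
```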
| 81 free_list_(0), | 81 free_list_(0), |
| 82 allocation_list_(0), | 82 allocation_list_(0), |
| 83 current_allocation_block_index_(0), | 83 current_allocation_block_index_(0), |
| 84 emergency_block_() {} | 84 emergency_block_() {} |
| 85 | 85 |
| 86 | 86 |
| 87 bool CodeRange::SetUp(size_t requested) { | 87 bool CodeRange::SetUp(size_t requested) { |
| 88 DCHECK(code_range_ == NULL); | 88 DCHECK(code_range_ == NULL); |
| 89 | 89 |
| 90 if (requested == 0) { | 90 if (requested == 0) { |
| (...skipping 65 matching lines...) | |
| 156 | 156 |
| 157 bool CodeRange::GetNextAllocationBlock(size_t requested) { | 157 bool CodeRange::GetNextAllocationBlock(size_t requested) { |
| 158 for (current_allocation_block_index_++; | 158 for (current_allocation_block_index_++; |
| 159 current_allocation_block_index_ < allocation_list_.length(); | 159 current_allocation_block_index_ < allocation_list_.length(); |
| 160 current_allocation_block_index_++) { | 160 current_allocation_block_index_++) { |
| 161 if (requested <= allocation_list_[current_allocation_block_index_].size) { | 161 if (requested <= allocation_list_[current_allocation_block_index_].size) { |
| 162 return true; // Found a large enough allocation block. | 162 return true; // Found a large enough allocation block. |
| 163 } | 163 } |
| 164 } | 164 } |
| 165 | 165 |
| 166 base::LockGuard<base::Mutex> free_list_lock_guard(&free_list_mutex_); | |
> **Michael Lippautz** (2015/08/24 13:43:12): Please add a block to make clear which parts are the critical section.
>
> **Hannes Payer (out of office)** (2015/08/24 14:58:13): I would have just locked the rest of the function, …
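The two scoping styles being weighed look roughly as follows. This sketch uses standard `std::mutex`/`std::lock_guard` in place of V8's `base::Mutex`/`base::LockGuard`, so it illustrates the scoping question only, not the committed code:

```cpp
#include <mutex>
#include <vector>

std::mutex free_list_mutex;  // Stand-in for CodeRange's member mutex.
std::vector<int> free_list;  // Stand-in for the free-block list.

// Michael's suggestion: an explicit block makes the critical section visible.
void MergeScoped() {
  {
    std::lock_guard<std::mutex> guard(free_list_mutex);
    free_list.push_back(0);  // Guarded work, clearly delimited.
  }                          // Lock released at the closing brace.
  // Unguarded work may continue here.
}

// What the patch does: the guard holds the mutex until the function returns.
void MergeToEnd() {
  std::lock_guard<std::mutex> guard(free_list_mutex);
  free_list.push_back(0);  // Everything from here to return is guarded.
}
```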
| 167 | |
| 166 // Sort and merge the free blocks on the free list and the allocation list. | 168 // Sort and merge the free blocks on the free list and the allocation list. |
| 167 free_list_.AddAll(allocation_list_); | 169 free_list_.AddAll(allocation_list_); |
| 168 allocation_list_.Clear(); | 170 allocation_list_.Clear(); |
| 169 free_list_.Sort(&CompareFreeBlockAddress); | 171 free_list_.Sort(&CompareFreeBlockAddress); |
| 170 for (int i = 0; i < free_list_.length();) { | 172 for (int i = 0; i < free_list_.length();) { |
| 171 FreeBlock merged = free_list_[i]; | 173 FreeBlock merged = free_list_[i]; |
| 172 i++; | 174 i++; |
| 173 // Add adjacent free blocks to the current merged block. | 175 // Add adjacent free blocks to the current merged block. |
| 174 while (i < free_list_.length() && | 176 while (i < free_list_.length() && |
| 175 free_list_[i].start == merged.start + merged.size) { | 177 free_list_[i].start == merged.start + merged.size) { |
| (...skipping 46 matching lines...) | |
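The sort-and-merge loop above coalesces address-adjacent free blocks. As a standalone restatement of the same idea (standard C++, with invented `FreeBlock`/`MergeAdjacent` names rather than the V8 types):

```cpp
#include <algorithm>
#include <cstddef>
#include <vector>

struct FreeBlock {
  std::size_t start;
  std::size_t size;
};

// Sort blocks by start address, then fold each block into its predecessor
// whenever the two are contiguous in the address space.
std::vector<FreeBlock> MergeAdjacent(std::vector<FreeBlock> blocks) {
  std::sort(blocks.begin(), blocks.end(),
            [](const FreeBlock& a, const FreeBlock& b) {
              return a.start < b.start;
            });
  std::vector<FreeBlock> merged;
  for (const FreeBlock& block : blocks) {
    if (!merged.empty() &&
        merged.back().start + merged.back().size == block.start) {
      merged.back().size += block.size;  // Adjacent: grow the previous block.
    } else {
      merged.push_back(block);
    }
  }
  return merged;
}
```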
| 222 } | 224 } |
| 223 | 225 |
| 224 | 226 |
| 225 bool CodeRange::UncommitRawMemory(Address start, size_t length) { | 227 bool CodeRange::UncommitRawMemory(Address start, size_t length) { |
| 226 return code_range_->Uncommit(start, length); | 228 return code_range_->Uncommit(start, length); |
| 227 } | 229 } |
| 228 | 230 |
| 229 | 231 |
| 230 void CodeRange::FreeRawMemory(Address address, size_t length) { | 232 void CodeRange::FreeRawMemory(Address address, size_t length) { |
| 231 DCHECK(IsAddressAligned(address, MemoryChunk::kAlignment)); | 233 DCHECK(IsAddressAligned(address, MemoryChunk::kAlignment)); |
| 234 base::LockGuard<base::Mutex> free_list_lock_guard(&free_list_mutex_); | |
| 232 free_list_.Add(FreeBlock(address, length)); | 235 free_list_.Add(FreeBlock(address, length)); |
| 233 code_range_->Uncommit(address, length); | 236 code_range_->Uncommit(address, length); |
| 234 } | 237 } |
| 235 | 238 |
| 236 | 239 |
| 237 void CodeRange::TearDown() { | 240 void CodeRange::TearDown() { |
| 238 delete code_range_; // Frees all memory in the virtual memory range. | 241 delete code_range_; // Frees all memory in the virtual memory range. |
| 239 code_range_ = NULL; | 242 code_range_ = NULL; |
| 243 base::LockGuard<base::Mutex> free_list_lock_guard(&free_list_mutex_); | |
| 240 free_list_.Free(); | 244 free_list_.Free(); |
| 241 allocation_list_.Free(); | 245 allocation_list_.Free(); |
| 242 } | 246 } |
| 243 | 247 |
| 244 | 248 |
| 245 bool CodeRange::ReserveBlock(const size_t requested_size, FreeBlock* block) { | 249 bool CodeRange::ReserveBlock(const size_t requested_size, FreeBlock* block) { |
| 246 DCHECK(allocation_list_.length() == 0 || | 250 DCHECK(allocation_list_.length() == 0 || |
| 247 current_allocation_block_index_ < allocation_list_.length()); | 251 current_allocation_block_index_ < allocation_list_.length()); |
| 248 if (allocation_list_.length() == 0 || | 252 if (allocation_list_.length() == 0 || |
| 249 requested_size > allocation_list_[current_allocation_block_index_].size) { | 253 requested_size > allocation_list_[current_allocation_block_index_].size) { |
| 250 // Find an allocation block large enough. | 254 // Find an allocation block large enough. |
| 251 if (!GetNextAllocationBlock(requested_size)) return false; | 255 if (!GetNextAllocationBlock(requested_size)) return false; |
| 252 } | 256 } |
| 253 // Commit the requested memory at the start of the current allocation block. | 257 // Commit the requested memory at the start of the current allocation block. |
| 254 size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment); | 258 size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment); |
| 255 *block = allocation_list_[current_allocation_block_index_]; | 259 *block = allocation_list_[current_allocation_block_index_]; |
| 256 // Don't leave a small free block, useless for a large object or chunk. | 260 // Don't leave a small free block, useless for a large object or chunk. |
| 257 if (aligned_requested < (block->size - Page::kPageSize)) { | 261 if (aligned_requested < (block->size - Page::kPageSize)) { |
| 258 block->size = aligned_requested; | 262 block->size = aligned_requested; |
| 259 } | 263 } |
| 260 DCHECK(IsAddressAligned(block->start, MemoryChunk::kAlignment)); | 264 DCHECK(IsAddressAligned(block->start, MemoryChunk::kAlignment)); |
| 261 allocation_list_[current_allocation_block_index_].start += block->size; | 265 allocation_list_[current_allocation_block_index_].start += block->size; |
| 262 allocation_list_[current_allocation_block_index_].size -= block->size; | 266 allocation_list_[current_allocation_block_index_].size -= block->size; |
| 263 return true; | 267 return true; |
| 264 } | 268 } |
| 265 | 269 |
| 266 | 270 |
| 267 void CodeRange::ReleaseBlock(const FreeBlock* block) { free_list_.Add(*block); } | 271 void CodeRange::ReleaseBlock(const FreeBlock* block) { |
| 272 base::LockGuard<base::Mutex> free_list_lock_guard(&free_list_mutex_); | |
| 273 free_list_.Add(*block); | |
| 274 } | |
| 268 | 275 |
| 269 | 276 |
| 270 void CodeRange::ReserveEmergencyBlock() { | 277 void CodeRange::ReserveEmergencyBlock() { |
| 271 const size_t requested_size = MemoryAllocator::CodePageAreaSize(); | 278 const size_t requested_size = MemoryAllocator::CodePageAreaSize(); |
| 272 if (emergency_block_.size == 0) { | 279 if (emergency_block_.size == 0) { |
| 273 ReserveBlock(requested_size, &emergency_block_); | 280 ReserveBlock(requested_size, &emergency_block_); |
| 274 } else { | 281 } else { |
| 275 DCHECK(emergency_block_.size >= requested_size); | 282 DCHECK(emergency_block_.size >= requested_size); |
| 276 } | 283 } |
| 277 } | 284 } |
| (...skipping 47 matching lines...) | |
| 325 Executability executable) { | 332 Executability executable) { |
| 326 if (!base::VirtualMemory::CommitRegion(base, size, | 333 if (!base::VirtualMemory::CommitRegion(base, size, |
| 327 executable == EXECUTABLE)) { | 334 executable == EXECUTABLE)) { |
| 328 return false; | 335 return false; |
| 329 } | 336 } |
| 330 UpdateAllocatedSpaceLimits(base, base + size); | 337 UpdateAllocatedSpaceLimits(base, base + size); |
| 331 return true; | 338 return true; |
| 332 } | 339 } |
| 333 | 340 |
| 334 | 341 |
| 342 void MemoryAllocator::FreeNewSpaceMemory(Address addr, | |
| 343 base::VirtualMemory* reservation, | |
| 344 Executability executable) { | |
| 345 LOG(isolate_, DeleteEvent("InitialChunk", addr)); | |
> **Michael Lippautz** (2015/08/24 13:43:12): "InitialChunk"?
>
> **Hannes Payer (out of office)** (2015/08/24 14:58:13): This was the original string. We can change it to …
| 346 | |
| 347 DCHECK(reservation->IsReserved()); | |
| 348 size_t size = reservation->size(); | |
> **Michael Lippautz** (2015/08/24 13:43:12): const size_t size = ..
>
> **Hannes Payer (out of office)** (2015/08/24 14:58:13): Done.
| 349 DCHECK(size_ >= size); | |
| 350 size_ -= size; | |
| 351 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size)); | |
| 352 FreeMemory(reservation, NOT_EXECUTABLE); | |
| 353 } | |
| 354 | |
| 355 | |
| 335 void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation, | 356 void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation, |
| 336 Executability executable) { | 357 Executability executable) { |
| 337 // TODO(gc) make code_range part of memory allocator? | 358 // TODO(gc) make code_range part of memory allocator? |
| 338 DCHECK(reservation->IsReserved()); | |
| 339 size_t size = reservation->size(); | |
| 340 DCHECK(size_ >= size); | |
| 341 size_ -= size; | |
| 342 | |
| 343 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size)); | |
| 344 | |
| 345 if (executable == EXECUTABLE) { | |
| 346 DCHECK(size_executable_ >= size); | |
| 347 size_executable_ -= size; | |
| 348 } | |
| 349 // Code which is part of the code-range does not have its own VirtualMemory. | 359 // Code which is part of the code-range does not have its own VirtualMemory. |
| 350 DCHECK(isolate_->code_range() == NULL || | 360 DCHECK(isolate_->code_range() == NULL || |
| 351 !isolate_->code_range()->contains( | 361 !isolate_->code_range()->contains( |
| 352 static_cast<Address>(reservation->address()))); | 362 static_cast<Address>(reservation->address()))); |
| 353 DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL || | 363 DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL || |
| 354 !isolate_->code_range()->valid() || size <= Page::kPageSize); | 364 !isolate_->code_range()->valid() || |
| 365 reservation->size() <= Page::kPageSize); | |
| 355 | 366 |
| 356 reservation->Release(); | 367 reservation->Release(); |
| 357 } | 368 } |
| 358 | 369 |
| 359 | 370 |
| 360 void MemoryAllocator::FreeMemory(Address base, size_t size, | 371 void MemoryAllocator::FreeMemory(Address base, size_t size, |
| 361 Executability executable) { | 372 Executability executable) { |
| 362 // TODO(gc) make code_range part of memory allocator? | 373 // TODO(gc) make code_range part of memory allocator? |
| 363 DCHECK(size_ >= size); | |
| 364 size_ -= size; | |
| 365 | |
| 366 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size)); | |
| 367 | |
| 368 if (executable == EXECUTABLE) { | |
| 369 DCHECK(size_executable_ >= size); | |
| 370 size_executable_ -= size; | |
| 371 } | |
| 372 if (isolate_->code_range() != NULL && | 374 if (isolate_->code_range() != NULL && |
| 373 isolate_->code_range()->contains(static_cast<Address>(base))) { | 375 isolate_->code_range()->contains(static_cast<Address>(base))) { |
| 374 DCHECK(executable == EXECUTABLE); | 376 DCHECK(executable == EXECUTABLE); |
| 375 isolate_->code_range()->FreeRawMemory(base, size); | 377 isolate_->code_range()->FreeRawMemory(base, size); |
| 376 } else { | 378 } else { |
| 377 DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL || | 379 DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL || |
| 378 !isolate_->code_range()->valid()); | 380 !isolate_->code_range()->valid()); |
| 379 bool result = base::VirtualMemory::ReleaseRegion(base, size); | 381 bool result = base::VirtualMemory::ReleaseRegion(base, size); |
| 380 USE(result); | 382 USE(result); |
| 381 DCHECK(result); | 383 DCHECK(result); |
| (...skipping 353 matching lines...) | |
| 735 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size, | 737 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size, |
| 736 Space* owner, | 738 Space* owner, |
| 737 Executability executable) { | 739 Executability executable) { |
| 738 MemoryChunk* chunk = | 740 MemoryChunk* chunk = |
| 739 AllocateChunk(object_size, object_size, executable, owner); | 741 AllocateChunk(object_size, object_size, executable, owner); |
| 740 if (chunk == NULL) return NULL; | 742 if (chunk == NULL) return NULL; |
| 741 return LargePage::Initialize(isolate_->heap(), chunk); | 743 return LargePage::Initialize(isolate_->heap(), chunk); |
| 742 } | 744 } |
| 743 | 745 |
| 744 | 746 |
| 745 void MemoryAllocator::Free(MemoryChunk* chunk) { | 747 void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) { |
> **Michael Lippautz** (2015/08/24 13:43:12): Should we add a flag to {MemoryChunk} indicating that it has been pre-freed?
>
> **Hannes Payer (out of office)** (2015/08/24 14:58:13): Good idea. Done.
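The flag agreed on here is not visible in this hunk. A hypothetical sketch of the idea (names invented, assuming the flag simply enforces call order between the two new phases):

```cpp
#include <cassert>

// Hypothetical sketch, not the patch's actual MemoryChunk change: a flag
// records that the pre-free phase ran so the second phase can assert it.
class MemoryChunkSketch {
 public:
  void PreFree() { pre_freed_ = true; }  // Accounting/logging phase.

  void PerformFree() {
    assert(pre_freed_ && "PreFree() must precede PerformFree()");
    // ... actually release the chunk's memory here ...
  }

 private:
  bool pre_freed_ = false;
};
```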
| 746 LOG(isolate_, DeleteEvent("MemoryChunk", chunk)); | 748 LOG(isolate_, DeleteEvent("MemoryChunk", chunk)); |
| 747 if (chunk->owner() != NULL) { | 749 if (chunk->owner() != NULL) { |
| 748 ObjectSpace space = | 750 ObjectSpace space = |
| 749 static_cast<ObjectSpace>(1 << chunk->owner()->identity()); | 751 static_cast<ObjectSpace>(1 << chunk->owner()->identity()); |
| 750 PerformAllocationCallback(space, kAllocationActionFree, chunk->size()); | 752 PerformAllocationCallback(space, kAllocationActionFree, chunk->size()); |
| 751 } | 753 } |
| 752 | 754 |
| 753 isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk), | 755 isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk), |
| 754 chunk->IsEvacuationCandidate()); | 756 chunk->IsEvacuationCandidate()); |
| 755 | 757 |
| 758 size_t size; | |
| 759 base::VirtualMemory* reservation = chunk->reserved_memory(); | |
| 760 if (reservation->IsReserved()) { | |
| 761 size = reservation->size(); | |
| 762 } else { | |
| 763 size = chunk->size(); | |
| 764 } | |
| 765 DCHECK(size_ >= size); | |
| 766 size_ -= size; | |
| 767 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size)); | |
| 768 | |
| 769 if (chunk->executable() == EXECUTABLE) { | |
| 770 DCHECK(size_executable_ >= size); | |
| 771 size_executable_ -= size; | |
| 772 } | |
| 773 } | |
| 774 | |
| 775 | |
| 776 void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) { | |
> **Michael Lippautz** (2015/08/24 13:43:12): We could make this a static on {MemoryChunk}. {M…
>
> **Hannes Payer (out of office)** (2015/08/24 14:58:13): As discussed offline, this method should not be static.
| 756 delete chunk->slots_buffer(); | 777 delete chunk->slots_buffer(); |
| 757 delete chunk->skip_list(); | 778 delete chunk->skip_list(); |
| 758 delete chunk->mutex(); | 779 delete chunk->mutex(); |
| 759 | 780 |
| 760 base::VirtualMemory* reservation = chunk->reserved_memory(); | 781 base::VirtualMemory* reservation = chunk->reserved_memory(); |
| 761 if (reservation->IsReserved()) { | 782 if (reservation->IsReserved()) { |
| 762 FreeMemory(reservation, chunk->executable()); | 783 FreeMemory(reservation, chunk->executable()); |
| 763 } else { | 784 } else { |
| 764 FreeMemory(chunk->address(), chunk->size(), chunk->executable()); | 785 FreeMemory(chunk->address(), chunk->size(), chunk->executable()); |
| 765 } | 786 } |
| 766 } | 787 } |
| 767 | 788 |
| 768 | 789 |
| 790 void MemoryAllocator::Free(MemoryChunk* chunk) { | |
| 791 PreFreeMemory(chunk); | |
| 792 PerformFreeMemory(chunk); | |
| 793 } | |
| 794 | |
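With this refactoring, `Free()` becomes a thin wrapper, and the two phases can in principle be invoked separately, e.g. bookkeeping immediately and the actual release later (that motivation is inferred from the mutex additions elsewhere in this CL, not stated in the hunk). A usage sketch, assuming V8's internal headers:

```cpp
// Usage sketch: the two equivalent ways to free a chunk after this patch.
void FreeNow(MemoryAllocator* allocator, MemoryChunk* chunk) {
  allocator->Free(chunk);  // Wrapper: both phases back to back.
}

void FreeInTwoPhases(MemoryAllocator* allocator, MemoryChunk* chunk) {
  allocator->PreFreeMemory(chunk);      // Accounting, callbacks, logging.
  allocator->PerformFreeMemory(chunk);  // Releases the underlying memory.
}
```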
| 795 | |
| 769 bool MemoryAllocator::CommitBlock(Address start, size_t size, | 796 bool MemoryAllocator::CommitBlock(Address start, size_t size, |
| 770 Executability executable) { | 797 Executability executable) { |
| 771 if (!CommitMemory(start, size, executable)) return false; | 798 if (!CommitMemory(start, size, executable)) return false; |
| 772 | 799 |
| 773 if (Heap::ShouldZapGarbage()) { | 800 if (Heap::ShouldZapGarbage()) { |
| 774 ZapBlock(start, size); | 801 ZapBlock(start, size); |
| 775 } | 802 } |
| 776 | 803 |
| 777 isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size)); | 804 isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size)); |
| 778 return true; | 805 return true; |
| (...skipping 498 matching lines...) | |
| 1277 promoted_histogram_ = NULL; | 1304 promoted_histogram_ = NULL; |
| 1278 } | 1305 } |
| 1279 | 1306 |
| 1280 start_ = NULL; | 1307 start_ = NULL; |
| 1281 allocation_info_.set_top(NULL); | 1308 allocation_info_.set_top(NULL); |
| 1282 allocation_info_.set_limit(NULL); | 1309 allocation_info_.set_limit(NULL); |
| 1283 | 1310 |
| 1284 to_space_.TearDown(); | 1311 to_space_.TearDown(); |
| 1285 from_space_.TearDown(); | 1312 from_space_.TearDown(); |
| 1286 | 1313 |
| 1287 LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_)); | 1314 heap()->isolate()->memory_allocator()->FreeNewSpaceMemory( |
| 1315 chunk_base_, &reservation_, NOT_EXECUTABLE); | |
| 1288 | 1316 |
| 1289 DCHECK(reservation_.IsReserved()); | |
| 1290 heap()->isolate()->memory_allocator()->FreeMemory(&reservation_, | |
| 1291 NOT_EXECUTABLE); | |
| 1292 chunk_base_ = NULL; | 1317 chunk_base_ = NULL; |
| 1293 chunk_size_ = 0; | 1318 chunk_size_ = 0; |
| 1294 } | 1319 } |
| 1295 | 1320 |
| 1296 | 1321 |
| 1297 void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); } | 1322 void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); } |
| 1298 | 1323 |
| 1299 | 1324 |
| 1300 void NewSpace::Grow() { | 1325 void NewSpace::Grow() { |
| 1301 // Double the semispace size but only up to maximum capacity. | 1326 // Double the semispace size but only up to maximum capacity. |
| (...skipping 1811 matching lines...) | |
| 3113 object->ShortPrint(); | 3138 object->ShortPrint(); |
| 3114 PrintF("\n"); | 3139 PrintF("\n"); |
| 3115 } | 3140 } |
| 3116 printf(" --------------------------------------\n"); | 3141 printf(" --------------------------------------\n"); |
| 3117 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); | 3142 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); |
| 3118 } | 3143 } |
| 3119 | 3144 |
| 3120 #endif // DEBUG | 3145 #endif // DEBUG |
| 3121 } // namespace internal | 3146 } // namespace internal |
| 3122 } // namespace v8 | 3147 } // namespace v8 |