| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/v8.h" | 5 #include "src/v8.h" |
| 6 | 6 |
| 7 #include "src/base/platform/platform.h" | 7 #include "src/base/platform/platform.h" |
| 8 #include "src/full-codegen.h" | 8 #include "src/full-codegen.h" |
| 9 #include "src/heap/mark-compact.h" |
| 9 #include "src/macro-assembler.h" | 10 #include "src/macro-assembler.h" |
| 10 #include "src/mark-compact.h" | |
| 11 #include "src/msan.h" | 11 #include "src/msan.h" |
| 12 | 12 |
| 13 namespace v8 { | 13 namespace v8 { |
| 14 namespace internal { | 14 namespace internal { |
| 15 | 15 |
| 16 | 16 |
| 17 // ---------------------------------------------------------------------------- | 17 // ---------------------------------------------------------------------------- |
| 18 // HeapObjectIterator | 18 // HeapObjectIterator |
| 19 | 19 |
| 20 HeapObjectIterator::HeapObjectIterator(PagedSpace* space) { | 20 HeapObjectIterator::HeapObjectIterator(PagedSpace* space) { |
| 21 // You can't actually iterate over the anchor page. It is not a real page, | 21 // You can't actually iterate over the anchor page. It is not a real page, |
| 22 // just an anchor for the double linked page list. Initialize as if we have | 22 // just an anchor for the double linked page list. Initialize as if we have |
| 23 // reached the end of the anchor page, then the first iteration will move on | 23 // reached the end of the anchor page, then the first iteration will move on |
| 24 // to the first page. | 24 // to the first page. |
| 25 Initialize(space, | 25 Initialize(space, NULL, NULL, kAllPagesInSpace, NULL); |
| 26 NULL, | |
| 27 NULL, | |
| 28 kAllPagesInSpace, | |
| 29 NULL); | |
| 30 } | 26 } |
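
Note: the anchor page described in the comment above is the standard sentinel-node pattern for a circular doubly linked list; starting "at the end of" the sentinel means the loop's first advance lands on the first real page. A minimal standalone sketch of that pattern (hypothetical `PageNode` type, not V8's actual `Page` class):

```cpp
#include <cstdio>

// Hypothetical stand-in for a page; the anchor is a PageNode that owns no
// memory and exists only to link the list together.
struct PageNode {
  PageNode* next;
  PageNode* prev;
  int id;  // 0 marks the anchor in this sketch
};

// Insert 'p' at the tail, i.e. just before the anchor.
void InsertBeforeAnchor(PageNode* anchor, PageNode* p) {
  p->prev = anchor->prev;
  p->next = anchor;
  anchor->prev->next = p;
  anchor->prev = p;
}

int main() {
  PageNode anchor{&anchor, &anchor, 0};  // empty list: anchor points at itself
  PageNode a{nullptr, nullptr, 1}, b{nullptr, nullptr, 2};
  InsertBeforeAnchor(&anchor, &a);
  InsertBeforeAnchor(&anchor, &b);
  // Start "as if at the end of the anchor page": the loop's first step
  // immediately moves on to the first real page, as in the constructor above.
  for (PageNode* p = anchor.next; p != &anchor; p = p->next) {
    std::printf("page %d\n", p->id);  // prints 1 then 2
  }
}
```
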
| 31 | 27 |
| 32 | 28 |
| 33 HeapObjectIterator::HeapObjectIterator(PagedSpace* space, | 29 HeapObjectIterator::HeapObjectIterator(PagedSpace* space, |
| 34 HeapObjectCallback size_func) { | 30 HeapObjectCallback size_func) { |
| 35 // You can't actually iterate over the anchor page. It is not a real page, | 31 // You can't actually iterate over the anchor page. It is not a real page, |
| 36 // just an anchor for the double linked page list. Initialize the current | 32 // just an anchor for the double linked page list. Initialize the current |
| 37 // address and end as NULL, then the first iteration will move on | 33 // address and end as NULL, then the first iteration will move on |
| 38 // to the first page. | 34 // to the first page. |
| 39 Initialize(space, | 35 Initialize(space, NULL, NULL, kAllPagesInSpace, size_func); |
| 40 NULL, | |
| 41 NULL, | |
| 42 kAllPagesInSpace, | |
| 43 size_func); | |
| 44 } | 36 } |
| 45 | 37 |
| 46 | 38 |
| 47 HeapObjectIterator::HeapObjectIterator(Page* page, | 39 HeapObjectIterator::HeapObjectIterator(Page* page, |
| 48 HeapObjectCallback size_func) { | 40 HeapObjectCallback size_func) { |
| 49 Space* owner = page->owner(); | 41 Space* owner = page->owner(); |
| 50 DCHECK(owner == page->heap()->old_pointer_space() || | 42 DCHECK(owner == page->heap()->old_pointer_space() || |
| 51 owner == page->heap()->old_data_space() || | 43 owner == page->heap()->old_data_space() || |
| 52 owner == page->heap()->map_space() || | 44 owner == page->heap()->map_space() || |
| 53 owner == page->heap()->cell_space() || | 45 owner == page->heap()->cell_space() || |
| 54 owner == page->heap()->property_cell_space() || | 46 owner == page->heap()->property_cell_space() || |
| 55 owner == page->heap()->code_space()); | 47 owner == page->heap()->code_space()); |
| 56 Initialize(reinterpret_cast<PagedSpace*>(owner), | 48 Initialize(reinterpret_cast<PagedSpace*>(owner), page->area_start(), |
| 57 page->area_start(), | 49 page->area_end(), kOnePageOnly, size_func); |
| 58 page->area_end(), | |
| 59 kOnePageOnly, | |
| 60 size_func); | |
| 61 DCHECK(page->WasSweptPrecisely() || page->SweepingCompleted()); | 50 DCHECK(page->WasSweptPrecisely() || page->SweepingCompleted()); |
| 62 } | 51 } |
| 63 | 52 |
| 64 | 53 |
| 65 void HeapObjectIterator::Initialize(PagedSpace* space, | 54 void HeapObjectIterator::Initialize(PagedSpace* space, Address cur, Address end, |
| 66 Address cur, Address end, | |
| 67 HeapObjectIterator::PageMode mode, | 55 HeapObjectIterator::PageMode mode, |
| 68 HeapObjectCallback size_f) { | 56 HeapObjectCallback size_f) { |
| 69 // Check that we actually can iterate this space. | 57 // Check that we actually can iterate this space. |
| 70 DCHECK(space->swept_precisely()); | 58 DCHECK(space->swept_precisely()); |
| 71 | 59 |
| 72 space_ = space; | 60 space_ = space; |
| 73 cur_addr_ = cur; | 61 cur_addr_ = cur; |
| 74 cur_end_ = end; | 62 cur_end_ = end; |
| 75 page_mode_ = mode; | 63 page_mode_ = mode; |
| 76 size_func_ = size_f; | 64 size_func_ = size_f; |
| (...skipping 23 matching lines...) |
| 100 | 88 |
| 101 // ----------------------------------------------------------------------------- | 89 // ----------------------------------------------------------------------------- |
| 102 // CodeRange | 90 // CodeRange |
| 103 | 91 |
| 104 | 92 |
| 105 CodeRange::CodeRange(Isolate* isolate) | 93 CodeRange::CodeRange(Isolate* isolate) |
| 106 : isolate_(isolate), | 94 : isolate_(isolate), |
| 107 code_range_(NULL), | 95 code_range_(NULL), |
| 108 free_list_(0), | 96 free_list_(0), |
| 109 allocation_list_(0), | 97 allocation_list_(0), |
| 110 current_allocation_block_index_(0) { | 98 current_allocation_block_index_(0) {} |
| 111 } | |
| 112 | 99 |
| 113 | 100 |
| 114 bool CodeRange::SetUp(size_t requested) { | 101 bool CodeRange::SetUp(size_t requested) { |
| 115 DCHECK(code_range_ == NULL); | 102 DCHECK(code_range_ == NULL); |
| 116 | 103 |
| 117 if (requested == 0) { | 104 if (requested == 0) { |
| 118 // When a target requires the code range feature, we put all code objects | 105 // When a target requires the code range feature, we put all code objects |
| 119 // in a kMaximalCodeRangeSize range of virtual address space, so that | 106 // in a kMaximalCodeRangeSize range of virtual address space, so that |
| 120 // they can call each other with near calls. | 107 // they can call each other with near calls. |
| 121 if (kRequiresCodeRange) { | 108 if (kRequiresCodeRange) { |
| 122 requested = kMaximalCodeRangeSize; | 109 requested = kMaximalCodeRangeSize; |
| 123 } else { | 110 } else { |
| 124 return true; | 111 return true; |
| 125 } | 112 } |
| 126 } | 113 } |
| 127 | 114 |
| 128 DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize); | 115 DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize); |
| 129 code_range_ = new base::VirtualMemory(requested); | 116 code_range_ = new base::VirtualMemory(requested); |
| 130 CHECK(code_range_ != NULL); | 117 CHECK(code_range_ != NULL); |
| 131 if (!code_range_->IsReserved()) { | 118 if (!code_range_->IsReserved()) { |
| 132 delete code_range_; | 119 delete code_range_; |
| 133 code_range_ = NULL; | 120 code_range_ = NULL; |
| 134 return false; | 121 return false; |
| 135 } | 122 } |
| 136 | 123 |
| 137 // We are sure that we have mapped a block of requested addresses. | 124 // We are sure that we have mapped a block of requested addresses. |
| 138 DCHECK(code_range_->size() == requested); | 125 DCHECK(code_range_->size() == requested); |
| 139 LOG(isolate_, | 126 LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested)); |
| 140 NewEvent("CodeRange", code_range_->address(), requested)); | |
| 141 Address base = reinterpret_cast<Address>(code_range_->address()); | 127 Address base = reinterpret_cast<Address>(code_range_->address()); |
| 142 Address aligned_base = | 128 Address aligned_base = |
| 143 RoundUp(reinterpret_cast<Address>(code_range_->address()), | 129 RoundUp(reinterpret_cast<Address>(code_range_->address()), |
| 144 MemoryChunk::kAlignment); | 130 MemoryChunk::kAlignment); |
| 145 size_t size = code_range_->size() - (aligned_base - base); | 131 size_t size = code_range_->size() - (aligned_base - base); |
| 146 allocation_list_.Add(FreeBlock(aligned_base, size)); | 132 allocation_list_.Add(FreeBlock(aligned_base, size)); |
| 147 current_allocation_block_index_ = 0; | 133 current_allocation_block_index_ = 0; |
| 148 return true; | 134 return true; |
| 149 } | 135 } |
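
Note: the `aligned_base`/`size` adjustment in `SetUp` above is round-up-to-a-power-of-two arithmetic. A worked sketch with made-up numbers (`MemoryChunk::kAlignment` is a power of two; the concrete values below are illustrative only):

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>

// Round x up to the next multiple of 'alignment' (a power of two), the same
// arithmetic RoundUp performs on the reserved base address above.
uintptr_t RoundUpPow2(uintptr_t x, uintptr_t alignment) {
  return (x + alignment - 1) & ~(alignment - 1);
}

int main() {
  const uintptr_t kAlignment = 1 << 20;  // illustrative 1 MB alignment
  uintptr_t base = 0x40003000;           // unaligned reservation start
  uintptr_t aligned_base = RoundUpPow2(base, kAlignment);
  assert(aligned_base == 0x40100000);
  // The usable size shrinks by the bytes skipped to reach alignment,
  // mirroring 'size = code_range_->size() - (aligned_base - base)'.
  size_t requested = 8 * kAlignment;
  size_t usable = requested - (aligned_base - base);
  assert(usable == requested - 0xFD000);
  return 0;
}
```
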
| 150 | 136 |
| (...skipping 61 matching lines...) |
| 212 size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment); | 198 size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment); |
| 213 FreeBlock current = allocation_list_[current_allocation_block_index_]; | 199 FreeBlock current = allocation_list_[current_allocation_block_index_]; |
| 214 if (aligned_requested >= (current.size - Page::kPageSize)) { | 200 if (aligned_requested >= (current.size - Page::kPageSize)) { |
| 215 // Don't leave a small free block, useless for a large object or chunk. | 201 // Don't leave a small free block, useless for a large object or chunk. |
| 216 *allocated = current.size; | 202 *allocated = current.size; |
| 217 } else { | 203 } else { |
| 218 *allocated = aligned_requested; | 204 *allocated = aligned_requested; |
| 219 } | 205 } |
| 220 DCHECK(*allocated <= current.size); | 206 DCHECK(*allocated <= current.size); |
| 221 DCHECK(IsAddressAligned(current.start, MemoryChunk::kAlignment)); | 207 DCHECK(IsAddressAligned(current.start, MemoryChunk::kAlignment)); |
| 222 if (!isolate_->memory_allocator()->CommitExecutableMemory(code_range_, | 208 if (!isolate_->memory_allocator()->CommitExecutableMemory( |
| 223 current.start, | 209 code_range_, current.start, commit_size, *allocated)) { |
| 224 commit_size, | |
| 225 *allocated)) { | |
| 226 *allocated = 0; | 210 *allocated = 0; |
| 227 return NULL; | 211 return NULL; |
| 228 } | 212 } |
| 229 allocation_list_[current_allocation_block_index_].start += *allocated; | 213 allocation_list_[current_allocation_block_index_].start += *allocated; |
| 230 allocation_list_[current_allocation_block_index_].size -= *allocated; | 214 allocation_list_[current_allocation_block_index_].size -= *allocated; |
| 231 if (*allocated == current.size) { | 215 if (*allocated == current.size) { |
| 232 // This block is used up, get the next one. | 216 // This block is used up, get the next one. |
| 233 if (!GetNextAllocationBlock(0)) return NULL; | 217 if (!GetNextAllocationBlock(0)) return NULL; |
| 234 } | 218 } |
| 235 return current.start; | 219 return current.start; |
| (...skipping 11 matching lines...) |
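
Note: in `AllocateRawMemory` above, a request that would leave less than one page of tail in the current free block takes the whole block instead ("Don't leave a small free block"). A self-contained sketch of that policy (`FreeBlock` and `kPageSize` here are local stand-ins, not V8's definitions):

```cpp
#include <cstddef>
#include <cstdio>

struct FreeBlock {  // stand-in for CodeRange::FreeBlock
  size_t start;
  size_t size;
};

const size_t kPageSize = 4096;  // illustrative; V8 pages are larger

// Returns how much of 'current' to hand out for an aligned request.
size_t SizeToAllocate(const FreeBlock& current, size_t aligned_requested) {
  if (aligned_requested >= current.size - kPageSize) {
    return current.size;  // don't leave a useless sliver behind
  }
  return aligned_requested;
}

int main() {
  FreeBlock block{0, 10 * kPageSize};
  std::printf("%zu\n", SizeToAllocate(block, 4 * kPageSize));  // 4 pages
  std::printf("%zu\n", SizeToAllocate(block, 9 * kPageSize));  // whole block
  // After a partial allocation the block shrinks from the front, as in
  // 'allocation_list_[i].start += *allocated; allocation_list_[i].size -= ...'.
}
```
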
| 247 | 231 |
| 248 | 232 |
| 249 void CodeRange::FreeRawMemory(Address address, size_t length) { | 233 void CodeRange::FreeRawMemory(Address address, size_t length) { |
| 250 DCHECK(IsAddressAligned(address, MemoryChunk::kAlignment)); | 234 DCHECK(IsAddressAligned(address, MemoryChunk::kAlignment)); |
| 251 free_list_.Add(FreeBlock(address, length)); | 235 free_list_.Add(FreeBlock(address, length)); |
| 252 code_range_->Uncommit(address, length); | 236 code_range_->Uncommit(address, length); |
| 253 } | 237 } |
| 254 | 238 |
| 255 | 239 |
| 256 void CodeRange::TearDown() { | 240 void CodeRange::TearDown() { |
| 257 delete code_range_; // Frees all memory in the virtual memory range. | 241 delete code_range_; // Frees all memory in the virtual memory range. |
| 258 code_range_ = NULL; | 242 code_range_ = NULL; |
| 259 free_list_.Free(); | 243 free_list_.Free(); |
| 260 allocation_list_.Free(); | 244 allocation_list_.Free(); |
| 261 } | 245 } |
| 262 | 246 |
| 263 | 247 |
| 264 // ----------------------------------------------------------------------------- | 248 // ----------------------------------------------------------------------------- |
| 265 // MemoryAllocator | 249 // MemoryAllocator |
| 266 // | 250 // |
| 267 | 251 |
| 268 MemoryAllocator::MemoryAllocator(Isolate* isolate) | 252 MemoryAllocator::MemoryAllocator(Isolate* isolate) |
| 269 : isolate_(isolate), | 253 : isolate_(isolate), |
| 270 capacity_(0), | 254 capacity_(0), |
| 271 capacity_executable_(0), | 255 capacity_executable_(0), |
| 272 size_(0), | 256 size_(0), |
| 273 size_executable_(0), | 257 size_executable_(0), |
| 274 lowest_ever_allocated_(reinterpret_cast<void*>(-1)), | 258 lowest_ever_allocated_(reinterpret_cast<void*>(-1)), |
| 275 highest_ever_allocated_(reinterpret_cast<void*>(0)) { | 259 highest_ever_allocated_(reinterpret_cast<void*>(0)) {} |
| 276 } | |
| 277 | 260 |
| 278 | 261 |
| 279 bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) { | 262 bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) { |
| 280 capacity_ = RoundUp(capacity, Page::kPageSize); | 263 capacity_ = RoundUp(capacity, Page::kPageSize); |
| 281 capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize); | 264 capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize); |
| 282 DCHECK_GE(capacity_, capacity_executable_); | 265 DCHECK_GE(capacity_, capacity_executable_); |
| 283 | 266 |
| 284 size_ = 0; | 267 size_ = 0; |
| 285 size_executable_ = 0; | 268 size_executable_ = 0; |
| 286 | 269 |
| 287 return true; | 270 return true; |
| 288 } | 271 } |
| 289 | 272 |
| 290 | 273 |
| 291 void MemoryAllocator::TearDown() { | 274 void MemoryAllocator::TearDown() { |
| 292 // Check that spaces were torn down before MemoryAllocator. | 275 // Check that spaces were torn down before MemoryAllocator. |
| 293 DCHECK(size_ == 0); | 276 DCHECK(size_ == 0); |
| 294 // TODO(gc) this will be true again when we fix FreeMemory. | 277 // TODO(gc) this will be true again when we fix FreeMemory. |
| 295 // DCHECK(size_executable_ == 0); | 278 // DCHECK(size_executable_ == 0); |
| 296 capacity_ = 0; | 279 capacity_ = 0; |
| 297 capacity_executable_ = 0; | 280 capacity_executable_ = 0; |
| 298 } | 281 } |
| 299 | 282 |
| 300 | 283 |
| 301 bool MemoryAllocator::CommitMemory(Address base, | 284 bool MemoryAllocator::CommitMemory(Address base, size_t size, |
| 302 size_t size, | |
| 303 Executability executable) { | 285 Executability executable) { |
| 304 if (!base::VirtualMemory::CommitRegion(base, size, | 286 if (!base::VirtualMemory::CommitRegion(base, size, |
| 305 executable == EXECUTABLE)) { | 287 executable == EXECUTABLE)) { |
| 306 return false; | 288 return false; |
| 307 } | 289 } |
| 308 UpdateAllocatedSpaceLimits(base, base + size); | 290 UpdateAllocatedSpaceLimits(base, base + size); |
| 309 return true; | 291 return true; |
| 310 } | 292 } |
| 311 | 293 |
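
Note: `UpdateAllocatedSpaceLimits`, called from `CommitMemory` above and from `CommitExecutableMemory` further down, appears to maintain a conservative `[lowest, highest)` envelope over everything ever committed, giving a cheap "could this address be heap memory?" filter. A minimal sketch under that assumption:

```cpp
#include <algorithm>
#include <cstdint>

// Conservative envelope of everything ever committed: addresses outside
// [lowest_ever, highest_ever) are definitely not dynamically allocated.
struct SpaceLimits {
  uintptr_t lowest_ever = UINTPTR_MAX;  // cf. reinterpret_cast<void*>(-1)
  uintptr_t highest_ever = 0;

  void Update(uintptr_t low, uintptr_t high) {
    lowest_ever = std::min(lowest_ever, low);
    highest_ever = std::max(highest_ever, high);
  }
  bool MaybeInHeap(uintptr_t addr) const {
    return lowest_ever <= addr && addr < highest_ever;
  }
};

int main() {
  SpaceLimits limits;
  limits.Update(0x1000, 0x2000);
  limits.Update(0x8000, 0x9000);
  // 0x4000 lies between the two regions but inside the envelope: the filter
  // is deliberately conservative (false positives allowed, never negatives).
  bool ok = limits.MaybeInHeap(0x1800) && limits.MaybeInHeap(0x4000) &&
            !limits.MaybeInHeap(0xA000);
  return ok ? 0 : 1;
}
```
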
| 312 | 294 |
| 313 void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation, | 295 void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation, |
| 314 Executability executable) { | 296 Executability executable) { |
| 315 // TODO(gc) make code_range part of memory allocator? | 297 // TODO(gc) make code_range part of memory allocator? |
| 316 DCHECK(reservation->IsReserved()); | 298 DCHECK(reservation->IsReserved()); |
| 317 size_t size = reservation->size(); | 299 size_t size = reservation->size(); |
| 318 DCHECK(size_ >= size); | 300 DCHECK(size_ >= size); |
| 319 size_ -= size; | 301 size_ -= size; |
| 320 | 302 |
| 321 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size)); | 303 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size)); |
| 322 | 304 |
| 323 if (executable == EXECUTABLE) { | 305 if (executable == EXECUTABLE) { |
| 324 DCHECK(size_executable_ >= size); | 306 DCHECK(size_executable_ >= size); |
| 325 size_executable_ -= size; | 307 size_executable_ -= size; |
| 326 } | 308 } |
| 327 // Code which is part of the code-range does not have its own VirtualMemory. | 309 // Code which is part of the code-range does not have its own VirtualMemory. |
| 328 DCHECK(isolate_->code_range() == NULL || | 310 DCHECK(isolate_->code_range() == NULL || |
| 329 !isolate_->code_range()->contains( | 311 !isolate_->code_range()->contains( |
| 330 static_cast<Address>(reservation->address()))); | 312 static_cast<Address>(reservation->address()))); |
| 331 DCHECK(executable == NOT_EXECUTABLE || | 313 DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL || |
| 332 isolate_->code_range() == NULL || | |
| 333 !isolate_->code_range()->valid()); | 314 !isolate_->code_range()->valid()); |
| 334 reservation->Release(); | 315 reservation->Release(); |
| 335 } | 316 } |
| 336 | 317 |
| 337 | 318 |
| 338 void MemoryAllocator::FreeMemory(Address base, | 319 void MemoryAllocator::FreeMemory(Address base, size_t size, |
| 339 size_t size, | |
| 340 Executability executable) { | 320 Executability executable) { |
| 341 // TODO(gc) make code_range part of memory allocator? | 321 // TODO(gc) make code_range part of memory allocator? |
| 342 DCHECK(size_ >= size); | 322 DCHECK(size_ >= size); |
| 343 size_ -= size; | 323 size_ -= size; |
| 344 | 324 |
| 345 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size)); | 325 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size)); |
| 346 | 326 |
| 347 if (executable == EXECUTABLE) { | 327 if (executable == EXECUTABLE) { |
| 348 DCHECK(size_executable_ >= size); | 328 DCHECK(size_executable_ >= size); |
| 349 size_executable_ -= size; | 329 size_executable_ -= size; |
| 350 } | 330 } |
| 351 if (isolate_->code_range() != NULL && | 331 if (isolate_->code_range() != NULL && |
| 352 isolate_->code_range()->contains(static_cast<Address>(base))) { | 332 isolate_->code_range()->contains(static_cast<Address>(base))) { |
| 353 DCHECK(executable == EXECUTABLE); | 333 DCHECK(executable == EXECUTABLE); |
| 354 isolate_->code_range()->FreeRawMemory(base, size); | 334 isolate_->code_range()->FreeRawMemory(base, size); |
| 355 } else { | 335 } else { |
| 356 DCHECK(executable == NOT_EXECUTABLE || | 336 DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL || |
| 357 isolate_->code_range() == NULL || | |
| 358 !isolate_->code_range()->valid()); | 337 !isolate_->code_range()->valid()); |
| 359 bool result = base::VirtualMemory::ReleaseRegion(base, size); | 338 bool result = base::VirtualMemory::ReleaseRegion(base, size); |
| 360 USE(result); | 339 USE(result); |
| 361 DCHECK(result); | 340 DCHECK(result); |
| 362 } | 341 } |
| 363 } | 342 } |
| 364 | 343 |
| 365 | 344 |
| 366 Address MemoryAllocator::ReserveAlignedMemory(size_t size, | 345 Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment, |
| 367 size_t alignment, | |
| 368 base::VirtualMemory* controller) { | 346 base::VirtualMemory* controller) { |
| 369 base::VirtualMemory reservation(size, alignment); | 347 base::VirtualMemory reservation(size, alignment); |
| 370 | 348 |
| 371 if (!reservation.IsReserved()) return NULL; | 349 if (!reservation.IsReserved()) return NULL; |
| 372 size_ += reservation.size(); | 350 size_ += reservation.size(); |
| 373 Address base = RoundUp(static_cast<Address>(reservation.address()), | 351 Address base = |
| 374 alignment); | 352 RoundUp(static_cast<Address>(reservation.address()), alignment); |
| 375 controller->TakeControl(&reservation); | 353 controller->TakeControl(&reservation); |
| 376 return base; | 354 return base; |
| 377 } | 355 } |
| 378 | 356 |
| 379 | 357 |
| 380 Address MemoryAllocator::AllocateAlignedMemory( | 358 Address MemoryAllocator::AllocateAlignedMemory( |
| 381 size_t reserve_size, size_t commit_size, size_t alignment, | 359 size_t reserve_size, size_t commit_size, size_t alignment, |
| 382 Executability executable, base::VirtualMemory* controller) { | 360 Executability executable, base::VirtualMemory* controller) { |
| 383 DCHECK(commit_size <= reserve_size); | 361 DCHECK(commit_size <= reserve_size); |
| 384 base::VirtualMemory reservation; | 362 base::VirtualMemory reservation; |
| 385 Address base = ReserveAlignedMemory(reserve_size, alignment, &reservation); | 363 Address base = ReserveAlignedMemory(reserve_size, alignment, &reservation); |
| 386 if (base == NULL) return NULL; | 364 if (base == NULL) return NULL; |
| 387 | 365 |
| 388 if (executable == EXECUTABLE) { | 366 if (executable == EXECUTABLE) { |
| 389 if (!CommitExecutableMemory(&reservation, | 367 if (!CommitExecutableMemory(&reservation, base, commit_size, |
| 390 base, | |
| 391 commit_size, | |
| 392 reserve_size)) { | 368 reserve_size)) { |
| 393 base = NULL; | 369 base = NULL; |
| 394 } | 370 } |
| 395 } else { | 371 } else { |
| 396 if (reservation.Commit(base, commit_size, false)) { | 372 if (reservation.Commit(base, commit_size, false)) { |
| 397 UpdateAllocatedSpaceLimits(base, base + commit_size); | 373 UpdateAllocatedSpaceLimits(base, base + commit_size); |
| 398 } else { | 374 } else { |
| 399 base = NULL; | 375 base = NULL; |
| 400 } | 376 } |
| 401 } | 377 } |
| (...skipping 10 matching lines...) |
| 412 } | 388 } |
| 413 | 389 |
| 414 | 390 |
| 415 void Page::InitializeAsAnchor(PagedSpace* owner) { | 391 void Page::InitializeAsAnchor(PagedSpace* owner) { |
| 416 set_owner(owner); | 392 set_owner(owner); |
| 417 set_prev_page(this); | 393 set_prev_page(this); |
| 418 set_next_page(this); | 394 set_next_page(this); |
| 419 } | 395 } |
| 420 | 396 |
| 421 | 397 |
| 422 NewSpacePage* NewSpacePage::Initialize(Heap* heap, | 398 NewSpacePage* NewSpacePage::Initialize(Heap* heap, Address start, |
| 423 Address start, | |
| 424 SemiSpace* semi_space) { | 399 SemiSpace* semi_space) { |
| 425 Address area_start = start + NewSpacePage::kObjectStartOffset; | 400 Address area_start = start + NewSpacePage::kObjectStartOffset; |
| 426 Address area_end = start + Page::kPageSize; | 401 Address area_end = start + Page::kPageSize; |
| 427 | 402 |
| 428 MemoryChunk* chunk = MemoryChunk::Initialize(heap, | 403 MemoryChunk* chunk = |
| 429 start, | 404 MemoryChunk::Initialize(heap, start, Page::kPageSize, area_start, |
| 430 Page::kPageSize, | 405 area_end, NOT_EXECUTABLE, semi_space); |
| 431 area_start, | |
| 432 area_end, | |
| 433 NOT_EXECUTABLE, | |
| 434 semi_space); | |
| 435 chunk->set_next_chunk(NULL); | 406 chunk->set_next_chunk(NULL); |
| 436 chunk->set_prev_chunk(NULL); | 407 chunk->set_prev_chunk(NULL); |
| 437 chunk->initialize_scan_on_scavenge(true); | 408 chunk->initialize_scan_on_scavenge(true); |
| 438 bool in_to_space = (semi_space->id() != kFromSpace); | 409 bool in_to_space = (semi_space->id() != kFromSpace); |
| 439 chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE | 410 chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE |
| 440 : MemoryChunk::IN_FROM_SPACE); | 411 : MemoryChunk::IN_FROM_SPACE); |
| 441 DCHECK(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE | 412 DCHECK(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE |
| 442 : MemoryChunk::IN_TO_SPACE)); | 413 : MemoryChunk::IN_TO_SPACE)); |
| 443 NewSpacePage* page = static_cast<NewSpacePage*>(chunk); | 414 NewSpacePage* page = static_cast<NewSpacePage*>(chunk); |
| 444 heap->incremental_marking()->SetNewSpacePageFlags(page); | 415 heap->incremental_marking()->SetNewSpacePageFlags(page); |
| 445 return page; | 416 return page; |
| 446 } | 417 } |
| 447 | 418 |
| 448 | 419 |
| 449 void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) { | 420 void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) { |
| 450 set_owner(semi_space); | 421 set_owner(semi_space); |
| 451 set_next_chunk(this); | 422 set_next_chunk(this); |
| 452 set_prev_chunk(this); | 423 set_prev_chunk(this); |
| 453 // Flags marks this invalid page as not being in new-space. | 424 // Flags marks this invalid page as not being in new-space. |
| 454 // All real new-space pages will be in new-space. | 425 // All real new-space pages will be in new-space. |
| 455 SetFlags(0, ~0); | 426 SetFlags(0, ~0); |
| 456 } | 427 } |
| 457 | 428 |
| 458 | 429 |
| 459 MemoryChunk* MemoryChunk::Initialize(Heap* heap, | 430 MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size, |
| 460 Address base, | 431 Address area_start, Address area_end, |
| 461 size_t size, | 432 Executability executable, Space* owner) { |
| 462 Address area_start, | |
| 463 Address area_end, | |
| 464 Executability executable, | |
| 465 Space* owner) { | |
| 466 MemoryChunk* chunk = FromAddress(base); | 433 MemoryChunk* chunk = FromAddress(base); |
| 467 | 434 |
| 468 DCHECK(base == chunk->address()); | 435 DCHECK(base == chunk->address()); |
| 469 | 436 |
| 470 chunk->heap_ = heap; | 437 chunk->heap_ = heap; |
| 471 chunk->size_ = size; | 438 chunk->size_ = size; |
| 472 chunk->area_start_ = area_start; | 439 chunk->area_start_ = area_start; |
| 473 chunk->area_end_ = area_end; | 440 chunk->area_end_ = area_end; |
| 474 chunk->flags_ = 0; | 441 chunk->flags_ = 0; |
| 475 chunk->set_owner(owner); | 442 chunk->set_owner(owner); |
| (...skipping 24 matching lines...) |
| 500 if (owner == heap->old_data_space()) { | 467 if (owner == heap->old_data_space()) { |
| 501 chunk->SetFlag(CONTAINS_ONLY_DATA); | 468 chunk->SetFlag(CONTAINS_ONLY_DATA); |
| 502 } | 469 } |
| 503 | 470 |
| 504 return chunk; | 471 return chunk; |
| 505 } | 472 } |
| 506 | 473 |
| 507 | 474 |
| 508 // Commit MemoryChunk area to the requested size. | 475 // Commit MemoryChunk area to the requested size. |
| 509 bool MemoryChunk::CommitArea(size_t requested) { | 476 bool MemoryChunk::CommitArea(size_t requested) { |
| 510 size_t guard_size = IsFlagSet(IS_EXECUTABLE) ? | 477 size_t guard_size = |
| 511 MemoryAllocator::CodePageGuardSize() : 0; | 478 IsFlagSet(IS_EXECUTABLE) ? MemoryAllocator::CodePageGuardSize() : 0; |
| 512 size_t header_size = area_start() - address() - guard_size; | 479 size_t header_size = area_start() - address() - guard_size; |
| 513 size_t commit_size = | 480 size_t commit_size = |
| 514 RoundUp(header_size + requested, base::OS::CommitPageSize()); | 481 RoundUp(header_size + requested, base::OS::CommitPageSize()); |
| 515 size_t committed_size = RoundUp(header_size + (area_end() - area_start()), | 482 size_t committed_size = RoundUp(header_size + (area_end() - area_start()), |
| 516 base::OS::CommitPageSize()); | 483 base::OS::CommitPageSize()); |
| 517 | 484 |
| 518 if (commit_size > committed_size) { | 485 if (commit_size > committed_size) { |
| 519 // Commit size should be less or equal than the reserved size. | 486 // Commit size should be less or equal than the reserved size. |
| 520 DCHECK(commit_size <= size() - 2 * guard_size); | 487 DCHECK(commit_size <= size() - 2 * guard_size); |
| 521 // Append the committed area. | 488 // Append the committed area. |
| 522 Address start = address() + committed_size + guard_size; | 489 Address start = address() + committed_size + guard_size; |
| 523 size_t length = commit_size - committed_size; | 490 size_t length = commit_size - committed_size; |
| 524 if (reservation_.IsReserved()) { | 491 if (reservation_.IsReserved()) { |
| 525 Executability executable = IsFlagSet(IS_EXECUTABLE) | 492 Executability executable = |
| 526 ? EXECUTABLE : NOT_EXECUTABLE; | 493 IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE; |
| 527 if (!heap()->isolate()->memory_allocator()->CommitMemory( | 494 if (!heap()->isolate()->memory_allocator()->CommitMemory(start, length, |
| 528 start, length, executable)) { | 495 executable)) { |
| 529 return false; | 496 return false; |
| 530 } | 497 } |
| 531 } else { | 498 } else { |
| 532 CodeRange* code_range = heap_->isolate()->code_range(); | 499 CodeRange* code_range = heap_->isolate()->code_range(); |
| 533 DCHECK(code_range != NULL && code_range->valid() && | 500 DCHECK(code_range != NULL && code_range->valid() && |
| 534 IsFlagSet(IS_EXECUTABLE)); | 501 IsFlagSet(IS_EXECUTABLE)); |
| 535 if (!code_range->CommitRawMemory(start, length)) return false; | 502 if (!code_range->CommitRawMemory(start, length)) return false; |
| 536 } | 503 } |
| 537 | 504 |
| 538 if (Heap::ShouldZapGarbage()) { | 505 if (Heap::ShouldZapGarbage()) { |
| (...skipping 77 matching lines...) |
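
Note: the `commit_size`/`committed_size` arithmetic in `CommitArea` above is easier to check with concrete numbers. A worked sketch assuming a 4 KB commit page and a 32-byte header with no guard page (illustrative values, not V8's real constants):

```cpp
#include <cassert>
#include <cstddef>

size_t RoundUpTo(size_t x, size_t multiple) {
  return (x + multiple - 1) / multiple * multiple;
}

int main() {
  const size_t kCommitPage = 4096;  // illustrative OS commit page size
  const size_t header_size = 32;    // illustrative header, guard_size == 0

  size_t old_area = 6000;           // area_end() - area_start()
  size_t requested = 12000;         // newly requested area size

  size_t committed_size = RoundUpTo(header_size + old_area, kCommitPage);
  size_t commit_size = RoundUpTo(header_size + requested, kCommitPage);
  assert(committed_size == 8192);   // 6032 bytes -> two pages
  assert(commit_size == 12288);     // 12032 bytes -> three pages
  // Growing commits only the delta, starting right after what is committed:
  size_t start_offset = committed_size /* + guard_size */;
  size_t length = commit_size - committed_size;
  assert(start_offset == 8192 && length == 4096);
  return 0;
}
```
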
| 616 // | Area | | 583 // | Area | |
| 617 // +----------------------------+<- area_end_ (area_start + commit_area_size) | 584 // +----------------------------+<- area_end_ (area_start + commit_area_size) |
| 618 // | Committed but not used | | 585 // | Committed but not used | |
| 619 // +----------------------------+<- aligned at OS page boundary | 586 // +----------------------------+<- aligned at OS page boundary |
| 620 // | Reserved but not committed | | 587 // | Reserved but not committed | |
| 621 // +----------------------------+<- base + chunk_size | 588 // +----------------------------+<- base + chunk_size |
| 622 // | 589 // |
| 623 | 590 |
| 624 if (executable == EXECUTABLE) { | 591 if (executable == EXECUTABLE) { |
| 625 chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size, | 592 chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size, |
| 626 base::OS::CommitPageSize()) + CodePageGuardSize(); | 593 base::OS::CommitPageSize()) + |
| | 594 CodePageGuardSize(); |
| 627 | 595 |
| 628 // Check executable memory limit. | 596 // Check executable memory limit. |
| 629 if (size_executable_ + chunk_size > capacity_executable_) { | 597 if (size_executable_ + chunk_size > capacity_executable_) { |
| 630 LOG(isolate_, | 598 LOG(isolate_, StringEvent("MemoryAllocator::AllocateRawMemory", |
| 631 StringEvent("MemoryAllocator::AllocateRawMemory", | 599 "V8 Executable Allocation capacity exceeded")); |
| 632 "V8 Executable Allocation capacity exceeded")); | |
| 633 return NULL; | 600 return NULL; |
| 634 } | 601 } |
| 635 | 602 |
| 636 // Size of header (not executable) plus area (executable). | 603 // Size of header (not executable) plus area (executable). |
| 637 size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size, | 604 size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size, |
| 638 base::OS::CommitPageSize()); | 605 base::OS::CommitPageSize()); |
| 639 // Allocate executable memory either from code range or from the | 606 // Allocate executable memory either from code range or from the |
| 640 // OS. | 607 // OS. |
| 641 if (isolate_->code_range() != NULL && isolate_->code_range()->valid()) { | 608 if (isolate_->code_range() != NULL && isolate_->code_range()->valid()) { |
| 642 base = isolate_->code_range()->AllocateRawMemory(chunk_size, | 609 base = isolate_->code_range()->AllocateRawMemory(chunk_size, commit_size, |
| 643 commit_size, | |
| 644 &chunk_size); | 610 &chunk_size); |
| 645 DCHECK(IsAligned(reinterpret_cast<intptr_t>(base), | 611 DCHECK( |
| 646 MemoryChunk::kAlignment)); | 612 IsAligned(reinterpret_cast<intptr_t>(base), MemoryChunk::kAlignment)); |
| 647 if (base == NULL) return NULL; | 613 if (base == NULL) return NULL; |
| 648 size_ += chunk_size; | 614 size_ += chunk_size; |
| 649 // Update executable memory size. | 615 // Update executable memory size. |
| 650 size_executable_ += chunk_size; | 616 size_executable_ += chunk_size; |
| 651 } else { | 617 } else { |
| 652 base = AllocateAlignedMemory(chunk_size, | 618 base = AllocateAlignedMemory(chunk_size, commit_size, |
| 653 commit_size, | 619 MemoryChunk::kAlignment, executable, |
| 654 MemoryChunk::kAlignment, | |
| 655 executable, | |
| 656 &reservation); | 620 &reservation); |
| 657 if (base == NULL) return NULL; | 621 if (base == NULL) return NULL; |
| 658 // Update executable memory size. | 622 // Update executable memory size. |
| 659 size_executable_ += reservation.size(); | 623 size_executable_ += reservation.size(); |
| 660 } | 624 } |
| 661 | 625 |
| 662 if (Heap::ShouldZapGarbage()) { | 626 if (Heap::ShouldZapGarbage()) { |
| 663 ZapBlock(base, CodePageGuardStartOffset()); | 627 ZapBlock(base, CodePageGuardStartOffset()); |
| 664 ZapBlock(base + CodePageAreaStartOffset(), commit_area_size); | 628 ZapBlock(base + CodePageAreaStartOffset(), commit_area_size); |
| 665 } | 629 } |
| 666 | 630 |
| 667 area_start = base + CodePageAreaStartOffset(); | 631 area_start = base + CodePageAreaStartOffset(); |
| 668 area_end = area_start + commit_area_size; | 632 area_end = area_start + commit_area_size; |
| 669 } else { | 633 } else { |
| 670 chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size, | 634 chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size, |
| 671 base::OS::CommitPageSize()); | 635 base::OS::CommitPageSize()); |
| 672 size_t commit_size = RoundUp(MemoryChunk::kObjectStartOffset + | 636 size_t commit_size = |
| 673 commit_area_size, base::OS::CommitPageSize()); | 637 RoundUp(MemoryChunk::kObjectStartOffset + commit_area_size, |
| 674 base = AllocateAlignedMemory(chunk_size, | 638 base::OS::CommitPageSize()); |
| 675 commit_size, | 639 base = |
| 676 MemoryChunk::kAlignment, | 640 AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment, |
| 677 executable, | 641 executable, &reservation); |
| 678 &reservation); | |
| 679 | 642 |
| 680 if (base == NULL) return NULL; | 643 if (base == NULL) return NULL; |
| 681 | 644 |
| 682 if (Heap::ShouldZapGarbage()) { | 645 if (Heap::ShouldZapGarbage()) { |
| 683 ZapBlock(base, Page::kObjectStartOffset + commit_area_size); | 646 ZapBlock(base, Page::kObjectStartOffset + commit_area_size); |
| 684 } | 647 } |
| 685 | 648 |
| 686 area_start = base + Page::kObjectStartOffset; | 649 area_start = base + Page::kObjectStartOffset; |
| 687 area_end = area_start + commit_area_size; | 650 area_end = area_start + commit_area_size; |
| 688 } | 651 } |
| 689 | 652 |
| 690 // Use chunk_size for statistics and callbacks because we assume that they | 653 // Use chunk_size for statistics and callbacks because we assume that they |
| 691 // treat reserved but not-yet committed memory regions of chunks as allocated. | 654 // treat reserved but not-yet committed memory regions of chunks as allocated. |
| 692 isolate_->counters()->memory_allocated()-> | 655 isolate_->counters()->memory_allocated()->Increment( |
| 693 Increment(static_cast<int>(chunk_size)); | 656 static_cast<int>(chunk_size)); |
| 694 | 657 |
| 695 LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size)); | 658 LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size)); |
| 696 if (owner != NULL) { | 659 if (owner != NULL) { |
| 697 ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity()); | 660 ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity()); |
| 698 PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size); | 661 PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size); |
| 699 } | 662 } |
| 700 | 663 |
| 701 MemoryChunk* result = MemoryChunk::Initialize(heap, | 664 MemoryChunk* result = MemoryChunk::Initialize( |
| 702 base, | 665 heap, base, chunk_size, area_start, area_end, executable, owner); |
| 703 chunk_size, | |
| 704 area_start, | |
| 705 area_end, | |
| 706 executable, | |
| 707 owner); | |
| 708 result->set_reserved_memory(&reservation); | 666 result->set_reserved_memory(&reservation); |
| 709 MSAN_MEMORY_IS_INITIALIZED_IN_JIT(base, chunk_size); | 667 MSAN_MEMORY_IS_INITIALIZED_IN_JIT(base, chunk_size); |
| 710 return result; | 668 return result; |
| 711 } | 669 } |
| 712 | 670 |
| 713 | 671 |
| 714 void Page::ResetFreeListStatistics() { | 672 void Page::ResetFreeListStatistics() { |
| 715 non_available_small_blocks_ = 0; | 673 non_available_small_blocks_ = 0; |
| 716 available_in_small_free_list_ = 0; | 674 available_in_small_free_list_ = 0; |
| 717 available_in_medium_free_list_ = 0; | 675 available_in_medium_free_list_ = 0; |
| 718 available_in_large_free_list_ = 0; | 676 available_in_large_free_list_ = 0; |
| 719 available_in_huge_free_list_ = 0; | 677 available_in_huge_free_list_ = 0; |
| 720 } | 678 } |
| 721 | 679 |
| 722 | 680 |
| 723 Page* MemoryAllocator::AllocatePage(intptr_t size, | 681 Page* MemoryAllocator::AllocatePage(intptr_t size, PagedSpace* owner, |
| 724 PagedSpace* owner, | |
| 725 Executability executable) { | 682 Executability executable) { |
| 726 MemoryChunk* chunk = AllocateChunk(size, size, executable, owner); | 683 MemoryChunk* chunk = AllocateChunk(size, size, executable, owner); |
| 727 | 684 |
| 728 if (chunk == NULL) return NULL; | 685 if (chunk == NULL) return NULL; |
| 729 | 686 |
| 730 return Page::Initialize(isolate_->heap(), chunk, executable, owner); | 687 return Page::Initialize(isolate_->heap(), chunk, executable, owner); |
| 731 } | 688 } |
| 732 | 689 |
| 733 | 690 |
| 734 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size, | 691 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size, |
| 735 Space* owner, | 692 Space* owner, |
| 736 Executability executable) { | 693 Executability executable) { |
| 737 MemoryChunk* chunk = AllocateChunk(object_size, | 694 MemoryChunk* chunk = |
| 738 object_size, | 695 AllocateChunk(object_size, object_size, executable, owner); |
| 739 executable, | |
| 740 owner); | |
| 741 if (chunk == NULL) return NULL; | 696 if (chunk == NULL) return NULL; |
| 742 return LargePage::Initialize(isolate_->heap(), chunk); | 697 return LargePage::Initialize(isolate_->heap(), chunk); |
| 743 } | 698 } |
| 744 | 699 |
| 745 | 700 |
| 746 void MemoryAllocator::Free(MemoryChunk* chunk) { | 701 void MemoryAllocator::Free(MemoryChunk* chunk) { |
| 747 LOG(isolate_, DeleteEvent("MemoryChunk", chunk)); | 702 LOG(isolate_, DeleteEvent("MemoryChunk", chunk)); |
| 748 if (chunk->owner() != NULL) { | 703 if (chunk->owner() != NULL) { |
| 749 ObjectSpace space = | 704 ObjectSpace space = |
| 750 static_cast<ObjectSpace>(1 << chunk->owner()->identity()); | 705 static_cast<ObjectSpace>(1 << chunk->owner()->identity()); |
| 751 PerformAllocationCallback(space, kAllocationActionFree, chunk->size()); | 706 PerformAllocationCallback(space, kAllocationActionFree, chunk->size()); |
| 752 } | 707 } |
| 753 | 708 |
| 754 isolate_->heap()->RememberUnmappedPage( | 709 isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk), |
| 755 reinterpret_cast<Address>(chunk), chunk->IsEvacuationCandidate()); | 710 chunk->IsEvacuationCandidate()); |
| 756 | 711 |
| 757 delete chunk->slots_buffer(); | 712 delete chunk->slots_buffer(); |
| 758 delete chunk->skip_list(); | 713 delete chunk->skip_list(); |
| 759 | 714 |
| 760 base::VirtualMemory* reservation = chunk->reserved_memory(); | 715 base::VirtualMemory* reservation = chunk->reserved_memory(); |
| 761 if (reservation->IsReserved()) { | 716 if (reservation->IsReserved()) { |
| 762 FreeMemory(reservation, chunk->executable()); | 717 FreeMemory(reservation, chunk->executable()); |
| 763 } else { | 718 } else { |
| 764 FreeMemory(chunk->address(), | 719 FreeMemory(chunk->address(), chunk->size(), chunk->executable()); |
| 765 chunk->size(), | |
| 766 chunk->executable()); | |
| 767 } | 720 } |
| 768 } | 721 } |
| 769 | 722 |
| 770 | 723 |
| 771 bool MemoryAllocator::CommitBlock(Address start, | 724 bool MemoryAllocator::CommitBlock(Address start, size_t size, |
| 772 size_t size, | |
| 773 Executability executable) { | 725 Executability executable) { |
| 774 if (!CommitMemory(start, size, executable)) return false; | 726 if (!CommitMemory(start, size, executable)) return false; |
| 775 | 727 |
| 776 if (Heap::ShouldZapGarbage()) { | 728 if (Heap::ShouldZapGarbage()) { |
| 777 ZapBlock(start, size); | 729 ZapBlock(start, size); |
| 778 } | 730 } |
| 779 | 731 |
| 780 isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size)); | 732 isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size)); |
| 781 return true; | 733 return true; |
| 782 } | 734 } |
| (...skipping 11 matching lines...) |
| 794 Memory::Address_at(start + s) = kZapValue; | 746 Memory::Address_at(start + s) = kZapValue; |
| 795 } | 747 } |
| 796 } | 748 } |
| 797 | 749 |
| 798 | 750 |
| 799 void MemoryAllocator::PerformAllocationCallback(ObjectSpace space, | 751 void MemoryAllocator::PerformAllocationCallback(ObjectSpace space, |
| 800 AllocationAction action, | 752 AllocationAction action, |
| 801 size_t size) { | 753 size_t size) { |
| 802 for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) { | 754 for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) { |
| 803 MemoryAllocationCallbackRegistration registration = | 755 MemoryAllocationCallbackRegistration registration = |
| 804 memory_allocation_callbacks_[i]; | 756 memory_allocation_callbacks_[i]; |
| 805 if ((registration.space & space) == space && | 757 if ((registration.space & space) == space && |
| 806 (registration.action & action) == action) | 758 (registration.action & action) == action) |
| 807 registration.callback(space, action, static_cast<int>(size)); | 759 registration.callback(space, action, static_cast<int>(size)); |
| 808 } | 760 } |
| 809 } | 761 } |
| 810 | 762 |
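
Note: the `(registration.space & space) == space` test in `PerformAllocationCallback` above is bitmask subset matching; since each `ObjectSpace` value is a single bit (`1 << identity()`), a registration fires only when its masks cover the space and action being reported. A hedged sketch of that dispatch (all types here are local stand-ins):

```cpp
#include <cstdio>
#include <vector>

// Illustrative single-bit flags, mirroring '1 << owner->identity()'.
enum Space : unsigned { kOldSpace = 1 << 0, kCodeSpace = 1 << 1 };
enum Action : unsigned { kAllocate = 1 << 0, kFree = 1 << 1 };

using Callback = void (*)(Space, Action, int size);

struct Registration {
  unsigned space_mask;   // may cover several spaces
  unsigned action_mask;  // may cover several actions
  Callback callback;
};

void Perform(const std::vector<Registration>& regs, Space space,
             Action action, int size) {
  for (const Registration& r : regs) {
    // Fires only if the registration's masks cover this space and action.
    if ((r.space_mask & space) == space && (r.action_mask & action) == action)
      r.callback(space, action, size);
  }
}

int main() {
  std::vector<Registration> regs = {
      {kOldSpace | kCodeSpace, kAllocate, [](Space, Action, int size) {
         std::printf("allocate %d\n", size);
       }}};
  Perform(regs, kCodeSpace, kAllocate, 4096);  // fires
  Perform(regs, kCodeSpace, kFree, 4096);      // filtered out
}
```
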
| 811 | 763 |
| 812 bool MemoryAllocator::MemoryAllocationCallbackRegistered( | 764 bool MemoryAllocator::MemoryAllocationCallbackRegistered( |
| 813 MemoryAllocationCallback callback) { | 765 MemoryAllocationCallback callback) { |
| 814 for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) { | 766 for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) { |
| 815 if (memory_allocation_callbacks_[i].callback == callback) return true; | 767 if (memory_allocation_callbacks_[i].callback == callback) return true; |
| 816 } | 768 } |
| 817 return false; | 769 return false; |
| 818 } | 770 } |
| 819 | 771 |
| 820 | 772 |
| 821 void MemoryAllocator::AddMemoryAllocationCallback( | 773 void MemoryAllocator::AddMemoryAllocationCallback( |
| 822 MemoryAllocationCallback callback, | 774 MemoryAllocationCallback callback, ObjectSpace space, |
| 823 ObjectSpace space, | |
| 824 AllocationAction action) { | 775 AllocationAction action) { |
| 825 DCHECK(callback != NULL); | 776 DCHECK(callback != NULL); |
| 826 MemoryAllocationCallbackRegistration registration(callback, space, action); | 777 MemoryAllocationCallbackRegistration registration(callback, space, action); |
| 827 DCHECK(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback)); | 778 DCHECK(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback)); |
| 828 return memory_allocation_callbacks_.Add(registration); | 779 return memory_allocation_callbacks_.Add(registration); |
| 829 } | 780 } |
| 830 | 781 |
| 831 | 782 |
| 832 void MemoryAllocator::RemoveMemoryAllocationCallback( | 783 void MemoryAllocator::RemoveMemoryAllocationCallback( |
| 833 MemoryAllocationCallback callback) { | 784 MemoryAllocationCallback callback) { |
| 834 DCHECK(callback != NULL); | 785 DCHECK(callback != NULL); |
| 835 for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) { | 786 for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) { |
| 836 if (memory_allocation_callbacks_[i].callback == callback) { | 787 if (memory_allocation_callbacks_[i].callback == callback) { |
| 837 memory_allocation_callbacks_.Remove(i); | 788 memory_allocation_callbacks_.Remove(i); |
| 838 return; | 789 return; |
| 839 } | 790 } |
| 840 } | 791 } |
| 841 UNREACHABLE(); | 792 UNREACHABLE(); |
| 842 } | 793 } |
| 843 | 794 |
| 844 | 795 |
| 845 #ifdef DEBUG | 796 #ifdef DEBUG |
| 846 void MemoryAllocator::ReportStatistics() { | 797 void MemoryAllocator::ReportStatistics() { |
| 847 float pct = static_cast<float>(capacity_ - size_) / capacity_; | 798 float pct = static_cast<float>(capacity_ - size_) / capacity_; |
| 848 PrintF(" capacity: %" V8_PTR_PREFIX "d" | 799 PrintF(" capacity: %" V8_PTR_PREFIX |
| 849 ", used: %" V8_PTR_PREFIX "d" | 800 "d" |
| 850 ", available: %%%d\n\n", | 801 ", used: %" V8_PTR_PREFIX |
| 851 capacity_, size_, static_cast<int>(pct*100)); | 802 "d" |
| 803 ", available: %%%d\n\n", |
| 804 capacity_, size_, static_cast<int>(pct * 100)); |
| 852 } | 805 } |
| 853 #endif | 806 #endif |
| 854 | 807 |
| 855 | 808 |
| 856 int MemoryAllocator::CodePageGuardStartOffset() { | 809 int MemoryAllocator::CodePageGuardStartOffset() { |
| 857 // We are guarding code pages: the first OS page after the header | 810 // We are guarding code pages: the first OS page after the header |
| 858 // will be protected as non-writable. | 811 // will be protected as non-writable. |
| 859 return RoundUp(Page::kObjectStartOffset, base::OS::CommitPageSize()); | 812 return RoundUp(Page::kObjectStartOffset, base::OS::CommitPageSize()); |
| 860 } | 813 } |
| 861 | 814 |
| (...skipping 11 matching lines...) |
| 873 | 826 |
| 874 | 827 |
| 875 int MemoryAllocator::CodePageAreaEndOffset() { | 828 int MemoryAllocator::CodePageAreaEndOffset() { |
| 876 // We are guarding code pages: the last OS page will be protected as | 829 // We are guarding code pages: the last OS page will be protected as |
| 877 // non-writable. | 830 // non-writable. |
| 878 return Page::kPageSize - static_cast<int>(base::OS::CommitPageSize()); | 831 return Page::kPageSize - static_cast<int>(base::OS::CommitPageSize()); |
| 879 } | 832 } |
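
Note: the guard-page offsets here pin down the executable-page layout drawn in the earlier block comment. A worked example under assumed sizes (4 KB OS commit page, 1 MB V8 page, 256-byte header; `CodePageAreaStartOffset()` itself falls in the elided lines, so header-page-plus-one-guard-page is an assumption):

```cpp
#include <cassert>
#include <cstddef>

size_t RoundUpTo(size_t x, size_t m) { return (x + m - 1) / m * m; }

int main() {
  const size_t kCommitPageSize = 4096;   // assumed OS commit page size
  const size_t kPageSize = 1024 * 1024;  // assumed V8 page size
  const size_t kObjectStartOffset = 256; // assumed header size

  // Header occupies the first OS page; the next page is assumed to be
  // the guard page.
  size_t guard_start = RoundUpTo(kObjectStartOffset, kCommitPageSize);
  size_t guard_size = kCommitPageSize;
  size_t area_start = guard_start + guard_size;   // CodePageAreaStartOffset()
  size_t area_end = kPageSize - kCommitPageSize;  // CodePageAreaEndOffset()

  assert(guard_start == 4096);
  assert(area_start == 8192);
  assert(area_end == kPageSize - 4096);
  // Usable code area per page under these assumptions:
  assert(area_end - area_start == kPageSize - 3 * kCommitPageSize);
  return 0;
}
```
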
| 880 | 833 |
| 881 | 834 |
| 882 bool MemoryAllocator::CommitExecutableMemory(base::VirtualMemory* vm, | 835 bool MemoryAllocator::CommitExecutableMemory(base::VirtualMemory* vm, |
| 883 Address start, | 836 Address start, size_t commit_size, |
| 884 size_t commit_size, | |
| 885 size_t reserved_size) { | 837 size_t reserved_size) { |
| 886 // Commit page header (not executable). | 838 // Commit page header (not executable). |
| 887 if (!vm->Commit(start, | 839 if (!vm->Commit(start, CodePageGuardStartOffset(), false)) { |
| 888 CodePageGuardStartOffset(), | |
| 889 false)) { | |
| 890 return false; | 840 return false; |
| 891 } | 841 } |
| 892 | 842 |
| 893 // Create guard page after the header. | 843 // Create guard page after the header. |
| 894 if (!vm->Guard(start + CodePageGuardStartOffset())) { | 844 if (!vm->Guard(start + CodePageGuardStartOffset())) { |
| 895 return false; | 845 return false; |
| 896 } | 846 } |
| 897 | 847 |
| 898 // Commit page body (executable). | 848 // Commit page body (executable). |
| 899 if (!vm->Commit(start + CodePageAreaStartOffset(), | 849 if (!vm->Commit(start + CodePageAreaStartOffset(), |
| 900 commit_size - CodePageGuardStartOffset(), | 850 commit_size - CodePageGuardStartOffset(), true)) { |
| 901 true)) { | |
| 902 return false; | 851 return false; |
| 903 } | 852 } |
| 904 | 853 |
| 905 // Create guard page before the end. | 854 // Create guard page before the end. |
| 906 if (!vm->Guard(start + reserved_size - CodePageGuardSize())) { | 855 if (!vm->Guard(start + reserved_size - CodePageGuardSize())) { |
| 907 return false; | 856 return false; |
| 908 } | 857 } |
| 909 | 858 |
| 910 UpdateAllocatedSpaceLimits(start, | 859 UpdateAllocatedSpaceLimits(start, start + CodePageAreaStartOffset() + |
| 911 start + CodePageAreaStartOffset() + | 860 commit_size - |
| 912 commit_size - CodePageGuardStartOffset()); | 861 CodePageGuardStartOffset()); |
| 913 return true; | 862 return true; |
| 914 } | 863 } |
| 915 | 864 |
| 916 | 865 |
| 917 // ----------------------------------------------------------------------------- | 866 // ----------------------------------------------------------------------------- |
| 918 // MemoryChunk implementation | 867 // MemoryChunk implementation |
| 919 | 868 |
| 920 void MemoryChunk::IncrementLiveBytesFromMutator(Address address, int by) { | 869 void MemoryChunk::IncrementLiveBytesFromMutator(Address address, int by) { |
| 921 MemoryChunk* chunk = MemoryChunk::FromAddress(address); | 870 MemoryChunk* chunk = MemoryChunk::FromAddress(address); |
| 922 if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) { | 871 if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) { |
| 923 static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by); | 872 static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by); |
| 924 } | 873 } |
| 925 chunk->IncrementLiveBytes(by); | 874 chunk->IncrementLiveBytes(by); |
| 926 } | 875 } |
| 927 | 876 |
| 928 | 877 |
| 929 // ----------------------------------------------------------------------------- | 878 // ----------------------------------------------------------------------------- |
| 930 // PagedSpace implementation | 879 // PagedSpace implementation |
| 931 | 880 |
| 932 PagedSpace::PagedSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id, | 881 PagedSpace::PagedSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id, |
| 933 Executability executable) | 882 Executability executable) |
| 934 : Space(heap, id, executable), | 883 : Space(heap, id, executable), |
| 935 free_list_(this), | 884 free_list_(this), |
| 936 swept_precisely_(true), | 885 swept_precisely_(true), |
| 937 unswept_free_bytes_(0), | 886 unswept_free_bytes_(0), |
| 938 end_of_unswept_pages_(NULL), | 887 end_of_unswept_pages_(NULL), |
| 939 emergency_memory_(NULL) { | 888 emergency_memory_(NULL) { |
| 940 if (id == CODE_SPACE) { | 889 if (id == CODE_SPACE) { |
| 941 area_size_ = heap->isolate()->memory_allocator()-> | 890 area_size_ = heap->isolate()->memory_allocator()->CodePageAreaSize(); |
| 942 CodePageAreaSize(); | |
| 943 } else { | 891 } else { |
| 944 area_size_ = Page::kPageSize - Page::kObjectStartOffset; | 892 area_size_ = Page::kPageSize - Page::kObjectStartOffset; |
| 945 } | 893 } |
| 946 max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize) | 894 max_capacity_ = |
| 947 * AreaSize(); | 895 (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize) * AreaSize(); |
| 948 accounting_stats_.Clear(); | 896 accounting_stats_.Clear(); |
| 949 | 897 |
| 950 allocation_info_.set_top(NULL); | 898 allocation_info_.set_top(NULL); |
| 951 allocation_info_.set_limit(NULL); | 899 allocation_info_.set_limit(NULL); |
| 952 | 900 |
| 953 anchor_.InitializeAsAnchor(this); | 901 anchor_.InitializeAsAnchor(this); |
| 954 } | 902 } |
| 955 | 903 |
| 956 | 904 |
| 957 bool PagedSpace::SetUp() { | 905 bool PagedSpace::SetUp() { return true; } |
| 958 return true; | |
| 959 } | |
| 960 | 906 |
| 961 | 907 |
| 962 bool PagedSpace::HasBeenSetUp() { | 908 bool PagedSpace::HasBeenSetUp() { return true; } |
| 963 return true; | |
| 964 } | |
| 965 | 909 |
| 966 | 910 |
| 967 void PagedSpace::TearDown() { | 911 void PagedSpace::TearDown() { |
| 968 PageIterator iterator(this); | 912 PageIterator iterator(this); |
| 969 while (iterator.has_next()) { | 913 while (iterator.has_next()) { |
| 970 heap()->isolate()->memory_allocator()->Free(iterator.next()); | 914 heap()->isolate()->memory_allocator()->Free(iterator.next()); |
| 971 } | 915 } |
| 972 anchor_.set_next_page(&anchor_); | 916 anchor_.set_next_page(&anchor_); |
| 973 anchor_.set_prev_page(&anchor_); | 917 anchor_.set_prev_page(&anchor_); |
| 974 accounting_stats_.Clear(); | 918 accounting_stats_.Clear(); |
| (...skipping 47 matching lines...) |
| 1022 | 966 |
| 1023 bool PagedSpace::Expand() { | 967 bool PagedSpace::Expand() { |
| 1024 if (!CanExpand()) return false; | 968 if (!CanExpand()) return false; |
| 1025 | 969 |
| 1026 intptr_t size = AreaSize(); | 970 intptr_t size = AreaSize(); |
| 1027 | 971 |
| 1028 if (anchor_.next_page() == &anchor_) { | 972 if (anchor_.next_page() == &anchor_) { |
| 1029 size = SizeOfFirstPage(); | 973 size = SizeOfFirstPage(); |
| 1030 } | 974 } |
| 1031 | 975 |
| 1032 Page* p = heap()->isolate()->memory_allocator()->AllocatePage( | 976 Page* p = heap()->isolate()->memory_allocator()->AllocatePage(size, this, |
| 1033 size, this, executable()); | 977 executable()); |
| 1034 if (p == NULL) return false; | 978 if (p == NULL) return false; |
| 1035 | 979 |
| 1036 DCHECK(Capacity() <= max_capacity_); | 980 DCHECK(Capacity() <= max_capacity_); |
| 1037 | 981 |
| 1038 p->InsertAfter(anchor_.prev_page()); | 982 p->InsertAfter(anchor_.prev_page()); |
| 1039 | 983 |
| 1040 return true; | 984 return true; |
| 1041 } | 985 } |
| 1042 | 986 |
| 1043 | 987 |
| (...skipping 16 matching lines...) |
| 1060 size = 8 * kPointerSize * KB; | 1004 size = 8 * kPointerSize * KB; |
| 1061 break; | 1005 break; |
| 1062 case CODE_SPACE: { | 1006 case CODE_SPACE: { |
| 1063 CodeRange* code_range = heap()->isolate()->code_range(); | 1007 CodeRange* code_range = heap()->isolate()->code_range(); |
| 1064 if (code_range != NULL && code_range->valid()) { | 1008 if (code_range != NULL && code_range->valid()) { |
| 1065 // When code range exists, code pages are allocated in a special way | 1009 // When code range exists, code pages are allocated in a special way |
| 1066 // (from the reserved code range). That part of the code is not yet | 1010 // (from the reserved code range). That part of the code is not yet |
| 1067 // upgraded to handle small pages. | 1011 // upgraded to handle small pages. |
| 1068 size = AreaSize(); | 1012 size = AreaSize(); |
| 1069 } else { | 1013 } else { |
| 1070 size = RoundUp( | 1014 size = |
| 1071 480 * KB * FullCodeGenerator::kBootCodeSizeMultiplier / 100, | 1015 RoundUp(480 * KB * FullCodeGenerator::kBootCodeSizeMultiplier / 100, |
| 1072 kPointerSize); | 1016 kPointerSize); |
| 1073 } | 1017 } |
| 1074 break; | 1018 break; |
| 1075 } | 1019 } |
| 1076 default: | 1020 default: |
| 1077 UNREACHABLE(); | 1021 UNREACHABLE(); |
| 1078 } | 1022 } |
| 1079 return Min(size, AreaSize()); | 1023 return Min(size, AreaSize()); |
| 1080 } | 1024 } |
| 1081 | 1025 |
| 1082 | 1026 |
| (...skipping 83 matching lines...) |
| 1166 | 1110 |
| 1167 | 1111 |
| 1168 void PagedSpace::UseEmergencyMemory() { | 1112 void PagedSpace::UseEmergencyMemory() { |
| 1169 Page* page = Page::Initialize(heap(), emergency_memory_, executable(), this); | 1113 Page* page = Page::Initialize(heap(), emergency_memory_, executable(), this); |
| 1170 page->InsertAfter(anchor_.prev_page()); | 1114 page->InsertAfter(anchor_.prev_page()); |
| 1171 emergency_memory_ = NULL; | 1115 emergency_memory_ = NULL; |
| 1172 } | 1116 } |
| 1173 | 1117 |
| 1174 | 1118 |
| 1175 #ifdef DEBUG | 1119 #ifdef DEBUG |
| 1176 void PagedSpace::Print() { } | 1120 void PagedSpace::Print() {} |
| 1177 #endif | 1121 #endif |
| 1178 | 1122 |
| 1179 #ifdef VERIFY_HEAP | 1123 #ifdef VERIFY_HEAP |
| 1180 void PagedSpace::Verify(ObjectVisitor* visitor) { | 1124 void PagedSpace::Verify(ObjectVisitor* visitor) { |
| 1181 // We can only iterate over the pages if they were swept precisely. | 1125 // We can only iterate over the pages if they were swept precisely. |
| 1182 if (!swept_precisely_) return; | 1126 if (!swept_precisely_) return; |
| 1183 | 1127 |
| 1184 bool allocation_pointer_found_in_space = | 1128 bool allocation_pointer_found_in_space = |
| 1185 (allocation_info_.top() == allocation_info_.limit()); | 1129 (allocation_info_.top() == allocation_info_.limit()); |
| 1186 PageIterator page_iterator(this); | 1130 PageIterator page_iterator(this); |
| (...skipping 45 matching lines...) |
| 1232 | 1176 |
| 1233 bool NewSpace::SetUp(int reserved_semispace_capacity, | 1177 bool NewSpace::SetUp(int reserved_semispace_capacity, |
| 1234 int maximum_semispace_capacity) { | 1178 int maximum_semispace_capacity) { |
| 1235 // Set up new space based on the preallocated memory block defined by | 1179 // Set up new space based on the preallocated memory block defined by |
| 1236 // start and size. The provided space is divided into two semi-spaces. | 1180 // start and size. The provided space is divided into two semi-spaces. |
| 1237 // To support fast containment testing in the new space, the size of | 1181 // To support fast containment testing in the new space, the size of |
| 1238 // this chunk must be a power of two and it must be aligned to its size. | 1182 // this chunk must be a power of two and it must be aligned to its size. |
| 1239 int initial_semispace_capacity = heap()->InitialSemiSpaceSize(); | 1183 int initial_semispace_capacity = heap()->InitialSemiSpaceSize(); |
| 1240 | 1184 |
| 1241 size_t size = 2 * reserved_semispace_capacity; | 1185 size_t size = 2 * reserved_semispace_capacity; |
| 1242 Address base = | 1186 Address base = heap()->isolate()->memory_allocator()->ReserveAlignedMemory( |
| 1243 heap()->isolate()->memory_allocator()->ReserveAlignedMemory( | 1187 size, size, &reservation_); |
| 1244 size, size, &reservation_); | |
| 1245 if (base == NULL) return false; | 1188 if (base == NULL) return false; |
| 1246 | 1189 |
| 1247 chunk_base_ = base; | 1190 chunk_base_ = base; |
| 1248 chunk_size_ = static_cast<uintptr_t>(size); | 1191 chunk_size_ = static_cast<uintptr_t>(size); |
| 1249 LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_)); | 1192 LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_)); |
| 1250 | 1193 |
| 1251 DCHECK(initial_semispace_capacity <= maximum_semispace_capacity); | 1194 DCHECK(initial_semispace_capacity <= maximum_semispace_capacity); |
| 1252 DCHECK(IsPowerOf2(maximum_semispace_capacity)); | 1195 DCHECK(IsPowerOf2(maximum_semispace_capacity)); |
| 1253 | 1196 |
| 1254 // Allocate and set up the histogram arrays if necessary. | 1197 // Allocate and set up the histogram arrays if necessary. |
| 1255 allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1); | 1198 allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1); |
| 1256 promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1); | 1199 promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1); |
| 1257 | 1200 |
| 1258 #define SET_NAME(name) allocated_histogram_[name].set_name(#name); \ | 1201 #define SET_NAME(name) \ |
| 1259 promoted_histogram_[name].set_name(#name); | 1202 allocated_histogram_[name].set_name(#name); \ |
| 1203 promoted_histogram_[name].set_name(#name); |
| 1260 INSTANCE_TYPE_LIST(SET_NAME) | 1204 INSTANCE_TYPE_LIST(SET_NAME) |
| 1261 #undef SET_NAME | 1205 #undef SET_NAME |
| 1262 | 1206 |
| 1263 DCHECK(reserved_semispace_capacity == heap()->ReservedSemiSpaceSize()); | 1207 DCHECK(reserved_semispace_capacity == heap()->ReservedSemiSpaceSize()); |
| 1264 DCHECK(static_cast<intptr_t>(chunk_size_) >= | 1208 DCHECK(static_cast<intptr_t>(chunk_size_) >= |
| 1265 2 * heap()->ReservedSemiSpaceSize()); | 1209 2 * heap()->ReservedSemiSpaceSize()); |
| 1266 DCHECK(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0)); | 1210 DCHECK(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0)); |
| 1267 | 1211 |
| 1268 to_space_.SetUp(chunk_base_, | 1212 to_space_.SetUp(chunk_base_, initial_semispace_capacity, |
| 1269 initial_semispace_capacity, | |
| 1270 maximum_semispace_capacity); | 1213 maximum_semispace_capacity); |
| 1271 from_space_.SetUp(chunk_base_ + reserved_semispace_capacity, | 1214 from_space_.SetUp(chunk_base_ + reserved_semispace_capacity, |
| 1272 initial_semispace_capacity, | 1215 initial_semispace_capacity, maximum_semispace_capacity); |
| 1273 maximum_semispace_capacity); | |
| 1274 if (!to_space_.Commit()) { | 1216 if (!to_space_.Commit()) { |
| 1275 return false; | 1217 return false; |
| 1276 } | 1218 } |
| 1277 DCHECK(!from_space_.is_committed()); // No need to use memory yet. | 1219 DCHECK(!from_space_.is_committed()); // No need to use memory yet. |
| 1278 | 1220 |
| 1279 start_ = chunk_base_; | 1221 start_ = chunk_base_; |
| 1280 address_mask_ = ~(2 * reserved_semispace_capacity - 1); | 1222 address_mask_ = ~(2 * reserved_semispace_capacity - 1); |
| 1281 object_mask_ = address_mask_ | kHeapObjectTagMask; | 1223 object_mask_ = address_mask_ | kHeapObjectTagMask; |
| 1282 object_expected_ = reinterpret_cast<uintptr_t>(start_) | kHeapObjectTag; | 1224 object_expected_ = reinterpret_cast<uintptr_t>(start_) | kHeapObjectTag; |
| 1283 | 1225 |
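The three mask fields set up above are what make new-space containment tests O(1): the reserved chunk has a power-of-two size and is aligned to that size, so masking off the low bits of any address recovers the chunk base. A minimal standalone sketch of the idea, with illustrative names rather than V8's:

    #include <cassert>
    #include <cstdint>

    // Fast containment test for a power-of-two sized, size-aligned chunk.
    struct ChunkRange {
      uintptr_t start;  // chunk base, aligned to 'size'
      uintptr_t size;   // power of two
      bool Contains(uintptr_t addr) const {
        // Masking off the low bits maps any interior address to the base.
        return (addr & ~(size - 1)) == start;
      }
    };

    int main() {
      ChunkRange chunk{0x40000000, 0x2000000};  // 32 MB chunk, 32 MB aligned
      assert(chunk.Contains(0x40000000));       // first byte
      assert(chunk.Contains(0x41FFFFFF));       // last byte
      assert(!chunk.Contains(0x42000000));      // one past the end
    }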
| (...skipping 23 matching lines...) |
| 1307 LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_)); | 1249 LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_)); |
| 1308 | 1250 |
| 1309 DCHECK(reservation_.IsReserved()); | 1251 DCHECK(reservation_.IsReserved()); |
| 1310 heap()->isolate()->memory_allocator()->FreeMemory(&reservation_, | 1252 heap()->isolate()->memory_allocator()->FreeMemory(&reservation_, |
| 1311 NOT_EXECUTABLE); | 1253 NOT_EXECUTABLE); |
| 1312 chunk_base_ = NULL; | 1254 chunk_base_ = NULL; |
| 1313 chunk_size_ = 0; | 1255 chunk_size_ = 0; |
| 1314 } | 1256 } |
| 1315 | 1257 |
| 1316 | 1258 |
| 1317 void NewSpace::Flip() { | 1259 void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); } |
| 1318 SemiSpace::Swap(&from_space_, &to_space_); | |
| 1319 } | |
| 1320 | 1260 |
| 1321 | 1261 |
| 1322 void NewSpace::Grow() { | 1262 void NewSpace::Grow() { |
| 1323 // Double the semispace size but only up to maximum capacity. | 1263 // Double the semispace size but only up to maximum capacity. |
| 1324 DCHECK(Capacity() < MaximumCapacity()); | 1264 DCHECK(Capacity() < MaximumCapacity()); |
| 1325 int new_capacity = Min(MaximumCapacity(), 2 * static_cast<int>(Capacity())); | 1265 int new_capacity = Min(MaximumCapacity(), 2 * static_cast<int>(Capacity())); |
| 1326 if (to_space_.GrowTo(new_capacity)) { | 1266 if (to_space_.GrowTo(new_capacity)) { |
| 1327 // Only grow from space if we managed to grow to-space. | 1267 // Only grow from space if we managed to grow to-space. |
| 1328 if (!from_space_.GrowTo(new_capacity)) { | 1268 if (!from_space_.GrowTo(new_capacity)) { |
| 1329 // If we managed to grow to-space but couldn't grow from-space, | 1269 // If we managed to grow to-space but couldn't grow from-space, |
| 1330 // attempt to shrink to-space. | 1270 // attempt to shrink to-space. |
| 1331 if (!to_space_.ShrinkTo(from_space_.Capacity())) { | 1271 if (!to_space_.ShrinkTo(from_space_.Capacity())) { |
| 1332 // We are in an inconsistent state because we could not | 1272 // We are in an inconsistent state because we could not |
| 1333 // commit/uncommit memory from new space. | 1273 // commit/uncommit memory from new space. |
| 1334 V8::FatalProcessOutOfMemory("Failed to grow new space."); | 1274 V8::FatalProcessOutOfMemory("Failed to grow new space."); |
| 1335 } | 1275 } |
| 1336 } | 1276 } |
| 1337 } | 1277 } |
| 1338 DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); | 1278 DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); |
| 1339 } | 1279 } |
| 1340 | 1280 |
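Grow() above (and Shrink() below) must keep the two semispaces at equal capacity, so both follow the same pattern: resize to-space, mirror the resize on from-space, and roll back the first step if the second fails; only a failed rollback is fatal. A hedged sketch of that pattern, using stand-ins for V8's types:

    #include <cstdio>
    #include <cstdlib>

    struct SemiSpaceStub {
      int capacity = 0;
      bool Resize(int new_capacity) {
        capacity = new_capacity;  // a real space commits/uncommits memory here
        return true;
      }
    };

    // Keep both halves at the same capacity; abort only when neither the
    // mirrored resize nor the rollback succeeds.
    void ResizePair(SemiSpaceStub* to, SemiSpaceStub* from, int new_capacity) {
      if (!to->Resize(new_capacity)) return;  // nothing changed yet: safe
      if (from->Resize(new_capacity)) return;
      if (!to->Resize(from->capacity)) {      // attempt the rollback
        fprintf(stderr, "semispaces left inconsistent\n");
        abort();
      }
    }

    int main() {
      SemiSpaceStub to, from;
      ResizePair(&to, &from, 4 * 1024 * 1024);
      return to.capacity == from.capacity ? 0 : 1;
    }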
| 1341 | 1281 |
| 1342 void NewSpace::Shrink() { | 1282 void NewSpace::Shrink() { |
| 1343 int new_capacity = Max(InitialCapacity(), 2 * SizeAsInt()); | 1283 int new_capacity = Max(InitialCapacity(), 2 * SizeAsInt()); |
| 1344 int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize); | 1284 int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize); |
| 1345 if (rounded_new_capacity < Capacity() && | 1285 if (rounded_new_capacity < Capacity() && |
| 1346 to_space_.ShrinkTo(rounded_new_capacity)) { | 1286 to_space_.ShrinkTo(rounded_new_capacity)) { |
| 1347 // Only shrink from-space if we managed to shrink to-space. | 1287 // Only shrink from-space if we managed to shrink to-space. |
| 1348 from_space_.Reset(); | 1288 from_space_.Reset(); |
| 1349 if (!from_space_.ShrinkTo(rounded_new_capacity)) { | 1289 if (!from_space_.ShrinkTo(rounded_new_capacity)) { |
| 1350 // If we managed to shrink to-space but couldn't shrink from | 1290 // If we managed to shrink to-space but couldn't shrink from |
| 1351 // space, attempt to grow to-space again. | 1291 // space, attempt to grow to-space again. |
| 1352 if (!to_space_.GrowTo(from_space_.Capacity())) { | 1292 if (!to_space_.GrowTo(from_space_.Capacity())) { |
| 1353 // We are in an inconsistent state because we could not | 1293 // We are in an inconsistent state because we could not |
| 1354 // commit/uncommit memory from new space. | 1294 // commit/uncommit memory from new space. |
| 1355 V8::FatalProcessOutOfMemory("Failed to shrink new space."); | 1295 V8::FatalProcessOutOfMemory("Failed to shrink new space."); |
| 1356 } | 1296 } |
| (...skipping 78 matching lines...) |
| 1435 | 1375 |
| 1436 AllocationResult NewSpace::SlowAllocateRaw(int size_in_bytes) { | 1376 AllocationResult NewSpace::SlowAllocateRaw(int size_in_bytes) { |
| 1437 Address old_top = allocation_info_.top(); | 1377 Address old_top = allocation_info_.top(); |
| 1438 Address high = to_space_.page_high(); | 1378 Address high = to_space_.page_high(); |
| 1439 if (allocation_info_.limit() < high) { | 1379 if (allocation_info_.limit() < high) { |
| 1440 // Either the limit has been lowered because linear allocation was disabled | 1380 // Either the limit has been lowered because linear allocation was disabled |
| 1441 // or because incremental marking wants to get a chance to do a step. Set | 1381 // or because incremental marking wants to get a chance to do a step. Set |
| 1442 // the new limit accordingly. | 1382 // the new limit accordingly. |
| 1443 Address new_top = old_top + size_in_bytes; | 1383 Address new_top = old_top + size_in_bytes; |
| 1444 int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_); | 1384 int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_); |
| 1445 heap()->incremental_marking()->Step( | 1385 heap()->incremental_marking()->Step(bytes_allocated, |
| 1446 bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD); | 1386 IncrementalMarking::GC_VIA_STACK_GUARD); |
| 1447 UpdateInlineAllocationLimit(size_in_bytes); | 1387 UpdateInlineAllocationLimit(size_in_bytes); |
| 1448 top_on_previous_step_ = new_top; | 1388 top_on_previous_step_ = new_top; |
| 1449 return AllocateRaw(size_in_bytes); | 1389 return AllocateRaw(size_in_bytes); |
| 1450 } else if (AddFreshPage()) { | 1390 } else if (AddFreshPage()) { |
| 1451 // Switched to new page. Try allocating again. | 1391 // Switched to new page. Try allocating again. |
| 1452 int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_); | 1392 int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_); |
| 1453 heap()->incremental_marking()->Step( | 1393 heap()->incremental_marking()->Step(bytes_allocated, |
| 1454 bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD); | 1394 IncrementalMarking::GC_VIA_STACK_GUARD); |
| 1455 top_on_previous_step_ = to_space_.page_low(); | 1395 top_on_previous_step_ = to_space_.page_low(); |
| 1456 return AllocateRaw(size_in_bytes); | 1396 return AllocateRaw(size_in_bytes); |
| 1457 } else { | 1397 } else { |
| 1458 return AllocationResult::Retry(); | 1398 return AllocationResult::Retry(); |
| 1459 } | 1399 } |
| 1460 } | 1400 } |
| 1461 | 1401 |
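The slow path above exists because the inline allocation limit is sometimes held below the real end of the page, so each crossing hands control back for an incremental-marking step before allocation continues. A minimal bump-pointer sketch of that soft-limit trick (illustrative only, not V8's API):

    #include <cstdint>
    #include <cstdio>

    struct BumpAllocator {
      uintptr_t top, limit, page_high;  // limit may sit below page_high

      void* Allocate(uintptr_t size) {
        if (top + size <= limit) {  // fast path: bump the pointer
          void* result = reinterpret_cast<void*>(top);
          top += size;
          return result;
        }
        return SlowAllocate(size);
      }

      void* SlowAllocate(uintptr_t size) {
        if (limit < page_high) {
          // The limit was lowered artificially: do a bookkeeping step
          // (standing in for the incremental-marking step), restore the
          // limit, and retry the fast path.
          printf("bookkeeping step after crossing the soft limit\n");
          limit = page_high;
          return Allocate(size);
        }
        return nullptr;  // genuinely out of space on this page
      }
    };

    int main() {
      static char page[256];
      uintptr_t base = reinterpret_cast<uintptr_t>(page);
      BumpAllocator a{base, base + 64, base + 256};  // soft limit at 64 bytes
      for (int i = 0; i < 8; i++) a.Allocate(16);    // 5th call hits slow path
      return 0;
    }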
| 1462 | 1402 |
| 1463 #ifdef VERIFY_HEAP | 1403 #ifdef VERIFY_HEAP |
| 1464 // We do not use the SemiSpaceIterator because verification doesn't assume | 1404 // We do not use the SemiSpaceIterator because verification doesn't assume |
| (...skipping 47 matching lines...) |
| 1512 CHECK_EQ(from_space_.id(), kFromSpace); | 1452 CHECK_EQ(from_space_.id(), kFromSpace); |
| 1513 CHECK_EQ(to_space_.id(), kToSpace); | 1453 CHECK_EQ(to_space_.id(), kToSpace); |
| 1514 from_space_.Verify(); | 1454 from_space_.Verify(); |
| 1515 to_space_.Verify(); | 1455 to_space_.Verify(); |
| 1516 } | 1456 } |
| 1517 #endif | 1457 #endif |
| 1518 | 1458 |
| 1519 // ----------------------------------------------------------------------------- | 1459 // ----------------------------------------------------------------------------- |
| 1520 // SemiSpace implementation | 1460 // SemiSpace implementation |
| 1521 | 1461 |
| 1522 void SemiSpace::SetUp(Address start, | 1462 void SemiSpace::SetUp(Address start, int initial_capacity, |
| 1523 int initial_capacity, | |
| 1524 int maximum_capacity) { | 1463 int maximum_capacity) { |
| 1525 // Creates a space in the young generation. The constructor does not | 1464 // Creates a space in the young generation. The constructor does not |
| 1526 // allocate memory from the OS. A SemiSpace is given a contiguous chunk of | 1465 // allocate memory from the OS. A SemiSpace is given a contiguous chunk of |
| 1527 // memory of size 'capacity' when set up, and does not grow or shrink | 1466 // memory of size 'capacity' when set up, and does not grow or shrink |
| 1528 // otherwise. In the mark-compact collector, the memory region of the from | 1467 // otherwise. In the mark-compact collector, the memory region of the from |
| 1529 // space is used as the marking stack. It requires contiguous memory | 1468 // space is used as the marking stack. It requires contiguous memory |
| 1530 // addresses. | 1469 // addresses. |
| 1531 DCHECK(maximum_capacity >= Page::kPageSize); | 1470 DCHECK(maximum_capacity >= Page::kPageSize); |
| 1532 initial_capacity_ = RoundDown(initial_capacity, Page::kPageSize); | 1471 initial_capacity_ = RoundDown(initial_capacity, Page::kPageSize); |
| 1533 capacity_ = initial_capacity; | 1472 capacity_ = initial_capacity; |
| (...skipping 10 matching lines...) |
| 1544 | 1483 |
| 1545 void SemiSpace::TearDown() { | 1484 void SemiSpace::TearDown() { |
| 1546 start_ = NULL; | 1485 start_ = NULL; |
| 1547 capacity_ = 0; | 1486 capacity_ = 0; |
| 1548 } | 1487 } |
| 1549 | 1488 |
| 1550 | 1489 |
| 1551 bool SemiSpace::Commit() { | 1490 bool SemiSpace::Commit() { |
| 1552 DCHECK(!is_committed()); | 1491 DCHECK(!is_committed()); |
| 1553 int pages = capacity_ / Page::kPageSize; | 1492 int pages = capacity_ / Page::kPageSize; |
| 1554 if (!heap()->isolate()->memory_allocator()->CommitBlock(start_, | 1493 if (!heap()->isolate()->memory_allocator()->CommitBlock(start_, capacity_, |
| 1555 capacity_, | |
| 1556 executable())) { | 1494 executable())) { |
| 1557 return false; | 1495 return false; |
| 1558 } | 1496 } |
| 1559 | 1497 |
| 1560 NewSpacePage* current = anchor(); | 1498 NewSpacePage* current = anchor(); |
| 1561 for (int i = 0; i < pages; i++) { | 1499 for (int i = 0; i < pages; i++) { |
| 1562 NewSpacePage* new_page = | 1500 NewSpacePage* new_page = |
| 1563 NewSpacePage::Initialize(heap(), start_ + i * Page::kPageSize, this); | 1501 NewSpacePage::Initialize(heap(), start_ + i * Page::kPageSize, this); |
| 1564 new_page->InsertAfter(current); | 1502 new_page->InsertAfter(current); |
| 1565 current = new_page; | 1503 current = new_page; |
| 1566 } | 1504 } |
| 1567 | 1505 |
| 1568 SetCapacity(capacity_); | 1506 SetCapacity(capacity_); |
| 1569 committed_ = true; | 1507 committed_ = true; |
| 1570 Reset(); | 1508 Reset(); |
| 1571 return true; | 1509 return true; |
| 1572 } | 1510 } |
| 1573 | 1511 |
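Commit() above carves the freshly committed block into pages and threads each one onto the space's circular list behind the anchor sentinel. The splice is a standard doubly-linked InsertAfter; a compact sketch with stand-in types:

    #include <cassert>

    struct PageNode {
      PageNode* next = this;  // a fresh node (or the anchor) points at itself
      PageNode* prev = this;
      void InsertAfter(PageNode* other) {
        next = other->next;
        prev = other;
        other->next->prev = this;
        other->next = this;
      }
    };

    int main() {
      PageNode anchor;  // sentinel, not a real page
      PageNode pages[3];
      PageNode* current = &anchor;
      for (PageNode& p : pages) {  // mirrors the loop in Commit()
        p.InsertAfter(current);
        current = &p;
      }
      assert(anchor.next == &pages[0]);  // walking 'next' visits every page
      assert(pages[2].next == &anchor);  // ...and wraps back to the anchor
      assert(anchor.prev == &pages[2]);
    }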
| (...skipping 30 matching lines...) |
| 1604 DCHECK((new_capacity & Page::kPageAlignmentMask) == 0); | 1542 DCHECK((new_capacity & Page::kPageAlignmentMask) == 0); |
| 1605 DCHECK(new_capacity <= maximum_capacity_); | 1543 DCHECK(new_capacity <= maximum_capacity_); |
| 1606 DCHECK(new_capacity > capacity_); | 1544 DCHECK(new_capacity > capacity_); |
| 1607 int pages_before = capacity_ / Page::kPageSize; | 1545 int pages_before = capacity_ / Page::kPageSize; |
| 1608 int pages_after = new_capacity / Page::kPageSize; | 1546 int pages_after = new_capacity / Page::kPageSize; |
| 1609 | 1547 |
| 1610 size_t delta = new_capacity - capacity_; | 1548 size_t delta = new_capacity - capacity_; |
| 1611 | 1549 |
| 1612 DCHECK(IsAligned(delta, base::OS::AllocateAlignment())); | 1550 DCHECK(IsAligned(delta, base::OS::AllocateAlignment())); |
| 1613 if (!heap()->isolate()->memory_allocator()->CommitBlock( | 1551 if (!heap()->isolate()->memory_allocator()->CommitBlock( |
| 1614 start_ + capacity_, delta, executable())) { | 1552 start_ + capacity_, delta, executable())) { |
| 1615 return false; | 1553 return false; |
| 1616 } | 1554 } |
| 1617 SetCapacity(new_capacity); | 1555 SetCapacity(new_capacity); |
| 1618 NewSpacePage* last_page = anchor()->prev_page(); | 1556 NewSpacePage* last_page = anchor()->prev_page(); |
| 1619 DCHECK(last_page != anchor()); | 1557 DCHECK(last_page != anchor()); |
| 1620 for (int i = pages_before; i < pages_after; i++) { | 1558 for (int i = pages_before; i < pages_after; i++) { |
| 1621 Address page_address = start_ + i * Page::kPageSize; | 1559 Address page_address = start_ + i * Page::kPageSize; |
| 1622 NewSpacePage* new_page = NewSpacePage::Initialize(heap(), | 1560 NewSpacePage* new_page = |
| 1623 page_address, | 1561 NewSpacePage::Initialize(heap(), page_address, this); |
| 1624 this); | |
| 1625 new_page->InsertAfter(last_page); | 1562 new_page->InsertAfter(last_page); |
| 1626 Bitmap::Clear(new_page); | 1563 Bitmap::Clear(new_page); |
| 1627 // Duplicate the flags that were set on the old page. | 1564 // Duplicate the flags that were set on the old page. |
| 1628 new_page->SetFlags(last_page->GetFlags(), | 1565 new_page->SetFlags(last_page->GetFlags(), |
| 1629 NewSpacePage::kCopyOnFlipFlagsMask); | 1566 NewSpacePage::kCopyOnFlipFlagsMask); |
| 1630 last_page = new_page; | 1567 last_page = new_page; |
| 1631 } | 1568 } |
| 1632 return true; | 1569 return true; |
| 1633 } | 1570 } |
| 1634 | 1571 |
| (...skipping 95 matching lines...) |
| 1730 age_mark_ = mark; | 1667 age_mark_ = mark; |
| 1731 // Mark all pages up to the one containing mark. | 1668 // Mark all pages up to the one containing mark. |
| 1732 NewSpacePageIterator it(space_start(), mark); | 1669 NewSpacePageIterator it(space_start(), mark); |
| 1733 while (it.has_next()) { | 1670 while (it.has_next()) { |
| 1734 it.next()->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK); | 1671 it.next()->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK); |
| 1735 } | 1672 } |
| 1736 } | 1673 } |
| 1737 | 1674 |
| 1738 | 1675 |
| 1739 #ifdef DEBUG | 1676 #ifdef DEBUG |
| 1740 void SemiSpace::Print() { } | 1677 void SemiSpace::Print() {} |
| 1741 #endif | 1678 #endif |
| 1742 | 1679 |
| 1743 #ifdef VERIFY_HEAP | 1680 #ifdef VERIFY_HEAP |
| 1744 void SemiSpace::Verify() { | 1681 void SemiSpace::Verify() { |
| 1745 bool is_from_space = (id_ == kFromSpace); | 1682 bool is_from_space = (id_ == kFromSpace); |
| 1746 NewSpacePage* page = anchor_.next_page(); | 1683 NewSpacePage* page = anchor_.next_page(); |
| 1747 CHECK(anchor_.semi_space() == this); | 1684 CHECK(anchor_.semi_space() == this); |
| 1748 while (page != &anchor_) { | 1685 while (page != &anchor_) { |
| 1749 CHECK(page->semi_space() == this); | 1686 CHECK(page->semi_space() == this); |
| 1750 CHECK(page->InNewSpace()); | 1687 CHECK(page->InNewSpace()); |
| 1751 CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE | 1688 CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE |
| 1752 : MemoryChunk::IN_TO_SPACE)); | 1689 : MemoryChunk::IN_TO_SPACE)); |
| 1753 CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE | 1690 CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE |
| 1754 : MemoryChunk::IN_FROM_SPACE)); | 1691 : MemoryChunk::IN_FROM_SPACE)); |
| 1755 CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING)); | 1692 CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING)); |
| 1756 if (!is_from_space) { | 1693 if (!is_from_space) { |
| 1757 // The pointers-from-here-are-interesting flag isn't updated dynamically | 1694 // The pointers-from-here-are-interesting flag isn't updated dynamically |
| 1758 // on from-space pages, so it might be out of sync with the marking state. | 1695 // on from-space pages, so it might be out of sync with the marking state. |
| 1759 if (page->heap()->incremental_marking()->IsMarking()) { | 1696 if (page->heap()->incremental_marking()->IsMarking()) { |
| 1760 CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING)); | 1697 CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING)); |
| 1761 } else { | 1698 } else { |
| 1762 CHECK(!page->IsFlagSet( | 1699 CHECK( |
| 1763 MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING)); | 1700 !page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING)); |
| 1764 } | 1701 } |
| 1765 // TODO(gc): Check that the live_bytes_count_ field matches the | 1702 // TODO(gc): Check that the live_bytes_count_ field matches the |
| 1766 // black marking on the page (if we make it match in new-space). | 1703 // black marking on the page (if we make it match in new-space). |
| 1767 } | 1704 } |
| 1768 CHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE)); | 1705 CHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE)); |
| 1769 CHECK(page->prev_page()->next_page() == page); | 1706 CHECK(page->prev_page()->next_page() == page); |
| 1770 page = page->next_page(); | 1707 page = page->next_page(); |
| 1771 } | 1708 } |
| 1772 } | 1709 } |
| 1773 #endif | 1710 #endif |
| (...skipping 36 matching lines...) |
| 1810 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) { | 1747 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) { |
| 1811 Initialize(start, space->top(), NULL); | 1748 Initialize(start, space->top(), NULL); |
| 1812 } | 1749 } |
| 1813 | 1750 |
| 1814 | 1751 |
| 1815 SemiSpaceIterator::SemiSpaceIterator(Address from, Address to) { | 1752 SemiSpaceIterator::SemiSpaceIterator(Address from, Address to) { |
| 1816 Initialize(from, to, NULL); | 1753 Initialize(from, to, NULL); |
| 1817 } | 1754 } |
| 1818 | 1755 |
| 1819 | 1756 |
| 1820 void SemiSpaceIterator::Initialize(Address start, | 1757 void SemiSpaceIterator::Initialize(Address start, Address end, |
| 1821 Address end, | |
| 1822 HeapObjectCallback size_func) { | 1758 HeapObjectCallback size_func) { |
| 1823 SemiSpace::AssertValidRange(start, end); | 1759 SemiSpace::AssertValidRange(start, end); |
| 1824 current_ = start; | 1760 current_ = start; |
| 1825 limit_ = end; | 1761 limit_ = end; |
| 1826 size_func_ = size_func; | 1762 size_func_ = size_func; |
| 1827 } | 1763 } |
| 1828 | 1764 |
| 1829 | 1765 |
| 1830 #ifdef DEBUG | 1766 #ifdef DEBUG |
| 1831 // heap_histograms is shared, always clear it before using it. | 1767 // heap_histograms is shared, always clear it before using it. |
| 1832 static void ClearHistograms(Isolate* isolate) { | 1768 static void ClearHistograms(Isolate* isolate) { |
| 1833 // We reset the name each time, though it hasn't changed. | 1769 // We reset the name each time, though it hasn't changed. |
| 1834 #define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name); | 1770 #define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name); |
| 1835 INSTANCE_TYPE_LIST(DEF_TYPE_NAME) | 1771 INSTANCE_TYPE_LIST(DEF_TYPE_NAME) |
| 1836 #undef DEF_TYPE_NAME | 1772 #undef DEF_TYPE_NAME |
| 1837 | 1773 |
| 1838 #define CLEAR_HISTOGRAM(name) isolate->heap_histograms()[name].clear(); | 1774 #define CLEAR_HISTOGRAM(name) isolate->heap_histograms()[name].clear(); |
| 1839 INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM) | 1775 INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM) |
| 1840 #undef CLEAR_HISTOGRAM | 1776 #undef CLEAR_HISTOGRAM |
| 1841 | 1777 |
| 1842 isolate->js_spill_information()->Clear(); | 1778 isolate->js_spill_information()->Clear(); |
| 1843 } | 1779 } |
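DEF_TYPE_NAME and CLEAR_HISTOGRAM above (like SET_NAME earlier) rely on the same preprocessor idiom: INSTANCE_TYPE_LIST is an "X macro", a list macro that applies a caller-supplied macro to every instance type, so the type list is written exactly once. A self-contained sketch of the idiom:

    #include <cstdio>

    // The list macro: applies V to every element.
    #define COLOR_LIST(V) V(RED) V(GREEN) V(BLUE)

    enum Color {
    #define DEFINE_ENUM(name) name,
      COLOR_LIST(DEFINE_ENUM)
    #undef DEFINE_ENUM
      COLOR_COUNT
    };

    const char* ColorName(Color c) {
      switch (c) {
    #define NAME_CASE(name) \
      case name:            \
        return #name;
        COLOR_LIST(NAME_CASE)
    #undef NAME_CASE
        default:
          return "unknown";
      }
    }

    int main() { printf("%s of %d colors\n", ColorName(GREEN), COLOR_COUNT); }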
| (...skipping 21 matching lines...) |
| 1865 | 1801 |
| 1866 static int CollectHistogramInfo(HeapObject* obj) { | 1802 static int CollectHistogramInfo(HeapObject* obj) { |
| 1867 Isolate* isolate = obj->GetIsolate(); | 1803 Isolate* isolate = obj->GetIsolate(); |
| 1868 InstanceType type = obj->map()->instance_type(); | 1804 InstanceType type = obj->map()->instance_type(); |
| 1869 DCHECK(0 <= type && type <= LAST_TYPE); | 1805 DCHECK(0 <= type && type <= LAST_TYPE); |
| 1870 DCHECK(isolate->heap_histograms()[type].name() != NULL); | 1806 DCHECK(isolate->heap_histograms()[type].name() != NULL); |
| 1871 isolate->heap_histograms()[type].increment_number(1); | 1807 isolate->heap_histograms()[type].increment_number(1); |
| 1872 isolate->heap_histograms()[type].increment_bytes(obj->Size()); | 1808 isolate->heap_histograms()[type].increment_bytes(obj->Size()); |
| 1873 | 1809 |
| 1874 if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) { | 1810 if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) { |
| 1875 JSObject::cast(obj)->IncrementSpillStatistics( | 1811 JSObject::cast(obj) |
| 1876 isolate->js_spill_information()); | 1812 ->IncrementSpillStatistics(isolate->js_spill_information()); |
| 1877 } | 1813 } |
| 1878 | 1814 |
| 1879 return obj->Size(); | 1815 return obj->Size(); |
| 1880 } | 1816 } |
| 1881 | 1817 |
| 1882 | 1818 |
| 1883 static void ReportHistogram(Isolate* isolate, bool print_spill) { | 1819 static void ReportHistogram(Isolate* isolate, bool print_spill) { |
| 1884 PrintF("\n Object Histogram:\n"); | 1820 PrintF("\n Object Histogram:\n"); |
| 1885 for (int i = 0; i <= LAST_TYPE; i++) { | 1821 for (int i = 0; i <= LAST_TYPE; i++) { |
| 1886 if (isolate->heap_histograms()[i].number() > 0) { | 1822 if (isolate->heap_histograms()[i].number() > 0) { |
| 1887 PrintF(" %-34s%10d (%10d bytes)\n", | 1823 PrintF(" %-34s%10d (%10d bytes)\n", |
| 1888 isolate->heap_histograms()[i].name(), | 1824 isolate->heap_histograms()[i].name(), |
| 1889 isolate->heap_histograms()[i].number(), | 1825 isolate->heap_histograms()[i].number(), |
| 1890 isolate->heap_histograms()[i].bytes()); | 1826 isolate->heap_histograms()[i].bytes()); |
| 1891 } | 1827 } |
| 1892 } | 1828 } |
| 1893 PrintF("\n"); | 1829 PrintF("\n"); |
| 1894 | 1830 |
| 1895 // Summarize string types. | 1831 // Summarize string types. |
| 1896 int string_number = 0; | 1832 int string_number = 0; |
| 1897 int string_bytes = 0; | 1833 int string_bytes = 0; |
| 1898 #define INCREMENT(type, size, name, camel_name) \ | 1834 #define INCREMENT(type, size, name, camel_name) \ |
| 1899 string_number += isolate->heap_histograms()[type].number(); \ | 1835 string_number += isolate->heap_histograms()[type].number(); \ |
| 1900 string_bytes += isolate->heap_histograms()[type].bytes(); | 1836 string_bytes += isolate->heap_histograms()[type].bytes(); |
| 1901 STRING_TYPE_LIST(INCREMENT) | 1837 STRING_TYPE_LIST(INCREMENT) |
| 1902 #undef INCREMENT | 1838 #undef INCREMENT |
| 1903 if (string_number > 0) { | 1839 if (string_number > 0) { |
| 1904 PrintF(" %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number, | 1840 PrintF(" %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number, |
| 1905 string_bytes); | 1841 string_bytes); |
| 1906 } | 1842 } |
| 1907 | 1843 |
| 1908 if (FLAG_collect_heap_spill_statistics && print_spill) { | 1844 if (FLAG_collect_heap_spill_statistics && print_spill) { |
| 1909 isolate->js_spill_information()->Print(); | 1845 isolate->js_spill_information()->Print(); |
| 1910 } | 1846 } |
| (...skipping 14 matching lines...) |
| 1925 // the new space before a collection to get a histogram of allocated objects. | 1861 // the new space before a collection to get a histogram of allocated objects. |
| 1926 // This only happens when --log-gc flag is set. | 1862 // This only happens when --log-gc flag is set. |
| 1927 void NewSpace::CollectStatistics() { | 1863 void NewSpace::CollectStatistics() { |
| 1928 ClearHistograms(); | 1864 ClearHistograms(); |
| 1929 SemiSpaceIterator it(this); | 1865 SemiSpaceIterator it(this); |
| 1930 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) | 1866 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) |
| 1931 RecordAllocation(obj); | 1867 RecordAllocation(obj); |
| 1932 } | 1868 } |
| 1933 | 1869 |
| 1934 | 1870 |
| 1935 static void DoReportStatistics(Isolate* isolate, | 1871 static void DoReportStatistics(Isolate* isolate, HistogramInfo* info, |
| 1936 HistogramInfo* info, const char* description) { | 1872 const char* description) { |
| 1937 LOG(isolate, HeapSampleBeginEvent("NewSpace", description)); | 1873 LOG(isolate, HeapSampleBeginEvent("NewSpace", description)); |
| 1938 // Lump all the string types together. | 1874 // Lump all the string types together. |
| 1939 int string_number = 0; | 1875 int string_number = 0; |
| 1940 int string_bytes = 0; | 1876 int string_bytes = 0; |
| 1941 #define INCREMENT(type, size, name, camel_name) \ | 1877 #define INCREMENT(type, size, name, camel_name) \ |
| 1942 string_number += info[type].number(); \ | 1878 string_number += info[type].number(); \ |
| 1943 string_bytes += info[type].bytes(); | 1879 string_bytes += info[type].bytes(); |
| 1944 STRING_TYPE_LIST(INCREMENT) | 1880 STRING_TYPE_LIST(INCREMENT) |
| 1945 #undef INCREMENT | 1881 #undef INCREMENT |
| 1946 if (string_number > 0) { | 1882 if (string_number > 0) { |
| 1947 LOG(isolate, | 1883 LOG(isolate, |
| 1948 HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes)); | 1884 HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes)); |
| 1949 } | 1885 } |
| 1950 | 1886 |
| 1951 // Then do the other types. | 1887 // Then do the other types. |
| 1952 for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) { | 1888 for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) { |
| 1953 if (info[i].number() > 0) { | 1889 if (info[i].number() > 0) { |
| 1954 LOG(isolate, | 1890 LOG(isolate, HeapSampleItemEvent(info[i].name(), info[i].number(), |
| 1955 HeapSampleItemEvent(info[i].name(), info[i].number(), | 1891 info[i].bytes())); |
| 1956 info[i].bytes())); | |
| 1957 } | 1892 } |
| 1958 } | 1893 } |
| 1959 LOG(isolate, HeapSampleEndEvent("NewSpace", description)); | 1894 LOG(isolate, HeapSampleEndEvent("NewSpace", description)); |
| 1960 } | 1895 } |
| 1961 | 1896 |
| 1962 | 1897 |
| 1963 void NewSpace::ReportStatistics() { | 1898 void NewSpace::ReportStatistics() { |
| 1964 #ifdef DEBUG | 1899 #ifdef DEBUG |
| 1965 if (FLAG_heap_stats) { | 1900 if (FLAG_heap_stats) { |
| 1966 float pct = static_cast<float>(Available()) / Capacity(); | 1901 float pct = static_cast<float>(Available()) / Capacity(); |
| 1967 PrintF(" capacity: %" V8_PTR_PREFIX "d" | 1902 PrintF(" capacity: %" V8_PTR_PREFIX |
| 1968 ", available: %" V8_PTR_PREFIX "d, %%%d\n", | 1903 "d" |
| 1969 Capacity(), Available(), static_cast<int>(pct*100)); | 1904 ", available: %" V8_PTR_PREFIX "d, %%%d\n", |
| 1905 Capacity(), Available(), static_cast<int>(pct * 100)); |
| 1970 PrintF("\n Object Histogram:\n"); | 1906 PrintF("\n Object Histogram:\n"); |
| 1971 for (int i = 0; i <= LAST_TYPE; i++) { | 1907 for (int i = 0; i <= LAST_TYPE; i++) { |
| 1972 if (allocated_histogram_[i].number() > 0) { | 1908 if (allocated_histogram_[i].number() > 0) { |
| 1973 PrintF(" %-34s%10d (%10d bytes)\n", | 1909 PrintF(" %-34s%10d (%10d bytes)\n", allocated_histogram_[i].name(), |
| 1974 allocated_histogram_[i].name(), | |
| 1975 allocated_histogram_[i].number(), | 1910 allocated_histogram_[i].number(), |
| 1976 allocated_histogram_[i].bytes()); | 1911 allocated_histogram_[i].bytes()); |
| 1977 } | 1912 } |
| 1978 } | 1913 } |
| 1979 PrintF("\n"); | 1914 PrintF("\n"); |
| 1980 } | 1915 } |
| 1981 #endif // DEBUG | 1916 #endif // DEBUG |
| 1982 | 1917 |
| 1983 if (FLAG_log_gc) { | 1918 if (FLAG_log_gc) { |
| 1984 Isolate* isolate = heap()->isolate(); | 1919 Isolate* isolate = heap()->isolate(); |
| (...skipping 162 matching lines...) |
| 2147 bool FreeListCategory::ContainsPageFreeListItemsInList(Page* p) { | 2082 bool FreeListCategory::ContainsPageFreeListItemsInList(Page* p) { |
| 2148 FreeListNode* node = top(); | 2083 FreeListNode* node = top(); |
| 2149 while (node != NULL) { | 2084 while (node != NULL) { |
| 2150 if (Page::FromAddress(node->address()) == p) return true; | 2085 if (Page::FromAddress(node->address()) == p) return true; |
| 2151 node = node->next(); | 2086 node = node->next(); |
| 2152 } | 2087 } |
| 2153 return false; | 2088 return false; |
| 2154 } | 2089 } |
| 2155 | 2090 |
| 2156 | 2091 |
| 2157 FreeListNode* FreeListCategory::PickNodeFromList(int *node_size) { | 2092 FreeListNode* FreeListCategory::PickNodeFromList(int* node_size) { |
| 2158 FreeListNode* node = top(); | 2093 FreeListNode* node = top(); |
| 2159 | 2094 |
| 2160 if (node == NULL) return NULL; | 2095 if (node == NULL) return NULL; |
| 2161 | 2096 |
| 2162 while (node != NULL && | 2097 while (node != NULL && |
| 2163 Page::FromAddress(node->address())->IsEvacuationCandidate()) { | 2098 Page::FromAddress(node->address())->IsEvacuationCandidate()) { |
| 2164 available_ -= reinterpret_cast<FreeSpace*>(node)->Size(); | 2099 available_ -= reinterpret_cast<FreeSpace*>(node)->Size(); |
| 2165 node = node->next(); | 2100 node = node->next(); |
| 2166 } | 2101 } |
| 2167 | 2102 |
| 2168 if (node != NULL) { | 2103 if (node != NULL) { |
| 2169 set_top(node->next()); | 2104 set_top(node->next()); |
| 2170 *node_size = reinterpret_cast<FreeSpace*>(node)->Size(); | 2105 *node_size = reinterpret_cast<FreeSpace*>(node)->Size(); |
| 2171 available_ -= *node_size; | 2106 available_ -= *node_size; |
| 2172 } else { | 2107 } else { |
| 2173 set_top(NULL); | 2108 set_top(NULL); |
| 2174 } | 2109 } |
| 2175 | 2110 |
| 2176 if (top() == NULL) { | 2111 if (top() == NULL) { |
| 2177 set_end(NULL); | 2112 set_end(NULL); |
| 2178 } | 2113 } |
| 2179 | 2114 |
| 2180 return node; | 2115 return node; |
| 2181 } | 2116 } |
| 2182 | 2117 |
| 2183 | 2118 |
| 2184 FreeListNode* FreeListCategory::PickNodeFromList(int size_in_bytes, | 2119 FreeListNode* FreeListCategory::PickNodeFromList(int size_in_bytes, |
| 2185 int *node_size) { | 2120 int* node_size) { |
| 2186 FreeListNode* node = PickNodeFromList(node_size); | 2121 FreeListNode* node = PickNodeFromList(node_size); |
| 2187 if (node != NULL && *node_size < size_in_bytes) { | 2122 if (node != NULL && *node_size < size_in_bytes) { |
| 2188 Free(node, *node_size); | 2123 Free(node, *node_size); |
| 2189 *node_size = 0; | 2124 *node_size = 0; |
| 2190 return NULL; | 2125 return NULL; |
| 2191 } | 2126 } |
| 2192 return node; | 2127 return node; |
| 2193 } | 2128 } |
| 2194 | 2129 |
| 2195 | 2130 |
| (...skipping 14 matching lines...) |
| 2210 if (*map_location == NULL) { | 2145 if (*map_location == NULL) { |
| 2211 *map_location = heap->free_space_map(); | 2146 *map_location = heap->free_space_map(); |
| 2212 } else { | 2147 } else { |
| 2213 DCHECK(*map_location == heap->free_space_map()); | 2148 DCHECK(*map_location == heap->free_space_map()); |
| 2214 } | 2149 } |
| 2215 n = n->next(); | 2150 n = n->next(); |
| 2216 } | 2151 } |
| 2217 } | 2152 } |
| 2218 | 2153 |
| 2219 | 2154 |
| 2220 FreeList::FreeList(PagedSpace* owner) | 2155 FreeList::FreeList(PagedSpace* owner) : owner_(owner), heap_(owner->heap()) { |
| 2221 : owner_(owner), heap_(owner->heap()) { | |
| 2222 Reset(); | 2156 Reset(); |
| 2223 } | 2157 } |
| 2224 | 2158 |
| 2225 | 2159 |
| 2226 intptr_t FreeList::Concatenate(FreeList* free_list) { | 2160 intptr_t FreeList::Concatenate(FreeList* free_list) { |
| 2227 intptr_t free_bytes = 0; | 2161 intptr_t free_bytes = 0; |
| 2228 free_bytes += small_list_.Concatenate(free_list->small_list()); | 2162 free_bytes += small_list_.Concatenate(free_list->small_list()); |
| 2229 free_bytes += medium_list_.Concatenate(free_list->medium_list()); | 2163 free_bytes += medium_list_.Concatenate(free_list->medium_list()); |
| 2230 free_bytes += large_list_.Concatenate(free_list->large_list()); | 2164 free_bytes += large_list_.Concatenate(free_list->large_list()); |
| 2231 free_bytes += huge_list_.Concatenate(free_list->huge_list()); | 2165 free_bytes += huge_list_.Concatenate(free_list->huge_list()); |
| (...skipping 75 matching lines...) |
| 2307 DCHECK(size_in_bytes <= *node_size); | 2241 DCHECK(size_in_bytes <= *node_size); |
| 2308 page = Page::FromAddress(node->address()); | 2242 page = Page::FromAddress(node->address()); |
| 2309 page->add_available_in_large_free_list(-(*node_size)); | 2243 page->add_available_in_large_free_list(-(*node_size)); |
| 2310 DCHECK(IsVeryLong() || available() == SumFreeLists()); | 2244 DCHECK(IsVeryLong() || available() == SumFreeLists()); |
| 2311 return node; | 2245 return node; |
| 2312 } | 2246 } |
| 2313 } | 2247 } |
| 2314 | 2248 |
| 2315 int huge_list_available = huge_list_.available(); | 2249 int huge_list_available = huge_list_.available(); |
| 2316 FreeListNode* top_node = huge_list_.top(); | 2250 FreeListNode* top_node = huge_list_.top(); |
| 2317 for (FreeListNode** cur = &top_node; | 2251 for (FreeListNode** cur = &top_node; *cur != NULL; |
| 2318 *cur != NULL; | |
| 2319 cur = (*cur)->next_address()) { | 2252 cur = (*cur)->next_address()) { |
| 2320 FreeListNode* cur_node = *cur; | 2253 FreeListNode* cur_node = *cur; |
| 2321 while (cur_node != NULL && | 2254 while (cur_node != NULL && |
| 2322 Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) { | 2255 Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) { |
| 2323 int size = reinterpret_cast<FreeSpace*>(cur_node)->Size(); | 2256 int size = reinterpret_cast<FreeSpace*>(cur_node)->Size(); |
| 2324 huge_list_available -= size; | 2257 huge_list_available -= size; |
| 2325 page = Page::FromAddress(cur_node->address()); | 2258 page = Page::FromAddress(cur_node->address()); |
| 2326 page->add_available_in_huge_free_list(-size); | 2259 page->add_available_in_huge_free_list(-size); |
| 2327 cur_node = cur_node->next(); | 2260 cur_node = cur_node->next(); |
| 2328 } | 2261 } |
| (...skipping 68 matching lines...) |
| 2397 DCHECK(IsAligned(size_in_bytes, kPointerSize)); | 2330 DCHECK(IsAligned(size_in_bytes, kPointerSize)); |
| 2398 // Don't free list allocate if there is linear space available. | 2331 // Don't free list allocate if there is linear space available. |
| 2399 DCHECK(owner_->limit() - owner_->top() < size_in_bytes); | 2332 DCHECK(owner_->limit() - owner_->top() < size_in_bytes); |
| 2400 | 2333 |
| 2401 int old_linear_size = static_cast<int>(owner_->limit() - owner_->top()); | 2334 int old_linear_size = static_cast<int>(owner_->limit() - owner_->top()); |
| 2402 // Mark the old linear allocation area with a free space map so it can be | 2335 // Mark the old linear allocation area with a free space map so it can be |
| 2403 // skipped when scanning the heap. This also puts it back in the free list | 2336 // skipped when scanning the heap. This also puts it back in the free list |
| 2404 // if it is big enough. | 2337 // if it is big enough. |
| 2405 owner_->Free(owner_->top(), old_linear_size); | 2338 owner_->Free(owner_->top(), old_linear_size); |
| 2406 | 2339 |
| 2407 owner_->heap()->incremental_marking()->OldSpaceStep( | 2340 owner_->heap()->incremental_marking()->OldSpaceStep(size_in_bytes - |
| 2408 size_in_bytes - old_linear_size); | 2341 old_linear_size); |
| 2409 | 2342 |
| 2410 int new_node_size = 0; | 2343 int new_node_size = 0; |
| 2411 FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size); | 2344 FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size); |
| 2412 if (new_node == NULL) { | 2345 if (new_node == NULL) { |
| 2413 owner_->SetTopAndLimit(NULL, NULL); | 2346 owner_->SetTopAndLimit(NULL, NULL); |
| 2414 return NULL; | 2347 return NULL; |
| 2415 } | 2348 } |
| 2416 | 2349 |
| 2417 int bytes_left = new_node_size - size_in_bytes; | 2350 int bytes_left = new_node_size - size_in_bytes; |
| 2418 DCHECK(bytes_left >= 0); | 2351 DCHECK(bytes_left >= 0); |
| (...skipping 46 matching lines...) |
| 2465 return new_node; | 2398 return new_node; |
| 2466 } | 2399 } |
| 2467 | 2400 |
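When FindNodeFor() hands back a node larger than the request, the elided lines above either keep the surplus (bytes_left) as the new linear allocation area or return it to the free list so nothing is wasted. A minimal free list showing the return-to-list variant of that split, under simplified assumptions:

    #include <cassert>
    #include <cstddef>

    struct FreeNode {
      size_t size;
      FreeNode* next;
    };

    struct SimpleFreeList {
      FreeNode* top = nullptr;
      void Free(FreeNode* node) {
        node->next = top;
        top = node;
      }

      // Returns a block of at least 'size' bytes; re-frees any surplus tail.
      char* Allocate(size_t size) {
        if (top == nullptr || top->size < size) return nullptr;
        FreeNode* node = top;
        top = node->next;
        size_t bytes_left = node->size - size;
        if (bytes_left >= sizeof(FreeNode)) {
          // Carve the tail off the end of the node and put it back.
          FreeNode* tail = reinterpret_cast<FreeNode*>(
              reinterpret_cast<char*>(node) + size);
          tail->size = bytes_left;
          Free(tail);
        }
        return reinterpret_cast<char*>(node);
      }
    };

    int main() {
      alignas(FreeNode) static char arena[256];
      FreeNode* whole = reinterpret_cast<FreeNode*>(arena);
      whole->size = 256;
      SimpleFreeList list;
      list.Free(whole);
      char* block = list.Allocate(64);
      assert(block == arena);
      assert(list.top == reinterpret_cast<FreeNode*>(arena + 64));
      assert(list.top->size == 192);
    }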
| 2468 | 2401 |
| 2469 intptr_t FreeList::EvictFreeListItems(Page* p) { | 2402 intptr_t FreeList::EvictFreeListItems(Page* p) { |
| 2470 intptr_t sum = huge_list_.EvictFreeListItemsInList(p); | 2403 intptr_t sum = huge_list_.EvictFreeListItemsInList(p); |
| 2471 p->set_available_in_huge_free_list(0); | 2404 p->set_available_in_huge_free_list(0); |
| 2472 | 2405 |
| 2473 if (sum < p->area_size()) { | 2406 if (sum < p->area_size()) { |
| 2474 sum += small_list_.EvictFreeListItemsInList(p) + | 2407 sum += small_list_.EvictFreeListItemsInList(p) + |
| 2475 medium_list_.EvictFreeListItemsInList(p) + | 2408 medium_list_.EvictFreeListItemsInList(p) + |
| 2476 large_list_.EvictFreeListItemsInList(p); | 2409 large_list_.EvictFreeListItemsInList(p); |
| 2477 p->set_available_in_small_free_list(0); | 2410 p->set_available_in_small_free_list(0); |
| 2478 p->set_available_in_medium_free_list(0); | 2411 p->set_available_in_medium_free_list(0); |
| 2479 p->set_available_in_large_free_list(0); | 2412 p->set_available_in_large_free_list(0); |
| 2480 } | 2413 } |
| 2481 | 2414 |
| 2482 return sum; | 2415 return sum; |
| 2483 } | 2416 } |
| 2484 | 2417 |
| 2485 | 2418 |
| 2486 bool FreeList::ContainsPageFreeListItems(Page* p) { | 2419 bool FreeList::ContainsPageFreeListItems(Page* p) { |
| (...skipping 35 matching lines...) |
| 2522 while (cur != NULL) { | 2455 while (cur != NULL) { |
| 2523 length++; | 2456 length++; |
| 2524 cur = cur->next(); | 2457 cur = cur->next(); |
| 2525 if (length == kVeryLongFreeList) return length; | 2458 if (length == kVeryLongFreeList) return length; |
| 2526 } | 2459 } |
| 2527 return length; | 2460 return length; |
| 2528 } | 2461 } |
| 2529 | 2462 |
| 2530 | 2463 |
| 2531 bool FreeList::IsVeryLong() { | 2464 bool FreeList::IsVeryLong() { |
| 2532 if (small_list_.FreeListLength() == kVeryLongFreeList) return true; | 2465 if (small_list_.FreeListLength() == kVeryLongFreeList) return true; |
| 2533 if (medium_list_.FreeListLength() == kVeryLongFreeList) return true; | 2466 if (medium_list_.FreeListLength() == kVeryLongFreeList) return true; |
| 2534 if (large_list_.FreeListLength() == kVeryLongFreeList) return true; | 2467 if (large_list_.FreeListLength() == kVeryLongFreeList) return true; |
| 2535 if (huge_list_.FreeListLength() == kVeryLongFreeList) return true; | 2468 if (huge_list_.FreeListLength() == kVeryLongFreeList) return true; |
| 2536 return false; | 2469 return false; |
| 2537 } | 2470 } |
| 2538 | 2471 |
| 2539 | 2472 |
| 2540 // This can take a very long time because it is linear in the number of entries | 2473 // This can take a very long time because it is linear in the number of entries |
| 2541 // on the free list, so it should not be called if FreeListLength returns | 2474 // on the free list, so it should not be called if FreeListLength returns |
| 2542 // kVeryLongFreeList. | 2475 // kVeryLongFreeList. |
| 2543 intptr_t FreeList::SumFreeLists() { | 2476 intptr_t FreeList::SumFreeLists() { |
| 2544 intptr_t sum = small_list_.SumFreeList(); | 2477 intptr_t sum = small_list_.SumFreeList(); |
| 2545 sum += medium_list_.SumFreeList(); | 2478 sum += medium_list_.SumFreeList(); |
| (...skipping 16 matching lines...) |
| 2562 // sweeper threads. | 2495 // sweeper threads. |
| 2563 unswept_free_bytes_ = 0; | 2496 unswept_free_bytes_ = 0; |
| 2564 | 2497 |
| 2565 // Clear the free list before a full GC---it will be rebuilt afterward. | 2498 // Clear the free list before a full GC---it will be rebuilt afterward. |
| 2566 free_list_.Reset(); | 2499 free_list_.Reset(); |
| 2567 } | 2500 } |
| 2568 | 2501 |
| 2569 | 2502 |
| 2570 intptr_t PagedSpace::SizeOfObjects() { | 2503 intptr_t PagedSpace::SizeOfObjects() { |
| 2571 DCHECK(heap()->mark_compact_collector()->sweeping_in_progress() || | 2504 DCHECK(heap()->mark_compact_collector()->sweeping_in_progress() || |
| 2572 (unswept_free_bytes_ == 0)); | 2505 (unswept_free_bytes_ == 0)); |
| 2573 return Size() - unswept_free_bytes_ - (limit() - top()); | 2506 return Size() - unswept_free_bytes_ - (limit() - top()); |
| 2574 } | 2507 } |
| 2575 | 2508 |
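SizeOfObjects() above is pure accounting: the committed size, minus free bytes still sitting on unswept pages, minus the untouched tail of the current linear allocation area. A worked example with made-up numbers:

    #include <cassert>
    #include <cstdint>

    int main() {
      intptr_t size = 4 * 1024 * 1024;           // committed bytes in the space
      intptr_t unswept_free_bytes = 256 * 1024;  // free memory awaiting sweep
      intptr_t top = 1024, limit = 1536;         // 512-byte unused linear tail
      intptr_t size_of_objects = size - unswept_free_bytes - (limit - top);
      assert(size_of_objects == 3931648);  // 4 MiB - 256 KiB - 512 bytes
      return 0;
    }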
| 2576 | 2509 |
| 2577 // After we have booted, we have created a map which represents free space | 2510 // After we have booted, we have created a map which represents free space |
| 2578 // on the heap. If there was already a free list then the elements on it | 2511 // on the heap. If there was already a free list then the elements on it |
| 2579 // were created with the wrong FreeSpaceMap (normally NULL), so we need to | 2512 // were created with the wrong FreeSpaceMap (normally NULL), so we need to |
| 2580 // fix them. | 2513 // fix them. |
| 2581 void PagedSpace::RepairFreeListsAfterBoot() { | 2514 void PagedSpace::RepairFreeListsAfterBoot() { free_list_.RepairLists(heap()); } |
| 2582 free_list_.RepairLists(heap()); | |
| 2583 } | |
| 2584 | 2515 |
| 2585 | 2516 |
| 2586 void PagedSpace::EvictEvacuationCandidatesFromFreeLists() { | 2517 void PagedSpace::EvictEvacuationCandidatesFromFreeLists() { |
| 2587 if (allocation_info_.top() >= allocation_info_.limit()) return; | 2518 if (allocation_info_.top() >= allocation_info_.limit()) return; |
| 2588 | 2519 |
| 2589 if (Page::FromAllocationTop(allocation_info_.top())-> | 2520 if (Page::FromAllocationTop(allocation_info_.top()) |
| 2590 IsEvacuationCandidate()) { | 2521 ->IsEvacuationCandidate()) { |
| 2591 // Create filler object to keep page iterable if it was iterable. | 2522 // Create filler object to keep page iterable if it was iterable. |
| 2592 int remaining = | 2523 int remaining = |
| 2593 static_cast<int>(allocation_info_.limit() - allocation_info_.top()); | 2524 static_cast<int>(allocation_info_.limit() - allocation_info_.top()); |
| 2594 heap()->CreateFillerObjectAt(allocation_info_.top(), remaining); | 2525 heap()->CreateFillerObjectAt(allocation_info_.top(), remaining); |
| 2595 | 2526 |
| 2596 allocation_info_.set_top(NULL); | 2527 allocation_info_.set_top(NULL); |
| 2597 allocation_info_.set_limit(NULL); | 2528 allocation_info_.set_limit(NULL); |
| 2598 } | 2529 } |
| 2599 } | 2530 } |
| 2600 | 2531 |
| (...skipping 21 matching lines...) |
| 2622 if (collector->sweeping_in_progress()) { | 2553 if (collector->sweeping_in_progress()) { |
| 2623 // First try to refill the free-list, concurrent sweeper threads | 2554 // First try to refill the free-list, concurrent sweeper threads |
| 2624 // may have freed some objects in the meantime. | 2555 // may have freed some objects in the meantime. |
| 2625 collector->RefillFreeList(this); | 2556 collector->RefillFreeList(this); |
| 2626 | 2557 |
| 2627 // Retry the free list allocation. | 2558 // Retry the free list allocation. |
| 2628 HeapObject* object = free_list_.Allocate(size_in_bytes); | 2559 HeapObject* object = free_list_.Allocate(size_in_bytes); |
| 2629 if (object != NULL) return object; | 2560 if (object != NULL) return object; |
| 2630 | 2561 |
| 2631 // If sweeping is still in progress try to sweep pages on the main thread. | 2562 // If sweeping is still in progress try to sweep pages on the main thread. |
| 2632 int free_chunk = | 2563 int free_chunk = collector->SweepInParallel(this, size_in_bytes); |
| 2633 collector->SweepInParallel(this, size_in_bytes); | |
| 2634 collector->RefillFreeList(this); | 2564 collector->RefillFreeList(this); |
| 2635 if (free_chunk >= size_in_bytes) { | 2565 if (free_chunk >= size_in_bytes) { |
| 2636 HeapObject* object = free_list_.Allocate(size_in_bytes); | 2566 HeapObject* object = free_list_.Allocate(size_in_bytes); |
| 2637 // We should be able to allocate an object here since we just freed that | 2567 // We should be able to allocate an object here since we just freed that |
| 2638 // much memory. | 2568 // much memory. |
| 2639 DCHECK(object != NULL); | 2569 DCHECK(object != NULL); |
| 2640 if (object != NULL) return object; | 2570 if (object != NULL) return object; |
| 2641 } | 2571 } |
| 2642 } | 2572 } |
| 2643 | 2573 |
| 2644 // Free list allocation failed and there is no next page. Fail if we have | 2574 // Free list allocation failed and there is no next page. Fail if we have |
| 2645 // hit the old generation size limit that should cause a garbage | 2575 // hit the old generation size limit that should cause a garbage |
| 2646 // collection. | 2576 // collection. |
| 2647 if (!heap()->always_allocate() | 2577 if (!heap()->always_allocate() && |
| 2648 && heap()->OldGenerationAllocationLimitReached()) { | 2578 heap()->OldGenerationAllocationLimitReached()) { |
| 2649 // If sweeper threads are active, wait for them at that point and steal | 2579 // If sweeper threads are active, wait for them at that point and steal |
| 2650 // elements from their free-lists. | 2580 // elements from their free-lists. |
| 2651 HeapObject* object = WaitForSweeperThreadsAndRetryAllocation(size_in_bytes); | 2581 HeapObject* object = WaitForSweeperThreadsAndRetryAllocation(size_in_bytes); |
| 2652 if (object != NULL) return object; | 2582 if (object != NULL) return object; |
| 2653 } | 2583 } |
| 2654 | 2584 |
| 2655 // Try to expand the space and allocate in the new next page. | 2585 // Try to expand the space and allocate in the new next page. |
| 2656 if (Expand()) { | 2586 if (Expand()) { |
| 2657 DCHECK(CountTotalPages() > 1 || size_in_bytes <= free_list_.available()); | 2587 DCHECK(CountTotalPages() > 1 || size_in_bytes <= free_list_.available()); |
| 2658 return free_list_.Allocate(size_in_bytes); | 2588 return free_list_.Allocate(size_in_bytes); |
| 2659 } | 2589 } |
| 2660 | 2590 |
| 2661 // If sweeper threads are active, wait for them at that point and steal | 2591 // If sweeper threads are active, wait for them at that point and steal |
| 2662 // elements from their free-lists. Allocation may still fail there, which | 2592 // elements from their free-lists. Allocation may still fail there, which |
| 2663 // would indicate that there is not enough memory for the given allocation. | 2593 // would indicate that there is not enough memory for the given allocation. |
| 2664 return WaitForSweeperThreadsAndRetryAllocation(size_in_bytes); | 2594 return WaitForSweeperThreadsAndRetryAllocation(size_in_bytes); |
| 2665 } | 2595 } |
| 2666 | 2596 |
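SlowAllocateRaw() above is a ladder of increasingly expensive retries: refill the free list from the concurrent sweepers, sweep pages on the main thread, wait for the sweepers when the old-generation limit has been hit, expand the space, and wait one final time before reporting failure. A control-flow sketch with trivial stand-in helpers (the order, not the mechanics, is the point):

    #include <cstddef>

    struct SketchSpace {
      bool sweeping_in_progress = true;
      bool allocation_limit_reached = false;

      // Trivial stand-ins for the collector and free-list calls above.
      void* TryFreeList(size_t) { return nullptr; }
      void RefillFreeList() {}
      size_t SweepInParallel(size_t) { return 0; }
      void* WaitForSweepersAndRetry(size_t) { return nullptr; }
      bool Expand() { return false; }

      void* SlowAllocate(size_t size) {
        if (sweeping_in_progress) {
          RefillFreeList();  // concurrent sweepers may have freed memory
          if (void* obj = TryFreeList(size)) return obj;
          if (SweepInParallel(size) >= size) {  // help out, then retry
            RefillFreeList();
            if (void* obj = TryFreeList(size)) return obj;
          }
        }
        // Prefer finishing the sweep over growing past the GC trigger.
        if (allocation_limit_reached) {
          if (void* obj = WaitForSweepersAndRetry(size)) return obj;
        }
        if (Expand()) return TryFreeList(size);
        return WaitForSweepersAndRetry(size);  // may still fail: a real OOM
      }
    };

    int main() {
      SketchSpace space;
      return space.SlowAllocate(64) == nullptr ? 0 : 1;
    }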
| 2667 | 2597 |
| 2668 #ifdef DEBUG | 2598 #ifdef DEBUG |
| 2669 void PagedSpace::ReportCodeStatistics(Isolate* isolate) { | 2599 void PagedSpace::ReportCodeStatistics(Isolate* isolate) { |
| 2670 CommentStatistic* comments_statistics = | 2600 CommentStatistic* comments_statistics = |
| 2671 isolate->paged_space_comments_statistics(); | 2601 isolate->paged_space_comments_statistics(); |
| 2672 ReportCodeKindStatistics(isolate->code_kind_statistics()); | 2602 ReportCodeKindStatistics(isolate->code_kind_statistics()); |
| 2673 PrintF("Code comment statistics (\" [ comment-txt : size/ " | 2603 PrintF( |
| 2674 "count (average)\"):\n"); | 2604 "Code comment statistics (\" [ comment-txt : size/ " |
| 2605 "count (average)\"):\n"); |
| 2675 for (int i = 0; i <= CommentStatistic::kMaxComments; i++) { | 2606 for (int i = 0; i <= CommentStatistic::kMaxComments; i++) { |
| 2676 const CommentStatistic& cs = comments_statistics[i]; | 2607 const CommentStatistic& cs = comments_statistics[i]; |
| 2677 if (cs.size > 0) { | 2608 if (cs.size > 0) { |
| 2678 PrintF(" %-30s: %10d/%6d (%d)\n", cs.comment, cs.size, cs.count, | 2609 PrintF(" %-30s: %10d/%6d (%d)\n", cs.comment, cs.size, cs.count, |
| 2679 cs.size/cs.count); | 2610 cs.size / cs.count); |
| 2680 } | 2611 } |
| 2681 } | 2612 } |
| 2682 PrintF("\n"); | 2613 PrintF("\n"); |
| 2683 } | 2614 } |
| 2684 | 2615 |
| 2685 | 2616 |
| 2686 void PagedSpace::ResetCodeStatistics(Isolate* isolate) { | 2617 void PagedSpace::ResetCodeStatistics(Isolate* isolate) { |
| 2687 CommentStatistic* comments_statistics = | 2618 CommentStatistic* comments_statistics = |
| 2688 isolate->paged_space_comments_statistics(); | 2619 isolate->paged_space_comments_statistics(); |
| 2689 ClearCodeKindStatistics(isolate->code_kind_statistics()); | 2620 ClearCodeKindStatistics(isolate->code_kind_statistics()); |
| (...skipping 95 matching lines...) |
| 2785 prev_pc <= code->instruction_end()); | 2716 prev_pc <= code->instruction_end()); |
| 2786 delta += static_cast<int>(code->instruction_end() - prev_pc); | 2717 delta += static_cast<int>(code->instruction_end() - prev_pc); |
| 2787 EnterComment(isolate, "NoComment", delta); | 2718 EnterComment(isolate, "NoComment", delta); |
| 2788 } | 2719 } |
| 2789 } | 2720 } |
| 2790 } | 2721 } |
| 2791 | 2722 |
| 2792 | 2723 |
| 2793 void PagedSpace::ReportStatistics() { | 2724 void PagedSpace::ReportStatistics() { |
| 2794 int pct = static_cast<int>(Available() * 100 / Capacity()); | 2725 int pct = static_cast<int>(Available() * 100 / Capacity()); |
| 2795 PrintF(" capacity: %" V8_PTR_PREFIX "d" | 2726 PrintF(" capacity: %" V8_PTR_PREFIX |
| 2796 ", waste: %" V8_PTR_PREFIX "d" | 2727 "d" |
| 2797 ", available: %" V8_PTR_PREFIX "d, %%%d\n", | 2728 ", waste: %" V8_PTR_PREFIX |
| 2729 "d" |
| 2730 ", available: %" V8_PTR_PREFIX "d, %%%d\n", |
| 2798 Capacity(), Waste(), Available(), pct); | 2731 Capacity(), Waste(), Available(), pct); |
| 2799 | 2732 |
| 2800 if (!swept_precisely_) return; | 2733 if (!swept_precisely_) return; |
| 2801 ClearHistograms(heap()->isolate()); | 2734 ClearHistograms(heap()->isolate()); |
| 2802 HeapObjectIterator obj_it(this); | 2735 HeapObjectIterator obj_it(this); |
| 2803 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) | 2736 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) |
| 2804 CollectHistogramInfo(obj); | 2737 CollectHistogramInfo(obj); |
| 2805 ReportHistogram(heap()->isolate(), true); | 2738 ReportHistogram(heap()->isolate(), true); |
| 2806 } | 2739 } |
| 2807 #endif | 2740 #endif |
| 2808 | 2741 |
| 2809 | 2742 |
| 2810 // ----------------------------------------------------------------------------- | 2743 // ----------------------------------------------------------------------------- |
| 2811 // MapSpace implementation | 2744 // MapSpace implementation |
| 2812 // TODO(mvstanton): this is weird...the compiler can't make a vtable unless | 2745 // TODO(mvstanton): this is weird...the compiler can't make a vtable unless |
| 2813 // there is at least one non-inlined virtual function. I would prefer to hide | 2746 // there is at least one non-inlined virtual function. I would prefer to hide |
| 2814 // the VerifyObject definition behind VERIFY_HEAP. | 2747 // the VerifyObject definition behind VERIFY_HEAP. |
| 2815 | 2748 |
| 2816 void MapSpace::VerifyObject(HeapObject* object) { | 2749 void MapSpace::VerifyObject(HeapObject* object) { CHECK(object->IsMap()); } |
| 2817 CHECK(object->IsMap()); | |
| 2818 } | |
| 2819 | 2750 |
| 2820 | 2751 |
| 2821 // ----------------------------------------------------------------------------- | 2752 // ----------------------------------------------------------------------------- |
| 2822 // CellSpace and PropertyCellSpace implementation | 2753 // CellSpace and PropertyCellSpace implementation |
| 2823 // TODO(mvstanton): this is weird...the compiler can't make a vtable unless | 2754 // TODO(mvstanton): this is weird...the compiler can't make a vtable unless |
| 2824 // there is at least one non-inlined virtual function. I would prefer to hide | 2755 // there is at least one non-inlined virtual function. I would prefer to hide |
| 2825 // the VerifyObject definition behind VERIFY_HEAP. | 2756 // the VerifyObject definition behind VERIFY_HEAP. |
| 2826 | 2757 |
| 2827 void CellSpace::VerifyObject(HeapObject* object) { | 2758 void CellSpace::VerifyObject(HeapObject* object) { CHECK(object->IsCell()); } |
| 2828 CHECK(object->IsCell()); | |
| 2829 } | |
| 2830 | 2759 |
| 2831 | 2760 |
| 2832 void PropertyCellSpace::VerifyObject(HeapObject* object) { | 2761 void PropertyCellSpace::VerifyObject(HeapObject* object) { |
| 2833 CHECK(object->IsPropertyCell()); | 2762 CHECK(object->IsPropertyCell()); |
| 2834 } | 2763 } |
| 2835 | 2764 |
| 2836 | 2765 |
| 2837 // ----------------------------------------------------------------------------- | 2766 // ----------------------------------------------------------------------------- |
| 2838 // LargeObjectIterator | 2767 // LargeObjectIterator |
| 2839 | 2768 |
| (...skipping 14 matching lines...) |
| 2854 if (current_ == NULL) return NULL; | 2783 if (current_ == NULL) return NULL; |
| 2855 | 2784 |
| 2856 HeapObject* object = current_->GetObject(); | 2785 HeapObject* object = current_->GetObject(); |
| 2857 current_ = current_->next_page(); | 2786 current_ = current_->next_page(); |
| 2858 return object; | 2787 return object; |
| 2859 } | 2788 } |
| 2860 | 2789 |
| 2861 | 2790 |
| 2862 // ----------------------------------------------------------------------------- | 2791 // ----------------------------------------------------------------------------- |
| 2863 // LargeObjectSpace | 2792 // LargeObjectSpace |
| 2864 static bool ComparePointers(void* key1, void* key2) { | 2793 static bool ComparePointers(void* key1, void* key2) { return key1 == key2; } |
| 2865 return key1 == key2; | |
| 2866 } | |
| 2867 | 2794 |
| 2868 | 2795 |
| 2869 LargeObjectSpace::LargeObjectSpace(Heap* heap, | 2796 LargeObjectSpace::LargeObjectSpace(Heap* heap, intptr_t max_capacity, |
| 2870 intptr_t max_capacity, | |
| 2871 AllocationSpace id) | 2797 AllocationSpace id) |
| 2872 : Space(heap, id, NOT_EXECUTABLE), // Managed on a per-allocation basis | 2798 : Space(heap, id, NOT_EXECUTABLE), // Managed on a per-allocation basis |
| 2873 max_capacity_(max_capacity), | 2799 max_capacity_(max_capacity), |
| 2874 first_page_(NULL), | 2800 first_page_(NULL), |
| 2875 size_(0), | 2801 size_(0), |
| 2876 page_count_(0), | 2802 page_count_(0), |
| 2877 objects_size_(0), | 2803 objects_size_(0), |
| 2878 chunk_map_(ComparePointers, 1024) {} | 2804 chunk_map_(ComparePointers, 1024) {} |
| 2879 | 2805 |
| 2880 | 2806 |
| (...skipping 29 matching lines...) |
| 2910 // If so, fail the allocation. | 2836 // If so, fail the allocation. |
| 2911 if (!heap()->always_allocate() && | 2837 if (!heap()->always_allocate() && |
| 2912 heap()->OldGenerationAllocationLimitReached()) { | 2838 heap()->OldGenerationAllocationLimitReached()) { |
| 2913 return AllocationResult::Retry(identity()); | 2839 return AllocationResult::Retry(identity()); |
| 2914 } | 2840 } |
| 2915 | 2841 |
| 2916 if (Size() + object_size > max_capacity_) { | 2842 if (Size() + object_size > max_capacity_) { |
| 2917 return AllocationResult::Retry(identity()); | 2843 return AllocationResult::Retry(identity()); |
| 2918 } | 2844 } |
| 2919 | 2845 |
| 2920 LargePage* page = heap()->isolate()->memory_allocator()-> | 2846 LargePage* page = heap()->isolate()->memory_allocator()->AllocateLargePage( |
| 2921 AllocateLargePage(object_size, this, executable); | 2847 object_size, this, executable); |
| 2922 if (page == NULL) return AllocationResult::Retry(identity()); | 2848 if (page == NULL) return AllocationResult::Retry(identity()); |
| 2923 DCHECK(page->area_size() >= object_size); | 2849 DCHECK(page->area_size() >= object_size); |
| 2924 | 2850 |
| 2925 size_ += static_cast<int>(page->size()); | 2851 size_ += static_cast<int>(page->size()); |
| 2926 objects_size_ += object_size; | 2852 objects_size_ += object_size; |
| 2927 page_count_++; | 2853 page_count_++; |
| 2928 page->set_next_page(first_page_); | 2854 page->set_next_page(first_page_); |
| 2929 first_page_ = page; | 2855 first_page_ = page; |
| 2930 | 2856 |
| 2931 if (size_ > maximum_committed_) { | 2857 if (size_ > maximum_committed_) { |
| 2932 maximum_committed_ = size_; | 2858 maximum_committed_ = size_; |
| 2933 } | 2859 } |
| 2934 | 2860 |
| 2935 // Register all MemoryChunk::kAlignment-aligned chunks covered by | 2861 // Register all MemoryChunk::kAlignment-aligned chunks covered by |
| 2936 // this large page in the chunk map. | 2862 // this large page in the chunk map. |
| 2937 uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment; | 2863 uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment; |
| 2938 uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment; | 2864 uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment; |
| 2939 for (uintptr_t key = base; key <= limit; key++) { | 2865 for (uintptr_t key = base; key <= limit; key++) { |
| 2940 HashMap::Entry* entry = chunk_map_.Lookup(reinterpret_cast<void*>(key), | 2866 HashMap::Entry* entry = chunk_map_.Lookup(reinterpret_cast<void*>(key), |
| 2941 static_cast<uint32_t>(key), | 2867 static_cast<uint32_t>(key), true); |
| 2942 true); | |
| 2943 DCHECK(entry != NULL); | 2868 DCHECK(entry != NULL); |
| 2944 entry->value = page; | 2869 entry->value = page; |
| 2945 } | 2870 } |
| 2946 | 2871 |
| 2947 HeapObject* object = page->GetObject(); | 2872 HeapObject* object = page->GetObject(); |
| 2948 | 2873 |
| 2949 if (Heap::ShouldZapGarbage()) { | 2874 if (Heap::ShouldZapGarbage()) { |
| 2950 // Make the object consistent so the heap can be verified in OldSpaceStep. | 2875 // Make the object consistent so the heap can be verified in OldSpaceStep. |
| 2951 // We only need to do this in debug builds or if verify_heap is on. | 2876 // We only need to do this in debug builds or if verify_heap is on. |
| 2952 reinterpret_cast<Object**>(object->address())[0] = | 2877 reinterpret_cast<Object**>(object->address())[0] = |
| (...skipping 24 matching lines...) |
| 2977 if (page != NULL) { | 2902 if (page != NULL) { |
| 2978 return page->GetObject(); | 2903 return page->GetObject(); |
| 2979 } | 2904 } |
| 2980 return Smi::FromInt(0); // Signaling not found. | 2905 return Smi::FromInt(0); // Signaling not found. |
| 2981 } | 2906 } |
| 2982 | 2907 |
| 2983 | 2908 |
| 2984 LargePage* LargeObjectSpace::FindPage(Address a) { | 2909 LargePage* LargeObjectSpace::FindPage(Address a) { |
| 2985 uintptr_t key = reinterpret_cast<uintptr_t>(a) / MemoryChunk::kAlignment; | 2910 uintptr_t key = reinterpret_cast<uintptr_t>(a) / MemoryChunk::kAlignment; |
| 2986 HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key), | 2911 HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key), |
| 2987 static_cast<uint32_t>(key), | 2912 static_cast<uint32_t>(key), false); |
| 2988 false); | |
| 2989 if (e != NULL) { | 2913 if (e != NULL) { |
| 2990 DCHECK(e->value != NULL); | 2914 DCHECK(e->value != NULL); |
| 2991 LargePage* page = reinterpret_cast<LargePage*>(e->value); | 2915 LargePage* page = reinterpret_cast<LargePage*>(e->value); |
| 2992 DCHECK(page->is_valid()); | 2916 DCHECK(page->is_valid()); |
| 2993 if (page->Contains(a)) { | 2917 if (page->Contains(a)) { |
| 2994 return page; | 2918 return page; |
| 2995 } | 2919 } |
| 2996 } | 2920 } |
| 2997 return NULL; | 2921 return NULL; |
| 2998 } | 2922 } |
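Taken together, the registration loop in AllocateRaw() and FindPage() form a constant-time map from any interior address to its covering large page: every kAlignment-sized slice of the page gets its own key, so an address anywhere inside the page hashes to a registered entry. A self-contained sketch of the key arithmetic (the alignment and addresses are assumed values, for illustration only):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uintptr_t kAlignment = uintptr_t(1) << 20;  // assume 1 MiB chunks
      const uintptr_t page_base = 0x40000000;           // hypothetical page
      const uintptr_t page_size = 3 * kAlignment;       // overlaps 3 chunks

      // Registration: one key per aligned chunk the page overlaps.
      uintptr_t base = page_base / kAlignment;                // 0x400
      uintptr_t limit = base + (page_size - 1) / kAlignment;  // 0x402

      // Lookup: an interior address far past the page header still hits,
      // because its key falls inside [base, limit].
      uintptr_t a = page_base + 2 * kAlignment + 0x1234;
      uintptr_t key = a / kAlignment;                         // 0x402
      printf("covered: %s\n", (key >= base && key <= limit) ? "yes" : "no");
      return 0;
    }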
| (...skipping 18 matching lines...) |
| 3017 LargePage* page = current; | 2941 LargePage* page = current; |
| 3018 // Cut the chunk out from the chunk list. | 2942 // Cut the chunk out from the chunk list. |
| 3019 current = current->next_page(); | 2943 current = current->next_page(); |
| 3020 if (previous == NULL) { | 2944 if (previous == NULL) { |
| 3021 first_page_ = current; | 2945 first_page_ = current; |
| 3022 } else { | 2946 } else { |
| 3023 previous->set_next_page(current); | 2947 previous->set_next_page(current); |
| 3024 } | 2948 } |
| 3025 | 2949 |
| 3026 // Free the chunk. | 2950 // Free the chunk. |
| 3027 heap()->mark_compact_collector()->ReportDeleteIfNeeded( | 2951 heap()->mark_compact_collector()->ReportDeleteIfNeeded(object, |
| 3028 object, heap()->isolate()); | 2952 heap()->isolate()); |
| 3029 size_ -= static_cast<int>(page->size()); | 2953 size_ -= static_cast<int>(page->size()); |
| 3030 objects_size_ -= object->Size(); | 2954 objects_size_ -= object->Size(); |
| 3031 page_count_--; | 2955 page_count_--; |
| 3032 | 2956 |
| 3033 // Remove entries belonging to this page. | 2957 // Remove entries belonging to this page. |
| 3034 // Use variable alignment to help pass length check (<= 80 characters) | 2958 // Use variable alignment to help pass length check (<= 80 characters) |
| 3035 // of single line in tools/presubmit.py. | 2959 // of single line in tools/presubmit.py. |
| 3036 const intptr_t alignment = MemoryChunk::kAlignment; | 2960 const intptr_t alignment = MemoryChunk::kAlignment; |
| 3037 uintptr_t base = reinterpret_cast<uintptr_t>(page)/alignment; | 2961 uintptr_t base = reinterpret_cast<uintptr_t>(page) / alignment; |
| 3038 uintptr_t limit = base + (page->size()-1)/alignment; | 2962 uintptr_t limit = base + (page->size() - 1) / alignment; |
| 3039 for (uintptr_t key = base; key <= limit; key++) { | 2963 for (uintptr_t key = base; key <= limit; key++) { |
| 3040 chunk_map_.Remove(reinterpret_cast<void*>(key), | 2964 chunk_map_.Remove(reinterpret_cast<void*>(key), |
| 3041 static_cast<uint32_t>(key)); | 2965 static_cast<uint32_t>(key)); |
| 3042 } | 2966 } |
| 3043 | 2967 |
| 3044 if (is_pointer_object) { | 2968 if (is_pointer_object) { |
| 3045 heap()->QueueMemoryChunkForFree(page); | 2969 heap()->QueueMemoryChunkForFree(page); |
| 3046 } else { | 2970 } else { |
| 3047 heap()->isolate()->memory_allocator()->Free(page); | 2971 heap()->isolate()->memory_allocator()->Free(page); |
| 3048 } | 2972 } |
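Worth noting about the branch just above: pages that held pointer objects go through heap()->QueueMemoryChunkForFree() rather than being unmapped immediately, while data-only pages are released on the spot. A plausible reading (an assumption; the rationale is not stated in this hunk) is that the store buffer may still hold entries pointing into a pointer-bearing page, so the actual free is deferred until those stale entries can be filtered out.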
| (...skipping 12 matching lines...) |
| 3061 SLOW_DCHECK(!owned || FindObject(address)->IsHeapObject()); | 2985 SLOW_DCHECK(!owned || FindObject(address)->IsHeapObject()); |
| 3062 | 2986 |
| 3063 return owned; | 2987 return owned; |
| 3064 } | 2988 } |
| 3065 | 2989 |
| 3066 | 2990 |
| 3067 #ifdef VERIFY_HEAP | 2991 #ifdef VERIFY_HEAP |
| 3068 // We do not assume that the large object iterator works, because it depends | 2992 // We do not assume that the large object iterator works, because it depends |
| 3069 // on the invariants we are checking during verification. | 2993 // on the invariants we are checking during verification. |
| 3070 void LargeObjectSpace::Verify() { | 2994 void LargeObjectSpace::Verify() { |
| 3071 for (LargePage* chunk = first_page_; | 2995 for (LargePage* chunk = first_page_; chunk != NULL; |
| 3072 chunk != NULL; | |
| 3073 chunk = chunk->next_page()) { | 2996 chunk = chunk->next_page()) { |
| 3074 // Each chunk contains an object that starts at the large object page's | 2997 // Each chunk contains an object that starts at the large object page's |
| 3075 // object area start. | 2998 // object area start. |
| 3076 HeapObject* object = chunk->GetObject(); | 2999 HeapObject* object = chunk->GetObject(); |
| 3077 Page* page = Page::FromAddress(object->address()); | 3000 Page* page = Page::FromAddress(object->address()); |
| 3078 CHECK(object->address() == page->area_start()); | 3001 CHECK(object->address() == page->area_start()); |
| 3079 | 3002 |
| 3080 // The first word should be a map, and we expect all map pointers to be | 3003 // The first word should be a map, and we expect all map pointers to be |
| 3081 // in map space. | 3004 // in map space. |
| 3082 Map* map = object->map(); | 3005 Map* map = object->map(); |
| 3083 CHECK(map->IsMap()); | 3006 CHECK(map->IsMap()); |
| 3084 CHECK(heap()->map_space()->Contains(map)); | 3007 CHECK(heap()->map_space()->Contains(map)); |
| 3085 | 3008 |
| 3086 // We have only code, sequential strings, external strings | 3009 // We have only code, sequential strings, external strings |
| 3087 // (sequential strings that have been morphed into external | 3010 // (sequential strings that have been morphed into external |
| 3088 // strings), fixed arrays, byte arrays, and constant pool arrays in the | 3011 // strings), fixed arrays, byte arrays, and constant pool arrays in the |
| 3089 // large object space. | 3012 // large object space. |
| 3090 CHECK(object->IsCode() || object->IsSeqString() || | 3013 CHECK(object->IsCode() || object->IsSeqString() || |
| 3091 object->IsExternalString() || object->IsFixedArray() || | 3014 object->IsExternalString() || object->IsFixedArray() || |
| 3092 object->IsFixedDoubleArray() || object->IsByteArray() || | 3015 object->IsFixedDoubleArray() || object->IsByteArray() || |
| 3093 object->IsConstantPoolArray()); | 3016 object->IsConstantPoolArray()); |
| 3094 | 3017 |
| 3095 // The object itself should look OK. | 3018 // The object itself should look OK. |
| 3096 object->ObjectVerify(); | 3019 object->ObjectVerify(); |
| 3097 | 3020 |
| 3098 // Byte arrays and strings don't have interior pointers. | 3021 // Byte arrays and strings don't have interior pointers. |
| 3099 if (object->IsCode()) { | 3022 if (object->IsCode()) { |
| 3100 VerifyPointersVisitor code_visitor; | 3023 VerifyPointersVisitor code_visitor; |
| 3101 object->IterateBody(map->instance_type(), | 3024 object->IterateBody(map->instance_type(), object->Size(), &code_visitor); |
| 3102 object->Size(), | |
| 3103 &code_visitor); | |
| 3104 } else if (object->IsFixedArray()) { | 3025 } else if (object->IsFixedArray()) { |
| 3105 FixedArray* array = FixedArray::cast(object); | 3026 FixedArray* array = FixedArray::cast(object); |
| 3106 for (int j = 0; j < array->length(); j++) { | 3027 for (int j = 0; j < array->length(); j++) { |
| 3107 Object* element = array->get(j); | 3028 Object* element = array->get(j); |
| 3108 if (element->IsHeapObject()) { | 3029 if (element->IsHeapObject()) { |
| 3109 HeapObject* element_object = HeapObject::cast(element); | 3030 HeapObject* element_object = HeapObject::cast(element); |
| 3110 CHECK(heap()->Contains(element_object)); | 3031 CHECK(heap()->Contains(element_object)); |
| 3111 CHECK(element_object->map()->IsMap()); | 3032 CHECK(element_object->map()->IsMap()); |
| 3112 } | 3033 } |
| 3113 } | 3034 } |
| (...skipping 16 matching lines...) |
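The code branch of Verify() above hands every tagged slot that IterateBody() produces to a VerifyPointersVisitor. As a sketch of what such a visitor looks like (ExampleVerifyVisitor is hypothetical, modeled on the ObjectVisitor interface this era of the codebase uses):

    class ExampleVerifyVisitor : public ObjectVisitor {
     public:
      // IterateBody() calls this for each contiguous range of tagged slots.
      void VisitPointers(Object** start, Object** end) {
        for (Object** current = start; current < end; current++) {
          if ((*current)->IsHeapObject()) {
            // Every heap pointer must resolve to an object with a valid map.
            CHECK(HeapObject::cast(*current)->map()->IsMap());
          }
        }
      }
    };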
| 3130 void LargeObjectSpace::ReportStatistics() { | 3051 void LargeObjectSpace::ReportStatistics() { |
| 3131 PrintF(" size: %" V8_PTR_PREFIX "d\n", size_); | 3052 PrintF(" size: %" V8_PTR_PREFIX "d\n", size_); |
| 3132 int num_objects = 0; | 3053 int num_objects = 0; |
| 3133 ClearHistograms(heap()->isolate()); | 3054 ClearHistograms(heap()->isolate()); |
| 3134 LargeObjectIterator it(this); | 3055 LargeObjectIterator it(this); |
| 3135 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { | 3056 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { |
| 3136 num_objects++; | 3057 num_objects++; |
| 3137 CollectHistogramInfo(obj); | 3058 CollectHistogramInfo(obj); |
| 3138 } | 3059 } |
| 3139 | 3060 |
| 3140 PrintF(" number of objects %d, " | 3061 PrintF( |
| 3141 "size of objects %" V8_PTR_PREFIX "d\n", num_objects, objects_size_); | 3062 " number of objects %d, " |
| 3063 "size of objects %" V8_PTR_PREFIX "d\n", |
| 3064 num_objects, objects_size_); |
| 3142 if (num_objects > 0) ReportHistogram(heap()->isolate(), false); | 3065 if (num_objects > 0) ReportHistogram(heap()->isolate(), false); |
| 3143 } | 3066 } |
| 3144 | 3067 |
| 3145 | 3068 |
| 3146 void LargeObjectSpace::CollectCodeStatistics() { | 3069 void LargeObjectSpace::CollectCodeStatistics() { |
| 3147 Isolate* isolate = heap()->isolate(); | 3070 Isolate* isolate = heap()->isolate(); |
| 3148 LargeObjectIterator obj_it(this); | 3071 LargeObjectIterator obj_it(this); |
| 3149 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) { | 3072 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) { |
| 3150 if (obj->IsCode()) { | 3073 if (obj->IsCode()) { |
| 3151 Code* code = Code::cast(obj); | 3074 Code* code = Code::cast(obj); |
| 3152 isolate->code_kind_statistics()[code->kind()] += code->Size(); | 3075 isolate->code_kind_statistics()[code->kind()] += code->Size(); |
| 3153 } | 3076 } |
| 3154 } | 3077 } |
| 3155 } | 3078 } |
| 3156 | 3079 |
| 3157 | 3080 |
| 3158 void Page::Print() { | 3081 void Page::Print() { |
| 3159 // Make a best-effort to print the objects in the page. | 3082 // Make a best-effort to print the objects in the page. |
| 3160 PrintF("Page@%p in %s\n", | 3083 PrintF("Page@%p in %s\n", this->address(), |
| 3161 this->address(), | |
| 3162 AllocationSpaceName(this->owner()->identity())); | 3084 AllocationSpaceName(this->owner()->identity())); |
| 3163 printf(" --------------------------------------\n"); | 3085 printf(" --------------------------------------\n"); |
| 3164 HeapObjectIterator objects(this, heap()->GcSafeSizeOfOldObjectFunction()); | 3086 HeapObjectIterator objects(this, heap()->GcSafeSizeOfOldObjectFunction()); |
| 3165 unsigned mark_size = 0; | 3087 unsigned mark_size = 0; |
| 3166 for (HeapObject* object = objects.Next(); | 3088 for (HeapObject* object = objects.Next(); object != NULL; |
| 3167 object != NULL; | |
| 3168 object = objects.Next()) { | 3089 object = objects.Next()) { |
| 3169 bool is_marked = Marking::MarkBitFrom(object).Get(); | 3090 bool is_marked = Marking::MarkBitFrom(object).Get(); |
| 3170 PrintF(" %c ", (is_marked ? '!' : ' ')); // Indent a little. | 3091 PrintF(" %c ", (is_marked ? '!' : ' ')); // Indent a little. |
| 3171 if (is_marked) { | 3092 if (is_marked) { |
| 3172 mark_size += heap()->GcSafeSizeOfOldObjectFunction()(object); | 3093 mark_size += heap()->GcSafeSizeOfOldObjectFunction()(object); |
| 3173 } | 3094 } |
| 3174 object->ShortPrint(); | 3095 object->ShortPrint(); |
| 3175 PrintF("\n"); | 3096 PrintF("\n"); |
| 3176 } | 3097 } |
| 3177 printf(" --------------------------------------\n"); | 3098 printf(" --------------------------------------\n"); |
| 3178 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); | 3099 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); |
| 3179 } | 3100 } |
| 3180 | 3101 |
| 3181 #endif // DEBUG | 3102 #endif // DEBUG |
| 3182 | 3103 } |
| 3183 } } // namespace v8::internal | 3104 } // namespace v8::internal |