Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 13 matching lines...) | |
| 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 27 | 27 |
| 28 #include "v8.h" | 28 #include "v8.h" |
| 29 | 29 |
| 30 #include "liveobjectlist-inl.h" | 30 #include "liveobjectlist-inl.h" |
| 31 #include "macro-assembler.h" | 31 #include "macro-assembler.h" |
| 32 #include "mark-compact.h" | 32 #include "mark-compact.h" |
| 33 #include "platform.h" | 33 #include "platform.h" |
| 34 #include "snapshot.h" | |
| 34 | 35 |
| 35 namespace v8 { | 36 namespace v8 { |
| 36 namespace internal { | 37 namespace internal { |
| 37 | 38 |
| 38 | 39 |
| 39 // ---------------------------------------------------------------------------- | 40 // ---------------------------------------------------------------------------- |
| 40 // HeapObjectIterator | 41 // HeapObjectIterator |
| 41 | 42 |
| 42 HeapObjectIterator::HeapObjectIterator(PagedSpace* space) { | 43 HeapObjectIterator::HeapObjectIterator(PagedSpace* space) { |
| 43 // You can't actually iterate over the anchor page. It is not a real page, | 44 // You can't actually iterate over the anchor page. It is not a real page, |
| (...skipping 212 matching lines...) | |
| 256 | 257 |
| 257 | 258 |
| 258 // ----------------------------------------------------------------------------- | 259 // ----------------------------------------------------------------------------- |
| 259 // MemoryAllocator | 260 // MemoryAllocator |
| 260 // | 261 // |
| 261 | 262 |
| 262 MemoryAllocator::MemoryAllocator(Isolate* isolate) | 263 MemoryAllocator::MemoryAllocator(Isolate* isolate) |
| 263 : isolate_(isolate), | 264 : isolate_(isolate), |
| 264 capacity_(0), | 265 capacity_(0), |
| 265 capacity_executable_(0), | 266 capacity_executable_(0), |
| 266 size_(0), | 267 memory_allocator_reserved_(0), |
| 267 size_executable_(0) { | 268 size_executable_(0) { |
| 268 } | 269 } |
| 269 | 270 |
| 270 | 271 |
| 271 bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) { | 272 bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) { |
| 272 capacity_ = RoundUp(capacity, Page::kPageSize); | 273 capacity_ = RoundUp(capacity, Page::kPageSize); |
| 273 capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize); | 274 capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize); |
| 274 ASSERT_GE(capacity_, capacity_executable_); | 275 ASSERT_GE(capacity_, capacity_executable_); |
| 275 | 276 |
| 276 size_ = 0; | 277 memory_allocator_reserved_ = 0; |
| 277 size_executable_ = 0; | 278 size_executable_ = 0; |
| 278 | 279 |
| 279 return true; | 280 return true; |
| 280 } | 281 } |
| 281 | 282 |
| 282 | 283 |
| 283 void MemoryAllocator::TearDown() { | 284 void MemoryAllocator::TearDown() { |
| 284 // Check that spaces were torn down before MemoryAllocator. | 285 // Check that spaces were torn down before MemoryAllocator. |
| 285 ASSERT(size_ == 0); | 286 CHECK_EQ(memory_allocator_reserved_, 0); |
| 286 // TODO(gc) this will be true again when we fix FreeMemory. | 287 // TODO(gc) this will be true again when we fix FreeMemory. |
| 287 // ASSERT(size_executable_ == 0); | 288 // ASSERT(size_executable_ == 0); |
| 288 capacity_ = 0; | 289 capacity_ = 0; |
| 289 capacity_executable_ = 0; | 290 capacity_executable_ = 0; |
| 290 } | 291 } |
| 291 | 292 |
| 292 | 293 |
| 293 void MemoryAllocator::FreeMemory(VirtualMemory* reservation, | 294 void MemoryAllocator::FreeMemory(VirtualMemory* reservation, |
| 294 Executability executable) { | 295 Executability executable) { |
| 295 // TODO(gc) make code_range part of memory allocator? | 296 // TODO(gc) make code_range part of memory allocator? |
| 296 ASSERT(reservation->IsReserved()); | 297 ASSERT(reservation->IsReserved()); |
| 297 size_t size = reservation->size(); | 298 size_t size = reservation->size(); |
| 298 ASSERT(size_ >= size); | 299 ASSERT(memory_allocator_reserved_ >= size); |
| 299 size_ -= size; | 300 memory_allocator_reserved_ -= size; |
| 300 | 301 |
| 301 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size)); | 302 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size)); |
| 302 | 303 |
| 303 if (executable == EXECUTABLE) { | 304 if (executable == EXECUTABLE) { |
| 304 ASSERT(size_executable_ >= size); | 305 ASSERT(size_executable_ >= size); |
| 305 size_executable_ -= size; | 306 size_executable_ -= size; |
| 306 } | 307 } |
| 307 // Code which is part of the code-range does not have its own VirtualMemory. | 308 // Code which is part of the code-range does not have its own VirtualMemory. |
| 308 ASSERT(!isolate_->code_range()->contains( | 309 ASSERT(!isolate_->code_range()->contains( |
| 309 static_cast<Address>(reservation->address()))); | 310 static_cast<Address>(reservation->address()))); |
| 310 ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists()); | 311 ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists()); |
| 311 reservation->Release(); | 312 reservation->Release(); |
| 312 } | 313 } |
| 313 | 314 |
| 314 | 315 |
| 315 void MemoryAllocator::FreeMemory(Address base, | 316 void MemoryAllocator::FreeMemory(Address base, |
| 316 size_t size, | 317 size_t size, |
| 317 Executability executable) { | 318 Executability executable) { |
| 318 // TODO(gc) make code_range part of memory allocator? | 319 // TODO(gc) make code_range part of memory allocator? |
| 319 ASSERT(size_ >= size); | 320 ASSERT(memory_allocator_reserved_ >= size); |
| 320 size_ -= size; | 321 memory_allocator_reserved_ -= size; |
| 321 | 322 |
| 322 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size)); | 323 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size)); |
| 323 | 324 |
| 324 if (executable == EXECUTABLE) { | 325 if (executable == EXECUTABLE) { |
| 325 ASSERT(size_executable_ >= size); | 326 ASSERT(size_executable_ >= size); |
| 326 size_executable_ -= size; | 327 size_executable_ -= size; |
| 327 } | 328 } |
| 328 if (isolate_->code_range()->contains(static_cast<Address>(base))) { | 329 if (isolate_->code_range()->contains(static_cast<Address>(base))) { |
| 329 ASSERT(executable == EXECUTABLE); | 330 ASSERT(executable == EXECUTABLE); |
| 330 isolate_->code_range()->FreeRawMemory(base, size); | 331 isolate_->code_range()->FreeRawMemory(base, size); |
| 331 } else { | 332 } else { |
| 332 ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists()); | 333 ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists()); |
| 333 bool result = VirtualMemory::ReleaseRegion(base, size); | 334 bool result = VirtualMemory::ReleaseRegion(base, size); |
| 334 USE(result); | 335 USE(result); |
| 335 ASSERT(result); | 336 ASSERT(result); |
| 336 } | 337 } |
| 337 } | 338 } |
| 338 | 339 |
| 339 | 340 |
| 340 Address MemoryAllocator::ReserveAlignedMemory(size_t size, | 341 Address MemoryAllocator::ReserveAlignedMemory(size_t size, |
| 341 size_t alignment, | 342 size_t alignment, |
| 342 VirtualMemory* controller) { | 343 VirtualMemory* controller) { |
| 343 VirtualMemory reservation(size, alignment); | 344 VirtualMemory reservation(size, alignment); |
| 344 | 345 |
| 345 if (!reservation.IsReserved()) return NULL; | 346 if (!reservation.IsReserved()) return NULL; |
| 346 size_ += reservation.size(); | 347 memory_allocator_reserved_ += reservation.size(); |
| 347 Address base = RoundUp(static_cast<Address>(reservation.address()), | 348 Address base = RoundUp(static_cast<Address>(reservation.address()), |
| 348 alignment); | 349 alignment); |
| 349 controller->TakeControl(&reservation); | 350 controller->TakeControl(&reservation); |
| 350 return base; | 351 return base; |
| 351 } | 352 } |
| 352 | 353 |
| 353 | 354 |
| 354 Address MemoryAllocator::AllocateAlignedMemory(size_t size, | 355 Address MemoryAllocator::AllocateAlignedMemory(size_t size, |
| 356 size_t reserved_size, | |
| 355 size_t alignment, | 357 size_t alignment, |
| 356 Executability executable, | 358 Executability executable, |
| 357 VirtualMemory* controller) { | 359 VirtualMemory* controller) { |
| 360 ASSERT(RoundUp(reserved_size, OS::CommitPageSize()) >= | |
| 361 RoundUp(size, OS::CommitPageSize())); | |
| 358 VirtualMemory reservation; | 362 VirtualMemory reservation; |
| 359 Address base = ReserveAlignedMemory(size, alignment, &reservation); | 363 Address base = ReserveAlignedMemory(reserved_size, alignment, &reservation); |
| 360 if (base == NULL) return NULL; | 364 if (base == NULL) return NULL; |
| 361 if (!reservation.Commit(base, | 365 if (!reservation.Commit(base, |
| 362 size, | 366 size, |
| 363 executable == EXECUTABLE)) { | 367 executable == EXECUTABLE)) { |
| 364 return NULL; | 368 return NULL; |
| 365 } | 369 } |
| 366 controller->TakeControl(&reservation); | 370 controller->TakeControl(&reservation); |
| 367 return base; | 371 return base; |
| 368 } | 372 } |
| 369 | 373 |
| 370 | 374 |
| 371 void Page::InitializeAsAnchor(PagedSpace* owner) { | 375 void Page::InitializeAsAnchor(PagedSpace* owner) { |
| 372 set_owner(owner); | 376 set_owner(owner); |
| 373 set_prev_page(this); | 377 set_prev_page(this); |
| 374 set_next_page(this); | 378 set_next_page(this); |
| 375 } | 379 } |
| 376 | 380 |
| 377 | 381 |
| 382 void Page::CommitMore(intptr_t space_needed) { | |
| 383 intptr_t reserved_page_size = reservation_.IsReserved() ? | |
| 384 reservation_.size() : | |
| 385 Page::kPageSize; | |
| 386 ASSERT(size() < reserved_page_size); | |
| 387 intptr_t expand = Min(Max(size(), space_needed), reserved_page_size - size()); | |
| 388 // At least double the page size (this also rounds to OS page size). | |
| 389 expand = Min(reserved_page_size - size(), | |
| 390 RoundUpToPowerOf2(size() + expand) - size()); | |
| 391 ASSERT(expand <= kPageSize - size()); | |
| 392 ASSERT(expand <= reserved_page_size - size()); | |
| 393 Executability executable = | |
| 394 IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE; | |
| 395 Address old_end = ObjectAreaEnd(); | |
| 396 if (!VirtualMemory::CommitRegion(old_end, expand, executable)) return; | |
| 397 | |
| 398 set_size(size() + expand); | |
| 399 | |
| 400 PagedSpace* paged_space = reinterpret_cast<PagedSpace*>(owner()); | |
| 401 paged_space->heap()->isolate()->memory_allocator()->AllocationBookkeeping( | |
| 402 paged_space, | |
| 403 old_end, | |
| 404 0, // No new memory was reserved. | |
| 405 expand, // New memory committed. | |
| 406 executable); | |
| 407 paged_space->IncreaseCapacity(expand); | |
| 408 | |
| 409 // In map space we have to align the expanded area with the correct map | |
| 410 // alignment. | |
| Vyacheslav Egorov (Chromium) 2012/01/16 17:02:54: Code below does not mention map space at all and i | |
| Erik Corry 2012/01/17 11:37:22: Done. | |
| 411 uintptr_t end_int = old_end - ObjectAreaStart(); | |
| Vyacheslav Egorov (Chromium) 2012/01/16 17:20:53: variable name is wrong. it's size, not end. | |
| Erik Corry 2012/01/17 11:37:22: Done. | |
| 412 uintptr_t aligned_end_int = | |
| Vyacheslav Egorov (Chromium) 2012/01/16 17:20:53: ditto | |
| Erik Corry 2012/01/17 11:37:22: Done. | |
| 413 end_int - end_int % paged_space->ObjectAlignment(); | |
| 414 if (aligned_end_int < end_int) { | |
| Vyacheslav Egorov (Chromium) 2012/01/16 17:20:53: != instead of < for readability | |
| Erik Corry 2012/01/17 11:37:22: Done. | |
| 415 aligned_end_int += paged_space->ObjectAlignment(); | |
| 416 } | |
| 417 Address new_area = | |
| 418 reinterpret_cast<Address>(ObjectAreaStart() + aligned_end_int); | |
| 419 // This will waste the space for one map per doubling of the page size until | |
| Vyacheslav Egorov (Chromium) 2012/01/16 17:02:54: Code is generic and does not reference map space d | |
| Erik Corry 2012/01/17 11:37:22: Done. | |
| 420 // the next GC. | |
| 421 paged_space->AddToFreeLists(old_end, new_area - old_end); | |
| 422 | |
| 423 expand -= (new_area - old_end); | |
| 424 | |
| 425 paged_space->AddToFreeLists(new_area, expand); | |
| 426 } | |
| 427 | |
| 428 | |
| 378 NewSpacePage* NewSpacePage::Initialize(Heap* heap, | 429 NewSpacePage* NewSpacePage::Initialize(Heap* heap, |
| 379 Address start, | 430 Address start, |
| 380 SemiSpace* semi_space) { | 431 SemiSpace* semi_space) { |
| 381 MemoryChunk* chunk = MemoryChunk::Initialize(heap, | 432 MemoryChunk* chunk = MemoryChunk::Initialize(heap, |
| 382 start, | 433 start, |
| 383 Page::kPageSize, | 434 Page::kPageSize, |
| 384 NOT_EXECUTABLE, | 435 NOT_EXECUTABLE, |
| 385 semi_space); | 436 semi_space); |
| 386 chunk->set_next_chunk(NULL); | 437 chunk->set_next_chunk(NULL); |
| 387 chunk->set_prev_chunk(NULL); | 438 chunk->set_prev_chunk(NULL); |
| (...skipping 65 matching lines...) | |
| 453 ClearFlag(SCAN_ON_SCAVENGE); | 504 ClearFlag(SCAN_ON_SCAVENGE); |
| 454 } | 505 } |
| 455 next_chunk_->prev_chunk_ = prev_chunk_; | 506 next_chunk_->prev_chunk_ = prev_chunk_; |
| 456 prev_chunk_->next_chunk_ = next_chunk_; | 507 prev_chunk_->next_chunk_ = next_chunk_; |
| 457 prev_chunk_ = NULL; | 508 prev_chunk_ = NULL; |
| 458 next_chunk_ = NULL; | 509 next_chunk_ = NULL; |
| 459 } | 510 } |
| 460 | 511 |
| 461 | 512 |
| 462 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size, | 513 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size, |
| 514 intptr_t committed_body_size, | |
| 463 Executability executable, | 515 Executability executable, |
| 464 Space* owner) { | 516 Space* owner) { |
| 465 size_t chunk_size = MemoryChunk::kObjectStartOffset + body_size; | 517 ASSERT(body_size >= committed_body_size); |
| 518 size_t chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + body_size, | |
| 519 OS::CommitPageSize()); | |
| 520 intptr_t committed_chunk_size = | |
| 521 committed_body_size + MemoryChunk::kObjectStartOffset; | |
| 522 committed_chunk_size = RoundUp(committed_chunk_size, OS::CommitPageSize()); | |
| 466 Heap* heap = isolate_->heap(); | 523 Heap* heap = isolate_->heap(); |
| 467 Address base = NULL; | 524 Address base = NULL; |
| 468 VirtualMemory reservation; | 525 VirtualMemory reservation; |
| 469 if (executable == EXECUTABLE) { | 526 if (executable == EXECUTABLE) { |
| 470 // Check executable memory limit. | 527 // Check executable memory limit. |
| 471 if (size_executable_ + chunk_size > capacity_executable_) { | 528 if (size_executable_ + chunk_size > capacity_executable_) { |
| 472 LOG(isolate_, | 529 LOG(isolate_, |
| 473 StringEvent("MemoryAllocator::AllocateRawMemory", | 530 StringEvent("MemoryAllocator::AllocateRawMemory", |
| 474 "V8 Executable Allocation capacity exceeded")); | 531 "V8 Executable Allocation capacity exceeded")); |
| 475 return NULL; | 532 return NULL; |
| 476 } | 533 } |
| 477 | 534 |
| 478 // Allocate executable memory either from code range or from the | 535 // Allocate executable memory either from code range or from the |
| 479 // OS. | 536 // OS. |
| 480 if (isolate_->code_range()->exists()) { | 537 if (isolate_->code_range()->exists()) { |
| 481 base = isolate_->code_range()->AllocateRawMemory(chunk_size, &chunk_size); | 538 base = isolate_->code_range()->AllocateRawMemory(chunk_size, &chunk_size); |
| 482 ASSERT(IsAligned(reinterpret_cast<intptr_t>(base), | 539 ASSERT(IsAligned(reinterpret_cast<intptr_t>(base), |
| 483 MemoryChunk::kAlignment)); | 540 MemoryChunk::kAlignment)); |
| 484 if (base == NULL) return NULL; | 541 if (base == NULL) return NULL; |
| 485 size_ += chunk_size; | 542 // The AllocateAlignedMemory method will update the memory allocator |
| 486 // Update executable memory size. | 543 // memory used, but we are not using that if we have a code range, so |
| 487 size_executable_ += chunk_size; | 544 // we update it here. |
| 545 memory_allocator_reserved_ += chunk_size; | |
| 488 } else { | 546 } else { |
| 489 base = AllocateAlignedMemory(chunk_size, | 547 base = AllocateAlignedMemory(committed_chunk_size, |
| 548 chunk_size, | |
| 490 MemoryChunk::kAlignment, | 549 MemoryChunk::kAlignment, |
| 491 executable, | 550 executable, |
| 492 &reservation); | 551 &reservation); |
| 493 if (base == NULL) return NULL; | 552 if (base == NULL) return NULL; |
| 494 // Update executable memory size. | |
| 495 size_executable_ += reservation.size(); | |
| 496 } | 553 } |
| 497 } else { | 554 } else { |
| 498 base = AllocateAlignedMemory(chunk_size, | 555 base = AllocateAlignedMemory(committed_chunk_size, |
| 556 chunk_size, | |
| 499 MemoryChunk::kAlignment, | 557 MemoryChunk::kAlignment, |
| 500 executable, | 558 executable, |
| 501 &reservation); | 559 &reservation); |
| 502 | 560 |
| 503 if (base == NULL) return NULL; | 561 if (base == NULL) return NULL; |
| 504 } | 562 } |
| 505 | 563 |
| 506 #ifdef DEBUG | 564 AllocationBookkeeping( |
| 507 ZapBlock(base, chunk_size); | 565 owner, base, chunk_size, committed_chunk_size, executable); |
| 508 #endif | |
| 509 isolate_->counters()->memory_allocated()-> | |
| 510 Increment(static_cast<int>(chunk_size)); | |
| 511 | |
| 512 LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size)); | |
| 513 if (owner != NULL) { | |
| 514 ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity()); | |
| 515 PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size); | |
| 516 } | |
| 517 | 566 |
| 518 MemoryChunk* result = MemoryChunk::Initialize(heap, | 567 MemoryChunk* result = MemoryChunk::Initialize(heap, |
| 519 base, | 568 base, |
| 520 chunk_size, | 569 committed_chunk_size, |
| 521 executable, | 570 executable, |
| 522 owner); | 571 owner); |
| 523 result->set_reserved_memory(&reservation); | 572 result->set_reserved_memory(&reservation); |
| 524 return result; | 573 return result; |
| 525 } | 574 } |
| 526 | 575 |
| 527 | 576 |
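A minimal sketch of how AllocateChunk above derives the reserved and committed chunk sizes from the two body sizes it is given. The 4 KB commit page size and 32-byte header used here are assumptions standing in for OS::CommitPageSize() and MemoryChunk::kObjectStartOffset.

```cpp
// Sketch (assumed constants) of the size rounding in MemoryAllocator::AllocateChunk.
#include <cstddef>
#include <cstdio>

// Round value up to a multiple of granularity (granularity must be a power of two).
static size_t RoundUp(size_t value, size_t granularity) {
  return (value + granularity - 1) & ~(granularity - 1);
}

int main() {
  const size_t kCommitPageSize = 4096;     // assumed OS::CommitPageSize()
  const size_t kObjectStartOffset = 32;    // assumed MemoryChunk::kObjectStartOffset
  size_t body_size = 1024 * 1024 - 32;     // full object area is reserved
  size_t committed_body_size = 10 * 1024;  // only part of it is committed up front

  size_t chunk_size = RoundUp(kObjectStartOffset + body_size, kCommitPageSize);
  size_t committed_chunk_size =
      RoundUp(committed_body_size + kObjectStartOffset, kCommitPageSize);

  // Both sizes end up aligned to the OS commit granularity; the committed
  // portion can later grow toward the reserved size via Page::CommitMore.
  std::printf("reserve %zu bytes, commit %zu bytes\n",
              chunk_size, committed_chunk_size);
  return 0;
}
```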
| 528 Page* MemoryAllocator::AllocatePage(PagedSpace* owner, | 577 void MemoryAllocator::AllocationBookkeeping(Space* owner, |
| 578 Address base, | |
| 579 intptr_t reserved_chunk_size, | |
| 580 intptr_t committed_chunk_size, | |
| 581 Executability executable) { | |
| 582 if (executable == EXECUTABLE) { | |
| 583 // Update executable memory size. | |
| 584 size_executable_ += reserved_chunk_size; | |
| 585 } | |
| 586 | |
| 587 #ifdef DEBUG | |
| 588 ZapBlock(base, committed_chunk_size); | |
| 589 #endif | |
| 590 isolate_->counters()->memory_allocated()-> | |
| 591 Increment(static_cast<int>(committed_chunk_size)); | |
| 592 | |
| 593 LOG(isolate_, NewEvent("MemoryChunk", base, committed_chunk_size)); | |
| 594 if (owner != NULL) { | |
| 595 ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity()); | |
| 596 PerformAllocationCallback( | |
| 597 space, kAllocationActionAllocate, committed_chunk_size); | |
| 598 } | |
| 599 } | |
| 600 | |
| 601 | |
| 602 Page* MemoryAllocator::AllocatePage(intptr_t object_area_size, | |
| Vyacheslav Egorov (Chromium) 2012/01/16 17:02:54: object_area_size should really be comitted_object_ | |
| Erik Corry 2012/01/17 11:37:22: Done. | |
| 603 PagedSpace* owner, | |
| 529 Executability executable) { | 604 Executability executable) { |
| 530 MemoryChunk* chunk = AllocateChunk(Page::kObjectAreaSize, executable, owner); | 605 ASSERT(object_area_size <= Page::kObjectAreaSize); |
| 606 | |
| 607 MemoryChunk* chunk = | |
| 608 AllocateChunk(Page::kObjectAreaSize, object_area_size, executable, owner); | |
| 531 | 609 |
| 532 if (chunk == NULL) return NULL; | 610 if (chunk == NULL) return NULL; |
| 533 | 611 |
| 534 return Page::Initialize(isolate_->heap(), chunk, executable, owner); | 612 return Page::Initialize(isolate_->heap(), chunk, executable, owner); |
| 535 } | 613 } |
| 536 | 614 |
| 537 | 615 |
| 538 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size, | 616 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size, |
| 539 Executability executable, | 617 Executability executable, |
| 540 Space* owner) { | 618 Space* owner) { |
| 541 MemoryChunk* chunk = AllocateChunk(object_size, executable, owner); | 619 MemoryChunk* chunk = |
| 620 AllocateChunk(object_size, object_size, executable, owner); | |
| 542 if (chunk == NULL) return NULL; | 621 if (chunk == NULL) return NULL; |
| 543 return LargePage::Initialize(isolate_->heap(), chunk); | 622 return LargePage::Initialize(isolate_->heap(), chunk); |
| 544 } | 623 } |
| 545 | 624 |
| 546 | 625 |
| 547 void MemoryAllocator::Free(MemoryChunk* chunk) { | 626 void MemoryAllocator::Free(MemoryChunk* chunk) { |
| 548 LOG(isolate_, DeleteEvent("MemoryChunk", chunk)); | 627 LOG(isolate_, DeleteEvent("MemoryChunk", chunk)); |
| 549 if (chunk->owner() != NULL) { | 628 if (chunk->owner() != NULL) { |
| 550 ObjectSpace space = | 629 ObjectSpace space = |
| 551 static_cast<ObjectSpace>(1 << chunk->owner()->identity()); | 630 static_cast<ObjectSpace>(1 << chunk->owner()->identity()); |
| 552 PerformAllocationCallback(space, kAllocationActionFree, chunk->size()); | 631 PerformAllocationCallback(space, kAllocationActionFree, chunk->size()); |
| 553 } | 632 } |
| 554 | 633 |
| 555 delete chunk->slots_buffer(); | 634 delete chunk->slots_buffer(); |
| 556 delete chunk->skip_list(); | 635 delete chunk->skip_list(); |
| 557 | 636 |
| 558 VirtualMemory* reservation = chunk->reserved_memory(); | 637 VirtualMemory* reservation = chunk->reserved_memory(); |
| 559 if (reservation->IsReserved()) { | 638 if (reservation->IsReserved()) { |
| 560 FreeMemory(reservation, chunk->executable()); | 639 FreeMemory(reservation, chunk->executable()); |
| 561 } else { | 640 } else { |
| 641 // When we do not have a reservation that is because this allocation | |
| 642 // is part of the huge reserved chunk of memory reserved for code on | |
| 643 // x64. In that case the size was rounded up to the page size on | |
| 644 // allocation so we do the same now when freeing. | |
| 562 FreeMemory(chunk->address(), | 645 FreeMemory(chunk->address(), |
| 563 chunk->size(), | 646 RoundUp(chunk->size(), Page::kPageSize), |
| 564 chunk->executable()); | 647 chunk->executable()); |
| 565 } | 648 } |
| 566 } | 649 } |
| 567 | 650 |
| 568 | 651 |
| 569 bool MemoryAllocator::CommitBlock(Address start, | 652 bool MemoryAllocator::CommitBlock(Address start, |
| 570 size_t size, | 653 size_t size, |
| 571 Executability executable) { | 654 Executability executable) { |
| 572 if (!VirtualMemory::CommitRegion(start, size, executable)) return false; | 655 if (!VirtualMemory::CommitRegion(start, size, executable)) return false; |
| 573 #ifdef DEBUG | 656 #ifdef DEBUG |
| (...skipping 59 matching lines...) | |
| 633 memory_allocation_callbacks_.Remove(i); | 716 memory_allocation_callbacks_.Remove(i); |
| 634 return; | 717 return; |
| 635 } | 718 } |
| 636 } | 719 } |
| 637 UNREACHABLE(); | 720 UNREACHABLE(); |
| 638 } | 721 } |
| 639 | 722 |
| 640 | 723 |
| 641 #ifdef DEBUG | 724 #ifdef DEBUG |
| 642 void MemoryAllocator::ReportStatistics() { | 725 void MemoryAllocator::ReportStatistics() { |
| 643 float pct = static_cast<float>(capacity_ - size_) / capacity_; | 726 float pct = |
| 727 static_cast<float>(capacity_ - memory_allocator_reserved_) / capacity_; | |
| 644 PrintF(" capacity: %" V8_PTR_PREFIX "d" | 728 PrintF(" capacity: %" V8_PTR_PREFIX "d" |
| 645 ", used: %" V8_PTR_PREFIX "d" | 729 ", used: %" V8_PTR_PREFIX "d" |
| 646 ", available: %%%d\n\n", | 730 ", available: %%%d\n\n", |
| 647 capacity_, size_, static_cast<int>(pct*100)); | 731 capacity_, memory_allocator_reserved_, static_cast<int>(pct*100)); |
| 648 } | 732 } |
| 649 #endif | 733 #endif |
| 650 | 734 |
| 651 // ----------------------------------------------------------------------------- | 735 // ----------------------------------------------------------------------------- |
| 652 // PagedSpace implementation | 736 // PagedSpace implementation |
| 653 | 737 |
| 654 PagedSpace::PagedSpace(Heap* heap, | 738 PagedSpace::PagedSpace(Heap* heap, |
| 655 intptr_t max_capacity, | 739 intptr_t max_capacity, |
| 656 AllocationSpace id, | 740 AllocationSpace id, |
| 657 Executability executable) | 741 Executability executable) |
| (...skipping 47 matching lines...) | |
| 705 Address next = cur + obj->Size(); | 789 Address next = cur + obj->Size(); |
| 706 if ((cur <= addr) && (addr < next)) return obj; | 790 if ((cur <= addr) && (addr < next)) return obj; |
| 707 } | 791 } |
| 708 | 792 |
| 709 UNREACHABLE(); | 793 UNREACHABLE(); |
| 710 return Failure::Exception(); | 794 return Failure::Exception(); |
| 711 } | 795 } |
| 712 | 796 |
| 713 bool PagedSpace::CanExpand() { | 797 bool PagedSpace::CanExpand() { |
| 714 ASSERT(max_capacity_ % Page::kObjectAreaSize == 0); | 798 ASSERT(max_capacity_ % Page::kObjectAreaSize == 0); |
| 715 ASSERT(Capacity() % Page::kObjectAreaSize == 0); | |
| 716 | 799 |
| 717 if (Capacity() == max_capacity_) return false; | 800 if (Capacity() == max_capacity_) return false; |
| 718 | 801 |
| 719 ASSERT(Capacity() < max_capacity_); | 802 ASSERT(Capacity() < max_capacity_); |
| 720 | 803 |
| 721 // Are we going to exceed capacity for this space? | 804 // Are we going to exceed capacity for this space? |
| 722 if ((Capacity() + Page::kPageSize) > max_capacity_) return false; | 805 if ((Capacity() + Page::kPageSize) > max_capacity_) return false; |
| 723 | 806 |
| 724 return true; | 807 return true; |
| 725 } | 808 } |
| 726 | 809 |
| 727 bool PagedSpace::Expand() { | 810 bool PagedSpace::Expand(intptr_t size_in_bytes) { |
| 728 if (!CanExpand()) return false; | 811 if (!CanExpand()) return false; |
| 729 | 812 |
| 813 Page* last_page = anchor_.prev_page(); | |
| 814 if (last_page != &anchor_) { | |
| 815 // We have run out of linear allocation space. This may be because | |
| 816 // the most recently allocated page (stored last in the list) is a small | |
| 817 // one, that starts on a page aligned boundary, but has not a full kPageSize | |
| 818 // of committed memory. Let's commit more memory for the page. | |
| 819 intptr_t reserved_page_size = last_page->reserved_memory()->IsReserved() ? | |
| 820 last_page->reserved_memory()->size() : | |
| 821 Page::kPageSize; | |
| 822 if (last_page->size() < reserved_page_size && | |
| 823 reserved_page_size - last_page->size() >= size_in_bytes && | |
| Vyacheslav Egorov (Chromium) 2012/01/16 17:02:54: I feel uncomfortable when arithmetic expression is | |
| Erik Corry 2012/01/17 11:37:22: Done. | |
| 824 !last_page->IsEvacuationCandidate() && | |
| 825 last_page->WasSwept()) { | |
| 826 last_page->CommitMore(size_in_bytes); | |
| 827 return true; | |
| 828 } | |
| 829 } | |
| 830 | |
| 831 // We initially only commit a part of the page, but the deserialization | |
| 832 // of the initial snapshot makes the assumption that it can deserialize | |
| 833 // into linear memory of a certain size per space, so some of the spaces | |
| 834 // need to have a little more committed memory. | |
| 835 int initial = Max(OS::CommitPageSize(), kMinimumSpaceSizes[identity()]); | |
| 836 | |
| 837 ASSERT(Page::kPageSize - initial < Page::kObjectAreaSize); | |
| 838 | |
| 839 intptr_t expansion_size = | |
| 840 Max(initial, | |
| 841 RoundUpToPowerOf2(MemoryChunk::kObjectStartOffset + size_in_bytes)) - | |
| 842 MemoryChunk::kObjectStartOffset; | |
| 843 | |
| 730 Page* p = heap()->isolate()->memory_allocator()-> | 844 Page* p = heap()->isolate()->memory_allocator()-> |
| 731 AllocatePage(this, executable()); | 845 AllocatePage(expansion_size, this, executable()); |
| 732 if (p == NULL) return false; | 846 if (p == NULL) return false; |
| 733 | 847 |
| 734 ASSERT(Capacity() <= max_capacity_); | 848 ASSERT(Capacity() <= max_capacity_); |
| 735 | 849 |
| 736 p->InsertAfter(anchor_.prev_page()); | 850 p->InsertAfter(anchor_.prev_page()); |
| 737 | 851 |
| 738 return true; | 852 return true; |
| 739 } | 853 } |
| 740 | 854 |
| 741 | 855 |
| (...skipping 22 matching lines...) | |
| 764 if (page->WasSwept()) { | 878 if (page->WasSwept()) { |
| 765 intptr_t size = free_list_.EvictFreeListItems(page); | 879 intptr_t size = free_list_.EvictFreeListItems(page); |
| 766 accounting_stats_.AllocateBytes(size); | 880 accounting_stats_.AllocateBytes(size); |
| 767 ASSERT_EQ(Page::kObjectAreaSize, static_cast<int>(size)); | 881 ASSERT_EQ(Page::kObjectAreaSize, static_cast<int>(size)); |
| 768 } | 882 } |
| 769 | 883 |
| 770 if (Page::FromAllocationTop(allocation_info_.top) == page) { | 884 if (Page::FromAllocationTop(allocation_info_.top) == page) { |
| 771 allocation_info_.top = allocation_info_.limit = NULL; | 885 allocation_info_.top = allocation_info_.limit = NULL; |
| 772 } | 886 } |
| 773 | 887 |
| 888 intptr_t size = page->ObjectAreaEnd() - page->ObjectAreaStart(); | |
| 889 | |
| 774 page->Unlink(); | 890 page->Unlink(); |
| 775 if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) { | 891 if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) { |
| 776 heap()->isolate()->memory_allocator()->Free(page); | 892 heap()->isolate()->memory_allocator()->Free(page); |
| 777 } else { | 893 } else { |
| 778 heap()->QueueMemoryChunkForFree(page); | 894 heap()->QueueMemoryChunkForFree(page); |
| 779 } | 895 } |
| 780 | 896 |
| 781 ASSERT(Capacity() > 0); | 897 ASSERT(Capacity() > 0); |
| 782 ASSERT(Capacity() % Page::kObjectAreaSize == 0); | 898 accounting_stats_.ShrinkSpace(size); |
| 783 accounting_stats_.ShrinkSpace(Page::kObjectAreaSize); | |
| 784 } | 899 } |
| 785 | 900 |
| 786 | 901 |
| 787 void PagedSpace::ReleaseAllUnusedPages() { | 902 void PagedSpace::ReleaseAllUnusedPages() { |
| 788 PageIterator it(this); | 903 PageIterator it(this); |
| 789 while (it.has_next()) { | 904 while (it.has_next()) { |
| 790 Page* page = it.next(); | 905 Page* page = it.next(); |
| 791 if (!page->WasSwept()) { | 906 if (!page->WasSwept()) { |
| 792 if (page->LiveBytes() == 0) ReleasePage(page); | 907 if (page->LiveBytes() == 0) ReleasePage(page); |
| 793 } else { | 908 } else { |
| (...skipping 857 matching lines...) | |
| 1651 // Free lists for old object spaces implementation | 1766 // Free lists for old object spaces implementation |
| 1652 | 1767 |
| 1653 void FreeListNode::set_size(Heap* heap, int size_in_bytes) { | 1768 void FreeListNode::set_size(Heap* heap, int size_in_bytes) { |
| 1654 ASSERT(size_in_bytes > 0); | 1769 ASSERT(size_in_bytes > 0); |
| 1655 ASSERT(IsAligned(size_in_bytes, kPointerSize)); | 1770 ASSERT(IsAligned(size_in_bytes, kPointerSize)); |
| 1656 | 1771 |
| 1657 // We write a map and possibly size information to the block. If the block | 1772 // We write a map and possibly size information to the block. If the block |
| 1658 // is big enough to be a FreeSpace with at least one extra word (the next | 1773 // is big enough to be a FreeSpace with at least one extra word (the next |
| 1659 // pointer), we set its map to be the free space map and its size to an | 1774 // pointer), we set its map to be the free space map and its size to an |
| 1660 // appropriate array length for the desired size from HeapObject::Size(). | 1775 // appropriate array length for the desired size from HeapObject::Size(). |
| 1661 // If the block is too small (eg, one or two words), to hold both a size | 1776 // If the block is too small (e.g. one or two words), to hold both a size |
| 1662 // field and a next pointer, we give it a filler map that gives it the | 1777 // field and a next pointer, we give it a filler map that gives it the |
| 1663 // correct size. | 1778 // correct size. |
| 1664 if (size_in_bytes > FreeSpace::kHeaderSize) { | 1779 if (size_in_bytes > FreeSpace::kHeaderSize) { |
| 1665 set_map_no_write_barrier(heap->raw_unchecked_free_space_map()); | 1780 set_map_no_write_barrier(heap->raw_unchecked_free_space_map()); |
| 1666 // Can't use FreeSpace::cast because it fails during deserialization. | 1781 // Can't use FreeSpace::cast because it fails during deserialization. |
| 1667 FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this); | 1782 FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this); |
| 1668 this_as_free_space->set_size(size_in_bytes); | 1783 this_as_free_space->set_size(size_in_bytes); |
| 1669 } else if (size_in_bytes == kPointerSize) { | 1784 } else if (size_in_bytes == kPointerSize) { |
| 1670 set_map_no_write_barrier(heap->raw_unchecked_one_pointer_filler_map()); | 1785 set_map_no_write_barrier(heap->raw_unchecked_one_pointer_filler_map()); |
| 1671 } else if (size_in_bytes == 2 * kPointerSize) { | 1786 } else if (size_in_bytes == 2 * kPointerSize) { |
| (...skipping 83 matching lines...) | |
| 1755 } else { | 1870 } else { |
| 1756 node->set_next(huge_list_); | 1871 node->set_next(huge_list_); |
| 1757 huge_list_ = node; | 1872 huge_list_ = node; |
| 1758 } | 1873 } |
| 1759 available_ += size_in_bytes; | 1874 available_ += size_in_bytes; |
| 1760 ASSERT(IsVeryLong() || available_ == SumFreeLists()); | 1875 ASSERT(IsVeryLong() || available_ == SumFreeLists()); |
| 1761 return 0; | 1876 return 0; |
| 1762 } | 1877 } |
| 1763 | 1878 |
| 1764 | 1879 |
| 1765 FreeListNode* FreeList::PickNodeFromList(FreeListNode** list, int* node_size) { | 1880 FreeListNode* FreeList::PickNodeFromList(FreeListNode** list, |
| 1881 int* node_size, | |
| 1882 int minimum_size) { | |
| 1766 FreeListNode* node = *list; | 1883 FreeListNode* node = *list; |
| 1767 | 1884 |
| 1768 if (node == NULL) return NULL; | 1885 if (node == NULL) return NULL; |
| 1769 | 1886 |
| 1887 ASSERT(node->map() == node->GetHeap()->raw_unchecked_free_space_map()); | |
| 1888 | |
| 1770 while (node != NULL && | 1889 while (node != NULL && |
| 1771 Page::FromAddress(node->address())->IsEvacuationCandidate()) { | 1890 Page::FromAddress(node->address())->IsEvacuationCandidate()) { |
| 1772 available_ -= node->Size(); | 1891 available_ -= node->Size(); |
| 1773 node = node->next(); | 1892 node = node->next(); |
| 1774 } | 1893 } |
| 1775 | 1894 |
| 1776 if (node != NULL) { | 1895 if (node == NULL) { |
| 1777 *node_size = node->Size(); | |
| 1778 *list = node->next(); | |
| 1779 } else { | |
| 1780 *list = NULL; | 1896 *list = NULL; |
| 1897 return NULL; | |
| 1781 } | 1898 } |
| 1782 | 1899 |
| 1900 // Gets the size without checking the map. When we are booting we have | |
| 1901 // a FreeListNode before we have created its map. | |
| 1902 intptr_t size = reinterpret_cast<FreeSpace*>(node)->Size(); | |
| 1903 | |
| 1904 // We don't search the list for one that fits, preferring to look in the | |
| 1905 // list of larger nodes, but we do check the first in the list, because | |
| 1906 // if we had to expand the space or page we may have placed an entry that | |
| 1907 // was just long enough at the head of one of the lists. | |
| 1908 if (size < minimum_size) return NULL; | |
| 1909 | |
| 1910 *node_size = size; | |
| 1911 available_ -= size; | |
| 1912 *list = node->next(); | |
| 1913 | |
| 1783 return node; | 1914 return node; |
| 1784 } | 1915 } |
| 1785 | 1916 |
| 1786 | 1917 |
| 1787 FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) { | 1918 FreeListNode* FreeList::FindAbuttingNode( |
| 1919 int size_in_bytes, int* node_size, Address limit, FreeListNode** list_head) { | |
| 1920 FreeListNode* first_node = *list_head; | |
| 1921 if (first_node != NULL && | |
| 1922 first_node->address() == limit && | |
| 1923 reinterpret_cast<FreeSpace*>(first_node)->Size() >= size_in_bytes && | |
| 1924 !Page::FromAddress(first_node->address())->IsEvacuationCandidate()) { | |
| 1925 FreeListNode* answer = first_node; | |
| 1926 int size = reinterpret_cast<FreeSpace*>(first_node)->Size(); | |
| 1927 available_ -= size; | |
| 1928 *node_size = size; | |
| 1929 *list_head = first_node->next(); | |
| 1930 ASSERT(IsVeryLong() || available_ == SumFreeLists()); | |
| 1931 return answer; | |
| 1932 } | |
| 1933 return NULL; | |
| 1934 } | |
| 1935 | |
| 1936 | |
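The condition FindAbuttingNode checks above can be stated in isolation: a free block only qualifies as an extension of the linear allocation area if it starts exactly at the current limit, is big enough for the request, and does not live on an evacuation candidate. The sketch below uses simplified stand-in types rather than V8's FreeListNode and Page.

```cpp
// Simplified sketch of the "abutting free block" test used by FreeList::FindAbuttingNode.
#include <cstddef>
#include <cstdint>

struct FreeBlock {
  uintptr_t address;          // start of the free block
  size_t size;                // size in bytes
  bool evacuation_candidate;  // is the owning page scheduled for evacuation?
};

// Returns true if 'block' can simply extend a linear allocation area that
// currently ends at 'limit' and must satisfy a request of 'size_in_bytes'.
static bool AbutsLinearArea(const FreeBlock& block,
                            uintptr_t limit,
                            size_t size_in_bytes) {
  return block.address == limit &&
         block.size >= size_in_bytes &&
         !block.evacuation_candidate;
}

int main() {
  FreeBlock block{0x1000, 256, false};
  // A block starting exactly at the old limit can extend the linear area.
  return AbutsLinearArea(block, 0x1000, 128) ? 0 : 1;
}
```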
| 1937 FreeListNode* FreeList::FindNodeFor(int size_in_bytes, | |
| 1938 int* node_size, | |
| 1939 Address limit) { | |
| 1788 FreeListNode* node = NULL; | 1940 FreeListNode* node = NULL; |
| 1789 | 1941 |
| 1790 if (size_in_bytes <= kSmallAllocationMax) { | 1942 if (limit != NULL) { |
| 1791 node = PickNodeFromList(&small_list_, node_size); | 1943 // We may have a memory area at the head of the free list, which abuts the |
| 1944 // old linear allocation area. This happens if the linear allocation area | |
| 1945 // has been shortened to allow an incremental marking step to be performed. | |
| 1946 // In that case we prefer to return the free memory area that is contiguous | |
| 1947 // with the old linear allocation area. | |
| 1948 node = FindAbuttingNode(size_in_bytes, node_size, limit, &large_list_); | |
| 1949 if (node != NULL) return node; | |
| 1950 node = FindAbuttingNode(size_in_bytes, node_size, limit, &huge_list_); | |
| 1792 if (node != NULL) return node; | 1951 if (node != NULL) return node; |
| 1793 } | 1952 } |
| 1794 | 1953 |
| 1795 if (size_in_bytes <= kMediumAllocationMax) { | 1954 node = PickNodeFromList(&small_list_, node_size, size_in_bytes); |
| 1796 node = PickNodeFromList(&medium_list_, node_size); | 1955 ASSERT(IsVeryLong() || available_ == SumFreeLists()); |
| 1956 if (node != NULL) return node; | |
| 1957 | |
| 1958 node = PickNodeFromList(&medium_list_, node_size, size_in_bytes); | |
| 1959 ASSERT(IsVeryLong() || available_ == SumFreeLists()); | |
| 1960 if (node != NULL) return node; | |
| 1961 | |
| 1962 node = PickNodeFromList(&large_list_, node_size, size_in_bytes); | |
| 1963 ASSERT(IsVeryLong() || available_ == SumFreeLists()); | |
| 1964 if (node != NULL) return node; | |
| 1965 | |
| 1966 // The tricky third clause in this for statement is due to the fact that | |
| 1967 // PickNodeFromList can cut pages out of the list if they are unavailable for | |
| 1968 // new allocation (e.g. if they are on a page that has been scheduled for | |
| 1969 // evacuation). | |
| 1970 for (FreeListNode** cur = &huge_list_; | |
| 1971 *cur != NULL; | |
| 1972 cur = (*cur) == NULL ? cur : (*cur)->next_address()) { | |
| Vyacheslav Egorov (Chromium) 2012/01/16 17:02:54: *cur == NULL ? NULL : ... for better readability. | |
| Erik Corry 2012/01/17 07:48:12: It may be more readable, but it will also crash :- | |
| 1973 node = PickNodeFromList(cur, node_size, size_in_bytes); | |
| 1974 ASSERT(IsVeryLong() || available_ == SumFreeLists()); | |
| 1797 if (node != NULL) return node; | 1975 if (node != NULL) return node; |
| 1798 } | 1976 } |
| 1799 | 1977 |
| 1800 if (size_in_bytes <= kLargeAllocationMax) { | |
| 1801 node = PickNodeFromList(&large_list_, node_size); | |
| 1802 if (node != NULL) return node; | |
| 1803 } | |
| 1804 | |
| 1805 for (FreeListNode** cur = &huge_list_; | |
| 1806 *cur != NULL; | |
| 1807 cur = (*cur)->next_address()) { | |
| 1808 FreeListNode* cur_node = *cur; | |
| 1809 while (cur_node != NULL && | |
| 1810 Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) { | |
| 1811 available_ -= reinterpret_cast<FreeSpace*>(cur_node)->Size(); | |
| 1812 cur_node = cur_node->next(); | |
| 1813 } | |
| 1814 | |
| 1815 *cur = cur_node; | |
| 1816 if (cur_node == NULL) break; | |
| 1817 | |
| 1818 ASSERT((*cur)->map() == HEAP->raw_unchecked_free_space_map()); | |
| 1819 FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur); | |
| 1820 int size = cur_as_free_space->Size(); | |
| 1821 if (size >= size_in_bytes) { | |
| 1822 // Large enough node found. Unlink it from the list. | |
| 1823 node = *cur; | |
| 1824 *node_size = size; | |
| 1825 *cur = node->next(); | |
| 1826 break; | |
| 1827 } | |
| 1828 } | |
| 1829 | |
| 1830 return node; | 1978 return node; |
| 1831 } | 1979 } |
| 1832 | 1980 |
| 1833 | 1981 |
| 1834 // Allocation on the old space free list. If it succeeds then a new linear | 1982 // Allocation on the old space free list. If it succeeds then a new linear |
| 1835 // allocation space has been set up with the top and limit of the space. If | 1983 // allocation space has been set up with the top and limit of the space. If |
| 1836 // the allocation fails then NULL is returned, and the caller can perform a GC | 1984 // the allocation fails then NULL is returned, and the caller can perform a GC |
| 1837 // or allocate a new page before retrying. | 1985 // or allocate a new page before retrying. |
| 1838 HeapObject* FreeList::Allocate(int size_in_bytes) { | 1986 HeapObject* FreeList::Allocate(int size_in_bytes) { |
| 1839 ASSERT(0 < size_in_bytes); | 1987 ASSERT(0 < size_in_bytes); |
| 1840 ASSERT(size_in_bytes <= kMaxBlockSize); | 1988 ASSERT(size_in_bytes <= kMaxBlockSize); |
| 1841 ASSERT(IsAligned(size_in_bytes, kPointerSize)); | 1989 ASSERT(IsAligned(size_in_bytes, kPointerSize)); |
| 1842 // Don't free list allocate if there is linear space available. | 1990 // Don't free list allocate if there is linear space available. |
| 1843 ASSERT(owner_->limit() - owner_->top() < size_in_bytes); | 1991 ASSERT(owner_->limit() - owner_->top() < size_in_bytes); |
| 1844 | 1992 |
| 1845 int new_node_size = 0; | 1993 int new_node_size = 0; |
| 1846 FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size); | 1994 FreeListNode* new_node = |
| 1995 FindNodeFor(size_in_bytes, &new_node_size, owner_->limit()); | |
| 1847 if (new_node == NULL) return NULL; | 1996 if (new_node == NULL) return NULL; |
| 1848 | 1997 |
| 1849 available_ -= new_node_size; | 1998 if (new_node->address() == owner_->limit()) { |
| 1999 // The new freelist node we were given is an extension of the one we had | |
| 2000 // last. This is a common thing to happen when we extend a small page by | |
| 2001 // committing more memory. In this case we just add the new node to the | |
| 2002 // linear allocation area and recurse. | |
| 2003 owner_->Allocate(new_node_size); | |
| 2004 owner_->SetTop(owner_->top(), new_node->address() + new_node_size); | |
| 2005 MaybeObject* allocation = owner_->AllocateRaw(size_in_bytes); | |
| 2006 Object* answer; | |
| 2007 if (!allocation->ToObject(&answer)) return NULL; | |
| 2008 return HeapObject::cast(answer); | |
| 2009 } | |
| 2010 | |
| 1850 ASSERT(IsVeryLong() || available_ == SumFreeLists()); | 2011 ASSERT(IsVeryLong() || available_ == SumFreeLists()); |
| 1851 | 2012 |
| 1852 int bytes_left = new_node_size - size_in_bytes; | 2013 int bytes_left = new_node_size - size_in_bytes; |
| 1853 ASSERT(bytes_left >= 0); | 2014 ASSERT(bytes_left >= 0); |
| 1854 | 2015 |
| 1855 int old_linear_size = static_cast<int>(owner_->limit() - owner_->top()); | 2016 int old_linear_size = static_cast<int>(owner_->limit() - owner_->top()); |
| 1856 // Mark the old linear allocation area with a free space map so it can be | 2017 // Mark the old linear allocation area with a free space map so it can be |
| 1857 // skipped when scanning the heap. This also puts it back in the free list | 2018 // skipped when scanning the heap. This also puts it back in the free list |
| 1858 // if it is big enough. | 2019 // if it is big enough. |
| 1859 owner_->Free(owner_->top(), old_linear_size); | 2020 if (old_linear_size != 0) { |
| 2021 owner_->AddToFreeLists(owner_->top(), old_linear_size); | |
| 2022 } | |
| 1860 | 2023 |
| 1861 #ifdef DEBUG | 2024 #ifdef DEBUG |
| 1862 for (int i = 0; i < size_in_bytes / kPointerSize; i++) { | 2025 for (int i = 0; i < size_in_bytes / kPointerSize; i++) { |
| 1863 reinterpret_cast<Object**>(new_node->address())[i] = Smi::FromInt(0); | 2026 reinterpret_cast<Object**>(new_node->address())[i] = Smi::FromInt(0); |
| 1864 } | 2027 } |
| 1865 #endif | 2028 #endif |
| 1866 | 2029 |
| 1867 owner_->heap()->incremental_marking()->OldSpaceStep( | 2030 owner_->heap()->incremental_marking()->OldSpaceStep( |
| 1868 size_in_bytes - old_linear_size); | 2031 size_in_bytes - old_linear_size); |
| 1869 | 2032 |
| 1870 // The old-space-step might have finished sweeping and restarted marking. | 2033 // The old-space-step might have finished sweeping and restarted marking. |
| 1871 // Verify that it did not turn the page of the new node into an evacuation | 2034 // Verify that it did not turn the page of the new node into an evacuation |
| 1872 // candidate. | 2035 // candidate. |
| 1873 ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_node)); | 2036 ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_node)); |
| 1874 | 2037 |
| 1875 const int kThreshold = IncrementalMarking::kAllocatedThreshold; | 2038 const int kThreshold = IncrementalMarking::kAllocatedThreshold; |
| 1876 | 2039 |
| 1877 // Memory in the linear allocation area is counted as allocated. We may free | 2040 // Memory in the linear allocation area is counted as allocated. We may free |
| 1878 // a little of this again immediately - see below. | 2041 // a little of this again immediately - see below. |
| 1879 owner_->Allocate(new_node_size); | 2042 owner_->Allocate(new_node_size); |
| 1880 | 2043 |
| 1881 if (bytes_left > kThreshold && | 2044 if (bytes_left > kThreshold && |
| 1882 owner_->heap()->incremental_marking()->IsMarkingIncomplete() && | 2045 owner_->heap()->incremental_marking()->IsMarkingIncomplete() && |
| 1883 FLAG_incremental_marking_steps) { | 2046 FLAG_incremental_marking_steps) { |
| 1884 int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold); | 2047 int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold); |
| 1885 // We don't want to give too large linear areas to the allocator while | 2048 // We don't want to give too large linear areas to the allocator while |
| 1886 // incremental marking is going on, because we won't check again whether | 2049 // incremental marking is going on, because we won't check again whether |
| 1887 // we want to do another increment until the linear area is used up. | 2050 // we want to do another increment until the linear area is used up. |
| 1888 owner_->Free(new_node->address() + size_in_bytes + linear_size, | 2051 owner_->AddToFreeLists(new_node->address() + size_in_bytes + linear_size, |
| 1889 new_node_size - size_in_bytes - linear_size); | 2052 new_node_size - size_in_bytes - linear_size); |
| 1890 owner_->SetTop(new_node->address() + size_in_bytes, | 2053 owner_->SetTop(new_node->address() + size_in_bytes, |
| 1891 new_node->address() + size_in_bytes + linear_size); | 2054 new_node->address() + size_in_bytes + linear_size); |
| 1892 } else if (bytes_left > 0) { | 2055 } else if (bytes_left > 0) { |
| 1893 // Normally we give the rest of the node to the allocator as its new | 2056 // Normally we give the rest of the node to the allocator as its new |
| 1894 // linear allocation area. | 2057 // linear allocation area. |
| 1895 owner_->SetTop(new_node->address() + size_in_bytes, | 2058 owner_->SetTop(new_node->address() + size_in_bytes, |
| 1896 new_node->address() + new_node_size); | 2059 new_node->address() + new_node_size); |
| 1897 } else { | 2060 } else { |
| 2061 ASSERT(bytes_left == 0); | |
| 1898 // TODO(gc) Try not freeing linear allocation region when bytes_left | 2062 // TODO(gc) Try not freeing linear allocation region when bytes_left |
| 1899 // are zero. | 2063 // are zero. |
| 1900 owner_->SetTop(NULL, NULL); | 2064 owner_->SetTop(NULL, NULL); |
| 1901 } | 2065 } |
| 1902 | 2066 |
| 1903 return new_node; | 2067 return new_node; |
| 1904 } | 2068 } |
| 1905 | 2069 |
| 1906 | 2070 |
| 1907 static intptr_t CountFreeListItemsInList(FreeListNode* n, Page* p) { | 2071 static intptr_t CountFreeListItemsInList(FreeListNode* n, Page* p) { |
| (...skipping 112 matching lines...) | |
| 2020 // or because we have lowered the limit in order to get periodic incremental | 2184 // or because we have lowered the limit in order to get periodic incremental |
| 2021 // marking. The most reliable way to ensure that there is linear space is | 2185 // marking. The most reliable way to ensure that there is linear space is |
| 2022 // to do the allocation, then rewind the limit. | 2186 // to do the allocation, then rewind the limit. |
| 2023 ASSERT(bytes <= InitialCapacity()); | 2187 ASSERT(bytes <= InitialCapacity()); |
| 2024 MaybeObject* maybe = AllocateRaw(bytes); | 2188 MaybeObject* maybe = AllocateRaw(bytes); |
| 2025 Object* object = NULL; | 2189 Object* object = NULL; |
| 2026 if (!maybe->ToObject(&object)) return false; | 2190 if (!maybe->ToObject(&object)) return false; |
| 2027 HeapObject* allocation = HeapObject::cast(object); | 2191 HeapObject* allocation = HeapObject::cast(object); |
| 2028 Address top = allocation_info_.top; | 2192 Address top = allocation_info_.top; |
| 2029 if ((top - bytes) == allocation->address()) { | 2193 if ((top - bytes) == allocation->address()) { |
| 2030 allocation_info_.top = allocation->address(); | 2194 Address new_top = allocation->address(); |
| 2195 ASSERT(new_top >= Page::FromAddress(new_top - 1)->ObjectAreaStart()); | |
| 2196 allocation_info_.top = new_top; | |
| 2031 return true; | 2197 return true; |
| 2032 } | 2198 } |
| 2033 // There may be a borderline case here where the allocation succeeded, but | 2199 // There may be a borderline case here where the allocation succeeded, but |
| 2034 // the limit and top have moved on to a new page. In that case we try again. | 2200 // the limit and top have moved on to a new page. In that case we try again. |
| 2035 return ReserveSpace(bytes); | 2201 return ReserveSpace(bytes); |
| 2036 } | 2202 } |
| 2037 | 2203 |
| 2038 | 2204 |
| 2039 void PagedSpace::PrepareForMarkCompact() { | 2205 void PagedSpace::PrepareForMarkCompact() { |
| 2040 // We don't have a linear allocation area while sweeping. It will be restored | 2206 // We don't have a linear allocation area while sweeping. It will be restored |
| 2041 // on the first allocation after the sweep. | 2207 // on the first allocation after the sweep. |
| 2042 // Mark the old linear allocation area with a free space map so it can be | 2208 // Mark the old linear allocation area with a free space map so it can be |
| 2043 // skipped when scanning the heap. | 2209 // skipped when scanning the heap. |
| 2044 int old_linear_size = static_cast<int>(limit() - top()); | 2210 int old_linear_size = static_cast<int>(limit() - top()); |
| 2045 Free(top(), old_linear_size); | 2211 AddToFreeLists(top(), old_linear_size); |
| 2046 SetTop(NULL, NULL); | 2212 SetTop(NULL, NULL); |
| 2047 | 2213 |
| 2048 // Stop lazy sweeping and clear marking bits for unswept pages. | 2214 // Stop lazy sweeping and clear marking bits for unswept pages. |
| 2049 if (first_unswept_page_ != NULL) { | 2215 if (first_unswept_page_ != NULL) { |
| 2050 Page* p = first_unswept_page_; | 2216 Page* p = first_unswept_page_; |
| 2051 do { | 2217 do { |
| 2052 // Do not use ShouldBeSweptLazily predicate here. | 2218 // Do not use ShouldBeSweptLazily predicate here. |
| 2053 // New evacuation candidates were selected but they still have | 2219 // New evacuation candidates were selected but they still have |
| 2054 // to be swept before collection starts. | 2220 // to be swept before collection starts. |
| 2055 if (!p->WasSwept()) { | 2221 if (!p->WasSwept()) { |
| (...skipping 22 matching lines...) Expand all Loading... | |
| 2078 if (new_top <= allocation_info_.limit) return true; | 2244 if (new_top <= allocation_info_.limit) return true; |
| 2079 | 2245 |
| 2080 HeapObject* new_area = free_list_.Allocate(size_in_bytes); | 2246 HeapObject* new_area = free_list_.Allocate(size_in_bytes); |
| 2081 if (new_area == NULL) new_area = SlowAllocateRaw(size_in_bytes); | 2247 if (new_area == NULL) new_area = SlowAllocateRaw(size_in_bytes); |
| 2082 if (new_area == NULL) return false; | 2248 if (new_area == NULL) return false; |
| 2083 | 2249 |
| 2084 int old_linear_size = static_cast<int>(limit() - top()); | 2250 int old_linear_size = static_cast<int>(limit() - top()); |
| 2085 // Mark the old linear allocation area with a free space so it can be | 2251 // Mark the old linear allocation area with a free space so it can be |
| 2086 // skipped when scanning the heap. This also puts it back in the free list | 2252 // skipped when scanning the heap. This also puts it back in the free list |
| 2087 // if it is big enough. | 2253 // if it is big enough. |
| 2088 Free(top(), old_linear_size); | 2254 AddToFreeLists(top(), old_linear_size); |
| 2089 | 2255 |
| 2090 SetTop(new_area->address(), new_area->address() + size_in_bytes); | 2256 SetTop(new_area->address(), new_area->address() + size_in_bytes); |
| 2091 Allocate(size_in_bytes); | 2257 // The AddToFreeLists call above will reduce the size of the space in the |
| 2258 // allocation stats. We don't need to add this linear area to the size | |
| 2259 // with an Allocate(size_in_bytes) call here, because the | |
| 2260 // free_list_.Allocate() call above already accounted for this memory. | |
| 2092 return true; | 2261 return true; |
| 2093 } | 2262 } |
| 2094 | 2263 |
| 2095 | 2264 |
| 2096 // You have to call this last, since the implementation from PagedSpace | 2265 // You have to call this last, since the implementation from PagedSpace |
| 2097 // doesn't know that memory was 'promised' to large object space. | 2266 // doesn't know that memory was 'promised' to large object space. |
| 2098 bool LargeObjectSpace::ReserveSpace(int bytes) { | 2267 bool LargeObjectSpace::ReserveSpace(int bytes) { |
| 2099 return heap()->OldGenerationSpaceAvailable() >= bytes; | 2268 return heap()->OldGenerationSpaceAvailable() >= bytes; |
| 2100 } | 2269 } |
| 2101 | 2270 |
| (...skipping 60 matching lines...) | |
| 2162 | 2331 |
| 2163 // Free list allocation failed and there is no next page. Fail if we have | 2332 // Free list allocation failed and there is no next page. Fail if we have |
| 2164 // hit the old generation size limit that should cause a garbage | 2333 // hit the old generation size limit that should cause a garbage |
| 2165 // collection. | 2334 // collection. |
| 2166 if (!heap()->always_allocate() && | 2335 if (!heap()->always_allocate() && |
| 2167 heap()->OldGenerationAllocationLimitReached()) { | 2336 heap()->OldGenerationAllocationLimitReached()) { |
| 2168 return NULL; | 2337 return NULL; |
| 2169 } | 2338 } |
| 2170 | 2339 |
| 2171 // Try to expand the space and allocate in the new next page. | 2340 // Try to expand the space and allocate in the new next page. |
| 2172 if (Expand()) { | 2341 if (Expand(size_in_bytes)) { |
| 2173 return free_list_.Allocate(size_in_bytes); | 2342 return free_list_.Allocate(size_in_bytes); |
| 2174 } | 2343 } |
| 2175 | 2344 |
| 2176 // Last ditch, sweep all the remaining pages to try to find space. This may | 2345 // Last ditch, sweep all the remaining pages to try to find space. This may |
| 2177 // cause a pause. | 2346 // cause a pause. |
| 2178 if (!IsSweepingComplete()) { | 2347 if (!IsSweepingComplete()) { |
| 2179 AdvanceSweeper(kMaxInt); | 2348 AdvanceSweeper(kMaxInt); |
| 2180 | 2349 |
| 2181 // Retry the free list allocation. | 2350 // Retry the free list allocation. |
| 2182 HeapObject* object = free_list_.Allocate(size_in_bytes); | 2351 HeapObject* object = free_list_.Allocate(size_in_bytes); |
| (...skipping 340 matching lines...) | |
| 2523 if (previous == NULL) { | 2692 if (previous == NULL) { |
| 2524 first_page_ = current; | 2693 first_page_ = current; |
| 2525 } else { | 2694 } else { |
| 2526 previous->set_next_page(current); | 2695 previous->set_next_page(current); |
| 2527 } | 2696 } |
| 2528 | 2697 |
| 2529 // Free the chunk. | 2698 // Free the chunk. |
| 2530 heap()->mark_compact_collector()->ReportDeleteIfNeeded( | 2699 heap()->mark_compact_collector()->ReportDeleteIfNeeded( |
| 2531 object, heap()->isolate()); | 2700 object, heap()->isolate()); |
| 2532 size_ -= static_cast<int>(page->size()); | 2701 size_ -= static_cast<int>(page->size()); |
| 2702 ASSERT(size_ >= 0); | |
| 2533 objects_size_ -= object->Size(); | 2703 objects_size_ -= object->Size(); |
| 2534 page_count_--; | 2704 page_count_--; |
| 2535 | 2705 |
| 2536 if (is_pointer_object) { | 2706 if (is_pointer_object) { |
| 2537 heap()->QueueMemoryChunkForFree(page); | 2707 heap()->QueueMemoryChunkForFree(page); |
| 2538 } else { | 2708 } else { |
| 2539 heap()->isolate()->memory_allocator()->Free(page); | 2709 heap()->isolate()->memory_allocator()->Free(page); |
| 2540 } | 2710 } |
| 2541 } | 2711 } |
| 2542 } | 2712 } |
| (...skipping 118 matching lines...) | |
| 2661 object->ShortPrint(); | 2831 object->ShortPrint(); |
| 2662 PrintF("\n"); | 2832 PrintF("\n"); |
| 2663 } | 2833 } |
| 2664 printf(" --------------------------------------\n"); | 2834 printf(" --------------------------------------\n"); |
| 2665 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); | 2835 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); |
| 2666 } | 2836 } |
| 2667 | 2837 |
| 2668 #endif // DEBUG | 2838 #endif // DEBUG |
| 2669 | 2839 |
| 2670 } } // namespace v8::internal | 2840 } } // namespace v8::internal |