Chromium Code Reviews

Side by Side Diff: src/spaces.cc

Issue 9178014: Revert 10413-10416 initial memory use reduction due to (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: Created 8 years, 11 months ago
OLD | NEW
1 // Copyright 2011 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 13 matching lines...)
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 27
28 #include "v8.h" 28 #include "v8.h"
29 29
30 #include "liveobjectlist-inl.h" 30 #include "liveobjectlist-inl.h"
31 #include "macro-assembler.h" 31 #include "macro-assembler.h"
32 #include "mark-compact.h" 32 #include "mark-compact.h"
33 #include "platform.h" 33 #include "platform.h"
34 #include "snapshot.h"
35 34
36 namespace v8 { 35 namespace v8 {
37 namespace internal { 36 namespace internal {
38 37
39 38
40 // ---------------------------------------------------------------------------- 39 // ----------------------------------------------------------------------------
41 // HeapObjectIterator 40 // HeapObjectIterator
42 41
43 HeapObjectIterator::HeapObjectIterator(PagedSpace* space) { 42 HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
44 // You can't actually iterate over the anchor page. It is not a real page, 43 // You can't actually iterate over the anchor page. It is not a real page,
(...skipping 212 matching lines...)
257 256
258 257
259 // ----------------------------------------------------------------------------- 258 // -----------------------------------------------------------------------------
260 // MemoryAllocator 259 // MemoryAllocator
261 // 260 //
262 261
263 MemoryAllocator::MemoryAllocator(Isolate* isolate) 262 MemoryAllocator::MemoryAllocator(Isolate* isolate)
264 : isolate_(isolate), 263 : isolate_(isolate),
265 capacity_(0), 264 capacity_(0),
266 capacity_executable_(0), 265 capacity_executable_(0),
267 memory_allocator_reserved_(0), 266 size_(0),
268 size_executable_(0) { 267 size_executable_(0) {
269 } 268 }
270 269
271 270
272 bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) { 271 bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
273 capacity_ = RoundUp(capacity, Page::kPageSize); 272 capacity_ = RoundUp(capacity, Page::kPageSize);
274 capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize); 273 capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
275 ASSERT_GE(capacity_, capacity_executable_); 274 ASSERT_GE(capacity_, capacity_executable_);
276 275
277 memory_allocator_reserved_ = 0; 276 size_ = 0;
278 size_executable_ = 0; 277 size_executable_ = 0;
279 278
280 return true; 279 return true;
281 } 280 }
282 281
283 282
284 void MemoryAllocator::TearDown() { 283 void MemoryAllocator::TearDown() {
285 // Check that spaces were torn down before MemoryAllocator. 284 // Check that spaces were torn down before MemoryAllocator.
286 CHECK_EQ(memory_allocator_reserved_, 0); 285 ASSERT(size_ == 0);
287 // TODO(gc) this will be true again when we fix FreeMemory. 286 // TODO(gc) this will be true again when we fix FreeMemory.
288 // ASSERT(size_executable_ == 0); 287 // ASSERT(size_executable_ == 0);
289 capacity_ = 0; 288 capacity_ = 0;
290 capacity_executable_ = 0; 289 capacity_executable_ = 0;
291 } 290 }
292 291
293 292
294 void MemoryAllocator::FreeMemory(VirtualMemory* reservation, 293 void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
295 Executability executable) { 294 Executability executable) {
296 // TODO(gc) make code_range part of memory allocator? 295 // TODO(gc) make code_range part of memory allocator?
297 ASSERT(reservation->IsReserved()); 296 ASSERT(reservation->IsReserved());
298 size_t size = reservation->size(); 297 size_t size = reservation->size();
299 ASSERT(memory_allocator_reserved_ >= size); 298 ASSERT(size_ >= size);
300 memory_allocator_reserved_ -= size; 299 size_ -= size;
301 300
302 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size)); 301 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
303 302
304 if (executable == EXECUTABLE) { 303 if (executable == EXECUTABLE) {
305 ASSERT(size_executable_ >= size); 304 ASSERT(size_executable_ >= size);
306 size_executable_ -= size; 305 size_executable_ -= size;
307 } 306 }
308 // Code which is part of the code-range does not have its own VirtualMemory. 307 // Code which is part of the code-range does not have its own VirtualMemory.
309 ASSERT(!isolate_->code_range()->contains( 308 ASSERT(!isolate_->code_range()->contains(
310 static_cast<Address>(reservation->address()))); 309 static_cast<Address>(reservation->address())));
311 ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists()); 310 ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
312 reservation->Release(); 311 reservation->Release();
313 } 312 }
314 313
315 314
316 void MemoryAllocator::FreeMemory(Address base, 315 void MemoryAllocator::FreeMemory(Address base,
317 size_t size, 316 size_t size,
318 Executability executable) { 317 Executability executable) {
319 // TODO(gc) make code_range part of memory allocator? 318 // TODO(gc) make code_range part of memory allocator?
320 ASSERT(memory_allocator_reserved_ >= size); 319 ASSERT(size_ >= size);
321 memory_allocator_reserved_ -= size; 320 size_ -= size;
322 321
323 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size)); 322 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
324 323
325 if (executable == EXECUTABLE) { 324 if (executable == EXECUTABLE) {
326 ASSERT(size_executable_ >= size); 325 ASSERT(size_executable_ >= size);
327 size_executable_ -= size; 326 size_executable_ -= size;
328 } 327 }
329 if (isolate_->code_range()->contains(static_cast<Address>(base))) { 328 if (isolate_->code_range()->contains(static_cast<Address>(base))) {
330 ASSERT(executable == EXECUTABLE); 329 ASSERT(executable == EXECUTABLE);
331 isolate_->code_range()->FreeRawMemory(base, size); 330 isolate_->code_range()->FreeRawMemory(base, size);
332 } else { 331 } else {
333 ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists()); 332 ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
334 bool result = VirtualMemory::ReleaseRegion(base, size); 333 bool result = VirtualMemory::ReleaseRegion(base, size);
335 USE(result); 334 USE(result);
336 ASSERT(result); 335 ASSERT(result);
337 } 336 }
338 } 337 }
339 338
340 339
341 Address MemoryAllocator::ReserveAlignedMemory(size_t size, 340 Address MemoryAllocator::ReserveAlignedMemory(size_t size,
342 size_t alignment, 341 size_t alignment,
343 VirtualMemory* controller) { 342 VirtualMemory* controller) {
344 VirtualMemory reservation(size, alignment); 343 VirtualMemory reservation(size, alignment);
345 344
346 if (!reservation.IsReserved()) return NULL; 345 if (!reservation.IsReserved()) return NULL;
347 memory_allocator_reserved_ += reservation.size(); 346 size_ += reservation.size();
348 Address base = RoundUp(static_cast<Address>(reservation.address()), 347 Address base = RoundUp(static_cast<Address>(reservation.address()),
349 alignment); 348 alignment);
350 controller->TakeControl(&reservation); 349 controller->TakeControl(&reservation);
351 return base; 350 return base;
352 } 351 }
353 352
354 353
355 Address MemoryAllocator::AllocateAlignedMemory(size_t size, 354 Address MemoryAllocator::AllocateAlignedMemory(size_t size,
356 size_t reserved_size,
357 size_t alignment, 355 size_t alignment,
358 Executability executable, 356 Executability executable,
359 VirtualMemory* controller) { 357 VirtualMemory* controller) {
360 ASSERT(RoundUp(reserved_size, OS::CommitPageSize()) >=
361 RoundUp(size, OS::CommitPageSize()));
362 VirtualMemory reservation; 358 VirtualMemory reservation;
363 Address base = ReserveAlignedMemory(reserved_size, alignment, &reservation); 359 Address base = ReserveAlignedMemory(size, alignment, &reservation);
364 if (base == NULL) return NULL; 360 if (base == NULL) return NULL;
365 if (!reservation.Commit(base, 361 if (!reservation.Commit(base,
366 size, 362 size,
367 executable == EXECUTABLE)) { 363 executable == EXECUTABLE)) {
368 return NULL; 364 return NULL;
369 } 365 }
370 controller->TakeControl(&reservation); 366 controller->TakeControl(&reservation);
371 return base; 367 return base;
372 } 368 }
373 369
374 370
375 void Page::InitializeAsAnchor(PagedSpace* owner) { 371 void Page::InitializeAsAnchor(PagedSpace* owner) {
376 set_owner(owner); 372 set_owner(owner);
377 set_prev_page(this); 373 set_prev_page(this);
378 set_next_page(this); 374 set_next_page(this);
379 } 375 }
380 376
381 377
382 void Page::CommitMore(intptr_t space_needed) {
383 intptr_t reserved_page_size = reservation_.IsReserved() ?
384 reservation_.size() :
385 Page::kPageSize;
386 ASSERT(size() < reserved_page_size);
387 intptr_t expand = Min(Max(size(), space_needed), reserved_page_size - size());
388 // At least double the page size (this also rounds to OS page size).
389 expand = Min(reserved_page_size - size(),
390 RoundUpToPowerOf2(size() + expand) - size());
391 ASSERT(expand <= kPageSize - size());
392 ASSERT(expand <= reserved_page_size - size());
393 Executability executable =
394 IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
395 Address old_end = ObjectAreaEnd();
396 if (!VirtualMemory::CommitRegion(old_end, expand, executable)) return;
397
398 set_size(size() + expand);
399
400 PagedSpace* paged_space = reinterpret_cast<PagedSpace*>(owner());
401 paged_space->heap()->isolate()->memory_allocator()->AllocationBookkeeping(
402 paged_space,
403 old_end,
404 0, // No new memory was reserved.
405 expand, // New memory committed.
406 executable);
407 paged_space->IncreaseCapacity(expand);
408
409 // In spaces with alignment requirements (e.g. map space) we have to align
410 // the expanded area with the correct object alignment.
411 uintptr_t object_area_size = old_end - ObjectAreaStart();
412 uintptr_t aligned_object_area_size =
413 object_area_size - object_area_size % paged_space->ObjectAlignment();
414 if (aligned_object_area_size != object_area_size) {
415 aligned_object_area_size += paged_space->ObjectAlignment();
416 }
417 Address new_area =
418 reinterpret_cast<Address>(ObjectAreaStart() + aligned_object_area_size);
419 // In spaces with alignment requirements, this will waste the space for one
420 // object per doubling of the page size until the next GC.
421 paged_space->AddToFreeLists(old_end, new_area - old_end);
422
423 expand -= (new_area - old_end);
424
425 paged_space->AddToFreeLists(new_area, expand);
426 }
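
The left-hand (pre-revert) Page::CommitMore above grows a partially committed page by at least doubling its committed size, rounding the new total up to a power of two and capping it at the reserved page size. A minimal standalone sketch of that growth arithmetic follows, assuming a local RoundUpToPowerOf2 helper (a stand-in for illustration, not V8's own):

#include <cassert>
#include <cstdint>
#include <cstdio>

// Smallest power of two >= x (stand-in for V8's RoundUpToPowerOf2).
static intptr_t RoundUpToPowerOf2(intptr_t x) {
  intptr_t p = 1;
  while (p < x) p <<= 1;
  return p;
}

// How many bytes CommitMore would commit, given the currently committed size,
// the bytes the caller needs, and the total reserved page size.
static intptr_t CommitGrowth(intptr_t committed, intptr_t needed, intptr_t reserved) {
  assert(committed < reserved);
  intptr_t expand = committed > needed ? committed : needed;   // Max(size(), space_needed)
  if (expand > reserved - committed) expand = reserved - committed;
  intptr_t doubled = RoundUpToPowerOf2(committed + expand) - committed;
  return doubled < reserved - committed ? doubled : reserved - committed;
}

int main() {
  // 16 KB committed of a 1 MB reservation, 4 KB requested: commit another
  // 16 KB, doubling the committed area to 32 KB.
  printf("%ld\n", static_cast<long>(CommitGrowth(16 * 1024, 4 * 1024, 1024 * 1024)));
  return 0;
}
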
427
428
429 NewSpacePage* NewSpacePage::Initialize(Heap* heap, 378 NewSpacePage* NewSpacePage::Initialize(Heap* heap,
430 Address start, 379 Address start,
431 SemiSpace* semi_space) { 380 SemiSpace* semi_space) {
432 MemoryChunk* chunk = MemoryChunk::Initialize(heap, 381 MemoryChunk* chunk = MemoryChunk::Initialize(heap,
433 start, 382 start,
434 Page::kPageSize, 383 Page::kPageSize,
435 NOT_EXECUTABLE, 384 NOT_EXECUTABLE,
436 semi_space); 385 semi_space);
437 chunk->set_next_chunk(NULL); 386 chunk->set_next_chunk(NULL);
438 chunk->set_prev_chunk(NULL); 387 chunk->set_prev_chunk(NULL);
(...skipping 65 matching lines...)
504 ClearFlag(SCAN_ON_SCAVENGE); 453 ClearFlag(SCAN_ON_SCAVENGE);
505 } 454 }
506 next_chunk_->prev_chunk_ = prev_chunk_; 455 next_chunk_->prev_chunk_ = prev_chunk_;
507 prev_chunk_->next_chunk_ = next_chunk_; 456 prev_chunk_->next_chunk_ = next_chunk_;
508 prev_chunk_ = NULL; 457 prev_chunk_ = NULL;
509 next_chunk_ = NULL; 458 next_chunk_ = NULL;
510 } 459 }
511 460
512 461
513 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size, 462 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
514 intptr_t committed_body_size,
515 Executability executable, 463 Executability executable,
516 Space* owner) { 464 Space* owner) {
517 ASSERT(body_size >= committed_body_size); 465 size_t chunk_size = MemoryChunk::kObjectStartOffset + body_size;
518 size_t chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + body_size,
519 OS::CommitPageSize());
520 intptr_t committed_chunk_size =
521 committed_body_size + MemoryChunk::kObjectStartOffset;
522 committed_chunk_size = RoundUp(committed_chunk_size, OS::CommitPageSize());
523 Heap* heap = isolate_->heap(); 466 Heap* heap = isolate_->heap();
524 Address base = NULL; 467 Address base = NULL;
525 VirtualMemory reservation; 468 VirtualMemory reservation;
526 if (executable == EXECUTABLE) { 469 if (executable == EXECUTABLE) {
527 // Check executable memory limit. 470 // Check executable memory limit.
528 if (size_executable_ + chunk_size > capacity_executable_) { 471 if (size_executable_ + chunk_size > capacity_executable_) {
529 LOG(isolate_, 472 LOG(isolate_,
530 StringEvent("MemoryAllocator::AllocateRawMemory", 473 StringEvent("MemoryAllocator::AllocateRawMemory",
531 "V8 Executable Allocation capacity exceeded")); 474 "V8 Executable Allocation capacity exceeded"));
532 return NULL; 475 return NULL;
533 } 476 }
534 477
535 // Allocate executable memory either from code range or from the 478 // Allocate executable memory either from code range or from the
536 // OS. 479 // OS.
537 if (isolate_->code_range()->exists()) { 480 if (isolate_->code_range()->exists()) {
538 base = isolate_->code_range()->AllocateRawMemory(chunk_size, &chunk_size); 481 base = isolate_->code_range()->AllocateRawMemory(chunk_size, &chunk_size);
539 ASSERT(IsAligned(reinterpret_cast<intptr_t>(base), 482 ASSERT(IsAligned(reinterpret_cast<intptr_t>(base),
540 MemoryChunk::kAlignment)); 483 MemoryChunk::kAlignment));
541 if (base == NULL) return NULL; 484 if (base == NULL) return NULL;
542 // The AllocateAlignedMemory method will update the memory allocator 485 size_ += chunk_size;
543 // memory used, but we are not using that if we have a code range, so 486 // Update executable memory size.
544 // we update it here. 487 size_executable_ += chunk_size;
545 memory_allocator_reserved_ += chunk_size;
546 } else { 488 } else {
547 base = AllocateAlignedMemory(committed_chunk_size, 489 base = AllocateAlignedMemory(chunk_size,
548 chunk_size,
549 MemoryChunk::kAlignment, 490 MemoryChunk::kAlignment,
550 executable, 491 executable,
551 &reservation); 492 &reservation);
552 if (base == NULL) return NULL; 493 if (base == NULL) return NULL;
494 // Update executable memory size.
495 size_executable_ += reservation.size();
553 } 496 }
554 } else { 497 } else {
555 base = AllocateAlignedMemory(committed_chunk_size, 498 base = AllocateAlignedMemory(chunk_size,
556 chunk_size,
557 MemoryChunk::kAlignment, 499 MemoryChunk::kAlignment,
558 executable, 500 executable,
559 &reservation); 501 &reservation);
560 502
561 if (base == NULL) return NULL; 503 if (base == NULL) return NULL;
562 } 504 }
563 505
564 AllocationBookkeeping( 506 #ifdef DEBUG
565 owner, base, chunk_size, committed_chunk_size, executable); 507 ZapBlock(base, chunk_size);
508 #endif
509 isolate_->counters()->memory_allocated()->
510 Increment(static_cast<int>(chunk_size));
511
512 LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
513 if (owner != NULL) {
514 ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
515 PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
516 }
566 517
567 MemoryChunk* result = MemoryChunk::Initialize(heap, 518 MemoryChunk* result = MemoryChunk::Initialize(heap,
568 base, 519 base,
569 committed_chunk_size, 520 chunk_size,
570 executable, 521 executable,
571 owner); 522 owner);
572 result->set_reserved_memory(&reservation); 523 result->set_reserved_memory(&reservation);
573 return result; 524 return result;
574 } 525 }
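
The left-hand (pre-revert) AllocateChunk rounds the chunk size up to OS::CommitPageSize(), and page-granularity rounding appears throughout this file. For reference, a sketch of the usual power-of-two RoundUp idiom this relies on; the 4 KB commit page size below is an assumption for illustration:

#include <cstdint>
#include <cstdio>

// Round x up to a multiple of alignment; alignment must be a power of two.
static uintptr_t RoundUp(uintptr_t x, uintptr_t alignment) {
  return (x + alignment - 1) & ~(alignment - 1);
}

int main() {
  const uintptr_t kCommitPageSize = 4096;  // assumed OS commit granularity
  // A chunk header plus a small body still occupies a whole committed page.
  printf("%lu\n", static_cast<unsigned long>(RoundUp(32 + 100, kCommitPageSize)));  // 4096
  return 0;
}
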
575 526
576 527
577 void MemoryAllocator::AllocationBookkeeping(Space* owner, 528 Page* MemoryAllocator::AllocatePage(PagedSpace* owner,
578 Address base,
579 intptr_t reserved_chunk_size,
580 intptr_t committed_chunk_size,
581 Executability executable) {
582 if (executable == EXECUTABLE) {
583 // Update executable memory size.
584 size_executable_ += reserved_chunk_size;
585 }
586
587 #ifdef DEBUG
588 ZapBlock(base, committed_chunk_size);
589 #endif
590 isolate_->counters()->memory_allocated()->
591 Increment(static_cast<int>(committed_chunk_size));
592
593 LOG(isolate_, NewEvent("MemoryChunk", base, committed_chunk_size));
594 if (owner != NULL) {
595 ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
596 PerformAllocationCallback(
597 space, kAllocationActionAllocate, committed_chunk_size);
598 }
599 }
600
601
602 Page* MemoryAllocator::AllocatePage(intptr_t committed_object_area_size,
603 PagedSpace* owner,
604 Executability executable) { 529 Executability executable) {
605 ASSERT(committed_object_area_size <= Page::kObjectAreaSize); 530 MemoryChunk* chunk = AllocateChunk(Page::kObjectAreaSize, executable, owner);
606
607 MemoryChunk* chunk = AllocateChunk(Page::kObjectAreaSize,
608 committed_object_area_size,
609 executable,
610 owner);
611 531
612 if (chunk == NULL) return NULL; 532 if (chunk == NULL) return NULL;
613 533
614 return Page::Initialize(isolate_->heap(), chunk, executable, owner); 534 return Page::Initialize(isolate_->heap(), chunk, executable, owner);
615 } 535 }
616 536
617 537
618 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size, 538 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
619 Executability executable, 539 Executability executable,
620 Space* owner) { 540 Space* owner) {
621 MemoryChunk* chunk = 541 MemoryChunk* chunk = AllocateChunk(object_size, executable, owner);
622 AllocateChunk(object_size, object_size, executable, owner);
623 if (chunk == NULL) return NULL; 542 if (chunk == NULL) return NULL;
624 return LargePage::Initialize(isolate_->heap(), chunk); 543 return LargePage::Initialize(isolate_->heap(), chunk);
625 } 544 }
626 545
627 546
628 void MemoryAllocator::Free(MemoryChunk* chunk) { 547 void MemoryAllocator::Free(MemoryChunk* chunk) {
629 LOG(isolate_, DeleteEvent("MemoryChunk", chunk)); 548 LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
630 if (chunk->owner() != NULL) { 549 if (chunk->owner() != NULL) {
631 ObjectSpace space = 550 ObjectSpace space =
632 static_cast<ObjectSpace>(1 << chunk->owner()->identity()); 551 static_cast<ObjectSpace>(1 << chunk->owner()->identity());
633 PerformAllocationCallback(space, kAllocationActionFree, chunk->size()); 552 PerformAllocationCallback(space, kAllocationActionFree, chunk->size());
634 } 553 }
635 554
636 delete chunk->slots_buffer(); 555 delete chunk->slots_buffer();
637 delete chunk->skip_list(); 556 delete chunk->skip_list();
638 557
639 VirtualMemory* reservation = chunk->reserved_memory(); 558 VirtualMemory* reservation = chunk->reserved_memory();
640 if (reservation->IsReserved()) { 559 if (reservation->IsReserved()) {
641 FreeMemory(reservation, chunk->executable()); 560 FreeMemory(reservation, chunk->executable());
642 } else { 561 } else {
643 // When we do not have a reservation that is because this allocation
644 // is part of the huge reserved chunk of memory reserved for code on
645 // x64. In that case the size was rounded up to the page size on
646 // allocation so we do the same now when freeing.
647 FreeMemory(chunk->address(), 562 FreeMemory(chunk->address(),
648 RoundUp(chunk->size(), Page::kPageSize), 563 chunk->size(),
649 chunk->executable()); 564 chunk->executable());
650 } 565 }
651 } 566 }
652 567
653 568
654 bool MemoryAllocator::CommitBlock(Address start, 569 bool MemoryAllocator::CommitBlock(Address start,
655 size_t size, 570 size_t size,
656 Executability executable) { 571 Executability executable) {
657 if (!VirtualMemory::CommitRegion(start, size, executable)) return false; 572 if (!VirtualMemory::CommitRegion(start, size, executable)) return false;
658 #ifdef DEBUG 573 #ifdef DEBUG
(...skipping 59 matching lines...)
718 memory_allocation_callbacks_.Remove(i); 633 memory_allocation_callbacks_.Remove(i);
719 return; 634 return;
720 } 635 }
721 } 636 }
722 UNREACHABLE(); 637 UNREACHABLE();
723 } 638 }
724 639
725 640
726 #ifdef DEBUG 641 #ifdef DEBUG
727 void MemoryAllocator::ReportStatistics() { 642 void MemoryAllocator::ReportStatistics() {
728 float pct = 643 float pct = static_cast<float>(capacity_ - size_) / capacity_;
729 static_cast<float>(capacity_ - memory_allocator_reserved_) / capacity_;
730 PrintF(" capacity: %" V8_PTR_PREFIX "d" 644 PrintF(" capacity: %" V8_PTR_PREFIX "d"
731 ", used: %" V8_PTR_PREFIX "d" 645 ", used: %" V8_PTR_PREFIX "d"
732 ", available: %%%d\n\n", 646 ", available: %%%d\n\n",
733 capacity_, memory_allocator_reserved_, static_cast<int>(pct*100)); 647 capacity_, size_, static_cast<int>(pct*100));
734 } 648 }
735 #endif 649 #endif
736 650
737 // ----------------------------------------------------------------------------- 651 // -----------------------------------------------------------------------------
738 // PagedSpace implementation 652 // PagedSpace implementation
739 653
740 PagedSpace::PagedSpace(Heap* heap, 654 PagedSpace::PagedSpace(Heap* heap,
741 intptr_t max_capacity, 655 intptr_t max_capacity,
742 AllocationSpace id, 656 AllocationSpace id,
743 Executability executable) 657 Executability executable)
(...skipping 47 matching lines...)
791 Address next = cur + obj->Size(); 705 Address next = cur + obj->Size();
792 if ((cur <= addr) && (addr < next)) return obj; 706 if ((cur <= addr) && (addr < next)) return obj;
793 } 707 }
794 708
795 UNREACHABLE(); 709 UNREACHABLE();
796 return Failure::Exception(); 710 return Failure::Exception();
797 } 711 }
798 712
799 bool PagedSpace::CanExpand() { 713 bool PagedSpace::CanExpand() {
800 ASSERT(max_capacity_ % Page::kObjectAreaSize == 0); 714 ASSERT(max_capacity_ % Page::kObjectAreaSize == 0);
715 ASSERT(Capacity() % Page::kObjectAreaSize == 0);
801 716
802 if (Capacity() == max_capacity_) return false; 717 if (Capacity() == max_capacity_) return false;
803 718
804 ASSERT(Capacity() < max_capacity_); 719 ASSERT(Capacity() < max_capacity_);
805 720
806 // Are we going to exceed capacity for this space? 721 // Are we going to exceed capacity for this space?
807 if ((Capacity() + Page::kPageSize) > max_capacity_) return false; 722 if ((Capacity() + Page::kPageSize) > max_capacity_) return false;
808 723
809 return true; 724 return true;
810 } 725 }
811 726
812 bool PagedSpace::Expand(intptr_t size_in_bytes) { 727 bool PagedSpace::Expand() {
813 if (!CanExpand()) return false; 728 if (!CanExpand()) return false;
814 729
815 Page* last_page = anchor_.prev_page();
816 if (last_page != &anchor_) {
817 // We have run out of linear allocation space. This may be because the
818 // most recently allocated page (stored last in the list) is a small one,
819 // that starts on a page aligned boundary, but has not a full kPageSize of
820 // committed memory. Let's commit more memory for the page.
821 intptr_t reserved_page_size = last_page->reserved_memory()->IsReserved() ?
822 last_page->reserved_memory()->size() :
823 Page::kPageSize;
824 if (last_page->size() < reserved_page_size &&
825 (reserved_page_size - last_page->size()) >= size_in_bytes &&
826 !last_page->IsEvacuationCandidate() &&
827 last_page->WasSwept()) {
828 last_page->CommitMore(size_in_bytes);
829 return true;
830 }
831 }
832
833 // We initially only commit a part of the page, but the deserialization
834 // of the initial snapshot makes the assumption that it can deserialize
835 // into linear memory of a certain size per space, so some of the spaces
836 // need to have a little more committed memory.
837 int initial = Max(OS::CommitPageSize(), kMinimumSpaceSizes[identity()]);
838
839 ASSERT(Page::kPageSize - initial < Page::kObjectAreaSize);
840
841 intptr_t expansion_size =
842 Max(initial,
843 RoundUpToPowerOf2(MemoryChunk::kObjectStartOffset + size_in_bytes)) -
844 MemoryChunk::kObjectStartOffset;
845
846 Page* p = heap()->isolate()->memory_allocator()-> 730 Page* p = heap()->isolate()->memory_allocator()->
847 AllocatePage(expansion_size, this, executable()); 731 AllocatePage(this, executable());
848 if (p == NULL) return false; 732 if (p == NULL) return false;
849 733
850 ASSERT(Capacity() <= max_capacity_); 734 ASSERT(Capacity() <= max_capacity_);
851 735
852 p->InsertAfter(anchor_.prev_page()); 736 p->InsertAfter(anchor_.prev_page());
853 737
854 return true; 738 return true;
855 } 739 }
856 740
857 741
(...skipping 22 matching lines...)
880 if (page->WasSwept()) { 764 if (page->WasSwept()) {
881 intptr_t size = free_list_.EvictFreeListItems(page); 765 intptr_t size = free_list_.EvictFreeListItems(page);
882 accounting_stats_.AllocateBytes(size); 766 accounting_stats_.AllocateBytes(size);
883 ASSERT_EQ(Page::kObjectAreaSize, static_cast<int>(size)); 767 ASSERT_EQ(Page::kObjectAreaSize, static_cast<int>(size));
884 } 768 }
885 769
886 if (Page::FromAllocationTop(allocation_info_.top) == page) { 770 if (Page::FromAllocationTop(allocation_info_.top) == page) {
887 allocation_info_.top = allocation_info_.limit = NULL; 771 allocation_info_.top = allocation_info_.limit = NULL;
888 } 772 }
889 773
890 intptr_t size = page->ObjectAreaEnd() - page->ObjectAreaStart();
891
892 page->Unlink(); 774 page->Unlink();
893 if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) { 775 if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
894 heap()->isolate()->memory_allocator()->Free(page); 776 heap()->isolate()->memory_allocator()->Free(page);
895 } else { 777 } else {
896 heap()->QueueMemoryChunkForFree(page); 778 heap()->QueueMemoryChunkForFree(page);
897 } 779 }
898 780
899 ASSERT(Capacity() > 0); 781 ASSERT(Capacity() > 0);
900 accounting_stats_.ShrinkSpace(size); 782 ASSERT(Capacity() % Page::kObjectAreaSize == 0);
783 accounting_stats_.ShrinkSpace(Page::kObjectAreaSize);
901 } 784 }
902 785
903 786
904 void PagedSpace::ReleaseAllUnusedPages() { 787 void PagedSpace::ReleaseAllUnusedPages() {
905 PageIterator it(this); 788 PageIterator it(this);
906 while (it.has_next()) { 789 while (it.has_next()) {
907 Page* page = it.next(); 790 Page* page = it.next();
908 if (!page->WasSwept()) { 791 if (!page->WasSwept()) {
909 if (page->LiveBytes() == 0) ReleasePage(page); 792 if (page->LiveBytes() == 0) ReleasePage(page);
910 } else { 793 } else {
(...skipping 108 matching lines...)
1019 #define SET_NAME(name) allocated_histogram_[name].set_name(#name); \ 902 #define SET_NAME(name) allocated_histogram_[name].set_name(#name); \
1020 promoted_histogram_[name].set_name(#name); 903 promoted_histogram_[name].set_name(#name);
1021 INSTANCE_TYPE_LIST(SET_NAME) 904 INSTANCE_TYPE_LIST(SET_NAME)
1022 #undef SET_NAME 905 #undef SET_NAME
1023 906
1024 ASSERT(reserved_semispace_capacity == heap()->ReservedSemiSpaceSize()); 907 ASSERT(reserved_semispace_capacity == heap()->ReservedSemiSpaceSize());
1025 ASSERT(static_cast<intptr_t>(chunk_size_) >= 908 ASSERT(static_cast<intptr_t>(chunk_size_) >=
1026 2 * heap()->ReservedSemiSpaceSize()); 909 2 * heap()->ReservedSemiSpaceSize());
1027 ASSERT(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0)); 910 ASSERT(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0));
1028 911
1029 to_space_.SetUp(chunk_base_, 912 if (!to_space_.SetUp(chunk_base_,
1030 initial_semispace_capacity, 913 initial_semispace_capacity,
1031 maximum_semispace_capacity); 914 maximum_semispace_capacity)) {
1032 from_space_.SetUp(chunk_base_ + reserved_semispace_capacity, 915 return false;
1033 initial_semispace_capacity, 916 }
1034 maximum_semispace_capacity); 917 if (!from_space_.SetUp(chunk_base_ + reserved_semispace_capacity,
1035 if (!to_space_.Commit()) { 918 initial_semispace_capacity,
919 maximum_semispace_capacity)) {
1036 return false; 920 return false;
1037 } 921 }
1038 922
1039 start_ = chunk_base_; 923 start_ = chunk_base_;
1040 address_mask_ = ~(2 * reserved_semispace_capacity - 1); 924 address_mask_ = ~(2 * reserved_semispace_capacity - 1);
1041 object_mask_ = address_mask_ | kHeapObjectTagMask; 925 object_mask_ = address_mask_ | kHeapObjectTagMask;
1042 object_expected_ = reinterpret_cast<uintptr_t>(start_) | kHeapObjectTag; 926 object_expected_ = reinterpret_cast<uintptr_t>(start_) | kHeapObjectTag;
1043 927
1044 ResetAllocationInfo(); 928 ResetAllocationInfo();
1045 929
(...skipping 212 matching lines...)
1258 ASSERT_EQ(from_space_.id(), kFromSpace); 1142 ASSERT_EQ(from_space_.id(), kFromSpace);
1259 ASSERT_EQ(to_space_.id(), kToSpace); 1143 ASSERT_EQ(to_space_.id(), kToSpace);
1260 from_space_.Verify(); 1144 from_space_.Verify();
1261 to_space_.Verify(); 1145 to_space_.Verify();
1262 } 1146 }
1263 #endif 1147 #endif
1264 1148
1265 // ----------------------------------------------------------------------------- 1149 // -----------------------------------------------------------------------------
1266 // SemiSpace implementation 1150 // SemiSpace implementation
1267 1151
1268 void SemiSpace::SetUp(Address start, 1152 bool SemiSpace::SetUp(Address start,
1269 int initial_capacity, 1153 int initial_capacity,
1270 int maximum_capacity) { 1154 int maximum_capacity) {
1271 // Creates a space in the young generation. The constructor does not 1155 // Creates a space in the young generation. The constructor does not
1272 // allocate memory from the OS. A SemiSpace is given a contiguous chunk of 1156 // allocate memory from the OS. A SemiSpace is given a contiguous chunk of
1273 // memory of size 'capacity' when set up, and does not grow or shrink 1157 // memory of size 'capacity' when set up, and does not grow or shrink
1274 // otherwise. In the mark-compact collector, the memory region of the from 1158 // otherwise. In the mark-compact collector, the memory region of the from
1275 // space is used as the marking stack. It requires contiguous memory 1159 // space is used as the marking stack. It requires contiguous memory
1276 // addresses. 1160 // addresses.
1277 ASSERT(maximum_capacity >= Page::kPageSize); 1161 ASSERT(maximum_capacity >= Page::kPageSize);
1278 initial_capacity_ = RoundDown(initial_capacity, Page::kPageSize); 1162 initial_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
1279 capacity_ = initial_capacity; 1163 capacity_ = initial_capacity;
1280 maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize); 1164 maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
1281 committed_ = false; 1165 committed_ = false;
1282 start_ = start; 1166 start_ = start;
1283 address_mask_ = ~(maximum_capacity - 1); 1167 address_mask_ = ~(maximum_capacity - 1);
1284 object_mask_ = address_mask_ | kHeapObjectTagMask; 1168 object_mask_ = address_mask_ | kHeapObjectTagMask;
1285 object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag; 1169 object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
1286 age_mark_ = start_; 1170 age_mark_ = start_;
1171
1172 return Commit();
1287 } 1173 }
1288 1174
1289 1175
1290 void SemiSpace::TearDown() { 1176 void SemiSpace::TearDown() {
1291 start_ = NULL; 1177 start_ = NULL;
1292 capacity_ = 0; 1178 capacity_ = 0;
1293 } 1179 }
1294 1180
1295 1181
1296 bool SemiSpace::Commit() { 1182 bool SemiSpace::Commit() {
(...skipping 468 matching lines...)
1765 // Free lists for old object spaces implementation 1651 // Free lists for old object spaces implementation
1766 1652
1767 void FreeListNode::set_size(Heap* heap, int size_in_bytes) { 1653 void FreeListNode::set_size(Heap* heap, int size_in_bytes) {
1768 ASSERT(size_in_bytes > 0); 1654 ASSERT(size_in_bytes > 0);
1769 ASSERT(IsAligned(size_in_bytes, kPointerSize)); 1655 ASSERT(IsAligned(size_in_bytes, kPointerSize));
1770 1656
1771 // We write a map and possibly size information to the block. If the block 1657 // We write a map and possibly size information to the block. If the block
1772 // is big enough to be a FreeSpace with at least one extra word (the next 1658 // is big enough to be a FreeSpace with at least one extra word (the next
1773 // pointer), we set its map to be the free space map and its size to an 1659 // pointer), we set its map to be the free space map and its size to an
1774 // appropriate array length for the desired size from HeapObject::Size(). 1660 // appropriate array length for the desired size from HeapObject::Size().
1775 // If the block is too small (e.g. one or two words), to hold both a size 1661 // If the block is too small (eg, one or two words), to hold both a size
1776 // field and a next pointer, we give it a filler map that gives it the 1662 // field and a next pointer, we give it a filler map that gives it the
1777 // correct size. 1663 // correct size.
1778 if (size_in_bytes > FreeSpace::kHeaderSize) { 1664 if (size_in_bytes > FreeSpace::kHeaderSize) {
1779 set_map_no_write_barrier(heap->raw_unchecked_free_space_map()); 1665 set_map_no_write_barrier(heap->raw_unchecked_free_space_map());
1780 // Can't use FreeSpace::cast because it fails during deserialization. 1666 // Can't use FreeSpace::cast because it fails during deserialization.
1781 FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this); 1667 FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this);
1782 this_as_free_space->set_size(size_in_bytes); 1668 this_as_free_space->set_size(size_in_bytes);
1783 } else if (size_in_bytes == kPointerSize) { 1669 } else if (size_in_bytes == kPointerSize) {
1784 set_map_no_write_barrier(heap->raw_unchecked_one_pointer_filler_map()); 1670 set_map_no_write_barrier(heap->raw_unchecked_one_pointer_filler_map());
1785 } else if (size_in_bytes == 2 * kPointerSize) { 1671 } else if (size_in_bytes == 2 * kPointerSize) {
(...skipping 83 matching lines...)
1869 } else { 1755 } else {
1870 node->set_next(huge_list_); 1756 node->set_next(huge_list_);
1871 huge_list_ = node; 1757 huge_list_ = node;
1872 } 1758 }
1873 available_ += size_in_bytes; 1759 available_ += size_in_bytes;
1874 ASSERT(IsVeryLong() || available_ == SumFreeLists()); 1760 ASSERT(IsVeryLong() || available_ == SumFreeLists());
1875 return 0; 1761 return 0;
1876 } 1762 }
1877 1763
1878 1764
1879 FreeListNode* FreeList::PickNodeFromList(FreeListNode** list, 1765 FreeListNode* FreeList::PickNodeFromList(FreeListNode** list, int* node_size) {
1880 int* node_size,
1881 int minimum_size) {
1882 FreeListNode* node = *list; 1766 FreeListNode* node = *list;
1883 1767
1884 if (node == NULL) return NULL; 1768 if (node == NULL) return NULL;
1885 1769
1886 ASSERT(node->map() == node->GetHeap()->raw_unchecked_free_space_map());
1887
1888 while (node != NULL && 1770 while (node != NULL &&
1889 Page::FromAddress(node->address())->IsEvacuationCandidate()) { 1771 Page::FromAddress(node->address())->IsEvacuationCandidate()) {
1890 available_ -= node->Size(); 1772 available_ -= node->Size();
1891 node = node->next(); 1773 node = node->next();
1892 } 1774 }
1893 1775
1894 if (node == NULL) { 1776 if (node != NULL) {
1777 *node_size = node->Size();
1778 *list = node->next();
1779 } else {
1895 *list = NULL; 1780 *list = NULL;
1896 return NULL;
1897 } 1781 }
1898 1782
1899 // Gets the size without checking the map. When we are booting we have
1900 // a FreeListNode before we have created its map.
1901 intptr_t size = reinterpret_cast<FreeSpace*>(node)->Size();
1902
1903 // We don't search the list for one that fits, preferring to look in the
1904 // list of larger nodes, but we do check the first in the list, because
1905 // if we had to expand the space or page we may have placed an entry that
1906 // was just long enough at the head of one of the lists.
1907 if (size < minimum_size) return NULL;
1908
1909 *node_size = size;
1910 available_ -= size;
1911 *list = node->next();
1912
1913 return node; 1783 return node;
1914 } 1784 }
1915 1785
1916 1786
1917 FreeListNode* FreeList::FindAbuttingNode( 1787 FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
1918 int size_in_bytes, int* node_size, Address limit, FreeListNode** list_head) {
1919 FreeListNode* first_node = *list_head;
1920 if (first_node != NULL &&
1921 first_node->address() == limit &&
1922 reinterpret_cast<FreeSpace*>(first_node)->Size() >= size_in_bytes &&
1923 !Page::FromAddress(first_node->address())->IsEvacuationCandidate()) {
1924 FreeListNode* answer = first_node;
1925 int size = reinterpret_cast<FreeSpace*>(first_node)->Size();
1926 available_ -= size;
1927 *node_size = size;
1928 *list_head = first_node->next();
1929 ASSERT(IsVeryLong() || available_ == SumFreeLists());
1930 return answer;
1931 }
1932 return NULL;
1933 }
1934
1935
1936 FreeListNode* FreeList::FindNodeFor(int size_in_bytes,
1937 int* node_size,
1938 Address limit) {
1939 FreeListNode* node = NULL; 1788 FreeListNode* node = NULL;
1940 1789
1941 if (limit != NULL) { 1790 if (size_in_bytes <= kSmallAllocationMax) {
1942 // We may have a memory area at the head of the free list, which abuts the 1791 node = PickNodeFromList(&small_list_, node_size);
1943 // old linear allocation area. This happens if the linear allocation area
1944 // has been shortened to allow an incremental marking step to be performed.
1945 // In that case we prefer to return the free memory area that is contiguous
1946 // with the old linear allocation area.
1947 node = FindAbuttingNode(size_in_bytes, node_size, limit, &large_list_);
1948 if (node != NULL) return node;
1949 node = FindAbuttingNode(size_in_bytes, node_size, limit, &huge_list_);
1950 if (node != NULL) return node; 1792 if (node != NULL) return node;
1951 } 1793 }
1952 1794
1953 node = PickNodeFromList(&small_list_, node_size, size_in_bytes); 1795 if (size_in_bytes <= kMediumAllocationMax) {
1954 ASSERT(IsVeryLong() || available_ == SumFreeLists()); 1796 node = PickNodeFromList(&medium_list_, node_size);
1955 if (node != NULL) return node;
1956
1957 node = PickNodeFromList(&medium_list_, node_size, size_in_bytes);
1958 ASSERT(IsVeryLong() || available_ == SumFreeLists());
1959 if (node != NULL) return node;
1960
1961 node = PickNodeFromList(&large_list_, node_size, size_in_bytes);
1962 ASSERT(IsVeryLong() || available_ == SumFreeLists());
1963 if (node != NULL) return node;
1964
1965 // The tricky third clause in this for statement is due to the fact that
1966 // PickNodeFromList can cut pages out of the list if they are unavailable for
1967 // new allocation (e.g. if they are on a page that has been scheduled for
1968 // evacuation).
1969 for (FreeListNode** cur = &huge_list_;
1970 *cur != NULL;
1971 cur = (*cur) == NULL ? cur : (*cur)->next_address()) {
1972 node = PickNodeFromList(cur, node_size, size_in_bytes);
1973 ASSERT(IsVeryLong() || available_ == SumFreeLists());
1974 if (node != NULL) return node; 1797 if (node != NULL) return node;
1975 } 1798 }
1976 1799
1800 if (size_in_bytes <= kLargeAllocationMax) {
1801 node = PickNodeFromList(&large_list_, node_size);
1802 if (node != NULL) return node;
1803 }
1804
1805 for (FreeListNode** cur = &huge_list_;
1806 *cur != NULL;
1807 cur = (*cur)->next_address()) {
1808 FreeListNode* cur_node = *cur;
1809 while (cur_node != NULL &&
1810 Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
1811 available_ -= reinterpret_cast<FreeSpace*>(cur_node)->Size();
1812 cur_node = cur_node->next();
1813 }
1814
1815 *cur = cur_node;
1816 if (cur_node == NULL) break;
1817
1818 ASSERT((*cur)->map() == HEAP->raw_unchecked_free_space_map());
1819 FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur);
1820 int size = cur_as_free_space->Size();
1821 if (size >= size_in_bytes) {
1822 // Large enough node found. Unlink it from the list.
1823 node = *cur;
1824 *node_size = size;
1825 *cur = node->next();
1826 break;
1827 }
1828 }
1829
1977 return node; 1830 return node;
1978 } 1831 }
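
Both versions of FindNodeFor consult segregated free lists in increasing size order (small, medium, large) and fall back to scanning the huge list; the right-hand (post-revert) version gates each list by a size-class threshold. A minimal sketch of that size-class selection; the threshold values here are illustrative assumptions, not V8's actual kSmallAllocationMax / kMediumAllocationMax / kLargeAllocationMax constants:

#include <cstdio>

// Illustrative thresholds, in bytes (assumed, not V8's real constants).
static const int kSmallMax = 0xff * 8;
static const int kMediumMax = 0x7ff * 8;
static const int kLargeMax = 0x3fff * 8;

// 0 = small list, 1 = medium, 2 = large, 3 = huge (needs a linear scan).
static int BucketFor(int size_in_bytes) {
  if (size_in_bytes <= kSmallMax) return 0;
  if (size_in_bytes <= kMediumMax) return 1;
  if (size_in_bytes <= kLargeMax) return 2;
  return 3;
}

int main() {
  printf("%d %d %d\n", BucketFor(64), BucketFor(32 * 1024), BucketFor(1 << 20));  // 0 2 3
  return 0;
}
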
1979 1832
1980 1833
1981 // Allocation on the old space free list. If it succeeds then a new linear 1834 // Allocation on the old space free list. If it succeeds then a new linear
1982 // allocation space has been set up with the top and limit of the space. If 1835 // allocation space has been set up with the top and limit of the space. If
1983 // the allocation fails then NULL is returned, and the caller can perform a GC 1836 // the allocation fails then NULL is returned, and the caller can perform a GC
1984 // or allocate a new page before retrying. 1837 // or allocate a new page before retrying.
1985 HeapObject* FreeList::Allocate(int size_in_bytes) { 1838 HeapObject* FreeList::Allocate(int size_in_bytes) {
1986 ASSERT(0 < size_in_bytes); 1839 ASSERT(0 < size_in_bytes);
1987 ASSERT(size_in_bytes <= kMaxBlockSize); 1840 ASSERT(size_in_bytes <= kMaxBlockSize);
1988 ASSERT(IsAligned(size_in_bytes, kPointerSize)); 1841 ASSERT(IsAligned(size_in_bytes, kPointerSize));
1989 // Don't free list allocate if there is linear space available. 1842 // Don't free list allocate if there is linear space available.
1990 ASSERT(owner_->limit() - owner_->top() < size_in_bytes); 1843 ASSERT(owner_->limit() - owner_->top() < size_in_bytes);
1991 1844
1992 int new_node_size = 0; 1845 int new_node_size = 0;
1993 FreeListNode* new_node = 1846 FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);
1994 FindNodeFor(size_in_bytes, &new_node_size, owner_->limit());
1995 if (new_node == NULL) return NULL; 1847 if (new_node == NULL) return NULL;
1996 1848
1997 if (new_node->address() == owner_->limit()) { 1849 available_ -= new_node_size;
1998 // The new freelist node we were given is an extension of the one we had
1999 // last. This is a common thing to happen when we extend a small page by
2000 // committing more memory. In this case we just add the new node to the
2001 // linear allocation area and recurse.
2002 owner_->Allocate(new_node_size);
2003 owner_->SetTop(owner_->top(), new_node->address() + new_node_size);
2004 MaybeObject* allocation = owner_->AllocateRaw(size_in_bytes);
2005 Object* answer;
2006 if (!allocation->ToObject(&answer)) return NULL;
2007 return HeapObject::cast(answer);
2008 }
2009
2010 ASSERT(IsVeryLong() || available_ == SumFreeLists()); 1850 ASSERT(IsVeryLong() || available_ == SumFreeLists());
2011 1851
2012 int bytes_left = new_node_size - size_in_bytes; 1852 int bytes_left = new_node_size - size_in_bytes;
2013 ASSERT(bytes_left >= 0); 1853 ASSERT(bytes_left >= 0);
2014 1854
2015 int old_linear_size = static_cast<int>(owner_->limit() - owner_->top()); 1855 int old_linear_size = static_cast<int>(owner_->limit() - owner_->top());
2016 // Mark the old linear allocation area with a free space map so it can be 1856 // Mark the old linear allocation area with a free space map so it can be
2017 // skipped when scanning the heap. This also puts it back in the free list 1857 // skipped when scanning the heap. This also puts it back in the free list
2018 // if it is big enough. 1858 // if it is big enough.
2019 if (old_linear_size != 0) { 1859 owner_->Free(owner_->top(), old_linear_size);
2020 owner_->AddToFreeLists(owner_->top(), old_linear_size);
2021 }
2022 1860
2023 #ifdef DEBUG 1861 #ifdef DEBUG
2024 for (int i = 0; i < size_in_bytes / kPointerSize; i++) { 1862 for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
2025 reinterpret_cast<Object**>(new_node->address())[i] = Smi::FromInt(0); 1863 reinterpret_cast<Object**>(new_node->address())[i] = Smi::FromInt(0);
2026 } 1864 }
2027 #endif 1865 #endif
2028 1866
2029 owner_->heap()->incremental_marking()->OldSpaceStep( 1867 owner_->heap()->incremental_marking()->OldSpaceStep(
2030 size_in_bytes - old_linear_size); 1868 size_in_bytes - old_linear_size);
2031 1869
2032 // The old-space-step might have finished sweeping and restarted marking. 1870 // The old-space-step might have finished sweeping and restarted marking.
2033 // Verify that it did not turn the page of the new node into an evacuation 1871 // Verify that it did not turn the page of the new node into an evacuation
2034 // candidate. 1872 // candidate.
2035 ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_node)); 1873 ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
2036 1874
2037 const int kThreshold = IncrementalMarking::kAllocatedThreshold; 1875 const int kThreshold = IncrementalMarking::kAllocatedThreshold;
2038 1876
2039 // Memory in the linear allocation area is counted as allocated. We may free 1877 // Memory in the linear allocation area is counted as allocated. We may free
2040 // a little of this again immediately - see below. 1878 // a little of this again immediately - see below.
2041 owner_->Allocate(new_node_size); 1879 owner_->Allocate(new_node_size);
2042 1880
2043 if (bytes_left > kThreshold && 1881 if (bytes_left > kThreshold &&
2044 owner_->heap()->incremental_marking()->IsMarkingIncomplete() && 1882 owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
2045 FLAG_incremental_marking_steps) { 1883 FLAG_incremental_marking_steps) {
2046 int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold); 1884 int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
2047 // We don't want to give too large linear areas to the allocator while 1885 // We don't want to give too large linear areas to the allocator while
2048 // incremental marking is going on, because we won't check again whether 1886 // incremental marking is going on, because we won't check again whether
2049 // we want to do another increment until the linear area is used up. 1887 // we want to do another increment until the linear area is used up.
2050 owner_->AddToFreeLists(new_node->address() + size_in_bytes + linear_size, 1888 owner_->Free(new_node->address() + size_in_bytes + linear_size,
2051 new_node_size - size_in_bytes - linear_size); 1889 new_node_size - size_in_bytes - linear_size);
2052 owner_->SetTop(new_node->address() + size_in_bytes, 1890 owner_->SetTop(new_node->address() + size_in_bytes,
2053 new_node->address() + size_in_bytes + linear_size); 1891 new_node->address() + size_in_bytes + linear_size);
2054 } else if (bytes_left > 0) { 1892 } else if (bytes_left > 0) {
2055 // Normally we give the rest of the node to the allocator as its new 1893 // Normally we give the rest of the node to the allocator as its new
2056 // linear allocation area. 1894 // linear allocation area.
2057 owner_->SetTop(new_node->address() + size_in_bytes, 1895 owner_->SetTop(new_node->address() + size_in_bytes,
2058 new_node->address() + new_node_size); 1896 new_node->address() + new_node_size);
2059 } else { 1897 } else {
2060 ASSERT(bytes_left == 0);
2061 // TODO(gc) Try not freeing linear allocation region when bytes_left 1898 // TODO(gc) Try not freeing linear allocation region when bytes_left
2062 // are zero. 1899 // are zero.
2063 owner_->SetTop(NULL, NULL); 1900 owner_->SetTop(NULL, NULL);
2064 } 1901 }
2065 1902
2066 return new_node; 1903 return new_node;
2067 } 1904 }
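
The tail of Allocate (in both versions) splits the node it found into the requested object, a linear allocation area for subsequent bump-pointer allocation, and, while incremental marking is in progress, a remainder handed straight back to the free list so marking gets another chance to run soon. A rough sketch of that split; kThreshold stands in for IncrementalMarking::kAllocatedThreshold and the alignment rounding of the linear area is omitted:

#include <cstdio>

struct Split {
  int object_bytes;       // returned to the caller
  int linear_area_bytes;  // becomes the new top..limit window
  int refreed_bytes;      // put back on the free list immediately
};

static Split SplitNode(int node_size, int size_in_bytes, bool marking_incomplete,
                       int kThreshold) {
  Split s = {size_in_bytes, 0, 0};
  int bytes_left = node_size - size_in_bytes;
  if (marking_incomplete && bytes_left > kThreshold) {
    s.linear_area_bytes = kThreshold;           // keep the window small during marking
    s.refreed_bytes = bytes_left - kThreshold;  // free the rest again right away
  } else {
    s.linear_area_bytes = bytes_left;           // whole remainder becomes the window
  }
  return s;
}

int main() {
  Split s = SplitNode(4096, 128, true, 1024);
  printf("%d %d %d\n", s.object_bytes, s.linear_area_bytes, s.refreed_bytes);  // 128 1024 2944
  return 0;
}
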
2068 1905
2069 1906
2070 static intptr_t CountFreeListItemsInList(FreeListNode* n, Page* p) { 1907 static intptr_t CountFreeListItemsInList(FreeListNode* n, Page* p) {
(...skipping 112 matching lines...)
2183 // or because we have lowered the limit in order to get periodic incremental 2020 // or because we have lowered the limit in order to get periodic incremental
2184 // marking. The most reliable way to ensure that there is linear space is 2021 // marking. The most reliable way to ensure that there is linear space is
2185 // to do the allocation, then rewind the limit. 2022 // to do the allocation, then rewind the limit.
2186 ASSERT(bytes <= InitialCapacity()); 2023 ASSERT(bytes <= InitialCapacity());
2187 MaybeObject* maybe = AllocateRaw(bytes); 2024 MaybeObject* maybe = AllocateRaw(bytes);
2188 Object* object = NULL; 2025 Object* object = NULL;
2189 if (!maybe->ToObject(&object)) return false; 2026 if (!maybe->ToObject(&object)) return false;
2190 HeapObject* allocation = HeapObject::cast(object); 2027 HeapObject* allocation = HeapObject::cast(object);
2191 Address top = allocation_info_.top; 2028 Address top = allocation_info_.top;
2192 if ((top - bytes) == allocation->address()) { 2029 if ((top - bytes) == allocation->address()) {
2193 Address new_top = allocation->address(); 2030 allocation_info_.top = allocation->address();
2194 ASSERT(new_top >= Page::FromAddress(new_top - 1)->ObjectAreaStart());
2195 allocation_info_.top = new_top;
2196 return true; 2031 return true;
2197 } 2032 }
2198 // There may be a borderline case here where the allocation succeeded, but 2033 // There may be a borderline case here where the allocation succeeded, but
2199 // the limit and top have moved on to a new page. In that case we try again. 2034 // the limit and top have moved on to a new page. In that case we try again.
2200 return ReserveSpace(bytes); 2035 return ReserveSpace(bytes);
2201 } 2036 }
2202 2037
2203 2038
2204 void PagedSpace::PrepareForMarkCompact() { 2039 void PagedSpace::PrepareForMarkCompact() {
2205 // We don't have a linear allocation area while sweeping. It will be restored 2040 // We don't have a linear allocation area while sweeping. It will be restored
2206 // on the first allocation after the sweep. 2041 // on the first allocation after the sweep.
2207 // Mark the old linear allocation area with a free space map so it can be 2042 // Mark the old linear allocation area with a free space map so it can be
2208 // skipped when scanning the heap. 2043 // skipped when scanning the heap.
2209 int old_linear_size = static_cast<int>(limit() - top()); 2044 int old_linear_size = static_cast<int>(limit() - top());
2210 AddToFreeLists(top(), old_linear_size); 2045 Free(top(), old_linear_size);
2211 SetTop(NULL, NULL); 2046 SetTop(NULL, NULL);
2212 2047
2213 // Stop lazy sweeping and clear marking bits for unswept pages. 2048 // Stop lazy sweeping and clear marking bits for unswept pages.
2214 if (first_unswept_page_ != NULL) { 2049 if (first_unswept_page_ != NULL) {
2215 Page* p = first_unswept_page_; 2050 Page* p = first_unswept_page_;
2216 do { 2051 do {
2217 // Do not use ShouldBeSweptLazily predicate here. 2052 // Do not use ShouldBeSweptLazily predicate here.
2218 // New evacuation candidates were selected but they still have 2053 // New evacuation candidates were selected but they still have
2219 // to be swept before collection starts. 2054 // to be swept before collection starts.
2220 if (!p->WasSwept()) { 2055 if (!p->WasSwept()) {
(...skipping 22 matching lines...)
2243 if (new_top <= allocation_info_.limit) return true; 2078 if (new_top <= allocation_info_.limit) return true;
2244 2079
2245 HeapObject* new_area = free_list_.Allocate(size_in_bytes); 2080 HeapObject* new_area = free_list_.Allocate(size_in_bytes);
2246 if (new_area == NULL) new_area = SlowAllocateRaw(size_in_bytes); 2081 if (new_area == NULL) new_area = SlowAllocateRaw(size_in_bytes);
2247 if (new_area == NULL) return false; 2082 if (new_area == NULL) return false;
2248 2083
2249 int old_linear_size = static_cast<int>(limit() - top()); 2084 int old_linear_size = static_cast<int>(limit() - top());
2250 // Mark the old linear allocation area with a free space so it can be 2085 // Mark the old linear allocation area with a free space so it can be
2251 // skipped when scanning the heap. This also puts it back in the free list 2086 // skipped when scanning the heap. This also puts it back in the free list
2252 // if it is big enough. 2087 // if it is big enough.
2253 AddToFreeLists(top(), old_linear_size); 2088 Free(top(), old_linear_size);
2254 2089
2255 SetTop(new_area->address(), new_area->address() + size_in_bytes); 2090 SetTop(new_area->address(), new_area->address() + size_in_bytes);
2256 // The AddToFreeLists call above will reduce the size of the space in the 2091 Allocate(size_in_bytes);
2257 // allocation stats. We don't need to add this linear area to the size
2258 // with an Allocate(size_in_bytes) call here, because the
2259 // free_list_.Allocate() call above already accounted for this memory.
2260 return true; 2092 return true;
2261 } 2093 }
2262 2094
2263 2095
2264 // You have to call this last, since the implementation from PagedSpace 2096 // You have to call this last, since the implementation from PagedSpace
2265 // doesn't know that memory was 'promised' to large object space. 2097 // doesn't know that memory was 'promised' to large object space.
2266 bool LargeObjectSpace::ReserveSpace(int bytes) { 2098 bool LargeObjectSpace::ReserveSpace(int bytes) {
2267 return heap()->OldGenerationSpaceAvailable() >= bytes; 2099 return heap()->OldGenerationSpaceAvailable() >= bytes;
2268 } 2100 }
2269 2101
(...skipping 60 matching lines...)
2330 2162
2331 // Free list allocation failed and there is no next page. Fail if we have 2163 // Free list allocation failed and there is no next page. Fail if we have
2332 // hit the old generation size limit that should cause a garbage 2164 // hit the old generation size limit that should cause a garbage
2333 // collection. 2165 // collection.
2334 if (!heap()->always_allocate() && 2166 if (!heap()->always_allocate() &&
2335 heap()->OldGenerationAllocationLimitReached()) { 2167 heap()->OldGenerationAllocationLimitReached()) {
2336 return NULL; 2168 return NULL;
2337 } 2169 }
2338 2170
2339 // Try to expand the space and allocate in the new next page. 2171 // Try to expand the space and allocate in the new next page.
2340 if (Expand(size_in_bytes)) { 2172 if (Expand()) {
2341 return free_list_.Allocate(size_in_bytes); 2173 return free_list_.Allocate(size_in_bytes);
2342 } 2174 }
2343 2175
2344 // Last ditch, sweep all the remaining pages to try to find space. This may 2176 // Last ditch, sweep all the remaining pages to try to find space. This may
2345 // cause a pause. 2177 // cause a pause.
2346 if (!IsSweepingComplete()) { 2178 if (!IsSweepingComplete()) {
2347 AdvanceSweeper(kMaxInt); 2179 AdvanceSweeper(kMaxInt);
2348 2180
2349 // Retry the free list allocation. 2181 // Retry the free list allocation.
2350 HeapObject* object = free_list_.Allocate(size_in_bytes); 2182 HeapObject* object = free_list_.Allocate(size_in_bytes);
(...skipping 340 matching lines...)
2691 if (previous == NULL) { 2523 if (previous == NULL) {
2692 first_page_ = current; 2524 first_page_ = current;
2693 } else { 2525 } else {
2694 previous->set_next_page(current); 2526 previous->set_next_page(current);
2695 } 2527 }
2696 2528
2697 // Free the chunk. 2529 // Free the chunk.
2698 heap()->mark_compact_collector()->ReportDeleteIfNeeded( 2530 heap()->mark_compact_collector()->ReportDeleteIfNeeded(
2699 object, heap()->isolate()); 2531 object, heap()->isolate());
2700 size_ -= static_cast<int>(page->size()); 2532 size_ -= static_cast<int>(page->size());
2701 ASSERT(size_ >= 0);
2702 objects_size_ -= object->Size(); 2533 objects_size_ -= object->Size();
2703 page_count_--; 2534 page_count_--;
2704 2535
2705 if (is_pointer_object) { 2536 if (is_pointer_object) {
2706 heap()->QueueMemoryChunkForFree(page); 2537 heap()->QueueMemoryChunkForFree(page);
2707 } else { 2538 } else {
2708 heap()->isolate()->memory_allocator()->Free(page); 2539 heap()->isolate()->memory_allocator()->Free(page);
2709 } 2540 }
2710 } 2541 }
2711 } 2542 }
(...skipping 118 matching lines...)
2830 object->ShortPrint(); 2661 object->ShortPrint();
2831 PrintF("\n"); 2662 PrintF("\n");
2832 } 2663 }
2833 printf(" --------------------------------------\n"); 2664 printf(" --------------------------------------\n");
2834 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); 2665 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
2835 } 2666 }
2836 2667
2837 #endif // DEBUG 2668 #endif // DEBUG
2838 2669
2839 } } // namespace v8::internal 2670 } } // namespace v8::internal