Chromium Code Reviews

Side by Side Diff: src/spaces.cc

Issue 6639024: Get rid of distinction between below- and above-watermark in page allocation.... (Closed) Base URL: http://v8.googlecode.com/svn/branches/experimental/gc/
Patch Set: '' Created 9 years, 9 months ago
OLD | NEW
1 // Copyright 2006-2010 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution. 11 // with the distribution.
(...skipping 23 matching lines...)
35 namespace v8 { 35 namespace v8 {
36 namespace internal { 36 namespace internal {
37 37
38 // For contiguous spaces, top should be in the space (or at the end) and limit 38 // For contiguous spaces, top should be in the space (or at the end) and limit
39 // should be the end of the space. 39 // should be the end of the space.
40 #define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \ 40 #define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
41 ASSERT((space).low() <= (info).top \ 41 ASSERT((space).low() <= (info).top \
42 && (info).top <= (space).high() \ 42 && (info).top <= (space).high() \
43 && (info).limit == (space).high()) 43 && (info).limit == (space).high())
44 44
45 intptr_t Page::watermark_invalidated_mark_ = 1 << Page::WATERMARK_INVALIDATED;
46
47 // ---------------------------------------------------------------------------- 45 // ----------------------------------------------------------------------------
48 // HeapObjectIterator 46 // HeapObjectIterator
49 47
50 HeapObjectIterator::HeapObjectIterator(PagedSpace* space) { 48 HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
51 Initialize(space->bottom(), space->top(), NULL); 49 // You can't actually iterate over the anchor page. It is not a real page,
50 // just an anchor for the double linked page list. Initialize as if we have
51 // reached the end of the anchor page, then the first iteration will move on
52 // to the first page.
53 Initialize(space,
54 NULL,
55 NULL,
56 kAllPagesInSpace,
57 NULL);
52 } 58 }
53 59
54 60
55 HeapObjectIterator::HeapObjectIterator(PagedSpace* space, 61 HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
56 HeapObjectCallback size_func) { 62 HeapObjectCallback size_func) {
57 Initialize(space->bottom(), space->top(), size_func); 63 // You can't actually iterate over the anchor page. It is not a real page,
64 // just an anchor for the double linked page list. Initialize the current
65 // address and end as NULL, then the first iteration will move on
66 // to the first page.
67 Initialize(space,
68 NULL,
69 NULL,
70 kAllPagesInSpace,
71 size_func);
58 } 72 }
59 73
60 74
61 HeapObjectIterator::HeapObjectIterator(Page* page, 75 HeapObjectIterator::HeapObjectIterator(Page* page,
62 HeapObjectCallback size_func) { 76 HeapObjectCallback size_func) {
63 Initialize(page->ObjectAreaStart(), page->AllocationTop(), size_func); 77 Initialize(page->owner(),
78 page->ObjectAreaStart(),
79 page->ObjectAreaEnd(),
80 kOnePageOnly,
81 size_func);
82 ASSERT(!page->IsFlagSet(Page::WAS_SWEPT_CONSERVATIVELY));
64 } 83 }
65 84
66 85
67 void HeapObjectIterator::Initialize(Address cur, Address end, 86 void HeapObjectIterator::Initialize(PagedSpace* space,
87 Address cur, Address end,
88 HeapObjectIterator::PageMode mode,
68 HeapObjectCallback size_f) { 89 HeapObjectCallback size_f) {
90 space_ = space;
69 cur_addr_ = cur; 91 cur_addr_ = cur;
70 end_addr_ = end; 92 cur_end_ = end;
71 end_page_ = Page::FromAllocationTop(end); 93 page_mode_ = mode;
72 size_func_ = size_f; 94 size_func_ = size_f;
73 Page* p = Page::FromAllocationTop(cur_addr_);
74 cur_limit_ = (p == end_page_) ? end_addr_ : p->AllocationTop();
75
76 if (!p->IsFlagSet(Page::IS_CONTINUOUS)) {
77 ASSERT(IncrementalMarking::state() == IncrementalMarking::STOPPED);
78 cur_addr_ = Marking::FirstLiveObject(cur_addr_, cur_limit_);
79 if (cur_addr_ > cur_limit_) cur_addr_ = cur_limit_;
80 }
81 95
82 #ifdef DEBUG 96 #ifdef DEBUG
83 Verify(); 97 Verify();
84 #endif 98 #endif
85 } 99 }
86 100
87 101
88 HeapObject* HeapObjectIterator::FromNextPage() { 102 // We have hit the end of the page and should advance to the next block of
89 if (cur_addr_ == end_addr_) return NULL; 103 // objects. This happens at the end of the page.
90 104 bool HeapObjectIterator::AdvanceToNextPage() {
91 Page* cur_page = Page::FromAllocationTop(cur_addr_); 105 ASSERT(cur_addr_ == cur_end_);
106 if (page_mode_ == kOnePageOnly) return false;
107 Page* cur_page;
108 if (cur_addr_ == NULL) {
109 cur_page = space_->anchor();
110 } else {
111 cur_page = Page::FromAddress(cur_addr_ - 1);
112 ASSERT(cur_addr_ == cur_page->ObjectAreaEnd());
113 }
92 cur_page = cur_page->next_page(); 114 cur_page = cur_page->next_page();
93 ASSERT(cur_page->is_valid()); 115 if (cur_page == space_->anchor()) return false;
94
95 cur_addr_ = cur_page->ObjectAreaStart(); 116 cur_addr_ = cur_page->ObjectAreaStart();
96 cur_limit_ = (cur_page == end_page_) ? end_addr_ : cur_page->AllocationTop(); 117 cur_end_ = cur_page->ObjectAreaEnd();
97 118 ASSERT(!cur_page->IsFlagSet(Page::WAS_SWEPT_CONSERVATIVELY));
98 if (!cur_page->IsFlagSet(Page::IS_CONTINUOUS)) {
99 ASSERT(IncrementalMarking::state() == IncrementalMarking::STOPPED);
100 cur_addr_ = Marking::FirstLiveObject(cur_addr_, cur_limit_);
101 if (cur_addr_ > cur_limit_) cur_addr_ = cur_limit_;
102 }
103
104 if (cur_addr_ == end_addr_) return NULL;
105 ASSERT(cur_addr_ < cur_limit_);
106 #ifdef DEBUG
107 Verify();
108 #endif
109 return FromCurrentPage();
110 }
111
112
113 void HeapObjectIterator::AdvanceUsingMarkbits() {
114 ASSERT(IncrementalMarking::state() == IncrementalMarking::STOPPED); 119 ASSERT(IncrementalMarking::state() == IncrementalMarking::STOPPED);
115 HeapObject* obj = HeapObject::FromAddress(cur_addr_); 120 return true;
116 int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
117 ASSERT_OBJECT_SIZE(obj_size);
118 cur_addr_ = Marking::NextLiveObject(obj,
119 obj_size,
120 cur_limit_);
121 if (cur_addr_ > cur_limit_) cur_addr_ = cur_limit_;
122 } 121 }
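
The rewritten iterator above walks the space's circular page list and stops when next_page() wraps back to the sentinel anchor, instead of tracking a per-page allocation top. A minimal, self-contained sketch of that termination pattern follows; Page and Space here are simplified stand-ins for the V8 classes, not the real definitions.

    #include <cstdio>

    // Simplified stand-ins for the sentinel-based page list used above.
    struct Page {
      Page* next;
      int id;  // Payload for demonstration only.
    };

    struct Space {
      Page anchor;  // Sentinel: not a real page, never visited.
      Space() { anchor.next = &anchor; anchor.id = -1; }
      void Append(Page* p) {  // Insert before the anchor (at the tail).
        Page* last = &anchor;
        while (last->next != &anchor) last = last->next;
        last->next = p;
        p->next = &anchor;
      }
    };

    int main() {
      Space space;
      Page a{nullptr, 0}, b{nullptr, 1};
      space.Append(&a);
      space.Append(&b);
      // Iteration starts just past the anchor and stops when it wraps back to
      // it, mirroring HeapObjectIterator::AdvanceToNextPage() above.
      for (Page* p = space.anchor.next; p != &space.anchor; p = p->next) {
        std::printf("visiting page %d\n", p->id);
      }
      return 0;
    }
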
123 122
124 123
125 #ifdef DEBUG 124 #ifdef DEBUG
126 void HeapObjectIterator::Verify() { 125 void HeapObjectIterator::Verify() {
127 Page* p = Page::FromAllocationTop(cur_addr_); 126 // TODO(gc): We should do something here.
128 ASSERT(p == Page::FromAllocationTop(cur_limit_));
129 ASSERT(p->Offset(cur_addr_) <= p->Offset(cur_limit_));
130 } 127 }
131 #endif 128 #endif
132 129
133 130
134 // ----------------------------------------------------------------------------- 131 // -----------------------------------------------------------------------------
135 // PageIterator
136
137 PageIterator::PageIterator(PagedSpace* space, Mode mode) : space_(space) {
138 prev_page_ = NULL;
139 switch (mode) {
140 case PAGES_IN_USE:
141 stop_page_ = space->AllocationTopPage();
142 break;
143 case ALL_PAGES:
144 #ifdef DEBUG
145 // Verify that the cached last page in the space is actually the
146 // last page.
147 for (Page* p = space->first_page_; p->is_valid(); p = p->next_page()) {
148 if (!p->next_page()->is_valid()) {
149 ASSERT(space->last_page_ == p);
150 }
151 }
152 #endif
153 stop_page_ = space->last_page_;
154 break;
155 }
156 }
157
158
159 // -----------------------------------------------------------------------------
160 // CodeRange 132 // CodeRange
161 133
162 List<CodeRange::FreeBlock> CodeRange::free_list_(0); 134 List<CodeRange::FreeBlock> CodeRange::free_list_(0);
163 List<CodeRange::FreeBlock> CodeRange::allocation_list_(0); 135 List<CodeRange::FreeBlock> CodeRange::allocation_list_(0);
164 int CodeRange::current_allocation_block_index_ = 0; 136 int CodeRange::current_allocation_block_index_ = 0;
165 VirtualMemory* CodeRange::code_range_ = NULL; 137 VirtualMemory* CodeRange::code_range_ = NULL;
166 138
167 139
168 bool CodeRange::Setup(const size_t requested) { 140 bool CodeRange::Setup(const size_t requested) {
169 ASSERT(code_range_ == NULL); 141 ASSERT(code_range_ == NULL);
(...skipping 236 matching lines...)
406 executable == EXECUTABLE)) { 378 executable == EXECUTABLE)) {
407 VirtualMemory::ReleaseRegion(base, *allocated_size); 379 VirtualMemory::ReleaseRegion(base, *allocated_size);
408 size_ -= *allocated_size; 380 size_ -= *allocated_size;
409 return NULL; 381 return NULL;
410 } 382 }
411 383
412 return base; 384 return base;
413 } 385 }
414 386
415 387
388 void Page::InitializeAsAnchor(PagedSpace* owner) {
389 owner_ = owner;
390 set_prev_page(this);
391 set_next_page(this);
392 }
393
394
395 void MemoryChunk::InsertAfter(MemoryChunk* other) {
396 next_chunk_ = other->next_chunk_;
397 prev_chunk_ = other;
398 other->next_chunk_->prev_chunk_ = this;
399 other->next_chunk_ = this;
400 }
401
402
403 void MemoryChunk::Unlink() {
404 next_chunk_->prev_chunk_ = prev_chunk_;
405 prev_chunk_->next_chunk_ = next_chunk_;
406 prev_chunk_ = NULL;
407 next_chunk_ = NULL;
408 }
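
InsertAfter() and Unlink() above are the usual splice operations on a circular doubly linked list whose sentinel is the anchor set up by InitializeAsAnchor() (a node pointing at itself in both directions). The following sketch shows the same invariants with a generic placeholder Node type rather than MemoryChunk.

    #include <cassert>

    struct Node {
      Node* prev;
      Node* next;
      void InitAsSentinel() { prev = next = this; }  // Mirrors InitializeAsAnchor().
      void InsertAfter(Node* other) {                // Splice this after 'other'.
        next = other->next;
        prev = other;
        other->next->prev = this;
        other->next = this;
      }
      void Unlink() {                                // Remove this from its list.
        next->prev = prev;
        prev->next = next;
        prev = next = nullptr;
      }
    };

    int main() {
      Node anchor, a, b;
      anchor.InitAsSentinel();
      a.InsertAfter(&anchor);  // List: anchor <-> a
      b.InsertAfter(&a);       // List: anchor <-> a <-> b
      assert(anchor.next == &a && a.next == &b && b.next == &anchor);
      a.Unlink();              // List: anchor <-> b
      assert(anchor.next == &b && b.prev == &anchor);
      return 0;
    }
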
409
410
416 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size, 411 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
417 Executability executable, 412 Executability executable,
418 Space* owner) { 413 Space* owner) {
419 size_t chunk_size = MemoryChunk::kBodyOffset + body_size; 414 size_t chunk_size = MemoryChunk::kObjectStartOffset + body_size;
420 Address base = NULL; 415 Address base = NULL;
421 if (executable == EXECUTABLE) { 416 if (executable == EXECUTABLE) {
422 // Check executable memory limit. 417 // Check executable memory limit.
423 if (size_executable_ + chunk_size > capacity_executable_) { 418 if (size_executable_ + chunk_size > capacity_executable_) {
424 LOG(StringEvent("MemoryAllocator::AllocateRawMemory", 419 LOG(StringEvent("MemoryAllocator::AllocateRawMemory",
425 "V8 Executable Allocation capacity exceeded")); 420 "V8 Executable Allocation capacity exceeded"));
426 return NULL; 421 return NULL;
427 } 422 }
428 423
429 // Allocate executable memory either from code range or from the 424 // Allocate executable memory either from code range or from the
(...skipping 37 matching lines...)
467 return MemoryChunk::Initialize(base, chunk_size, executable, owner); 462 return MemoryChunk::Initialize(base, chunk_size, executable, owner);
468 } 463 }
469 464
470 465
471 Page* MemoryAllocator::AllocatePage(PagedSpace* owner, 466 Page* MemoryAllocator::AllocatePage(PagedSpace* owner,
472 Executability executable) { 467 Executability executable) {
473 MemoryChunk* chunk = AllocateChunk(Page::kObjectAreaSize, executable, owner); 468 MemoryChunk* chunk = AllocateChunk(Page::kObjectAreaSize, executable, owner);
474 469
475 if (chunk == NULL) return NULL; 470 if (chunk == NULL) return NULL;
476 471
477 return Page::Initialize(chunk); 472 return Page::Initialize(chunk, executable, owner);
478 } 473 }
479 474
480 475
481 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size, 476 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
482 Executability executable, 477 Executability executable,
483 Space* owner) { 478 Space* owner) {
484 MemoryChunk* chunk = AllocateChunk(object_size, executable, owner); 479 MemoryChunk* chunk = AllocateChunk(object_size, executable, owner);
485 if (chunk == NULL) return NULL; 480 if (chunk == NULL) return NULL;
486 return LargePage::Initialize(chunk); 481 return LargePage::Initialize(chunk);
487 } 482 }
(...skipping 94 matching lines...)
582 capacity_, size_, static_cast<int>(pct*100)); 577 capacity_, size_, static_cast<int>(pct*100));
583 } 578 }
584 #endif 579 #endif
585 580
586 // ----------------------------------------------------------------------------- 581 // -----------------------------------------------------------------------------
587 // PagedSpace implementation 582 // PagedSpace implementation
588 583
589 PagedSpace::PagedSpace(intptr_t max_capacity, 584 PagedSpace::PagedSpace(intptr_t max_capacity,
590 AllocationSpace id, 585 AllocationSpace id,
591 Executability executable) 586 Executability executable)
592 : Space(id, executable) { 587 : Space(id, executable),
588 free_list_(this),
589 was_swept_conservatively_(false) {
593 max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize) 590 max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
594 * Page::kObjectAreaSize; 591 * Page::kObjectAreaSize;
595 accounting_stats_.Clear(); 592 accounting_stats_.Clear();
596 593
597 allocation_info_.top = NULL; 594 allocation_info_.top = NULL;
598 allocation_info_.limit = NULL; 595 allocation_info_.limit = NULL;
596
597 anchor_.InitializeAsAnchor(this);
599 } 598 }
600 599
601 600
602 bool PagedSpace::Setup() { 601 bool PagedSpace::Setup() {
603 if (HasBeenSetup()) return false;
604
605 // Maximum space capacity can not be less than single page size.
606 if (max_capacity_ < Page::kPageSize) return false;
607
608 first_page_ = MemoryAllocator::AllocatePage(this, executable());
609 if (!first_page_->is_valid()) return false;
610
611 // We are sure that the first page is valid and that we have at least one
612 // page.
613 accounting_stats_.ExpandSpace(Page::kObjectAreaSize);
614 ASSERT(Capacity() <= max_capacity_);
615
616 last_page_ = first_page_;
617 ASSERT(!last_page_->next_page()->is_valid());
618
619 // Use first_page_ for allocation.
620 SetAllocationInfo(&allocation_info_, first_page_);
621
622 return true; 602 return true;
623 } 603 }
624 604
625 605
626 bool PagedSpace::HasBeenSetup() { 606 bool PagedSpace::HasBeenSetup() {
627 return (Capacity() > 0); 607 return true;
628 } 608 }
629 609
630 610
631 void PagedSpace::TearDown() { 611 void PagedSpace::TearDown() {
632 Page* next = NULL; 612 PageIterator iterator(this);
633 for (Page* p = first_page_; p->is_valid(); p = next) { 613 while (iterator.has_next()) {
634 next = p->next_page(); 614 MemoryAllocator::Free(iterator.next());
635 MemoryAllocator::Free(p);
636 } 615 }
637 first_page_ = NULL; 616 anchor_.set_next_page(&anchor_);
638 last_page_ = NULL; 617 anchor_.set_prev_page(&anchor_);
639 accounting_stats_.Clear(); 618 accounting_stats_.Clear();
640 } 619 }
641 620
642 621
643 #ifdef ENABLE_HEAP_PROTECTION 622 #ifdef ENABLE_HEAP_PROTECTION
644 623
645 void PagedSpace::Protect() { 624 void PagedSpace::Protect() {
646 Page* page = first_page_; 625 Page* page = first_page_;
647 while (page->is_valid()) { 626 while (page->is_valid()) {
648 MemoryAllocator::ProtectChunkFromPage(page); 627 MemoryAllocator::ProtectChunkFromPage(page);
649 page = MemoryAllocator::FindLastPageInSameChunk(page)->next_page(); 628 page = MemoryAllocator::FindLastPageInSameChunk(page)->next_page();
650 } 629 }
651 } 630 }
652 631
653 632
654 void PagedSpace::Unprotect() { 633 void PagedSpace::Unprotect() {
655 Page* page = first_page_; 634 Page* page = first_page_;
656 while (page->is_valid()) { 635 while (page->is_valid()) {
657 MemoryAllocator::UnprotectChunkFromPage(page); 636 MemoryAllocator::UnprotectChunkFromPage(page);
658 page = MemoryAllocator::FindLastPageInSameChunk(page)->next_page(); 637 page = MemoryAllocator::FindLastPageInSameChunk(page)->next_page();
659 } 638 }
660 } 639 }
661 640
662 #endif 641 #endif
663 642
664 643
665 MaybeObject* PagedSpace::FindObject(Address addr) { 644 MaybeObject* PagedSpace::FindObject(Address addr) {
666 // Note: this function can only be called before or after mark-compact GC 645 // Note: this function can only be called on precisely swept spaces.
667 // because it accesses map pointers.
668 ASSERT(!MarkCompactCollector::in_use()); 646 ASSERT(!MarkCompactCollector::in_use());
669 647
670 if (!Contains(addr)) return Failure::Exception(); 648 if (!Contains(addr)) return Failure::Exception();
671 649
672 Page* p = Page::FromAddress(addr); 650 Page* p = Page::FromAddress(addr);
673 ASSERT(IsUsed(p)); 651 HeapObjectIterator it(p, NULL);
674 Address cur = p->ObjectAreaStart(); 652 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
675 Address end = p->AllocationTop(); 653 Address cur = obj->address();
676 while (cur < end) {
677 HeapObject* obj = HeapObject::FromAddress(cur);
678 Address next = cur + obj->Size(); 654 Address next = cur + obj->Size();
679 if ((cur <= addr) && (addr < next)) return obj; 655 if ((cur <= addr) && (addr < next)) return obj;
680 cur = next;
681 } 656 }
682 657
683 UNREACHABLE(); 658 UNREACHABLE();
684 return Failure::Exception(); 659 return Failure::Exception();
685 } 660 }
686 661
687 662
688 bool PagedSpace::IsUsed(Page* page) { 663 void PagedSpace::SetAllocationInfo(Address top, Address limit) {
689 PageIterator it(this, PageIterator::PAGES_IN_USE); 664 Free(allocation_info_.top, allocation_info_.limit - allocation_info_.top);
690 while (it.has_next()) { 665 allocation_info_.top = top;
691 if (page == it.next()) return true; 666 allocation_info_.limit = limit;
692 } 667 ASSERT(allocation_info_.VerifyPagedAllocation());
693 return false;
694 }
695
696
697 void PagedSpace::SetAllocationInfo(AllocationInfo* alloc_info, Page* p) {
698 alloc_info->top = p->ObjectAreaStart();
699 alloc_info->limit = p->ObjectAreaEnd();
700 ASSERT(alloc_info->VerifyPagedAllocation());
701 } 668 }
702 669
703 670
704 bool PagedSpace::Expand() { 671 bool PagedSpace::Expand() {
705 ASSERT(max_capacity_ % Page::kObjectAreaSize == 0); 672 ASSERT(max_capacity_ % Page::kObjectAreaSize == 0);
706 ASSERT(Capacity() % Page::kObjectAreaSize == 0); 673 ASSERT(Capacity() % Page::kObjectAreaSize == 0);
707 674
708 if (Capacity() == max_capacity_) return false; 675 if (Capacity() == max_capacity_) return false;
709 676
710 ASSERT(Capacity() < max_capacity_); 677 ASSERT(Capacity() < max_capacity_);
711 // Last page must be valid and its next page is invalid.
712 ASSERT(last_page_->is_valid() && !last_page_->next_page()->is_valid());
713 678
714 // We are going to exceed capacity for this space. 679 // Are we going to exceed capacity for this space?
715 if ((Capacity() + Page::kPageSize) > max_capacity_) return false; 680 if ((Capacity() + Page::kPageSize) > max_capacity_) return false;
716 681
717 Page* p = MemoryAllocator::AllocatePage(this, executable()); 682 Page* p = MemoryAllocator::AllocatePage(this, executable());
718 if (!p->is_valid()) return false; 683 if (p == NULL) return false;
719 684
720 accounting_stats_.ExpandSpace(Page::kObjectAreaSize);
721 ASSERT(Capacity() <= max_capacity_); 685 ASSERT(Capacity() <= max_capacity_);
722 686
723 last_page_->set_next_page(p); 687 p->InsertAfter(anchor_.prev_page());
724 last_page_ = p;
725
726 ASSERT(!last_page_->next_page()->is_valid());
727 688
728 return true; 689 return true;
729 } 690 }
730 691
731 692
732 #ifdef DEBUG 693 #ifdef DEBUG
733 int PagedSpace::CountTotalPages() { 694 int PagedSpace::CountTotalPages() {
695 PageIterator it(this);
734 int count = 0; 696 int count = 0;
735 for (Page* p = first_page_; p->is_valid(); p = p->next_page()) { 697 while (it.has_next()) {
698 it.next();
736 count++; 699 count++;
737 } 700 }
738 return count; 701 return count;
739 } 702 }
740 #endif 703 #endif
741 704
742 705
743 void PagedSpace::Shrink() { 706 void PagedSpace::Shrink() {
744 Page* top_page = AllocationTopPage();
745 ASSERT(top_page->is_valid());
746
747 // TODO(gc) release half of pages? 707 // TODO(gc) release half of pages?
748 if (top_page->next_page()->is_valid()) {
749 int pages_freed = 0;
750 Page* page = top_page->next_page();
751 Page* next_page;
752 while (page->is_valid()) {
753 next_page = page->next_page();
754 MemoryAllocator::Free(page);
755 pages_freed++;
756 page = next_page;
757 }
758 top_page->set_next_page(Page::FromAddress(NULL));
759 last_page_ = top_page;
760
761 accounting_stats_.ShrinkSpace(pages_freed * Page::kObjectAreaSize);
762 ASSERT(Capacity() == CountTotalPages() * Page::kObjectAreaSize);
763 }
764 } 708 }
765 709
766 710
767 bool PagedSpace::EnsureCapacity(int capacity) { 711 bool PagedSpace::EnsureCapacity(int capacity) {
768 while (Capacity() < capacity) { 712 while (Capacity() < capacity) {
769 // Expand the space until it has the required capacity or expansion fails. 713 // Expand the space until it has the required capacity or expansion fails.
770 if (!Expand()) return false; 714 if (!Expand()) return false;
771 } 715 }
772 return true; 716 return true;
773 } 717 }
774 718
775 719
776 #ifdef DEBUG 720 #ifdef DEBUG
777 void PagedSpace::Print() { } 721 void PagedSpace::Print() { }
778 #endif 722 #endif
779 723
780 724
781 #ifdef DEBUG 725 #ifdef DEBUG
782 // We do not assume that the PageIterator works, because it depends on the
783 // invariants we are checking during verification.
784 void PagedSpace::Verify(ObjectVisitor* visitor) { 726 void PagedSpace::Verify(ObjectVisitor* visitor) {
785 // The allocation pointer should be valid, and it should be in a page in the 727 // We can only iterate over the pages if they were swept precisely.
786 // space. 728 if (was_swept_conservatively_) return;
787 ASSERT(allocation_info_.VerifyPagedAllocation());
788 Page* top_page = Page::FromAllocationTop(allocation_info_.top);
789 729
790 // Loop over all the pages. 730 bool allocation_pointer_found_in_space =
791 bool above_allocation_top = false; 731 (allocation_info_.top != allocation_info_.limit);
792 Page* current_page = first_page_; 732 PageIterator page_iterator(this);
793 while (current_page->is_valid()) { 733 while (page_iterator.has_next()) {
794 if (above_allocation_top) { 734 Page* page = page_iterator.next();
795 // We don't care what's above the allocation top. 735 ASSERT(page->owner() == this);
796 } else { 736 if (page == Page::FromAllocationTop(allocation_info_.top)) {
797 Address top = current_page->AllocationTop(); 737 allocation_pointer_found_in_space = true;
798 if (current_page == top_page) { 738 }
799 ASSERT(top == allocation_info_.top); 739 ASSERT(!page->IsFlagSet(MemoryChunk::WAS_SWEPT_CONSERVATIVELY));
800 // The next page will be above the allocation top. 740 HeapObjectIterator it(page, NULL);
801 above_allocation_top = true; 741 Address end_of_previous_object = page->ObjectAreaStart();
802 } 742 Address top = page->ObjectAreaEnd();
743 for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
744 ASSERT(end_of_previous_object <= object->address());
803 745
804 HeapObjectIterator it(current_page, NULL); 746 // The first word should be a map, and we expect all map pointers to
805 Address end_of_previous_object = current_page->ObjectAreaStart(); 747 // be in map space.
806 for (HeapObject* object = it.next(); object != NULL; object = it.next()) { 748 Map* map = object->map();
807 ASSERT(end_of_previous_object <= object->address()); 749 ASSERT(map->IsMap());
750 ASSERT(Heap::map_space()->Contains(map));
808 751
809 // The first word should be a map, and we expect all map pointers to 752 // Perform space-specific object verification.
810 // be in map space. 753 VerifyObject(object);
811 Map* map = object->map();
812 ASSERT(map->IsMap());
813 ASSERT(Heap::map_space()->Contains(map));
814 754
815 // Perform space-specific object verification. 755 // The object itself should look OK.
816 VerifyObject(object); 756 object->Verify();
817 757
818 // The object itself should look OK. 758 // All the interior pointers should be contained in the heap.
819 object->Verify(); 759 int size = object->Size();
760 object->IterateBody(map->instance_type(), size, visitor);
820 761
821 // All the interior pointers should be contained in the heap and 762 ASSERT(object->address() + size <= top);
822 // have page regions covering intergenerational references should be 763 end_of_previous_object = object->address() + size;
823 // marked dirty.
824 int size = object->Size();
825 object->IterateBody(map->instance_type(), size, visitor);
826
827 ASSERT(object->address() + size <= top);
828 end_of_previous_object = object->address() + size;
829 }
830 } 764 }
831
832 current_page = current_page->next_page();
833 } 765 }
834 } 766 }
835 #endif 767 #endif
836 768
837 769
838 // ----------------------------------------------------------------------------- 770 // -----------------------------------------------------------------------------
839 // NewSpace implementation 771 // NewSpace implementation
840 772
841 773
842 bool NewSpace::Setup(int maximum_semispace_capacity) { 774 bool NewSpace::Setup(int maximum_semispace_capacity) {
(...skipping 542 matching lines...)
1385 1317
1386 1318
1387 // ----------------------------------------------------------------------------- 1319 // -----------------------------------------------------------------------------
1388 // Free lists for old object spaces implementation 1320 // Free lists for old object spaces implementation
1389 1321
1390 void FreeListNode::set_size(int size_in_bytes) { 1322 void FreeListNode::set_size(int size_in_bytes) {
1391 ASSERT(size_in_bytes > 0); 1323 ASSERT(size_in_bytes > 0);
1392 ASSERT(IsAligned(size_in_bytes, kPointerSize)); 1324 ASSERT(IsAligned(size_in_bytes, kPointerSize));
1393 1325
1394 // We write a map and possibly size information to the block. If the block 1326 // We write a map and possibly size information to the block. If the block
1395 // is big enough to be a ByteArray with at least one extra word (the next 1327 // is big enough to be a FreeSpace with at least one extra word (the next
1396 // pointer), we set its map to be the byte array map and its size to an 1328 // pointer), we set its map to be the free space map and its size to an
1397 // appropriate array length for the desired size from HeapObject::Size(). 1329 // appropriate array length for the desired size from HeapObject::Size().
1398 // If the block is too small (eg, one or two words), to hold both a size 1330 // If the block is too small (eg, one or two words), to hold both a size
1399 // field and a next pointer, we give it a filler map that gives it the 1331 // field and a next pointer, we give it a filler map that gives it the
1400 // correct size. 1332 // correct size.
1401 if (size_in_bytes > ByteArray::kHeaderSize) { 1333 if (size_in_bytes > FreeSpace::kHeaderSize) {
1402 set_map(Heap::raw_unchecked_byte_array_map()); 1334 set_map(Heap::raw_unchecked_free_space_map());
1403 // Can't use ByteArray::cast because it fails during deserialization. 1335 // Can't use FreeSpace::cast because it fails during deserialization.
1404 ByteArray* this_as_byte_array = reinterpret_cast<ByteArray*>(this); 1336 FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this);
1405 this_as_byte_array->set_length(ByteArray::LengthFor(size_in_bytes)); 1337 this_as_free_space->set_size(size_in_bytes);
1406 } else if (size_in_bytes == kPointerSize) { 1338 } else if (size_in_bytes == kPointerSize) {
1407 set_map(Heap::raw_unchecked_one_pointer_filler_map()); 1339 set_map(Heap::raw_unchecked_one_pointer_filler_map());
1408 } else if (size_in_bytes == 2 * kPointerSize) { 1340 } else if (size_in_bytes == 2 * kPointerSize) {
1409 set_map(Heap::raw_unchecked_two_pointer_filler_map()); 1341 set_map(Heap::raw_unchecked_two_pointer_filler_map());
1410 } else { 1342 } else {
1411 UNREACHABLE(); 1343 UNREACHABLE();
1412 } 1344 }
1413 // We would like to ASSERT(Size() == size_in_bytes) but this would fail during 1345 // We would like to ASSERT(Size() == size_in_bytes) but this would fail during
1414 // deserialization because the byte array map is not done yet. 1346 // deserialization because the free space map is not done yet.
1415 } 1347 }
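
FreeListNode::set_size() above tags a free block purely by its size: one word gets the one-pointer filler map, two words the two-pointer filler map, and anything larger than the FreeSpace header becomes a FreeSpace carrying an explicit size. Here is a stand-alone sketch of that classification; the tag names and the two-word header size are illustrative assumptions, not the real V8 maps or constants.

    #include <cassert>

    // Illustrative tags; the real code stores actual map pointers instead.
    enum class FreeBlockTag { kOnePointerFiller, kTwoPointerFiller, kFreeSpace };

    constexpr int kPointerSize = sizeof(void*);
    // Assumed header: map word + size word, as for a FreeSpace-style block.
    constexpr int kFreeSpaceHeaderSize = 2 * kPointerSize;

    FreeBlockTag TagForFreeBlock(int size_in_bytes) {
      assert(size_in_bytes > 0 && size_in_bytes % kPointerSize == 0);
      if (size_in_bytes > kFreeSpaceHeaderSize) return FreeBlockTag::kFreeSpace;
      if (size_in_bytes == kPointerSize) return FreeBlockTag::kOnePointerFiller;
      if (size_in_bytes == 2 * kPointerSize) return FreeBlockTag::kTwoPointerFiller;
      // Anything else would hit UNREACHABLE() in the code above.
      assert(false);
      return FreeBlockTag::kFreeSpace;
    }

    int main() {
      assert(TagForFreeBlock(kPointerSize) == FreeBlockTag::kOnePointerFiller);
      assert(TagForFreeBlock(2 * kPointerSize) == FreeBlockTag::kTwoPointerFiller);
      assert(TagForFreeBlock(8 * kPointerSize) == FreeBlockTag::kFreeSpace);
      return 0;
    }
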
1416 1348
1417 1349
1418 Address FreeListNode::next() { 1350 FreeListNode* FreeListNode::next() {
1419 ASSERT(IsFreeListNode(this)); 1351 ASSERT(IsFreeListNode(this));
1420 if (map() == Heap::raw_unchecked_byte_array_map()) { 1352 if (map() == Heap::raw_unchecked_free_space_map()) {
1353 ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize);
1354 return reinterpret_cast<FreeListNode*>(
1355 Memory::Address_at(address() + kNextOffset));
1356 } else {
1357 return reinterpret_cast<FreeListNode*>(
1358 Memory::Address_at(address() + kPointerSize));
1359 }
1360 }
1361
1362
1363 FreeListNode** FreeListNode::next_address() {
1364 ASSERT(IsFreeListNode(this));
1365 if (map() == Heap::raw_unchecked_free_space_map()) {
1421 ASSERT(Size() >= kNextOffset + kPointerSize); 1366 ASSERT(Size() >= kNextOffset + kPointerSize);
1422 return Memory::Address_at(address() + kNextOffset); 1367 return reinterpret_cast<FreeListNode**>(address() + kNextOffset);
1423 } else { 1368 } else {
1424 return Memory::Address_at(address() + kPointerSize); 1369 return reinterpret_cast<FreeListNode**>(address() + kPointerSize);
1425 } 1370 }
1426 } 1371 }
1427 1372
1428 1373
1429 void FreeListNode::set_next(Address next) { 1374 void FreeListNode::set_next(FreeListNode* next) {
1430 ASSERT(IsFreeListNode(this)); 1375 ASSERT(IsFreeListNode(this));
1431 if (map() == Heap::raw_unchecked_byte_array_map()) { 1376 // While we are booting the VM the free space map will actually be null. So
1432 ASSERT(Size() >= kNextOffset + kPointerSize); 1377 // we have to make sure that we don't try to use it for anything at that
1433 Memory::Address_at(address() + kNextOffset) = next; 1378 // stage.
1434 } else { 1379 if (map() == Heap::raw_unchecked_free_space_map()) {
1435 Memory::Address_at(address() + kPointerSize) = next; 1380 ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize);
1436 } 1381 Memory::Address_at(address() + kNextOffset) =
1437 } 1382 reinterpret_cast<Address>(next);
1438 1383 } else {
1439 1384 Memory::Address_at(address() + kPointerSize) =
1440 OldSpaceFreeList::OldSpaceFreeList(AllocationSpace owner) : owner_(owner) { 1385 reinterpret_cast<Address>(next);
1386 }
1387 }
1388
1389
1390 OldSpaceFreeList::OldSpaceFreeList(PagedSpace* owner) : owner_(owner) {
1441 Reset(); 1391 Reset();
1442 } 1392 }
1443 1393
1444 1394
1445 void OldSpaceFreeList::Reset() { 1395 void OldSpaceFreeList::Reset() {
1446 available_ = 0; 1396 available_ = 0;
1447 for (int i = 0; i < kFreeListsLength; i++) { 1397 small_list_ = NULL;
1448 free_[i].head_node_ = NULL; 1398 medium_list_ = NULL;
1449 } 1399 large_list_ = NULL;
1450 needs_rebuild_ = false; 1400 huge_list_ = NULL;
1451 finger_ = kHead;
1452 free_[kHead].next_size_ = kEnd;
1453 }
1454
1455
1456 void OldSpaceFreeList::RebuildSizeList() {
1457 ASSERT(needs_rebuild_);
1458 int cur = kHead;
1459 for (int i = cur + 1; i < kFreeListsLength; i++) {
1460 if (free_[i].head_node_ != NULL) {
1461 free_[cur].next_size_ = i;
1462 cur = i;
1463 }
1464 }
1465 free_[cur].next_size_ = kEnd;
1466 needs_rebuild_ = false;
1467 } 1401 }
1468 1402
1469 1403
1470 int OldSpaceFreeList::Free(Address start, int size_in_bytes) { 1404 int OldSpaceFreeList::Free(Address start, int size_in_bytes) {
1471 #ifdef DEBUG 1405 #ifdef DEBUG
1472 MemoryAllocator::ZapBlock(start, size_in_bytes); 1406 MemoryAllocator::ZapBlock(start, size_in_bytes);
1473 #endif 1407 #endif
1408 if (size_in_bytes == 0) return 0;
1474 FreeListNode* node = FreeListNode::FromAddress(start); 1409 FreeListNode* node = FreeListNode::FromAddress(start);
1475 node->set_size(size_in_bytes); 1410 node->set_size(size_in_bytes);
1476 1411
1477 // We don't use the freelists in compacting mode. This makes it more like a 1412 // Early return to drop too-small blocks on the floor.
1478 // GC that only has mark-sweep-compact and doesn't have a mark-sweep 1413 if (size_in_bytes < kSmallListMin) return size_in_bytes;
1479 // collector. 1414
1480 if (FLAG_always_compact) { 1415 // Insert other blocks at the head of a free list of the appropriate
1481 return size_in_bytes; 1416 // magnitude.
1482 } 1417 if (size_in_bytes <= kSmallListMax) {
1483 1418 node->set_next(small_list_);
1484 // Early return to drop too-small blocks on the floor (one or two word 1419 small_list_ = node;
1485 // blocks cannot hold a map pointer, a size field, and a pointer to the 1420 } else if (size_in_bytes <= kMediumListMax) {
1486 // next block in the free list). 1421 node->set_next(medium_list_);
1487 if (size_in_bytes < kMinBlockSize) { 1422 medium_list_ = node;
1488 return size_in_bytes; 1423 } else if (size_in_bytes <= kLargeListMax) {
1489 } 1424 node->set_next(large_list_);
1490 1425 large_list_ = node;
1491 // Insert other blocks at the head of an exact free list. 1426 } else {
1492 int index = size_in_bytes >> kPointerSizeLog2; 1427 node->set_next(huge_list_);
1493 node->set_next(free_[index].head_node_); 1428 huge_list_ = node;
1494 free_[index].head_node_ = node->address(); 1429 }
1495 available_ += size_in_bytes; 1430 available_ += size_in_bytes;
1496 needs_rebuild_ = true; 1431 ASSERT(available_ == SumFreeLists());
1497 return 0; 1432 return 0;
1498 } 1433 }
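
The new OldSpaceFreeList above is segregated by size class: Free() drops blocks below kSmallListMin on the floor and otherwise pushes the node onto the small, medium, large, or huge list. The sketch below mirrors that bucketing with hypothetical threshold values (the real kSmallListMin/k*ListMax constants are defined in spaces.h and are not reproduced here) and tracks sizes rather than real heap nodes.

    #include <cstddef>
    #include <forward_list>

    // Hypothetical thresholds purely for illustration; the real values differ.
    constexpr int kMinBytes = 3 * sizeof(void*);  // Below this the block is dropped.
    constexpr int kSmallMax = 0xff * sizeof(void*);
    constexpr int kMediumMax = 0x7ff * sizeof(void*);
    constexpr int kLargeMax = 0x3fff * sizeof(void*);

    struct SegregatedFreeList {
      std::forward_list<int> small, medium, large, huge;  // Store sizes, not nodes.
      std::size_t available = 0;

      // Returns the number of bytes too small to track (wasted), mirroring the
      // contract of OldSpaceFreeList::Free() above.
      int Free(int size_in_bytes) {
        if (size_in_bytes < kMinBytes) return size_in_bytes;
        if (size_in_bytes <= kSmallMax)       small.push_front(size_in_bytes);
        else if (size_in_bytes <= kMediumMax) medium.push_front(size_in_bytes);
        else if (size_in_bytes <= kLargeMax)  large.push_front(size_in_bytes);
        else                                  huge.push_front(size_in_bytes);
        available += size_in_bytes;
        return 0;
      }
    };

    int main() {
      SegregatedFreeList fl;
      const int two_words = 2 * static_cast<int>(sizeof(void*));
      int wasted = fl.Free(two_words);  // Too small to track: dropped on the floor.
      fl.Free(64);                      // Small list.
      fl.Free(256 * 1024);              // Huge list (with these illustrative thresholds).
      return (wasted == two_words && fl.available == 64u + 256 * 1024) ? 0 : 1;
    }
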
1499 1434
1500 1435
1501 MaybeObject* OldSpaceFreeList::Allocate(int size_in_bytes, int* wasted_bytes) { 1436 // Allocation on the old space free list. If it succeeds then a new linear
1437 // allocation space has been set up with the top and limit of the space. If
1438 // the allocation fails then NULL is returned, and the caller can perform a GC
1439 // or allocate a new page before retrying.
1440 HeapObject* OldSpaceFreeList::Allocate(int size_in_bytes) {
1502 ASSERT(0 < size_in_bytes); 1441 ASSERT(0 < size_in_bytes);
1503 ASSERT(size_in_bytes <= kMaxBlockSize); 1442 ASSERT(size_in_bytes <= kMaxBlockSize);
1504 ASSERT(IsAligned(size_in_bytes, kPointerSize)); 1443 ASSERT(IsAligned(size_in_bytes, kPointerSize));
1505 1444 // Don't free list allocate if there is linear space available.
1506 if (needs_rebuild_) RebuildSizeList(); 1445 ASSERT(owner_->limit() - owner_->top() < size_in_bytes);
1507 int index = size_in_bytes >> kPointerSizeLog2; 1446
1508 // Check for a perfect fit. 1447 FreeListNode* new_node = NULL;
1509 if (free_[index].head_node_ != NULL) { 1448 int new_node_size = 0;
1510 FreeListNode* node = FreeListNode::FromAddress(free_[index].head_node_); 1449
1511 // If this was the last block of its size, remove the size. 1450 if (size_in_bytes <= kSmallAllocationMax && small_list_ != NULL) {
1512 if ((free_[index].head_node_ = node->next()) == NULL) RemoveSize(index); 1451 new_node = small_list_;
1513 available_ -= size_in_bytes; 1452 new_node_size = new_node->Size();
1514 *wasted_bytes = 0; 1453 small_list_ = new_node->next();
1515 ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep. 1454 } else if (size_in_bytes <= kMediumAllocationMax && medium_list_ != NULL) {
1516 return node; 1455 new_node = medium_list_;
1517 } 1456 new_node_size = new_node->Size();
1518 // Search the size list for the best fit. 1457 medium_list_ = new_node->next();
1519 int prev = finger_ < index ? finger_ : kHead; 1458 } else if (size_in_bytes <= kLargeAllocationMax && large_list_ != NULL) {
1520 int cur = FindSize(index, &prev); 1459 new_node = large_list_;
1521 ASSERT(index < cur); 1460 new_node_size = new_node->Size();
1522 if (cur == kEnd) { 1461 large_list_ = new_node->next();
1523 // No large enough size in list. 1462 } else {
1524 *wasted_bytes = 0; 1463 for (FreeListNode** cur = &huge_list_;
1525 return Failure::RetryAfterGC(owner_); 1464 *cur != NULL;
1526 } 1465 cur = (*cur)->next_address()) {
1527 ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep. 1466 ASSERT((*cur)->map() == Heap::raw_unchecked_free_space_map());
1528 int rem = cur - index; 1467 FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur);
1529 int rem_bytes = rem << kPointerSizeLog2; 1468 int size = cur_as_free_space->Size();
1530 FreeListNode* cur_node = FreeListNode::FromAddress(free_[cur].head_node_); 1469 if (size >= size_in_bytes) {
1531 ASSERT(cur_node->Size() == (cur << kPointerSizeLog2)); 1470 // Large enough node found. Unlink it from the list.
1532 FreeListNode* rem_node = FreeListNode::FromAddress(free_[cur].head_node_ + 1471 new_node = *cur;
1533 size_in_bytes); 1472 new_node_size = size;
1534 // Distinguish the cases prev < rem < cur and rem <= prev < cur 1473 *cur = new_node->next();
1535 // to avoid many redundant tests and calls to Insert/RemoveSize. 1474 break;
1536 if (prev < rem) { 1475 }
1537 // Simple case: insert rem between prev and cur.
1538 finger_ = prev;
1539 free_[prev].next_size_ = rem;
1540 // If this was the last block of size cur, remove the size.
1541 if ((free_[cur].head_node_ = cur_node->next()) == NULL) {
1542 free_[rem].next_size_ = free_[cur].next_size_;
1543 } else {
1544 free_[rem].next_size_ = cur;
1545 } 1476 }
1546 // Add the remainder block. 1477 if (new_node == NULL) return NULL;
1547 rem_node->set_size(rem_bytes); 1478 }
1548 rem_node->set_next(free_[rem].head_node_); 1479
1549 free_[rem].head_node_ = rem_node->address(); 1480 available_ -= new_node_size;
1550 } else { 1481 ASSERT(available_ == SumFreeLists());
1551 // If this was the last block of size cur, remove the size. 1482
1552 if ((free_[cur].head_node_ = cur_node->next()) == NULL) { 1483 int old_linear_size = owner_->limit() - owner_->top();
1553 finger_ = prev; 1484 // Mark the old linear allocation area with a free space map so it can be
1554 free_[prev].next_size_ = free_[cur].next_size_; 1485 // skipped when scanning the heap. This also puts it back in the free list
1555 } 1486 // if it is big enough.
1556 if (rem_bytes < kMinBlockSize) { 1487 owner_->Free(owner_->top(), old_linear_size);
1557 // Too-small remainder is wasted. 1488 IncrementalMarking::Step(size_in_bytes - old_linear_size);
1558 rem_node->set_size(rem_bytes); 1489
1559 available_ -= size_in_bytes + rem_bytes; 1490 ASSERT(new_node_size - size_in_bytes >= 0); // New linear size.
1560 *wasted_bytes = rem_bytes; 1491
1561 return cur_node; 1492 const int kThreshold = IncrementalMarking::kAllocatedThreshold;
1562 } 1493
1563 // Add the remainder block and, if needed, insert its size. 1494 // Memory in the linear allocation area is counted as allocated. We may free
1564 rem_node->set_size(rem_bytes); 1495 // a little of this again immediately - see below.
1565 rem_node->set_next(free_[rem].head_node_); 1496 owner_->Allocate(new_node_size);
1566 free_[rem].head_node_ = rem_node->address(); 1497
1567 if (rem_node->next() == NULL) InsertSize(rem); 1498 if (new_node_size - size_in_bytes > kThreshold &&
1568 } 1499 IncrementalMarking::state() == IncrementalMarking::MARKING) {
1569 available_ -= size_in_bytes; 1500 // We don't want to give too large linear areas to the allocator while
1570 *wasted_bytes = 0; 1501 // incremental marking is going on, because we won't check again whether
1571 return cur_node; 1502 // we want to do another increment until the linear area is used up.
1572 } 1503 owner_->Free(new_node->address() + size_in_bytes + kThreshold,
1573 1504 new_node_size - size_in_bytes - kThreshold);
1574 1505 owner_->SetTop(new_node->address() + size_in_bytes,
1575 void OldSpaceFreeList::MarkNodes() { 1506 new_node->address() + size_in_bytes + kThreshold);
1576 for (int i = 0; i < kFreeListsLength; i++) { 1507 } else {
1577 Address cur_addr = free_[i].head_node_; 1508 // Normally we give the rest of the node to the allocator as its new
1578 while (cur_addr != NULL) { 1509 // linear allocation area.
1579 FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr); 1510 owner_->SetTop(new_node->address() + size_in_bytes,
1580 cur_addr = cur_node->next(); 1511 new_node->address() + new_node_size);
1581 IntrusiveMarking::SetMark(cur_node); 1512 }
1582 } 1513
1583 } 1514 return new_node;
1584 } 1515 }
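
OldSpaceFreeList::Allocate() above does two things: it pops a large-enough node from one of the size-class lists, and it re-points the owner's linear allocation area (top/limit) at the remainder of that node so later bump-pointer allocations come out of it. A stripped-down sketch of that second step, using a hypothetical BumpArea in place of PagedSpace:

    #include <cassert>
    #include <cstdint>

    // Hypothetical bump-pointer area standing in for PagedSpace's
    // allocation_info_ (top/limit pair).
    struct BumpArea {
      std::uintptr_t top = 0;
      std::uintptr_t limit = 0;

      // Mirrors SetTop(): point the linear area at a freshly carved block.
      void SetTop(std::uintptr_t new_top, std::uintptr_t new_limit) {
        assert(new_top <= new_limit);
        top = new_top;
        limit = new_limit;
      }

      // Bump-pointer allocation out of the current linear area.
      std::uintptr_t Allocate(int size_in_bytes) {
        if (top + size_in_bytes > limit) return 0;  // Caller must refill.
        std::uintptr_t result = top;
        top += size_in_bytes;
        return result;
      }
    };

    int main() {
      BumpArea area;
      // Pretend the free list handed us a 256-byte node at address 0x1000 and
      // we need 64 bytes: the requested object starts at the node, and the
      // remainder becomes the new linear allocation area, as in Allocate() above.
      std::uintptr_t node = 0x1000, node_size = 256;
      int size_in_bytes = 64;
      std::uintptr_t object = node;
      area.SetTop(node + size_in_bytes, node + node_size);
      assert(object == 0x1000);
      assert(area.Allocate(32) == 0x1040);  // Next allocation bumps within the node.
      return 0;
    }
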
1585 1516
1586 1517
1587 #ifdef DEBUG 1518 #ifdef DEBUG
1588 bool OldSpaceFreeList::Contains(FreeListNode* node) { 1519 intptr_t OldSpaceFreeList::SumFreeList(FreeListNode* cur) {
1589 for (int i = 0; i < kFreeListsLength; i++) { 1520 intptr_t sum = 0;
1590 Address cur_addr = free_[i].head_node_; 1521 while (cur != NULL) {
1591 while (cur_addr != NULL) { 1522 ASSERT(cur->map() == Heap::raw_unchecked_free_space_map());
1592 FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr); 1523 FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur);
1593 if (cur_node == node) return true; 1524 sum += cur_as_free_space->Size();
1594 cur_addr = cur_node->next(); 1525 cur = cur->next();
1595 } 1526 }
1596 } 1527 return sum;
1597 return false; 1528 }
1598 } 1529
1599 1530
1600 1531 intptr_t OldSpaceFreeList::SumFreeLists() {
1601 void FreeListNode::Zap() { 1532 intptr_t sum = SumFreeList(small_list_);
1602 if (IsByteArray()) { 1533 sum += SumFreeList(medium_list_);
1603 ByteArray* ba = ByteArray::cast(this); 1534 sum += SumFreeList(large_list_);
1604 // Skip map, length and next pointer. 1535 sum += SumFreeList(huge_list_);
1605 Address payload_start = ba->GetDataStartAddress() + kPointerSize; 1536 return sum;
1606 Address payload_end = ba->address() + ba->Size();
1607 for (Address cur = payload_start;
1608 cur < payload_end;
1609 cur += kPointerSize) {
1610 *reinterpret_cast<uintptr_t*>(cur) = kFreeListZapValue;
1611 }
1612 }
1613 }
1614
1615
1616 void OldSpaceFreeList::Zap() {
1617 for (int i = 0; i < kFreeListsLength; i++) {
1618 Address cur_addr = free_[i].head_node_;
1619 while (cur_addr != NULL) {
1620 FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
1621 cur_node->Zap();
1622 cur_addr = cur_node->next();
1623 }
1624 }
1625 }
1626
1627
1628 void FixedSizeFreeList::Zap() {
1629 Address cur_addr = head_;
1630 while (cur_addr != NULL && cur_addr != tail_) {
1631 FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
1632 cur_node->Zap();
1633 cur_addr = cur_node->next();
1634 }
1635 } 1537 }
1636 #endif 1538 #endif
1637 1539
1638 1540
1639 FixedSizeFreeList::FixedSizeFreeList(AllocationSpace owner, int object_size)
1640 : owner_(owner), object_size_(object_size) {
1641 Reset();
1642 }
1643
1644
1645 void FixedSizeFreeList::Reset() {
1646 available_ = 0;
1647 head_ = tail_ = NULL;
1648 }
1649
1650
1651 void FixedSizeFreeList::Free(Address start) {
1652 #ifdef DEBUG
1653 MemoryAllocator::ZapBlock(start, object_size_);
1654 #endif
1655 // We only use the freelists with mark-sweep.
1656 ASSERT(!MarkCompactCollector::IsCompacting());
1657 FreeListNode* node = FreeListNode::FromAddress(start);
1658 node->set_size(object_size_);
1659 node->set_next(NULL);
1660 if (head_ == NULL) {
1661 tail_ = head_ = node->address();
1662 } else {
1663 FreeListNode::FromAddress(tail_)->set_next(node->address());
1664 tail_ = node->address();
1665 }
1666 available_ += object_size_;
1667 }
1668
1669
1670 MaybeObject* FixedSizeFreeList::Allocate() {
1671 if (head_ == NULL) {
1672 return Failure::RetryAfterGC(owner_);
1673 }
1674
1675 ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep.
1676 FreeListNode* node = FreeListNode::FromAddress(head_);
1677 head_ = node->next();
1678 available_ -= object_size_;
1679 return node;
1680 }
1681
1682
1683 void FixedSizeFreeList::MarkNodes() {
1684 Address cur_addr = head_;
1685 while (cur_addr != NULL && cur_addr != tail_) {
1686 FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
1687 cur_addr = cur_node->next();
1688 IntrusiveMarking::SetMark(cur_node);
1689 }
1690 }
1691
1692
1693 // ----------------------------------------------------------------------------- 1541 // -----------------------------------------------------------------------------
1694 // OldSpace implementation 1542 // OldSpace implementation
1695 1543
1696 void OldSpace::PrepareForMarkCompact(bool will_compact) { 1544 void OldSpace::PrepareForMarkCompact(bool will_compact) {
1697 ASSERT(!will_compact); 1545 ASSERT(!will_compact);
1698 // Call prepare of the super class. 1546 // Call prepare of the super class.
1699 PagedSpace::PrepareForMarkCompact(will_compact); 1547 PagedSpace::PrepareForMarkCompact(will_compact);
1700 1548
1701 // During a non-compacting collection, everything below the linear
1702 // allocation pointer is considered allocated (everything above is
1703 // available) and we will rediscover available and wasted bytes during
1704 // the collection.
1705 accounting_stats_.AllocateBytes(free_list_.available());
1706 accounting_stats_.FillWastedBytes(Waste());
1707
1708 // Clear the free list before a full GC---it will be rebuilt afterward. 1549 // Clear the free list before a full GC---it will be rebuilt afterward.
1709 free_list_.Reset(); 1550 free_list_.Reset();
1710 } 1551 }
1711 1552
1712 1553
1713 bool NewSpace::ReserveSpace(int bytes) { 1554 bool NewSpace::ReserveSpace(int bytes) {
1714 // We can't reliably unpack a partial snapshot that needs more new space 1555 // We can't reliably unpack a partial snapshot that needs more new space
1715 // space than the minimum NewSpace size. 1556 // space than the minimum NewSpace size.
1716 ASSERT(bytes <= InitialCapacity()); 1557 ASSERT(bytes <= InitialCapacity());
1717 Address limit = allocation_info_.limit; 1558 Address limit = allocation_info_.limit;
1718 Address top = allocation_info_.top; 1559 Address top = allocation_info_.top;
1719 return limit - top >= bytes; 1560 return limit - top >= bytes;
1720 } 1561 }
1721 1562
1722 1563
1723 void PagedSpace::FreePages(Page* prev, Page* last) {
1724 if (last == AllocationTopPage()) {
1725 // Pages are already at the end of used pages.
1726 // Just mark them as continuous.
1727 Page* p = prev == NULL ? first_page_ : prev->next_page();
1728 Page* end_page = last->next_page();
1729 do {
1730 p->SetFlag(Page::IS_CONTINUOUS);
1731 p = p->next_page();
1732 } while (p != end_page);
1733 return;
1734 }
1735
1736 Page* first = NULL;
1737
1738 // Remove pages from the list.
1739 if (prev == NULL) {
1740 first = first_page_;
1741 first_page_ = last->next_page();
1742 } else {
1743 first = prev->next_page();
1744 prev->set_next_page(last->next_page());
1745 }
1746
1747 // Attach it after the last page.
1748 last_page_->set_next_page(first);
1749 last_page_ = last;
1750 last->set_next_page(NULL);
1751
1752 // Clean them up.
1753 do {
1754 first->InvalidateWatermark(true);
1755 first->SetAllocationWatermark(first->ObjectAreaStart());
1756 first->SetCachedAllocationWatermark(first->ObjectAreaStart());
1757 first->SetFlag(Page::IS_CONTINUOUS);
1758 first->markbits()->Clear();
1759 first = first->next_page();
1760 } while (first->is_valid());
1761 }
1762
1763
1764 void PagedSpace::PrepareForMarkCompact(bool will_compact) { 1564 void PagedSpace::PrepareForMarkCompact(bool will_compact) {
1765 ASSERT(!will_compact); 1565 ASSERT(!will_compact);
1766 } 1566 }
1767 1567
1768 1568
1769 bool PagedSpace::ReserveSpace(int bytes) { 1569 bool PagedSpace::ReserveSpace(int size_in_bytes) {
1770 Address limit = allocation_info_.limit; 1570 ASSERT(size_in_bytes <= Page::kMaxHeapObjectSize);
1771 Address top = allocation_info_.top; 1571 ASSERT(size_in_bytes == RoundUp(size_in_bytes, kPointerSize));
1772 if (limit - top >= bytes) return true; 1572 Address current_top = allocation_info_.top;
1573 Address new_top = current_top + size_in_bytes;
1574 if (new_top <= allocation_info_.limit) return true;
1773 1575
1774 // There wasn't enough space in the current page. Lets put the rest 1576 HeapObject* new_area = free_list_.Allocate(size_in_bytes);
1775 // of the page on the free list and start a fresh page. 1577 if (new_area == NULL) new_area = SlowAllocateRaw(size_in_bytes);
1776 PutRestOfCurrentPageOnFreeList(TopPageOf(allocation_info_)); 1578 if (new_area == NULL) return false;
1777 1579
1778 Page* reserved_page = TopPageOf(allocation_info_); 1580 int old_linear_size = limit() - top();
1779 int bytes_left_to_reserve = bytes; 1581 // Mark the old linear allocation area with a free space so it can be
1780 while (bytes_left_to_reserve > 0) { 1582 // skipped when scanning the heap. This also puts it back in the free list
1781 if (!reserved_page->next_page()->is_valid()) { 1583 // if it is big enough.
1782 if (Heap::OldGenerationAllocationLimitReached()) return false; 1584 Free(top(), old_linear_size);
1783 Expand(); 1585
1784 } 1586 SetTop(new_area->address(), new_area->address() + size_in_bytes);
1785 bytes_left_to_reserve -= Page::kPageSize; 1587 Allocate(size_in_bytes);
1786 reserved_page = reserved_page->next_page();
1787 if (!reserved_page->is_valid()) return false;
1788 }
1789 ASSERT(TopPageOf(allocation_info_)->next_page()->is_valid());
1790 TopPageOf(allocation_info_)->next_page()->InvalidateWatermark(true);
1791 SetAllocationInfo(&allocation_info_,
1792 TopPageOf(allocation_info_)->next_page());
1793 return true; 1588 return true;
1794 } 1589 }
1795 1590
1796 1591
1797 // You have to call this last, since the implementation from PagedSpace 1592 // You have to call this last, since the implementation from PagedSpace
1798 // doesn't know that memory was 'promised' to large object space. 1593 // doesn't know that memory was 'promised' to large object space.
1799 bool LargeObjectSpace::ReserveSpace(int bytes) { 1594 bool LargeObjectSpace::ReserveSpace(int bytes) {
1800 return Heap::OldGenerationSpaceAvailable() >= bytes; 1595 return Heap::OldGenerationSpaceAvailable() >= bytes;
1801 } 1596 }
1802 1597
1803 1598
1804 // Slow case for normal allocation. Try in order: (1) allocate in the next 1599 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
1805 // page in the space, (2) allocate off the space's free list, (3) expand the 1600 // Allocation in this space has failed.
1806 // space, (4) fail.
1807 HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) {
1808 // Linear allocation in this space has failed. If there is another page
1809 // in the space, move to that page and allocate there. This allocation
1810 // should succeed (size_in_bytes should not be greater than a page's
1811 // object area size).
1812 Page* current_page = TopPageOf(allocation_info_);
1813 if (current_page->next_page()->is_valid()) {
1814 return AllocateInNextPage(current_page, size_in_bytes);
1815 }
1816
1817 // There is no next page in this space. Try free list allocation unless that
1818 // is currently forbidden.
1819 if (!Heap::linear_allocation()) {
1820 int wasted_bytes;
1821 Object* result;
1822 MaybeObject* maybe = free_list_.Allocate(size_in_bytes, &wasted_bytes);
1823 accounting_stats_.WasteBytes(wasted_bytes);
1824 if (maybe->ToObject(&result)) {
1825 accounting_stats_.AllocateBytes(size_in_bytes);
1826
1827 HeapObject* obj = HeapObject::cast(result);
1828 Page* p = Page::FromAddress(obj->address());
1829
1830 if (obj->address() >= p->AllocationWatermark()) {
1831 // There should be no hole between the allocation watermark
1832 // and allocated object address.
1833 // Memory above the allocation watermark was not swept and
1834 // might contain garbage pointers to new space.
1835 ASSERT(obj->address() == p->AllocationWatermark());
1836 p->SetAllocationWatermark(obj->address() + size_in_bytes);
1837 }
1838
1839 if (!p->IsFlagSet(Page::IS_CONTINUOUS)) {
1840 // This page is not continuous so we have to mark objects
1841 // that should be visited by HeapObjectIterator.
1842 ASSERT(!Marking::IsMarked(obj));
1843 Marking::SetMark(obj);
1844 }
1845
1846 return obj;
1847 }
1848 }
1849 1601
1850 // Free list allocation failed and there is no next page. Fail if we have 1602 // Free list allocation failed and there is no next page. Fail if we have
1851 // hit the old generation size limit that should cause a garbage 1603 // hit the old generation size limit that should cause a garbage
1852 // collection. 1604 // collection.
1853 if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) { 1605 if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) {
1854 return NULL; 1606 return NULL;
1855 } 1607 }
1856 1608
1857 // Try to expand the space and allocate in the new next page. 1609 // Try to expand the space and allocate in the new next page.
1858 ASSERT(!current_page->next_page()->is_valid());
1859 if (Expand()) { 1610 if (Expand()) {
1860 return AllocateInNextPage(current_page, size_in_bytes); 1611 return free_list_.Allocate(size_in_bytes);
1861 } 1612 }
1862 1613
1863 // Finally, fail. 1614 // Finally, fail.
1864 return NULL; 1615 return NULL;
1865 } 1616 }
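
The new SlowAllocateRaw() above encodes a short fallback chain: the free list has already failed, so bail out if the old-generation limit says a GC is due, otherwise grow the space by one page and retry the free list. Below is a hedged sketch of that control flow with stubbed-out policy hooks standing in for free_list_.Allocate(), Heap::OldGenerationAllocationLimitReached(), and Expand(); the Heap::always_allocate() escape hatch is omitted.

    #include <cstdint>

    // Stand-in callbacks; in the real code these are member/static calls.
    struct SlowPathPolicy {
      std::uintptr_t (*allocate_from_free_list)(int size_in_bytes);
      bool (*gc_limit_reached)();
      bool (*expand)();
    };

    // Returns a non-zero address on success, 0 when the caller should trigger
    // a GC or give up, mirroring the NULL returns above.
    std::uintptr_t SlowAllocateRawSketch(const SlowPathPolicy& p, int size_in_bytes) {
      // The fast linear path and the free list have already failed here.
      if (p.gc_limit_reached()) return 0;  // Let the caller collect garbage.
      if (p.expand()) {
        return p.allocate_from_free_list(size_in_bytes);  // Newly added page.
      }
      return 0;  // Could not grow: fail.
    }

    int main() {
      SlowPathPolicy policy{
          [](int) -> std::uintptr_t { return 0x2000; },  // Free list hit after expanding.
          []() { return false; },                        // GC limit not reached.
          []() { return true; }};                        // Expansion succeeds.
      return SlowAllocateRawSketch(policy, 64) == 0x2000 ? 0 : 1;
    }
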
1866 1617
1867 1618
1868 void OldSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
1869 current_page->SetAllocationWatermark(allocation_info_.top);
1870 int free_size =
1871 static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
1872 if (free_size > 0) {
1873 int wasted_bytes = free_list_.Free(allocation_info_.top, free_size);
1874 accounting_stats_.WasteBytes(wasted_bytes);
1875 }
1876 }
1877
1878
1879 void FixedSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
1880 current_page->SetAllocationWatermark(allocation_info_.top);
1881 int free_size =
1882 static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
1883 // In the fixed space free list all the free list items have the right size.
1884 // We use up the rest of the page while preserving this invariant.
1885 while (free_size >= object_size_in_bytes_) {
1886 free_list_.Free(allocation_info_.top);
1887 allocation_info_.top += object_size_in_bytes_;
1888 free_size -= object_size_in_bytes_;
1889 accounting_stats_.WasteBytes(object_size_in_bytes_);
1890 }
1891 }
1892
1893
1894 // Add the block at the top of the page to the space's free list, set the
1895 // allocation info to the next page (assumed to be one), and allocate
1896 // linearly there.
1897 HeapObject* OldSpace::AllocateInNextPage(Page* current_page,
1898 int size_in_bytes) {
1899 ASSERT(current_page->next_page()->is_valid());
1900 Page* next_page = current_page->next_page();
1901 next_page->ClearGCFields();
1902 PutRestOfCurrentPageOnFreeList(current_page);
1903 SetAllocationInfo(&allocation_info_, next_page);
1904 return AllocateLinearly(&allocation_info_, size_in_bytes);
1905 }
1906
1907
1908 void OldSpace::DeallocateBlock(Address start,
1909 int size_in_bytes,
1910 bool add_to_freelist) {
1911 Free(start, size_in_bytes, add_to_freelist);
1912 }
1913
1914
1915 #ifdef DEBUG 1619 #ifdef DEBUG
1916 struct CommentStatistic { 1620 struct CommentStatistic {
1917 const char* comment; 1621 const char* comment;
1918 int size; 1622 int size;
1919 int count; 1623 int count;
1920 void Clear() { 1624 void Clear() {
1921 comment = NULL; 1625 comment = NULL;
1922 size = 0; 1626 size = 0;
1923 count = 0; 1627 count = 0;
1924 } 1628 }
(...skipping 88 matching lines...)
2013 } 1717 }
2014 EnterComment(comment_txt, flat_delta); 1718 EnterComment(comment_txt, flat_delta);
2015 } 1719 }
2016 1720
2017 1721
2018 // Collects code size statistics: 1722 // Collects code size statistics:
2019 // - by code kind 1723 // - by code kind
2020 // - by code comment 1724 // - by code comment
2021 void PagedSpace::CollectCodeStatistics() { 1725 void PagedSpace::CollectCodeStatistics() {
2022 HeapObjectIterator obj_it(this); 1726 HeapObjectIterator obj_it(this);
2023 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) { 1727 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
2024 if (obj->IsCode()) { 1728 if (obj->IsCode()) {
2025 Code* code = Code::cast(obj); 1729 Code* code = Code::cast(obj);
2026 code_kind_statistics[code->kind()] += code->Size(); 1730 code_kind_statistics[code->kind()] += code->Size();
2027 RelocIterator it(code); 1731 RelocIterator it(code);
2028 int delta = 0; 1732 int delta = 0;
2029 const byte* prev_pc = code->instruction_start(); 1733 const byte* prev_pc = code->instruction_start();
2030 while (!it.done()) { 1734 while (!it.done()) {
2031 if (it.rinfo()->rmode() == RelocInfo::COMMENT) { 1735 if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
2032 delta += static_cast<int>(it.rinfo()->pc() - prev_pc); 1736 delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
2033 CollectCommentStatistics(&it); 1737 CollectCommentStatistics(&it);
2034 prev_pc = it.rinfo()->pc(); 1738 prev_pc = it.rinfo()->pc();
2035 } 1739 }
2036 it.next(); 1740 it.next();
2037 } 1741 }
2038 1742
2039 ASSERT(code->instruction_start() <= prev_pc && 1743 ASSERT(code->instruction_start() <= prev_pc &&
2040 prev_pc <= code->instruction_end()); 1744 prev_pc <= code->instruction_end());
2041 delta += static_cast<int>(code->instruction_end() - prev_pc); 1745 delta += static_cast<int>(code->instruction_end() - prev_pc);
2042 EnterComment("NoComment", delta); 1746 EnterComment("NoComment", delta);
2043 } 1747 }
2044 } 1748 }
2045 } 1749 }
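The delta bookkeeping above telescopes: each gap is measured from the previous marker, so the gaps plus the final tail cover the instruction stream exactly once. A small standalone check of that arithmetic, with made-up offsets:

  #include <cstdio>

  int main() {
    const int kInstructionSize = 100;            // assumed code object size
    const int comment_offsets[] = {10, 40, 70};  // assumed COMMENT reloc positions
    int prev_pc = 0;
    int attributed = 0;
    for (int offset : comment_offsets) {
      attributed += offset - prev_pc;  // gap since the previous marker
      prev_pc = offset;
    }
    attributed += kInstructionSize - prev_pc;    // tail after the last marker
    std::printf("attributed=%d of %d\n", attributed, kInstructionSize);  // 100 of 100
    return 0;
  }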
2046 1750
2047 1751
2048 void OldSpace::ReportStatistics() { 1752 void PagedSpace::ReportStatistics() {
2049 int pct = static_cast<int>(Available() * 100 / Capacity()); 1753 int pct = static_cast<int>(Available() * 100 / Capacity());
2050 PrintF(" capacity: %" V8_PTR_PREFIX "d" 1754 PrintF(" capacity: %" V8_PTR_PREFIX "d"
2051 ", waste: %" V8_PTR_PREFIX "d" 1755 ", waste: %" V8_PTR_PREFIX "d"
2052 ", available: %" V8_PTR_PREFIX "d, %%%d\n", 1756 ", available: %" V8_PTR_PREFIX "d, %%%d\n",
2053 Capacity(), Waste(), Available(), pct); 1757 Capacity(), Waste(), Available(), pct);
2054 1758
2055 ClearHistograms(); 1759 ClearHistograms();
2056 HeapObjectIterator obj_it(this); 1760 HeapObjectIterator obj_it(this);
2057 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) 1761 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())
2058 CollectHistogramInfo(obj); 1762 CollectHistogramInfo(obj);
2059 ReportHistogram(true); 1763 ReportHistogram(true);
2060 } 1764 }
2061 #endif 1765 #endif
2062 1766
2063 // ----------------------------------------------------------------------------- 1767 // -----------------------------------------------------------------------------
2064 // FixedSpace implementation 1768 // FixedSpace implementation
2065 1769
2066 void FixedSpace::PrepareForMarkCompact(bool will_compact) { 1770 void FixedSpace::PrepareForMarkCompact(bool will_compact) {
2067 // Call prepare of the super class. 1771 // Call prepare of the super class.
2068 PagedSpace::PrepareForMarkCompact(will_compact); 1772 PagedSpace::PrepareForMarkCompact(will_compact);
2069 1773
2070 ASSERT(!will_compact); 1774 ASSERT(!will_compact);
2071 1775
2072 // During a non-compacting collection, everything below the linear 1776 // During a non-compacting collection, everything below the linear
2073 // allocation pointer except wasted top-of-page blocks is considered 1777 // allocation pointer except wasted top-of-page blocks is considered
2074 // allocated and we will rediscover available bytes during the 1778 // allocated and we will rediscover available bytes during the
2075 // collection. 1779 // collection.
2076 accounting_stats_.AllocateBytes(free_list_.available()); 1780 accounting_stats_.AllocateBytes(free_list_.available());
2077 1781
2078 // Clear the free list before a full GC---it will be rebuilt afterward. 1782 // Clear the free list before a full GC---it will be rebuilt afterward.
2079 free_list_.Reset(); 1783 free_list_.Reset();
2080 } 1784 }
2081 1785
2082 1786
2083 // Slow case for normal allocation. Try in order: (1) allocate in the next
2084 // page in the space, (2) allocate off the space's free list, (3) expand the
2085 // space, (4) fail.
2086 HeapObject* FixedSpace::SlowAllocateRaw(int size_in_bytes) {
2087 ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
2088 // Linear allocation in this space has failed. If there is another page
2089 // in the space, move to that page and allocate there. This allocation
2090 // should succeed.
2091 Page* current_page = TopPageOf(allocation_info_);
2092 if (current_page->next_page()->is_valid()) {
2093 return AllocateInNextPage(current_page, size_in_bytes);
2094 }
2095
2096 // There is no next page in this space. Try free list allocation unless
2097 // that is currently forbidden. The fixed space free list implicitly assumes
2098 // that all free blocks are of the fixed size.
2099 if (!Heap::linear_allocation()) {
2100 Object* result;
2101 MaybeObject* maybe = free_list_.Allocate();
2102 if (maybe->ToObject(&result)) {
2103 accounting_stats_.AllocateBytes(size_in_bytes);
2104 HeapObject* obj = HeapObject::cast(result);
2105 Page* p = Page::FromAddress(obj->address());
2106
2107 if (obj->address() >= p->AllocationWatermark()) {
2108 // There should be no hole between the allocation watermark
2109 // and allocated object address.
2110 // Memory above the allocation watermark was not swept and
2111 // might contain garbage pointers to new space.
2112 ASSERT(obj->address() == p->AllocationWatermark());
2113 p->SetAllocationWatermark(obj->address() + size_in_bytes);
2114 }
2115
2116 return obj;
2117 }
2118 }
2119
2120 // Free list allocation failed and there is no next page. Fail if we have
2121 // hit the old generation size limit that should cause a garbage
2122 // collection.
2123 if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) {
2124 return NULL;
2125 }
2126
2127 // Try to expand the space and allocate in the new next page.
2128 ASSERT(!current_page->next_page()->is_valid());
2129 if (Expand()) {
2130 return AllocateInNextPage(current_page, size_in_bytes);
2131 }
2132
2133 // Finally, fail.
2134 return NULL;
2135 }
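The watermark check in the free-list branch above encodes an invariant worth spelling out: a block handed out at or above the page's allocation watermark must start exactly at the watermark (memory above it was not swept and may still hold stale pointers into new space), and the watermark then advances past the new object. A minimal sketch with assumed addresses and sizes, not real heap values:

  #include <cassert>
  #include <cstdint>
  #include <cstdio>

  int main() {
    std::uintptr_t watermark = 0x1000;  // assumed current allocation watermark
    std::uintptr_t object = 0x1000;     // assumed address returned by the free list
    const int kObjectSize = 32;         // assumed fixed object size
    if (object >= watermark) {
      assert(object == watermark);       // no unswept hole below the new object
      watermark = object + kObjectSize;  // stands in for SetAllocationWatermark(...)
    }
    std::printf("new watermark: 0x%lx\n", static_cast<unsigned long>(watermark));
    return 0;
  }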
2136
2137
2138 // Move to the next page (there is assumed to be one) and allocate there.
2139 // The top of page block is always wasted, because it is too small to hold a
2140 // map.
2141 HeapObject* FixedSpace::AllocateInNextPage(Page* current_page,
2142 int size_in_bytes) {
2143 ASSERT(current_page->next_page()->is_valid());
2144 ASSERT(allocation_info_.top == PageAllocationLimit(current_page));
2145 ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
2146 Page* next_page = current_page->next_page();
2147 next_page->ClearGCFields();
2148 current_page->SetAllocationWatermark(allocation_info_.top);
2149 accounting_stats_.WasteBytes(page_extra_);
2150 SetAllocationInfo(&allocation_info_, next_page);
2151 return AllocateLinearly(&allocation_info_, size_in_bytes);
2152 }
2153
2154
2155 void FixedSpace::DeallocateBlock(Address start,
2156 int size_in_bytes,
2157 bool add_to_freelist) {
2158 // Free-list elements in fixed space are assumed to have a fixed size.
2159 // We break the free block into chunks and add them to the free list
2160 // individually.
2161 int size = object_size_in_bytes();
2162 ASSERT(size_in_bytes % size == 0);
2163 Address end = start + size_in_bytes;
2164 for (Address a = start; a < end; a += size) {
2165 Free(a, add_to_freelist);
2166 }
2167 }
2168
2169
2170 #ifdef DEBUG
2171 void FixedSpace::ReportStatistics() {
2172 int pct = static_cast<int>(Available() * 100 / Capacity());
2173 PrintF(" capacity: %" V8_PTR_PREFIX "d"
2174 ", waste: %" V8_PTR_PREFIX "d"
2175 ", available: %" V8_PTR_PREFIX "d, %%%d\n",
2176 Capacity(), Waste(), Available(), pct);
2177
2178 ClearHistograms();
2179 HeapObjectIterator obj_it(this);
2180 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
2181 CollectHistogramInfo(obj);
2182 ReportHistogram(false);
2183 }
2184 #endif
2185
2186
2187 // ----------------------------------------------------------------------------- 1787 // -----------------------------------------------------------------------------
2188 // MapSpace implementation 1788 // MapSpace implementation
2189 1789
2190 void MapSpace::PrepareForMarkCompact(bool will_compact) { 1790 void MapSpace::PrepareForMarkCompact(bool will_compact) {
2191 // Call prepare of the super class. 1791 // Call prepare of the super class.
2192 FixedSpace::PrepareForMarkCompact(will_compact); 1792 FixedSpace::PrepareForMarkCompact(will_compact);
2193 } 1793 }
2194 1794
2195 1795
2196 #ifdef DEBUG 1796 #ifdef DEBUG
2197 void MapSpace::VerifyObject(HeapObject* object) { 1797 void MapSpace::VerifyObject(HeapObject* object) {
2198 // The object should be a map or a free-list node. 1798 // The object should be a map or a free-list node.
2199 ASSERT(object->IsMap() || object->IsByteArray()); 1799 ASSERT(object->IsMap() || object->IsFreeSpace());
2200 } 1800 }
2201 #endif 1801 #endif
2202 1802
2203 1803
2204 // ----------------------------------------------------------------------------- 1804 // -----------------------------------------------------------------------------
2205 // GlobalPropertyCellSpace implementation 1805 // GlobalPropertyCellSpace implementation
2206 1806
2207 #ifdef DEBUG 1807 #ifdef DEBUG
2208 void CellSpace::VerifyObject(HeapObject* object) { 1808 void CellSpace::VerifyObject(HeapObject* object) {
2209 // The object should be a global object property cell or a free-list node. 1809 // The object should be a global object property cell or a free-list node.
(...skipping 304 matching lines...)
2514 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) { 2114 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
2515 if (obj->IsCode()) { 2115 if (obj->IsCode()) {
2516 Code* code = Code::cast(obj); 2116 Code* code = Code::cast(obj);
2517 code_kind_statistics[code->kind()] += code->Size(); 2117 code_kind_statistics[code->kind()] += code->Size();
2518 } 2118 }
2519 } 2119 }
2520 } 2120 }
2521 #endif // DEBUG 2121 #endif // DEBUG
2522 2122
2523 } } // namespace v8::internal 2123 } } // namespace v8::internal