Chromium Code Reviews

Side by Side Diff: src/spaces.cc

Issue 2255004: Cardmarking writebarrier. (Closed)
Patch Set: Created 10 years, 6 months ago
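This patch replaces V8's word-granularity remembered set with dirty region marks kept in each page header (see SetRegionMarks, kAllRegionsCleanMarks, and AllocationWatermark throughout the diff below). As a rough illustration of the card-marking idea only, not the code under review: each page is split into fixed-size regions, a store of a new-space pointer into an old-space slot marks that slot's region dirty, and the scavenger later rescans just the dirty regions. A minimal self-contained sketch, with all sizes and names hypothetical:

#include <cstddef>
#include <cstdint>

// Illustrative constants; the real page and region sizes differ.
constexpr uintptr_t kPageSize = 8 * 1024;
constexpr uintptr_t kPageAlignmentMask = kPageSize - 1;
constexpr size_t kRegionsPerPage = 32;                     // one mark bit per region
constexpr uintptr_t kRegionSize = kPageSize / kRegionsPerPage;

struct PageHeaderStub {
  uint32_t region_marks;  // bit i set => region i may contain old-to-new pointers
};

// Write barrier: after storing 'value' into '*slot', record the slot's region
// as dirty if the store created an old-to-new pointer.
inline void RecordWrite(PageHeaderStub* page, void* slot, bool value_is_in_new_space) {
  if (!value_is_in_new_space) return;
  uintptr_t offset = reinterpret_cast<uintptr_t>(slot) & kPageAlignmentMask;
  page->region_marks |= 1u << (offset / kRegionSize);
}

During a scavenge only regions whose bit is set need to be rescanned for pointers into new space; clean pages are skipped entirely, which is what SetRegionMarks(Page::kAllRegionsCleanMarks) re-establishes in the code below.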
1 // Copyright 2006-2008 the V8 project authors. All rights reserved. 1 // Copyright 2006-2008 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 23 matching lines...)
34 namespace v8 { 34 namespace v8 {
35 namespace internal { 35 namespace internal {
36 36
37 // For contiguous spaces, top should be in the space (or at the end) and limit 37 // For contiguous spaces, top should be in the space (or at the end) and limit
38 // should be the end of the space. 38 // should be the end of the space.
39 #define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \ 39 #define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
40 ASSERT((space).low() <= (info).top \ 40 ASSERT((space).low() <= (info).top \
41 && (info).top <= (space).high() \ 41 && (info).top <= (space).high() \
42 && (info).limit == (space).high()) 42 && (info).limit == (space).high())
43 43
44 intptr_t Page::watermark_invalidated_mark_ = Page::WATERMARK_INVALIDATED;
44 45
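The new static watermark_invalidated_mark_ appears to let the collector invalidate every page's cached allocation watermark without walking the page list: each page records the token it was last validated against, and flipping the global token makes all recorded tokens stale at once. That reading is an assumption; the sketch below shows the general lazy-invalidation pattern with hypothetical names, not the patch's actual fields:

#include <cstdint>

// Global token; changing it lazily invalidates every page's watermark.
static uintptr_t g_watermark_token = 0;

struct PageTokenStub {
  uintptr_t watermark_token = ~uintptr_t{0};  // starts out invalid

  bool WatermarkIsValid() const { return watermark_token == g_watermark_token; }
  void ValidateWatermark() { watermark_token = g_watermark_token; }
};

// O(1) invalidation of all pages: no page is visited, the stored tokens
// simply stop matching the new global value.
inline void InvalidateAllWatermarks() { ++g_watermark_token; }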
45 // ---------------------------------------------------------------------------- 46 // ----------------------------------------------------------------------------
46 // HeapObjectIterator 47 // HeapObjectIterator
47 48
48 HeapObjectIterator::HeapObjectIterator(PagedSpace* space) { 49 HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
49 Initialize(space->bottom(), space->top(), NULL); 50 Initialize(space->bottom(), space->top(), NULL);
50 } 51 }
51 52
52 53
53 HeapObjectIterator::HeapObjectIterator(PagedSpace* space, 54 HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
(...skipping 78 matching lines...)
132 } 133 }
133 } 134 }
134 #endif 135 #endif
135 stop_page_ = space->last_page_; 136 stop_page_ = space->last_page_;
136 break; 137 break;
137 } 138 }
138 } 139 }
139 140
140 141
141 // ----------------------------------------------------------------------------- 142 // -----------------------------------------------------------------------------
142 // Page
143
144 #ifdef DEBUG
145 Page::RSetState Page::rset_state_ = Page::IN_USE;
146 #endif
147
148 // -----------------------------------------------------------------------------
149 // CodeRange 143 // CodeRange
150 144
151 List<CodeRange::FreeBlock> CodeRange::free_list_(0); 145 List<CodeRange::FreeBlock> CodeRange::free_list_(0);
152 List<CodeRange::FreeBlock> CodeRange::allocation_list_(0); 146 List<CodeRange::FreeBlock> CodeRange::allocation_list_(0);
153 int CodeRange::current_allocation_block_index_ = 0; 147 int CodeRange::current_allocation_block_index_ = 0;
154 VirtualMemory* CodeRange::code_range_ = NULL; 148 VirtualMemory* CodeRange::code_range_ = NULL;
155 149
156 150
157 bool CodeRange::Setup(const size_t requested) { 151 bool CodeRange::Setup(const size_t requested) {
158 ASSERT(code_range_ == NULL); 152 ASSERT(code_range_ == NULL);
(...skipping 358 matching lines...)
517 size_t chunk_size = chunks_[chunk_id].size(); 511 size_t chunk_size = chunks_[chunk_id].size();
518 Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize); 512 Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize);
519 ASSERT(pages_in_chunk <= 513 ASSERT(pages_in_chunk <=
520 ((OffsetFrom(high) - OffsetFrom(low)) / Page::kPageSize)); 514 ((OffsetFrom(high) - OffsetFrom(low)) / Page::kPageSize));
521 #endif 515 #endif
522 516
523 Address page_addr = low; 517 Address page_addr = low;
524 for (int i = 0; i < pages_in_chunk; i++) { 518 for (int i = 0; i < pages_in_chunk; i++) {
525 Page* p = Page::FromAddress(page_addr); 519 Page* p = Page::FromAddress(page_addr);
526 p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id; 520 p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
521 p->InvalidateWatermark(true);
527 p->SetIsLargeObjectPage(false); 522 p->SetIsLargeObjectPage(false);
523 p->SetAllocationWatermark(p->ObjectAreaStart());
524 p->SetCachedAllocationWatermark(p->ObjectAreaStart());
528 page_addr += Page::kPageSize; 525 page_addr += Page::kPageSize;
529 } 526 }
530 527
531 // Set the next page of the last page to 0. 528 // Set the next page of the last page to 0.
532 Page* last_page = Page::FromAddress(page_addr - Page::kPageSize); 529 Page* last_page = Page::FromAddress(page_addr - Page::kPageSize);
533 last_page->opaque_header = OffsetFrom(0) | chunk_id; 530 last_page->opaque_header = OffsetFrom(0) | chunk_id;
534 531
535 return Page::FromAddress(low); 532 return Page::FromAddress(low);
536 } 533 }
537 534
(...skipping 136 matching lines...)
674 671
675 if (prev->is_valid()) { 672 if (prev->is_valid()) {
676 SetNextPage(prev, Page::FromAddress(page_addr)); 673 SetNextPage(prev, Page::FromAddress(page_addr));
677 } 674 }
678 675
679 for (int i = 0; i < pages_in_chunk; i++) { 676 for (int i = 0; i < pages_in_chunk; i++) {
680 Page* p = Page::FromAddress(page_addr); 677 Page* p = Page::FromAddress(page_addr);
681 p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id; 678 p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
682 page_addr += Page::kPageSize; 679 page_addr += Page::kPageSize;
683 680
681 p->InvalidateWatermark(true);
684 if (p->WasInUseBeforeMC()) { 682 if (p->WasInUseBeforeMC()) {
685 *last_page_in_use = p; 683 *last_page_in_use = p;
686 } 684 }
687 } 685 }
688 686
689 // Set the next page of the last page to 0. 687 // Set the next page of the last page to 0.
690 Page* last_page = Page::FromAddress(page_addr - Page::kPageSize); 688 Page* last_page = Page::FromAddress(page_addr - Page::kPageSize);
691 last_page->opaque_header = OffsetFrom(0) | chunk_id; 689 last_page->opaque_header = OffsetFrom(0) | chunk_id;
692 690
693 if (last_page->WasInUseBeforeMC()) { 691 if (last_page->WasInUseBeforeMC()) {
(...skipping 43 matching lines...)
737 if (!first_page_->is_valid()) return false; 735 if (!first_page_->is_valid()) return false;
738 } 736 }
739 737
740 // We are sure that the first page is valid and that we have at least one 738 // We are sure that the first page is valid and that we have at least one
741 // page. 739 // page.
742 ASSERT(first_page_->is_valid()); 740 ASSERT(first_page_->is_valid());
743 ASSERT(num_pages > 0); 741 ASSERT(num_pages > 0);
744 accounting_stats_.ExpandSpace(num_pages * Page::kObjectAreaSize); 742 accounting_stats_.ExpandSpace(num_pages * Page::kObjectAreaSize);
745 ASSERT(Capacity() <= max_capacity_); 743 ASSERT(Capacity() <= max_capacity_);
746 744
747 // Sequentially initialize remembered sets in the newly allocated 745 // Sequentially clear region marks in the newly allocated
748 // pages and cache the current last page in the space. 746 // pages and cache the current last page in the space.
749 for (Page* p = first_page_; p->is_valid(); p = p->next_page()) { 747 for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
750 p->ClearRSet(); 748 p->SetRegionMarks(Page::kAllRegionsCleanMarks);
751 last_page_ = p; 749 last_page_ = p;
752 } 750 }
753 751
754 // Use first_page_ for allocation. 752 // Use first_page_ for allocation.
755 SetAllocationInfo(&allocation_info_, first_page_); 753 SetAllocationInfo(&allocation_info_, first_page_);
756 754
757 page_list_is_chunk_ordered_ = true; 755 page_list_is_chunk_ordered_ = true;
758 756
759 return true; 757 return true;
760 } 758 }
(...skipping 26 matching lines...)
787 Page* page = first_page_; 785 Page* page = first_page_;
788 while (page->is_valid()) { 786 while (page->is_valid()) {
789 MemoryAllocator::UnprotectChunkFromPage(page); 787 MemoryAllocator::UnprotectChunkFromPage(page);
790 page = MemoryAllocator::FindLastPageInSameChunk(page)->next_page(); 788 page = MemoryAllocator::FindLastPageInSameChunk(page)->next_page();
791 } 789 }
792 } 790 }
793 791
794 #endif 792 #endif
795 793
796 794
797 void PagedSpace::ClearRSet() { 795 void PagedSpace::MarkAllPagesClean() {
798 PageIterator it(this, PageIterator::ALL_PAGES); 796 PageIterator it(this, PageIterator::ALL_PAGES);
799 while (it.has_next()) { 797 while (it.has_next()) {
800 it.next()->ClearRSet(); 798 it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks);
801 } 799 }
802 } 800 }
803 801
804 802
805 Object* PagedSpace::FindObject(Address addr) { 803 Object* PagedSpace::FindObject(Address addr) {
806 // Note: this function can only be called before or after mark-compact GC 804 // Note: this function can only be called before or after mark-compact GC
807 // because it accesses map pointers. 805 // because it accesses map pointers.
808 ASSERT(!MarkCompactCollector::in_use()); 806 ASSERT(!MarkCompactCollector::in_use());
809 807
810 if (!Contains(addr)) return Failure::Exception(); 808 if (!Contains(addr)) return Failure::Exception();
(...skipping 82 matching lines...)
893 ASSERT(current_page->next_page()->is_valid()); 891 ASSERT(current_page->next_page()->is_valid());
894 // We do not add the top of page block for current page to the space's 892 // We do not add the top of page block for current page to the space's
895 // free list---the block may contain live objects so we cannot write 893 // free list---the block may contain live objects so we cannot write
896 // bookkeeping information to it. Instead, we will recover top of page 894 // bookkeeping information to it. Instead, we will recover top of page
897 // blocks when we move objects to their new locations. 895 // blocks when we move objects to their new locations.
898 // 896 //
899 // We do however write the allocation pointer to the page. The encoding 897 // We do however write the allocation pointer to the page. The encoding
900 // of forwarding addresses is as an offset in terms of live bytes, so we 898 // of forwarding addresses is as an offset in terms of live bytes, so we
901 // need quick access to the allocation top of each page to decode 899 // need quick access to the allocation top of each page to decode
902 // forwarding addresses. 900 // forwarding addresses.
903 current_page->mc_relocation_top = mc_forwarding_info_.top; 901 current_page->SetAllocationWatermark(mc_forwarding_info_.top);
902 current_page->next_page()->InvalidateWatermark(true);
904 SetAllocationInfo(&mc_forwarding_info_, current_page->next_page()); 903 SetAllocationInfo(&mc_forwarding_info_, current_page->next_page());
905 return AllocateLinearly(&mc_forwarding_info_, size_in_bytes); 904 return AllocateLinearly(&mc_forwarding_info_, size_in_bytes);
906 } 905 }
907 906
908 907
909 bool PagedSpace::Expand(Page* last_page) { 908 bool PagedSpace::Expand(Page* last_page) {
910 ASSERT(max_capacity_ % Page::kObjectAreaSize == 0); 909 ASSERT(max_capacity_ % Page::kObjectAreaSize == 0);
911 ASSERT(Capacity() % Page::kObjectAreaSize == 0); 910 ASSERT(Capacity() % Page::kObjectAreaSize == 0);
912 911
913 if (Capacity() == max_capacity_) return false; 912 if (Capacity() == max_capacity_) return false;
914 913
915 ASSERT(Capacity() < max_capacity_); 914 ASSERT(Capacity() < max_capacity_);
916 // Last page must be valid and its next page is invalid. 915 // Last page must be valid and its next page is invalid.
917 ASSERT(last_page->is_valid() && !last_page->next_page()->is_valid()); 916 ASSERT(last_page->is_valid() && !last_page->next_page()->is_valid());
918 917
919 int available_pages = (max_capacity_ - Capacity()) / Page::kObjectAreaSize; 918 int available_pages = (max_capacity_ - Capacity()) / Page::kObjectAreaSize;
920 if (available_pages <= 0) return false; 919 if (available_pages <= 0) return false;
921 920
922 int desired_pages = Min(available_pages, MemoryAllocator::kPagesPerChunk); 921 int desired_pages = Min(available_pages, MemoryAllocator::kPagesPerChunk);
923 Page* p = MemoryAllocator::AllocatePages(desired_pages, &desired_pages, this); 922 Page* p = MemoryAllocator::AllocatePages(desired_pages, &desired_pages, this);
924 if (!p->is_valid()) return false; 923 if (!p->is_valid()) return false;
925 924
926 accounting_stats_.ExpandSpace(desired_pages * Page::kObjectAreaSize); 925 accounting_stats_.ExpandSpace(desired_pages * Page::kObjectAreaSize);
927 ASSERT(Capacity() <= max_capacity_); 926 ASSERT(Capacity() <= max_capacity_);
928 927
929 MemoryAllocator::SetNextPage(last_page, p); 928 MemoryAllocator::SetNextPage(last_page, p);
930 929
931 // Sequentially clear remembered set of new pages and cache the 930 // Sequentially clear region marks of new pages and cache the
932 // new last page in the space. 931 // new last page in the space.
933 while (p->is_valid()) { 932 while (p->is_valid()) {
934 p->ClearRSet(); 933 p->SetRegionMarks(Page::kAllRegionsCleanMarks);
935 last_page_ = p; 934 last_page_ = p;
936 p = p->next_page(); 935 p = p->next_page();
937 } 936 }
938 937
939 return true; 938 return true;
940 } 939 }
941 940
942 941
943 #ifdef DEBUG 942 #ifdef DEBUG
944 int PagedSpace::CountTotalPages() { 943 int PagedSpace::CountTotalPages() {
(...skipping 78 matching lines...)
1023 Page* top_page = Page::FromAllocationTop(allocation_info_.top); 1022 Page* top_page = Page::FromAllocationTop(allocation_info_.top);
1024 ASSERT(MemoryAllocator::IsPageInSpace(top_page, this)); 1023 ASSERT(MemoryAllocator::IsPageInSpace(top_page, this));
1025 1024
1026 // Loop over all the pages. 1025 // Loop over all the pages.
1027 bool above_allocation_top = false; 1026 bool above_allocation_top = false;
1028 Page* current_page = first_page_; 1027 Page* current_page = first_page_;
1029 while (current_page->is_valid()) { 1028 while (current_page->is_valid()) {
1030 if (above_allocation_top) { 1029 if (above_allocation_top) {
1031 // We don't care what's above the allocation top. 1030 // We don't care what's above the allocation top.
1032 } else { 1031 } else {
1033 // Unless this is the last page in the space containing allocated
1034 // objects, the allocation top should be at a constant offset from the
1035 // object area end.
1036 Address top = current_page->AllocationTop(); 1032 Address top = current_page->AllocationTop();
1037 if (current_page == top_page) { 1033 if (current_page == top_page) {
1038 ASSERT(top == allocation_info_.top); 1034 ASSERT(top == allocation_info_.top);
1039 // The next page will be above the allocation top. 1035 // The next page will be above the allocation top.
1040 above_allocation_top = true; 1036 above_allocation_top = true;
1041 } else {
1042 ASSERT(top == PageAllocationLimit(current_page));
1043 } 1037 }
1044 1038
1045 // It should be packed with objects from the bottom to the top. 1039 // It should be packed with objects from the bottom to the top.
1046 Address current = current_page->ObjectAreaStart(); 1040 Address current = current_page->ObjectAreaStart();
1047 while (current < top) { 1041 while (current < top) {
1048 HeapObject* object = HeapObject::FromAddress(current); 1042 HeapObject* object = HeapObject::FromAddress(current);
1049 1043
1050 // The first word should be a map, and we expect all map pointers to 1044 // The first word should be a map, and we expect all map pointers to
1051 // be in map space. 1045 // be in map space.
1052 Map* map = object->map(); 1046 Map* map = object->map();
1053 ASSERT(map->IsMap()); 1047 ASSERT(map->IsMap());
1054 ASSERT(Heap::map_space()->Contains(map)); 1048 ASSERT(Heap::map_space()->Contains(map));
1055 1049
1056 // Perform space-specific object verification. 1050 // Perform space-specific object verification.
1057 VerifyObject(object); 1051 VerifyObject(object);
1058 1052
1059 // The object itself should look OK. 1053 // The object itself should look OK.
1060 object->Verify(); 1054 object->Verify();
1061 1055
1062 // All the interior pointers should be contained in the heap and 1056 // All the interior pointers should be contained in the heap, and
1063 // have their remembered set bits set if required as determined 1057 // page regions covering intergenerational references should be
1064 // by the visitor. 1058 // marked dirty.
1065 int size = object->Size(); 1059 int size = object->Size();
1066 object->IterateBody(map->instance_type(), size, visitor); 1060 object->IterateBody(map->instance_type(), size, visitor);
1067 1061
1068 current += size; 1062 current += size;
1069 } 1063 }
1070 1064
1071 // The allocation pointer should not be in the middle of an object. 1065 // The allocation pointer should not be in the middle of an object.
1072 ASSERT(current == top); 1066 ASSERT(current == top);
1073 } 1067 }
1074 1068
(...skipping 38 matching lines...)
1113 return false; 1107 return false;
1114 } 1108 }
1115 if (!from_space_.Setup(start + maximum_semispace_capacity, 1109 if (!from_space_.Setup(start + maximum_semispace_capacity,
1116 initial_semispace_capacity, 1110 initial_semispace_capacity,
1117 maximum_semispace_capacity)) { 1111 maximum_semispace_capacity)) {
1118 return false; 1112 return false;
1119 } 1113 }
1120 1114
1121 start_ = start; 1115 start_ = start;
1122 address_mask_ = ~(size - 1); 1116 address_mask_ = ~(size - 1);
1123 object_mask_ = address_mask_ | kHeapObjectTag; 1117 object_mask_ = address_mask_ | kHeapObjectTagMask;
1124 object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag; 1118 object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
1125 1119
1126 allocation_info_.top = to_space_.low(); 1120 allocation_info_.top = to_space_.low();
1127 allocation_info_.limit = to_space_.high(); 1121 allocation_info_.limit = to_space_.high();
1128 mc_forwarding_info_.top = NULL; 1122 mc_forwarding_info_.top = NULL;
1129 mc_forwarding_info_.limit = NULL; 1123 mc_forwarding_info_.limit = NULL;
1130 1124
1131 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); 1125 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1132 return true; 1126 return true;
1133 } 1127 }
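The masks set up above reduce the "is this a tagged object inside this space?" test to a single AND and compare; switching object_mask_ from kHeapObjectTag to kHeapObjectTagMask makes both low tag bits participate in the comparison rather than only the lowest one. A hedged sketch of the check these fields enable, using made-up start/size constants rather than V8's:

#include <cstdint>

// A space of power-of-two size, aligned to its size (illustrative values).
constexpr uintptr_t kSpaceStart = 0x40000000;
constexpr uintptr_t kSpaceSize  = 0x00800000;
constexpr uintptr_t kHeapObjectTag = 1;       // tag of heap object pointers
constexpr uintptr_t kHeapObjectTagMask = 3;   // covers both low tag bits

constexpr uintptr_t kAddressMask    = ~(kSpaceSize - 1);
constexpr uintptr_t kObjectMask     = kAddressMask | kHeapObjectTagMask;
constexpr uintptr_t kObjectExpected = kSpaceStart | kHeapObjectTag;

// True iff 'tagged' carries the heap-object tag and its address lies in the space.
inline bool ContainsTaggedObject(uintptr_t tagged) {
  return (tagged & kObjectMask) == kObjectExpected;
}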
(...skipping 183 matching lines...)
1317 // otherwise. In the mark-compact collector, the memory region of the from 1311 // otherwise. In the mark-compact collector, the memory region of the from
1318 // space is used as the marking stack. It requires contiguous memory 1312 // space is used as the marking stack. It requires contiguous memory
1319 // addresses. 1313 // addresses.
1320 initial_capacity_ = initial_capacity; 1314 initial_capacity_ = initial_capacity;
1321 capacity_ = initial_capacity; 1315 capacity_ = initial_capacity;
1322 maximum_capacity_ = maximum_capacity; 1316 maximum_capacity_ = maximum_capacity;
1323 committed_ = false; 1317 committed_ = false;
1324 1318
1325 start_ = start; 1319 start_ = start;
1326 address_mask_ = ~(maximum_capacity - 1); 1320 address_mask_ = ~(maximum_capacity - 1);
1327 object_mask_ = address_mask_ | kHeapObjectTag; 1321 object_mask_ = address_mask_ | kHeapObjectTagMask;
1328 object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag; 1322 object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
1329 age_mark_ = start_; 1323 age_mark_ = start_;
1330 1324
1331 return Commit(); 1325 return Commit();
1332 } 1326 }
1333 1327
1334 1328
1335 void SemiSpace::TearDown() { 1329 void SemiSpace::TearDown() {
1336 start_ = NULL; 1330 start_ = NULL;
1337 capacity_ = 0; 1331 capacity_ = 0;
(...skipping 289 matching lines...)
1627 ASSERT(size_in_bytes > 0); 1621 ASSERT(size_in_bytes > 0);
1628 ASSERT(IsAligned(size_in_bytes, kPointerSize)); 1622 ASSERT(IsAligned(size_in_bytes, kPointerSize));
1629 1623
1630 // We write a map and possibly size information to the block. If the block 1624 // We write a map and possibly size information to the block. If the block
1631 // is big enough to be a ByteArray with at least one extra word (the next 1625 // is big enough to be a ByteArray with at least one extra word (the next
1632 // pointer), we set its map to be the byte array map and its size to an 1626 // pointer), we set its map to be the byte array map and its size to an
1633 // appropriate array length for the desired size from HeapObject::Size(). 1627 // appropriate array length for the desired size from HeapObject::Size().
1634 // If the block is too small (e.g., one or two words) to hold both a size 1628 // If the block is too small (e.g., one or two words) to hold both a size
1635 // field and a next pointer, we give it a filler map that gives it the 1629 // field and a next pointer, we give it a filler map that gives it the
1636 // correct size. 1630 // correct size.
1637 if (size_in_bytes > ByteArray::kAlignedSize) { 1631 if (size_in_bytes > ByteArray::kHeaderSize) {
1638 set_map(Heap::raw_unchecked_byte_array_map()); 1632 set_map(Heap::raw_unchecked_byte_array_map());
1639 // Can't use ByteArray::cast because it fails during deserialization. 1633 // Can't use ByteArray::cast because it fails during deserialization.
1640 ByteArray* this_as_byte_array = reinterpret_cast<ByteArray*>(this); 1634 ByteArray* this_as_byte_array = reinterpret_cast<ByteArray*>(this);
1641 this_as_byte_array->set_length(ByteArray::LengthFor(size_in_bytes)); 1635 this_as_byte_array->set_length(ByteArray::LengthFor(size_in_bytes));
1642 } else if (size_in_bytes == kPointerSize) { 1636 } else if (size_in_bytes == kPointerSize) {
1643 set_map(Heap::raw_unchecked_one_pointer_filler_map()); 1637 set_map(Heap::raw_unchecked_one_pointer_filler_map());
1644 } else if (size_in_bytes == 2 * kPointerSize) { 1638 } else if (size_in_bytes == 2 * kPointerSize) {
1645 set_map(Heap::raw_unchecked_two_pointer_filler_map()); 1639 set_map(Heap::raw_unchecked_two_pointer_filler_map());
1646 } else { 1640 } else {
1647 UNREACHABLE(); 1641 UNREACHABLE();
(...skipping 176 matching lines...)
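The comment in set_size() above describes how a free block is disguised as a real heap object so iterators can step over it: blocks larger than a byte-array header get the byte array map (with the length field encoding the block size), and one- or two-word blocks get dedicated filler maps. A stand-alone sketch of that size dispatch, assuming a hypothetical two-word byte-array header; it is not the V8 code itself:

#include <cassert>
#include <cstddef>

constexpr size_t kPointerSize = sizeof(void*);
constexpr size_t kByteArrayHeaderSize = 2 * kPointerSize;  // map + length, assumed

enum class FreeBlockKind {
  kByteArray,         // large enough to carry a length field and a next pointer
  kOnePointerFiller,  // exactly one word
  kTwoPointerFiller,  // exactly two words
};

// Decide which fake map a free block of 'size_in_bytes' should receive so a
// heap iterator can later recover its size from that map.
inline FreeBlockKind ClassifyFreeBlock(size_t size_in_bytes) {
  assert(size_in_bytes > 0 && size_in_bytes % kPointerSize == 0);
  if (size_in_bytes > kByteArrayHeaderSize) return FreeBlockKind::kByteArray;
  if (size_in_bytes == kPointerSize) return FreeBlockKind::kOnePointerFiller;
  if (size_in_bytes == 2 * kPointerSize) return FreeBlockKind::kTwoPointerFiller;
  assert(false && "free block size that cannot be represented");  // UNREACHABLE()
  return FreeBlockKind::kTwoPointerFiller;
}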
1824 1818
1825 1819
1826 FixedSizeFreeList::FixedSizeFreeList(AllocationSpace owner, int object_size) 1820 FixedSizeFreeList::FixedSizeFreeList(AllocationSpace owner, int object_size)
1827 : owner_(owner), object_size_(object_size) { 1821 : owner_(owner), object_size_(object_size) {
1828 Reset(); 1822 Reset();
1829 } 1823 }
1830 1824
1831 1825
1832 void FixedSizeFreeList::Reset() { 1826 void FixedSizeFreeList::Reset() {
1833 available_ = 0; 1827 available_ = 0;
1834 head_ = NULL; 1828 head_ = tail_ = NULL;
1835 } 1829 }
1836 1830
1837 1831
1838 void FixedSizeFreeList::Free(Address start) { 1832 void FixedSizeFreeList::Free(Address start) {
1839 #ifdef DEBUG 1833 #ifdef DEBUG
1840 MemoryAllocator::ZapBlock(start, object_size_); 1834 MemoryAllocator::ZapBlock(start, object_size_);
1841 #endif 1835 #endif
1842 // We only use the freelists with mark-sweep. 1836 // We only use the freelists with mark-sweep.
1843 ASSERT(!MarkCompactCollector::IsCompacting()); 1837 ASSERT(!MarkCompactCollector::IsCompacting());
1844 FreeListNode* node = FreeListNode::FromAddress(start); 1838 FreeListNode* node = FreeListNode::FromAddress(start);
1845 node->set_size(object_size_); 1839 node->set_size(object_size_);
1846 node->set_next(head_); 1840 node->set_next(NULL);
1847 head_ = node->address(); 1841 if (head_ == NULL) {
1842 tail_ = head_ = node->address();
1843 } else {
1844 FreeListNode::FromAddress(tail_)->set_next(node->address());
1845 tail_ = node->address();
1846 }
1848 available_ += object_size_; 1847 available_ += object_size_;
1849 } 1848 }
1850 1849
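Free() previously pushed blocks on the head of the list; with the tail_ pointer added above it now appends, so Allocate() hands blocks back in the order they were freed (oldest first). A minimal sketch of such a FIFO free list; it uses a separate hypothetical FreeNode type for clarity, whereas V8 threads the links through the freed blocks themselves:

#include <cstddef>

struct FreeNode {
  void* block = nullptr;
  FreeNode* next = nullptr;
};

class FifoFreeList {
 public:
  void Free(FreeNode* node) {
    node->next = nullptr;
    if (head_ == nullptr) {
      head_ = tail_ = node;   // first element: head and tail coincide
    } else {
      tail_->next = node;     // append at the tail, preserving free order
      tail_ = node;
    }
    ++available_;
  }

  FreeNode* Allocate() {      // returns nullptr when empty (caller must GC or expand)
    if (head_ == nullptr) return nullptr;
    FreeNode* node = head_;
    head_ = node->next;
    if (head_ == nullptr) tail_ = nullptr;
    --available_;
    return node;
  }

  size_t available() const { return available_; }

 private:
  FreeNode* head_ = nullptr;
  FreeNode* tail_ = nullptr;
  size_t available_ = 0;
};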
1851 1850
1852 Object* FixedSizeFreeList::Allocate() { 1851 Object* FixedSizeFreeList::Allocate() {
1853 if (head_ == NULL) { 1852 if (head_ == NULL) {
1854 return Failure::RetryAfterGC(object_size_, owner_); 1853 return Failure::RetryAfterGC(object_size_, owner_);
1855 } 1854 }
1856 1855
1857 ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep. 1856 ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep.
(...skipping 42 matching lines...)
1900 ASSERT(Waste() == 0); 1899 ASSERT(Waste() == 0);
1901 ASSERT(AvailableFree() == 0); 1900 ASSERT(AvailableFree() == 0);
1902 1901
1903 // Build the free list for the space. 1902 // Build the free list for the space.
1904 int computed_size = 0; 1903 int computed_size = 0;
1905 PageIterator it(this, PageIterator::PAGES_USED_BY_MC); 1904 PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
1906 while (it.has_next()) { 1905 while (it.has_next()) {
1907 Page* p = it.next(); 1906 Page* p = it.next();
1908 // Space below the relocation pointer is allocated. 1907 // Space below the relocation pointer is allocated.
1909 computed_size += 1908 computed_size +=
1910 static_cast<int>(p->mc_relocation_top - p->ObjectAreaStart()); 1909 static_cast<int>(p->AllocationWatermark() - p->ObjectAreaStart());
1911 if (it.has_next()) { 1910 if (it.has_next()) {
1912 // Free the space at the top of the page. We cannot use 1911 // Free the space at the top of the page.
1913 // p->mc_relocation_top after the call to Free (because Free will clear
1914 // remembered set bits).
1915 int extra_size = 1912 int extra_size =
1916 static_cast<int>(p->ObjectAreaEnd() - p->mc_relocation_top); 1913 static_cast<int>(p->ObjectAreaEnd() - p->AllocationWatermark());
1917 if (extra_size > 0) { 1914 if (extra_size > 0) {
1918 int wasted_bytes = free_list_.Free(p->mc_relocation_top, extra_size); 1915 int wasted_bytes = free_list_.Free(p->AllocationWatermark(),
1916 extra_size);
1919 // The bytes we have just "freed" to add to the free list were 1917 // The bytes we have just "freed" to add to the free list were
1920 // already accounted as available. 1918 // already accounted as available.
1921 accounting_stats_.WasteBytes(wasted_bytes); 1919 accounting_stats_.WasteBytes(wasted_bytes);
1922 } 1920 }
1923 } 1921 }
1924 } 1922 }
1925 1923
1926 // Make sure the computed size - based on the used portion of the pages in 1924 // Make sure the computed size - based on the used portion of the pages in
1927 // use - matches the size obtained while computing forwarding addresses. 1925 // use - matches the size obtained while computing forwarding addresses.
1928 ASSERT(computed_size == Size()); 1926 ASSERT(computed_size == Size());
(...skipping 27 matching lines...)
1956 MemoryAllocator::SetNextPage(prev, last->next_page()); 1954 MemoryAllocator::SetNextPage(prev, last->next_page());
1957 } 1955 }
1958 1956
1959 // Attach it after the last page. 1957 // Attach it after the last page.
1960 MemoryAllocator::SetNextPage(last_page_, first); 1958 MemoryAllocator::SetNextPage(last_page_, first);
1961 last_page_ = last; 1959 last_page_ = last;
1962 MemoryAllocator::SetNextPage(last, NULL); 1960 MemoryAllocator::SetNextPage(last, NULL);
1963 1961
1964 // Clean them up. 1962 // Clean them up.
1965 do { 1963 do {
1966 first->ClearRSet(); 1964 first->InvalidateWatermark(true);
1965 first->SetAllocationWatermark(first->ObjectAreaStart());
1966 first->SetCachedAllocationWatermark(first->ObjectAreaStart());
1967 first->SetRegionMarks(Page::kAllRegionsCleanMarks);
1967 first = first->next_page(); 1968 first = first->next_page();
1968 } while (first != NULL); 1969 } while (first != NULL);
1969 1970
1970 // Order of pages in this space might no longer be consistent with 1971 // Order of pages in this space might no longer be consistent with
1971 // order of pages in chunks. 1972 // order of pages in chunks.
1972 page_list_is_chunk_ordered_ = false; 1973 page_list_is_chunk_ordered_ = false;
1973 } 1974 }
1974 1975
1975 1976
1976 void PagedSpace::PrepareForMarkCompact(bool will_compact) { 1977 void PagedSpace::PrepareForMarkCompact(bool will_compact) {
(...skipping 19 matching lines...)
1996 MemoryAllocator::RelinkPageListInChunkOrder(this, 1997 MemoryAllocator::RelinkPageListInChunkOrder(this,
1997 &first_page_, 1998 &first_page_,
1998 &last_page_, 1999 &last_page_,
1999 &new_last_in_use); 2000 &new_last_in_use);
2000 ASSERT(new_last_in_use->is_valid()); 2001 ASSERT(new_last_in_use->is_valid());
2001 2002
2002 if (new_last_in_use != last_in_use) { 2003 if (new_last_in_use != last_in_use) {
2003 // Current allocation top points to a page which is now in the middle 2004 // Current allocation top points to a page which is now in the middle
2004 // of page list. We should move allocation top forward to the new last 2005 // of page list. We should move allocation top forward to the new last
2005 // used page so various object iterators will continue to work properly. 2006 // used page so various object iterators will continue to work properly.
2007 last_in_use->SetAllocationWatermark(last_in_use->AllocationTop());
2006 2008
2007 int size_in_bytes = static_cast<int>(PageAllocationLimit(last_in_use) - 2009 int size_in_bytes = static_cast<int>(PageAllocationLimit(last_in_use) -
2008 last_in_use->AllocationTop()); 2010 last_in_use->AllocationTop());
2009 2011
2010 if (size_in_bytes > 0) { 2012 if (size_in_bytes > 0) {
2011 // There is still some space left on this page. Create a fake 2013 // There is still some space left on this page. Create a fake
2012 // object which will occupy all free space on this page. 2014 // object which will occupy all free space on this page.
2013 // Otherwise iterators would not be able to scan this page 2015 // Otherwise iterators would not be able to scan this page
2014 // correctly. 2016 // correctly.
2015 2017
(...skipping 12 matching lines...)
2028 PageIterator pages_in_use_iterator(this, PageIterator::PAGES_IN_USE); 2030 PageIterator pages_in_use_iterator(this, PageIterator::PAGES_IN_USE);
2029 while (pages_in_use_iterator.has_next()) { 2031 while (pages_in_use_iterator.has_next()) {
2030 Page* p = pages_in_use_iterator.next(); 2032 Page* p = pages_in_use_iterator.next();
2031 if (!p->WasInUseBeforeMC()) { 2033 if (!p->WasInUseBeforeMC()) {
2032 // Empty page is in the middle of a sequence of used pages. 2034 // Empty page is in the middle of a sequence of used pages.
2033 // Create a fake object which will occupy all free space on this page. 2035 // Create a fake object which will occupy all free space on this page.
2034 // Otherwise iterators would not be able to scan this page correctly. 2036 // Otherwise iterators would not be able to scan this page correctly.
2035 int size_in_bytes = static_cast<int>(PageAllocationLimit(p) - 2037 int size_in_bytes = static_cast<int>(PageAllocationLimit(p) -
2036 p->ObjectAreaStart()); 2038 p->ObjectAreaStart());
2037 2039
2040 p->SetAllocationWatermark(p->ObjectAreaStart());
2038 Heap::CreateFillerObjectAt(p->ObjectAreaStart(), size_in_bytes); 2041 Heap::CreateFillerObjectAt(p->ObjectAreaStart(), size_in_bytes);
2039 } 2042 }
2040 } 2043 }
2041 2044
2042 page_list_is_chunk_ordered_ = true; 2045 page_list_is_chunk_ordered_ = true;
2043 } 2046 }
2044 } 2047 }
2045 } 2048 }
2046 2049
2047 2050
(...skipping 11 matching lines...)
2059 while (bytes_left_to_reserve > 0) { 2062 while (bytes_left_to_reserve > 0) {
2060 if (!reserved_page->next_page()->is_valid()) { 2063 if (!reserved_page->next_page()->is_valid()) {
2061 if (Heap::OldGenerationAllocationLimitReached()) return false; 2064 if (Heap::OldGenerationAllocationLimitReached()) return false;
2062 Expand(reserved_page); 2065 Expand(reserved_page);
2063 } 2066 }
2064 bytes_left_to_reserve -= Page::kPageSize; 2067 bytes_left_to_reserve -= Page::kPageSize;
2065 reserved_page = reserved_page->next_page(); 2068 reserved_page = reserved_page->next_page();
2066 if (!reserved_page->is_valid()) return false; 2069 if (!reserved_page->is_valid()) return false;
2067 } 2070 }
2068 ASSERT(TopPageOf(allocation_info_)->next_page()->is_valid()); 2071 ASSERT(TopPageOf(allocation_info_)->next_page()->is_valid());
2072 TopPageOf(allocation_info_)->next_page()->InvalidateWatermark(true);
2069 SetAllocationInfo(&allocation_info_, 2073 SetAllocationInfo(&allocation_info_,
2070 TopPageOf(allocation_info_)->next_page()); 2074 TopPageOf(allocation_info_)->next_page());
2071 return true; 2075 return true;
2072 } 2076 }
2073 2077
2074 2078
2075 // You have to call this last, since the implementation from PagedSpace 2079 // You have to call this last, since the implementation from PagedSpace
2076 // doesn't know that memory was 'promised' to large object space. 2080 // doesn't know that memory was 'promised' to large object space.
2077 bool LargeObjectSpace::ReserveSpace(int bytes) { 2081 bool LargeObjectSpace::ReserveSpace(int bytes) {
2078 return Heap::OldGenerationSpaceAvailable() >= bytes; 2082 return Heap::OldGenerationSpaceAvailable() >= bytes;
(...skipping 14 matching lines...)
2093 } 2097 }
2094 2098
2095 // There is no next page in this space. Try free list allocation unless that 2099 // There is no next page in this space. Try free list allocation unless that
2096 // is currently forbidden. 2100 // is currently forbidden.
2097 if (!Heap::linear_allocation()) { 2101 if (!Heap::linear_allocation()) {
2098 int wasted_bytes; 2102 int wasted_bytes;
2099 Object* result = free_list_.Allocate(size_in_bytes, &wasted_bytes); 2103 Object* result = free_list_.Allocate(size_in_bytes, &wasted_bytes);
2100 accounting_stats_.WasteBytes(wasted_bytes); 2104 accounting_stats_.WasteBytes(wasted_bytes);
2101 if (!result->IsFailure()) { 2105 if (!result->IsFailure()) {
2102 accounting_stats_.AllocateBytes(size_in_bytes); 2106 accounting_stats_.AllocateBytes(size_in_bytes);
2103 return HeapObject::cast(result); 2107
2108 HeapObject* obj = HeapObject::cast(result);
2109 Page* p = Page::FromAddress(obj->address());
2110
2111 if (obj->address() >= p->AllocationWatermark()) {
2112 // There should be no hole between the allocation watermark
2113 // and allocated object address.
2114 // Memory above the allocation watermark was not swept and
2115 // might contain garbage pointers to new space.
2116 ASSERT(obj->address() == p->AllocationWatermark());
2117 p->SetAllocationWatermark(obj->address() + size_in_bytes);
2118 }
2119
2120 return obj;
2104 } 2121 }
2105 } 2122 }
2106 2123
2107 // Free list allocation failed and there is no next page. Fail if we have 2124 // Free list allocation failed and there is no next page. Fail if we have
2108 // hit the old generation size limit that should cause a garbage 2125 // hit the old generation size limit that should cause a garbage
2109 // collection. 2126 // collection.
2110 if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) { 2127 if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) {
2111 return NULL; 2128 return NULL;
2112 } 2129 }
2113 2130
2114 // Try to expand the space and allocate in the new next page. 2131 // Try to expand the space and allocate in the new next page.
2115 ASSERT(!current_page->next_page()->is_valid()); 2132 ASSERT(!current_page->next_page()->is_valid());
2116 if (Expand(current_page)) { 2133 if (Expand(current_page)) {
2117 return AllocateInNextPage(current_page, size_in_bytes); 2134 return AllocateInNextPage(current_page, size_in_bytes);
2118 } 2135 }
2119 2136
2120 // Finally, fail. 2137 // Finally, fail.
2121 return NULL; 2138 return NULL;
2122 } 2139 }
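The block added in the free-list branch above keeps the page's allocation watermark consistent: memory below the watermark has been swept and is covered by valid region marks, memory above it has not been swept and may still hold stale pointers into new space, so an object handed out at the watermark must advance it with no hole in between. A compact sketch of that invariant, with a hypothetical page stub standing in for Page:

#include <cassert>
#include <cstdint>

using Address = uint8_t*;

struct PageStub {                          // stand-in for the fields touched above
  Address allocation_watermark = nullptr;

  Address AllocationWatermark() const { return allocation_watermark; }
  void SetAllocationWatermark(Address a) { allocation_watermark = a; }
};

// After carving 'size_in_bytes' at 'object_address' out of the free list,
// extend the watermark if the new object sits at (or past) it.
inline void UpdateWatermarkAfterFreeListAllocation(PageStub* page,
                                                   Address object_address,
                                                   int size_in_bytes) {
  if (object_address >= page->AllocationWatermark()) {
    // No hole is allowed between the watermark and the new object: the
    // unswept memory above the watermark must never be skipped over.
    assert(object_address == page->AllocationWatermark());
    page->SetAllocationWatermark(object_address + size_in_bytes);
  }
}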
2123 2140
2124 2141
2125 void OldSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) { 2142 void OldSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
2143 current_page->SetAllocationWatermark(allocation_info_.top);
2126 int free_size = 2144 int free_size =
2127 static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top); 2145 static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
2128 if (free_size > 0) { 2146 if (free_size > 0) {
2129 int wasted_bytes = free_list_.Free(allocation_info_.top, free_size); 2147 int wasted_bytes = free_list_.Free(allocation_info_.top, free_size);
2130 accounting_stats_.WasteBytes(wasted_bytes); 2148 accounting_stats_.WasteBytes(wasted_bytes);
2131 } 2149 }
2132 } 2150 }
2133 2151
2134 2152
2135 void FixedSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) { 2153 void FixedSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
2154 current_page->SetAllocationWatermark(allocation_info_.top);
2136 int free_size = 2155 int free_size =
2137 static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top); 2156 static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
2138 // In the fixed space free list all the free list items have the right size. 2157 // In the fixed space free list all the free list items have the right size.
2139 // We use up the rest of the page while preserving this invariant. 2158 // We use up the rest of the page while preserving this invariant.
2140 while (free_size >= object_size_in_bytes_) { 2159 while (free_size >= object_size_in_bytes_) {
2141 free_list_.Free(allocation_info_.top); 2160 free_list_.Free(allocation_info_.top);
2142 allocation_info_.top += object_size_in_bytes_; 2161 allocation_info_.top += object_size_in_bytes_;
2143 free_size -= object_size_in_bytes_; 2162 free_size -= object_size_in_bytes_;
2144 accounting_stats_.WasteBytes(object_size_in_bytes_); 2163 accounting_stats_.WasteBytes(object_size_in_bytes_);
2145 } 2164 }
2146 } 2165 }
2147 2166
2148 2167
2149 // Add the block at the top of the page to the space's free list, set the 2168 // Add the block at the top of the page to the space's free list, set the
2150 // allocation info to the next page (assumed to be one), and allocate 2169 // allocation info to the next page (assumed to be one), and allocate
2151 // linearly there. 2170 // linearly there.
2152 HeapObject* OldSpace::AllocateInNextPage(Page* current_page, 2171 HeapObject* OldSpace::AllocateInNextPage(Page* current_page,
2153 int size_in_bytes) { 2172 int size_in_bytes) {
2154 ASSERT(current_page->next_page()->is_valid()); 2173 ASSERT(current_page->next_page()->is_valid());
2174 Page* next_page = current_page->next_page();
2175 next_page->ClearGCFields();
2155 PutRestOfCurrentPageOnFreeList(current_page); 2176 PutRestOfCurrentPageOnFreeList(current_page);
2156 SetAllocationInfo(&allocation_info_, current_page->next_page()); 2177 SetAllocationInfo(&allocation_info_, next_page);
2157 return AllocateLinearly(&allocation_info_, size_in_bytes); 2178 return AllocateLinearly(&allocation_info_, size_in_bytes);
2158 } 2179 }
2159 2180
2160 2181
2161 #ifdef DEBUG 2182 #ifdef DEBUG
2162 struct CommentStatistic { 2183 struct CommentStatistic {
2163 const char* comment; 2184 const char* comment;
2164 int size; 2185 int size;
2165 int count; 2186 int count;
2166 void Clear() { 2187 void Clear() {
(...skipping 122 matching lines...)
2289 } 2310 }
2290 } 2311 }
2291 } 2312 }
2292 2313
2293 2314
2294 void OldSpace::ReportStatistics() { 2315 void OldSpace::ReportStatistics() {
2295 int pct = Available() * 100 / Capacity(); 2316 int pct = Available() * 100 / Capacity();
2296 PrintF(" capacity: %d, waste: %d, available: %d, %%%d\n", 2317 PrintF(" capacity: %d, waste: %d, available: %d, %%%d\n",
2297 Capacity(), Waste(), Available(), pct); 2318 Capacity(), Waste(), Available(), pct);
2298 2319
2299 // Report remembered set statistics.
2300 int rset_marked_pointers = 0;
2301 int rset_marked_arrays = 0;
2302 int rset_marked_array_elements = 0;
2303 int cross_gen_pointers = 0;
2304 int cross_gen_array_elements = 0;
2305
2306 PageIterator page_it(this, PageIterator::PAGES_IN_USE);
2307 while (page_it.has_next()) {
2308 Page* p = page_it.next();
2309
2310 for (Address rset_addr = p->RSetStart();
2311 rset_addr < p->RSetEnd();
2312 rset_addr += kIntSize) {
2313 int rset = Memory::int_at(rset_addr);
2314 if (rset != 0) {
2315 // Bits were set
2316 int intoff =
2317 static_cast<int>(rset_addr - p->address() - Page::kRSetOffset);
2318 int bitoff = 0;
2319 for (; bitoff < kBitsPerInt; ++bitoff) {
2320 if ((rset & (1 << bitoff)) != 0) {
2321 int bitpos = intoff*kBitsPerByte + bitoff;
2322 Address slot = p->OffsetToAddress(bitpos << kObjectAlignmentBits);
2323 Object** obj = reinterpret_cast<Object**>(slot);
2324 if (*obj == Heap::raw_unchecked_fixed_array_map()) {
2325 rset_marked_arrays++;
2326 FixedArray* fa = FixedArray::cast(HeapObject::FromAddress(slot));
2327
2328 rset_marked_array_elements += fa->length();
2329 // Manually inline FixedArray::IterateBody
2330 Address elm_start = slot + FixedArray::kHeaderSize;
2331 Address elm_stop = elm_start + fa->length() * kPointerSize;
2332 for (Address elm_addr = elm_start;
2333 elm_addr < elm_stop; elm_addr += kPointerSize) {
2334 // Filter non-heap-object pointers
2335 Object** elm_p = reinterpret_cast<Object**>(elm_addr);
2336 if (Heap::InNewSpace(*elm_p))
2337 cross_gen_array_elements++;
2338 }
2339 } else {
2340 rset_marked_pointers++;
2341 if (Heap::InNewSpace(*obj))
2342 cross_gen_pointers++;
2343 }
2344 }
2345 }
2346 }
2347 }
2348 }
2349
2350 pct = rset_marked_pointers == 0 ?
2351 0 : cross_gen_pointers * 100 / rset_marked_pointers;
2352 PrintF(" rset-marked pointers %d, to-new-space %d (%%%d)\n",
2353 rset_marked_pointers, cross_gen_pointers, pct);
2354 PrintF(" rset_marked arrays %d, ", rset_marked_arrays);
2355 PrintF(" elements %d, ", rset_marked_array_elements);
2356 pct = rset_marked_array_elements == 0 ? 0
2357 : cross_gen_array_elements * 100 / rset_marked_array_elements;
2358 PrintF(" pointers to new space %d (%%%d)\n", cross_gen_array_elements, pct);
2359 PrintF(" total rset-marked bits %d\n",
2360 (rset_marked_pointers + rset_marked_arrays));
2361 pct = (rset_marked_pointers + rset_marked_array_elements) == 0 ? 0
2362 : (cross_gen_pointers + cross_gen_array_elements) * 100 /
2363 (rset_marked_pointers + rset_marked_array_elements);
2364 PrintF(" total rset pointers %d, true cross generation ones %d (%%%d)\n",
2365 (rset_marked_pointers + rset_marked_array_elements),
2366 (cross_gen_pointers + cross_gen_array_elements),
2367 pct);
2368
2369 ClearHistograms(); 2320 ClearHistograms();
2370 HeapObjectIterator obj_it(this); 2321 HeapObjectIterator obj_it(this);
2371 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) 2322 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
2372 CollectHistogramInfo(obj); 2323 CollectHistogramInfo(obj);
2373 ReportHistogram(true); 2324 ReportHistogram(true);
2374 } 2325 }
2375
2376
2377 // Dump the range of remembered set words between [start, end) corresponding
2378 // to the pointers starting at object_p. The allocation_top is an object
2379 // pointer which should not be read past. This is important for large object
2380 // pages, where some bits in the remembered set range do not correspond to
2381 // allocated addresses.
2382 static void PrintRSetRange(Address start, Address end, Object** object_p,
2383 Address allocation_top) {
2384 Address rset_address = start;
2385
2386 // If the range starts on an odd numbered word (e.g., for large object extra
2387 // remembered set ranges), print some spaces.
2388 if ((reinterpret_cast<uintptr_t>(start) / kIntSize) % 2 == 1) {
2389 PrintF(" ");
2390 }
2391
2392 // Loop over all the words in the range.
2393 while (rset_address < end) {
2394 uint32_t rset_word = Memory::uint32_at(rset_address);
2395 int bit_position = 0;
2396
2397 // Loop over all the bits in the word.
2398 while (bit_position < kBitsPerInt) {
2399 if (object_p == reinterpret_cast<Object**>(allocation_top)) {
2400 // Print a bar at the allocation pointer.
2401 PrintF("|");
2402 } else if (object_p > reinterpret_cast<Object**>(allocation_top)) {
2403 // Do not dereference object_p past the allocation pointer.
2404 PrintF("#");
2405 } else if ((rset_word & (1 << bit_position)) == 0) {
2406 // Print a dot for zero bits.
2407 PrintF(".");
2408 } else if (Heap::InNewSpace(*object_p)) {
2409 // Print an X for one bits for pointers to new space.
2410 PrintF("X");
2411 } else {
2412 // Print a circle for one bits for pointers to old space.
2413 PrintF("o");
2414 }
2415
2416 // Print a space after every 8th bit except the last.
2417 if (bit_position % 8 == 7 && bit_position != (kBitsPerInt - 1)) {
2418 PrintF(" ");
2419 }
2420
2421 // Advance to next bit.
2422 bit_position++;
2423 object_p++;
2424 }
2425
2426 // Print a newline after every odd numbered word, otherwise a space.
2427 if ((reinterpret_cast<uintptr_t>(rset_address) / kIntSize) % 2 == 1) {
2428 PrintF("\n");
2429 } else {
2430 PrintF(" ");
2431 }
2432
2433 // Advance to next remembered set word.
2434 rset_address += kIntSize;
2435 }
2436 }
2437
2438
2439 void PagedSpace::DoPrintRSet(const char* space_name) {
2440 PageIterator it(this, PageIterator::PAGES_IN_USE);
2441 while (it.has_next()) {
2442 Page* p = it.next();
2443 PrintF("%s page 0x%x:\n", space_name, p);
2444 PrintRSetRange(p->RSetStart(), p->RSetEnd(),
2445 reinterpret_cast<Object**>(p->ObjectAreaStart()),
2446 p->AllocationTop());
2447 PrintF("\n");
2448 }
2449 }
2450
2451
2452 void OldSpace::PrintRSet() { DoPrintRSet("old"); }
2453 #endif 2326 #endif
2454 2327
2455 // ----------------------------------------------------------------------------- 2328 // -----------------------------------------------------------------------------
2456 // FixedSpace implementation 2329 // FixedSpace implementation
2457 2330
2458 void FixedSpace::PrepareForMarkCompact(bool will_compact) { 2331 void FixedSpace::PrepareForMarkCompact(bool will_compact) {
2459 // Call prepare of the super class. 2332 // Call prepare of the super class.
2460 PagedSpace::PrepareForMarkCompact(will_compact); 2333 PagedSpace::PrepareForMarkCompact(will_compact);
2461 2334
2462 if (will_compact) { 2335 if (will_compact) {
(...skipping 29 matching lines...)
2492 // Update allocation_top of each page in use and compute waste. 2365 // Update allocation_top of each page in use and compute waste.
2493 int computed_size = 0; 2366 int computed_size = 0;
2494 PageIterator it(this, PageIterator::PAGES_USED_BY_MC); 2367 PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
2495 while (it.has_next()) { 2368 while (it.has_next()) {
2496 Page* page = it.next(); 2369 Page* page = it.next();
2497 Address page_top = page->AllocationTop(); 2370 Address page_top = page->AllocationTop();
2498 computed_size += static_cast<int>(page_top - page->ObjectAreaStart()); 2371 computed_size += static_cast<int>(page_top - page->ObjectAreaStart());
2499 if (it.has_next()) { 2372 if (it.has_next()) {
2500 accounting_stats_.WasteBytes( 2373 accounting_stats_.WasteBytes(
2501 static_cast<int>(page->ObjectAreaEnd() - page_top)); 2374 static_cast<int>(page->ObjectAreaEnd() - page_top));
2375 page->SetAllocationWatermark(page_top);
2502 } 2376 }
2503 } 2377 }
2504 2378
2505 // Make sure the computed size - based on the used portion of the 2379 // Make sure the computed size - based on the used portion of the
2506 // pages in use - matches the size we adjust during allocation. 2380 // pages in use - matches the size we adjust during allocation.
2507 ASSERT(computed_size == Size()); 2381 ASSERT(computed_size == Size());
2508 } 2382 }
2509 2383
2510 2384
2511 // Slow case for normal allocation. Try in order: (1) allocate in the next 2385 // Slow case for normal allocation. Try in order: (1) allocate in the next
2512 // page in the space, (2) allocate off the space's free list, (3) expand the 2386 // page in the space, (2) allocate off the space's free list, (3) expand the
2513 // space, (4) fail. 2387 // space, (4) fail.
2514 HeapObject* FixedSpace::SlowAllocateRaw(int size_in_bytes) { 2388 HeapObject* FixedSpace::SlowAllocateRaw(int size_in_bytes) {
2515 ASSERT_EQ(object_size_in_bytes_, size_in_bytes); 2389 ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
2516 // Linear allocation in this space has failed. If there is another page 2390 // Linear allocation in this space has failed. If there is another page
2517 // in the space, move to that page and allocate there. This allocation 2391 // in the space, move to that page and allocate there. This allocation
2518 // should succeed. 2392 // should succeed.
2519 Page* current_page = TopPageOf(allocation_info_); 2393 Page* current_page = TopPageOf(allocation_info_);
2520 if (current_page->next_page()->is_valid()) { 2394 if (current_page->next_page()->is_valid()) {
2521 return AllocateInNextPage(current_page, size_in_bytes); 2395 return AllocateInNextPage(current_page, size_in_bytes);
2522 } 2396 }
2523 2397
2524 // There is no next page in this space. Try free list allocation unless 2398 // There is no next page in this space. Try free list allocation unless
2525 // that is currently forbidden. The fixed space free list implicitly assumes 2399 // that is currently forbidden. The fixed space free list implicitly assumes
2526 // that all free blocks are of the fixed size. 2400 // that all free blocks are of the fixed size.
2527 if (!Heap::linear_allocation()) { 2401 if (!Heap::linear_allocation()) {
2528 Object* result = free_list_.Allocate(); 2402 Object* result = free_list_.Allocate();
2529 if (!result->IsFailure()) { 2403 if (!result->IsFailure()) {
2530 accounting_stats_.AllocateBytes(size_in_bytes); 2404 accounting_stats_.AllocateBytes(size_in_bytes);
2531 return HeapObject::cast(result); 2405 HeapObject* obj = HeapObject::cast(result);
2406 Page* p = Page::FromAddress(obj->address());
2407
2408 if (obj->address() >= p->AllocationWatermark()) {
2409 // There should be no hole between the allocation watermark
2410 // and allocated object address.
2411 // Memory above the allocation watermark was not swept and
2412 // might contain garbage pointers to new space.
2413 ASSERT(obj->address() == p->AllocationWatermark());
2414 p->SetAllocationWatermark(obj->address() + size_in_bytes);
2415 }
2416
2417 return obj;
2532 } 2418 }
2533 } 2419 }
2534 2420
2535 // Free list allocation failed and there is no next page. Fail if we have 2421 // Free list allocation failed and there is no next page. Fail if we have
2536 // hit the old generation size limit that should cause a garbage 2422 // hit the old generation size limit that should cause a garbage
2537 // collection. 2423 // collection.
2538 if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) { 2424 if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) {
2539 return NULL; 2425 return NULL;
2540 } 2426 }
2541 2427
2542 // Try to expand the space and allocate in the new next page. 2428 // Try to expand the space and allocate in the new next page.
2543 ASSERT(!current_page->next_page()->is_valid()); 2429 ASSERT(!current_page->next_page()->is_valid());
2544 if (Expand(current_page)) { 2430 if (Expand(current_page)) {
2545 return AllocateInNextPage(current_page, size_in_bytes); 2431 return AllocateInNextPage(current_page, size_in_bytes);
2546 } 2432 }
2547 2433
2548 // Finally, fail. 2434 // Finally, fail.
2549 return NULL; 2435 return NULL;
2550 } 2436 }
2551 2437
2552 2438
2553 // Move to the next page (there is assumed to be one) and allocate there. 2439 // Move to the next page (there is assumed to be one) and allocate there.
2554 // The top of page block is always wasted, because it is too small to hold a 2440 // The top of page block is always wasted, because it is too small to hold a
2555 // map. 2441 // map.
2556 HeapObject* FixedSpace::AllocateInNextPage(Page* current_page, 2442 HeapObject* FixedSpace::AllocateInNextPage(Page* current_page,
2557 int size_in_bytes) { 2443 int size_in_bytes) {
2558 ASSERT(current_page->next_page()->is_valid()); 2444 ASSERT(current_page->next_page()->is_valid());
2559 ASSERT(allocation_info_.top == PageAllocationLimit(current_page)); 2445 ASSERT(allocation_info_.top == PageAllocationLimit(current_page));
2560 ASSERT_EQ(object_size_in_bytes_, size_in_bytes); 2446 ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
2447 Page* next_page = current_page->next_page();
2448 next_page->ClearGCFields();
2449 current_page->SetAllocationWatermark(allocation_info_.top);
2561 accounting_stats_.WasteBytes(page_extra_); 2450 accounting_stats_.WasteBytes(page_extra_);
2562 SetAllocationInfo(&allocation_info_, current_page->next_page()); 2451 SetAllocationInfo(&allocation_info_, next_page);
2563 return AllocateLinearly(&allocation_info_, size_in_bytes); 2452 return AllocateLinearly(&allocation_info_, size_in_bytes);
2564 } 2453 }
2565 2454
2566 2455
2567 #ifdef DEBUG 2456 #ifdef DEBUG
2568 void FixedSpace::ReportStatistics() { 2457 void FixedSpace::ReportStatistics() {
2569 int pct = Available() * 100 / Capacity(); 2458 int pct = Available() * 100 / Capacity();
2570 PrintF(" capacity: %d, waste: %d, available: %d, %%%d\n", 2459 PrintF(" capacity: %d, waste: %d, available: %d, %%%d\n",
2571 Capacity(), Waste(), Available(), pct); 2460 Capacity(), Waste(), Available(), pct);
2572 2461
2573 // Report remembered set statistics.
2574 int rset_marked_pointers = 0;
2575 int cross_gen_pointers = 0;
2576
2577 PageIterator page_it(this, PageIterator::PAGES_IN_USE);
2578 while (page_it.has_next()) {
2579 Page* p = page_it.next();
2580
2581 for (Address rset_addr = p->RSetStart();
2582 rset_addr < p->RSetEnd();
2583 rset_addr += kIntSize) {
2584 int rset = Memory::int_at(rset_addr);
2585 if (rset != 0) {
2586 // Bits were set
2587 int intoff =
2588 static_cast<int>(rset_addr - p->address() - Page::kRSetOffset);
2589 int bitoff = 0;
2590 for (; bitoff < kBitsPerInt; ++bitoff) {
2591 if ((rset & (1 << bitoff)) != 0) {
2592 int bitpos = intoff*kBitsPerByte + bitoff;
2593 Address slot = p->OffsetToAddress(bitpos << kObjectAlignmentBits);
2594 Object** obj = reinterpret_cast<Object**>(slot);
2595 rset_marked_pointers++;
2596 if (Heap::InNewSpace(*obj))
2597 cross_gen_pointers++;
2598 }
2599 }
2600 }
2601 }
2602 }
2603
2604 pct = rset_marked_pointers == 0 ?
2605 0 : cross_gen_pointers * 100 / rset_marked_pointers;
2606 PrintF(" rset-marked pointers %d, to-new-space %d (%%%d)\n",
2607 rset_marked_pointers, cross_gen_pointers, pct);
2608
2609 ClearHistograms(); 2462 ClearHistograms();
2610 HeapObjectIterator obj_it(this); 2463 HeapObjectIterator obj_it(this);
2611 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) 2464 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
2612 CollectHistogramInfo(obj); 2465 CollectHistogramInfo(obj);
2613 ReportHistogram(false); 2466 ReportHistogram(false);
2614 } 2467 }
2615
2616
2617 void FixedSpace::PrintRSet() { DoPrintRSet(name_); }
2618 #endif 2468 #endif
2619 2469
2620 2470
2621 // ----------------------------------------------------------------------------- 2471 // -----------------------------------------------------------------------------
2622 // MapSpace implementation 2472 // MapSpace implementation
2623 2473
2624 void MapSpace::PrepareForMarkCompact(bool will_compact) { 2474 void MapSpace::PrepareForMarkCompact(bool will_compact) {
2625 // Call prepare of the super class. 2475 // Call prepare of the super class.
2626 FixedSpace::PrepareForMarkCompact(will_compact); 2476 FixedSpace::PrepareForMarkCompact(will_compact);
2627 2477
(...skipping 158 matching lines...)
2786 if (chunk == NULL) { 2636 if (chunk == NULL) {
2787 return Failure::RetryAfterGC(requested_size, identity()); 2637 return Failure::RetryAfterGC(requested_size, identity());
2788 } 2638 }
2789 2639
2790 size_ += static_cast<int>(chunk_size); 2640 size_ += static_cast<int>(chunk_size);
2791 page_count_++; 2641 page_count_++;
2792 chunk->set_next(first_chunk_); 2642 chunk->set_next(first_chunk_);
2793 chunk->set_size(chunk_size); 2643 chunk->set_size(chunk_size);
2794 first_chunk_ = chunk; 2644 first_chunk_ = chunk;
2795 2645
2796 // Set the object address and size in the page header and clear its 2646 // Initialize page header.
2797 // remembered set.
2798 Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize)); 2647 Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
2799 Address object_address = page->ObjectAreaStart(); 2648 Address object_address = page->ObjectAreaStart();
2800 // Clear the low order bit of the second word in the page to flag it as a 2649 // Clear the low order bit of the second word in the page to flag it as a
2801 // large object page. If the chunk_size happened to be written there, its 2650 // large object page. If the chunk_size happened to be written there, its
2802 // low order bit should already be clear. 2651 // low order bit should already be clear.
2803 ASSERT((chunk_size & 0x1) == 0); 2652 ASSERT((chunk_size & 0x1) == 0);
2804 page->SetIsLargeObjectPage(true); 2653 page->SetIsLargeObjectPage(true);
2805 page->ClearRSet(); 2654 page->SetRegionMarks(Page::kAllRegionsCleanMarks);
2806 int extra_bytes = requested_size - object_size;
2807 if (extra_bytes > 0) {
2808 // The extra memory for the remembered set should be cleared.
2809 memset(object_address + object_size, 0, extra_bytes);
2810 }
2811
2812 return HeapObject::FromAddress(object_address); 2655 return HeapObject::FromAddress(object_address);
2813 } 2656 }
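
A small sketch of what the page-header initialization above amounts to, assuming (as the comment states) that chunk sizes are aligned so their low bit is already zero, and assuming clean region marks are represented by an all-zero word; the field names and layout are illustrative, not the real Page class:

#include <cstdint>

// Illustrative header layout only; the real Page class differs.
struct LargePageHeaderSketch {
  uintptr_t first_word;
  uintptr_t second_word;   // low bit cleared => large object page (chunk sizes
                           // are aligned, so a stored size already has bit 0 == 0)
  uint32_t region_marks;   // card-marking dirty bits; 0 assumed to mean "all clean"

  void SetIsLargeObjectPage(bool is_large) {
    if (is_large) {
      second_word &= ~static_cast<uintptr_t>(1);
    } else {
      second_word |= 1;
    }
  }

  bool IsLargeObjectPage() { return (second_word & 1) == 0; }

  void SetRegionMarks(uint32_t marks) { region_marks = marks; }
};
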
2814 2657
2815 2658
2816 Object* LargeObjectSpace::AllocateRawCode(int size_in_bytes) { 2659 Object* LargeObjectSpace::AllocateRawCode(int size_in_bytes) {
2817 ASSERT(0 < size_in_bytes); 2660 ASSERT(0 < size_in_bytes);
2818 return AllocateRawInternal(size_in_bytes, 2661 return AllocateRawInternal(size_in_bytes,
2819 size_in_bytes, 2662 size_in_bytes,
2820 EXECUTABLE); 2663 EXECUTABLE);
2821 } 2664 }
2822 2665
2823 2666
2824 Object* LargeObjectSpace::AllocateRawFixedArray(int size_in_bytes) { 2667 Object* LargeObjectSpace::AllocateRawFixedArray(int size_in_bytes) {
2825 ASSERT(0 < size_in_bytes); 2668 ASSERT(0 < size_in_bytes);
2826 int extra_rset_bytes = ExtraRSetBytesFor(size_in_bytes); 2669 return AllocateRawInternal(size_in_bytes,
2827 return AllocateRawInternal(size_in_bytes + extra_rset_bytes,
2828 size_in_bytes, 2670 size_in_bytes,
2829 NOT_EXECUTABLE); 2671 NOT_EXECUTABLE);
2830 } 2672 }
2831 2673
2832 2674
2833 Object* LargeObjectSpace::AllocateRaw(int size_in_bytes) { 2675 Object* LargeObjectSpace::AllocateRaw(int size_in_bytes) {
2834 ASSERT(0 < size_in_bytes); 2676 ASSERT(0 < size_in_bytes);
2835 return AllocateRawInternal(size_in_bytes, 2677 return AllocateRawInternal(size_in_bytes,
2836 size_in_bytes, 2678 size_in_bytes,
2837 NOT_EXECUTABLE); 2679 NOT_EXECUTABLE);
2838 } 2680 }
2839 2681
2840 2682
2841 // GC support 2683 // GC support
2842 Object* LargeObjectSpace::FindObject(Address a) { 2684 Object* LargeObjectSpace::FindObject(Address a) {
2843 for (LargeObjectChunk* chunk = first_chunk_; 2685 for (LargeObjectChunk* chunk = first_chunk_;
2844 chunk != NULL; 2686 chunk != NULL;
2845 chunk = chunk->next()) { 2687 chunk = chunk->next()) {
2846 Address chunk_address = chunk->address(); 2688 Address chunk_address = chunk->address();
2847 if (chunk_address <= a && a < chunk_address + chunk->size()) { 2689 if (chunk_address <= a && a < chunk_address + chunk->size()) {
2848 return chunk->GetObject(); 2690 return chunk->GetObject();
2849 } 2691 }
2850 } 2692 }
2851 return Failure::Exception(); 2693 return Failure::Exception();
2852 } 2694 }
2853 2695
2854 2696 void LargeObjectSpace::IterateDirtyRegions(ObjectSlotCallback copy_object) {
2855 void LargeObjectSpace::ClearRSet() {
2856 ASSERT(Page::is_rset_in_use());
2857
2858 LargeObjectIterator it(this);
2859 for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
2860 // We only have code, sequential strings, or fixed arrays in large
2861 // object space, and only fixed arrays need remembered set support.
2862 if (object->IsFixedArray()) {
2863 // Clear the normal remembered set region of the page;
2864 Page* page = Page::FromAddress(object->address());
2865 page->ClearRSet();
2866
2867 // Clear the extra remembered set.
2868 int size = object->Size();
2869 int extra_rset_bytes = ExtraRSetBytesFor(size);
2870 memset(object->address() + size, 0, extra_rset_bytes);
2871 }
2872 }
2873 }
2874
2875
2876 void LargeObjectSpace::IterateRSet(ObjectSlotCallback copy_object_func) {
2877 ASSERT(Page::is_rset_in_use());
2878
2879 static void* lo_rset_histogram = StatsTable::CreateHistogram(
2880 "V8.RSetLO",
2881 0,
2882 // Keeping this histogram's buckets the same as the paged space histogram.
2883 Page::kObjectAreaSize / kPointerSize,
2884 30);
2885
2886 LargeObjectIterator it(this); 2697 LargeObjectIterator it(this);
2887 for (HeapObject* object = it.next(); object != NULL; object = it.next()) { 2698 for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
2888 // We only have code, sequential strings, or fixed arrays in large 2699 // We only have code, sequential strings, or fixed arrays in large
2889 // object space, and only fixed arrays can possibly contain pointers to 2700 // object space, and only fixed arrays can possibly contain pointers to
2890 // the young generation. 2701 // the young generation.
2891 if (object->IsFixedArray()) { 2702 if (object->IsFixedArray()) {
2892 // Iterate the normal page remembered set range.
2893 Page* page = Page::FromAddress(object->address()); 2703 Page* page = Page::FromAddress(object->address());
2894 Address object_end = object->address() + object->Size(); 2704 uint32_t marks = page->GetRegionMarks();
2895 int count = Heap::IterateRSetRange(page->ObjectAreaStart(), 2705 uint32_t newmarks = Page::kAllRegionsCleanMarks;
2896 Min(page->ObjectAreaEnd(), object_end),
2897 page->RSetStart(),
2898 copy_object_func);
2899 2706
2900 // Iterate the extra array elements. 2707 if (marks != Page::kAllRegionsCleanMarks) {
2901 if (object_end > page->ObjectAreaEnd()) { 2708 // For a large page a single dirty mark corresponds to several
2902 count += Heap::IterateRSetRange(page->ObjectAreaEnd(), object_end, 2709 // regions (modulo 32). So we treat a large page as a sequence of
2903 object_end, copy_object_func); 2710 // normal pages of size Page::kPageSize having the same dirty marks
2904 } 2711 // and subsequently iterate dirty regions on each of these pages.
2905 if (lo_rset_histogram != NULL) { 2712 Address start = object->address();
2906 StatsTable::AddHistogramSample(lo_rset_histogram, count); 2713 Address end = page->ObjectAreaEnd();
2714 Address object_end = start + object->Size();
2715
2716 // Iterate regions of the first normal page covering object.
2717 uint32_t first_region_number = page->GetRegionNumberForAddress(start);
2718 newmarks |=
2719 Heap::IterateDirtyRegions(marks >> first_region_number,
2720 start,
2721 end,
2722 &Heap::IteratePointersInDirtyRegion,
2723 copy_object) << first_region_number;
2724
2725 start = end;
2726 end = start + Page::kPageSize;
2727 while (end <= object_end) {
2728 // Iterate next 32 regions.
2729 newmarks |=
2730 Heap::IterateDirtyRegions(marks,
2731 start,
2732 end,
2733 &Heap::IteratePointersInDirtyRegion,
2734 copy_object);
2735 start = end;
2736 end = start + Page::kPageSize;
2737 }
2738
2739 if (start != object_end) {
2740 // Iterate the last piece of the object, which is smaller than
2741 // Page::kPageSize.
2742 newmarks |=
2743 Heap::IterateDirtyRegions(marks,
2744 start,
2745 object_end,
2746 &Heap::IteratePointersInDirtyRegion,
2747 copy_object);
2748 }
2749
2750 page->SetRegionMarks(newmarks);
2907 } 2751 }
2908 } 2752 }
2909 } 2753 }
2910 } 2754 }
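
The comment above describes treating a large page as a sequence of Page::kPageSize-sized spans that all share one 32-bit mark word. Below is a minimal, simplified sketch of the per-span walk that IterateDirtyRegions performs; the constants and visitor signature are assumptions for illustration, not V8's actual declarations:

#include <cstdint>
#include <cstddef>

typedef unsigned char* Address;

const size_t kPageSizeSketch = 8 * 1024;     // assumed page size
const int kRegionsPerPage = 32;              // one mark bit per region
const size_t kRegionSizeSketch = kPageSizeSketch / kRegionsPerPage;

// Visits one dirty region; returns true if it still holds pointers into new
// space and therefore must keep its dirty mark.
typedef bool (*DirtyRegionVisitorSketch)(Address region_start, Address region_end);

// Walks the regions of [start, end) whose bits are set in `marks` and returns
// the mask of regions that remain dirty, simplified from the loop above.
uint32_t IterateDirtyRegionsSketch(uint32_t marks,
                                   Address start,
                                   Address end,
                                   DirtyRegionVisitorSketch visit) {
  uint32_t still_dirty = 0;
  Address region_start = start;
  for (int bit = 0; bit < kRegionsPerPage && region_start < end; ++bit) {
    Address region_end = region_start + kRegionSizeSketch;
    if (region_end > end) region_end = end;
    if ((marks & (1u << bit)) != 0 && visit(region_start, region_end)) {
      still_dirty |= 1u << bit;
    }
    region_start = region_end;
  }
  return still_dirty;
}
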
2911 2755
2912 2756
2913 void LargeObjectSpace::FreeUnmarkedObjects() { 2757 void LargeObjectSpace::FreeUnmarkedObjects() {
2914 LargeObjectChunk* previous = NULL; 2758 LargeObjectChunk* previous = NULL;
2915 LargeObjectChunk* current = first_chunk_; 2759 LargeObjectChunk* current = first_chunk_;
2916 while (current != NULL) { 2760 while (current != NULL) {
(...skipping 71 matching lines...)
2988 2832
2989 // Byte arrays and strings don't have interior pointers. 2833 // Byte arrays and strings don't have interior pointers.
2990 if (object->IsCode()) { 2834 if (object->IsCode()) {
2991 VerifyPointersVisitor code_visitor; 2835 VerifyPointersVisitor code_visitor;
2992 object->IterateBody(map->instance_type(), 2836 object->IterateBody(map->instance_type(),
2993 object->Size(), 2837 object->Size(),
2994 &code_visitor); 2838 &code_visitor);
2995 } else if (object->IsFixedArray()) { 2839 } else if (object->IsFixedArray()) {
2996 // We loop over fixed arrays ourselves, rather than using the visitor, 2840 // We loop over fixed arrays ourselves, rather than using the visitor,
2997 // because the visitor doesn't support the start/offset iteration 2841 // because the visitor doesn't support the start/offset iteration
2998 // needed for IsRSetSet. 2842 // needed for IsRegionDirty.
2999 FixedArray* array = FixedArray::cast(object); 2843 FixedArray* array = FixedArray::cast(object);
3000 for (int j = 0; j < array->length(); j++) { 2844 for (int j = 0; j < array->length(); j++) {
3001 Object* element = array->get(j); 2845 Object* element = array->get(j);
3002 if (element->IsHeapObject()) { 2846 if (element->IsHeapObject()) {
3003 HeapObject* element_object = HeapObject::cast(element); 2847 HeapObject* element_object = HeapObject::cast(element);
3004 ASSERT(Heap::Contains(element_object)); 2848 ASSERT(Heap::Contains(element_object));
3005 ASSERT(element_object->map()->IsMap()); 2849 ASSERT(element_object->map()->IsMap());
3006 if (Heap::InNewSpace(element_object)) { 2850 if (Heap::InNewSpace(element_object)) {
3007 ASSERT(Page::IsRSetSet(object->address(), 2851 Address array_addr = object->address();
3008 FixedArray::kHeaderSize + j * kPointerSize)); 2852 Address element_addr = array_addr + FixedArray::kHeaderSize +
2853 j * kPointerSize;
2854
2855 ASSERT(Page::FromAddress(array_addr)->IsRegionDirty(element_addr));
3009 } 2856 }
3010 } 2857 }
3011 } 2858 }
3012 } 2859 }
3013 } 2860 }
3014 } 2861 }
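
The ASSERT above is the verification side of the new card-marking write barrier: whenever a new-space pointer is stored into a fixed-array element, the region containing that slot must already be marked dirty. A minimal sketch of the slot-to-region mapping being checked; names and constants are illustrative assumptions, not the actual Page interface, and for large pages the same 32 marks are reused modulo the page size, as the comment in IterateDirtyRegions explains:

#include <cstdint>
#include <cstddef>

typedef unsigned char* Address;

const uintptr_t kPageSizeSketch = 8 * 1024;   // assumed page size
const int kRegionsPerPage = 32;
const uintptr_t kRegionSizeSketch = kPageSizeSketch / kRegionsPerPage;

struct PageSketch {
  uint32_t region_marks;   // one dirty bit per region; 0 == all clean

  // Assumes the page header sits at the page-aligned base address.
  static PageSketch* FromAddress(Address a) {
    return reinterpret_cast<PageSketch*>(
        reinterpret_cast<uintptr_t>(a) & ~(kPageSizeSketch - 1));
  }

  int RegionNumberForAddress(Address slot) {
    uintptr_t offset_in_page =
        reinterpret_cast<uintptr_t>(slot) & (kPageSizeSketch - 1);
    return static_cast<int>(offset_in_page / kRegionSizeSketch);
  }

  void MarkRegionDirty(Address slot) {
    region_marks |= 1u << RegionNumberForAddress(slot);
  }

  bool IsRegionDirty(Address slot) {
    return (region_marks & (1u << RegionNumberForAddress(slot))) != 0;
  }
};

// Write-barrier sketch: after storing a pointer that may refer to new space
// into *slot, flag the slot's region so the next scavenge rescans it.
void RecordWriteSketch(Address slot) {
  PageSketch::FromAddress(slot)->MarkRegionDirty(slot);
}
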
3015 2862
3016 2863
3017 void LargeObjectSpace::Print() { 2864 void LargeObjectSpace::Print() {
3018 LargeObjectIterator it(this); 2865 LargeObjectIterator it(this);
(...skipping 20 matching lines...)
3039 2886
3040 void LargeObjectSpace::CollectCodeStatistics() { 2887 void LargeObjectSpace::CollectCodeStatistics() {
3041 LargeObjectIterator obj_it(this); 2888 LargeObjectIterator obj_it(this);
3042 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) { 2889 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
3043 if (obj->IsCode()) { 2890 if (obj->IsCode()) {
3044 Code* code = Code::cast(obj); 2891 Code* code = Code::cast(obj);
3045 code_kind_statistics[code->kind()] += code->Size(); 2892 code_kind_statistics[code->kind()] += code->Size();
3046 } 2893 }
3047 } 2894 }
3048 } 2895 }
3049
3050
3051 void LargeObjectSpace::PrintRSet() {
3052 LargeObjectIterator it(this);
3053 for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
3054 if (object->IsFixedArray()) {
3055 Page* page = Page::FromAddress(object->address());
3056
3057 Address allocation_top = object->address() + object->Size();
3058 PrintF("large page 0x%x:\n", page);
3059 PrintRSetRange(page->RSetStart(), page->RSetEnd(),
3060 reinterpret_cast<Object**>(object->address()),
3061 allocation_top);
3062 int extra_array_bytes = object->Size() - Page::kObjectAreaSize;
3063 int extra_rset_bits = RoundUp(extra_array_bytes / kPointerSize,
3064 kBitsPerInt);
3065 PrintF("------------------------------------------------------------"
3066 "-----------\n");
3067 PrintRSetRange(allocation_top,
3068 allocation_top + extra_rset_bits / kBitsPerByte,
3069 reinterpret_cast<Object**>(object->address()
3070 + Page::kObjectAreaSize),
3071 allocation_top);
3072 PrintF("\n");
3073 }
3074 }
3075 }
3076 #endif // DEBUG 2896 #endif // DEBUG
3077 2897
3078 } } // namespace v8::internal 2898 } } // namespace v8::internal