OLD | NEW |
1 // Copyright 2006-2008 the V8 project authors. All rights reserved. | 1 // Copyright 2006-2008 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 23 matching lines...)
34 namespace v8 { | 34 namespace v8 { |
35 namespace internal { | 35 namespace internal { |
36 | 36 |
37 // For contiguous spaces, top should be in the space (or at the end) and limit | 37 // For contiguous spaces, top should be in the space (or at the end) and limit |
38 // should be the end of the space. | 38 // should be the end of the space. |
39 #define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \ | 39 #define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \ |
40 ASSERT((space).low() <= (info).top \ | 40 ASSERT((space).low() <= (info).top \ |
41 && (info).top <= (space).high() \ | 41 && (info).top <= (space).high() \ |
42 && (info).limit == (space).high()) | 42 && (info).limit == (space).high()) |
43 | 43 |
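The macro above is a debug check of the bump-pointer invariant for a contiguous semispace: top stays within [low, high] and limit is pinned at high. A minimal self-contained sketch of that discipline, using hypothetical stand-in types rather than the real V8 classes:

    // Sketch of the invariant checked by ASSERT_SEMISPACE_ALLOCATION_INFO.
    // AllocationInfoSketch / SemiSpaceSketch are illustrative, not V8 types.
    #include <cassert>
    #include <cstddef>

    struct AllocationInfoSketch { char* top; char* limit; };
    struct SemiSpaceSketch { char* low; char* high; };

    // Allocation only moves top forward; limit always equals the semispace end.
    char* BumpAllocate(AllocationInfoSketch* info,
                       const SemiSpaceSketch& space,
                       std::size_t bytes) {
      assert(space.low <= info->top && info->top <= space.high &&
             info->limit == space.high);                 // the macro's condition
      if (info->top + bytes > info->limit) return NULL;  // caller must grow or GC
      char* result = info->top;
      info->top += bytes;
      return result;
    }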
44 intptr_t Page::watermark_invalidated_mark_ = Page::WATERMARK_INVALIDATED; | |
45 | 44 |
46 // ---------------------------------------------------------------------------- | 45 // ---------------------------------------------------------------------------- |
47 // HeapObjectIterator | 46 // HeapObjectIterator |
48 | 47 |
49 HeapObjectIterator::HeapObjectIterator(PagedSpace* space) { | 48 HeapObjectIterator::HeapObjectIterator(PagedSpace* space) { |
50 Initialize(space->bottom(), space->top(), NULL); | 49 Initialize(space->bottom(), space->top(), NULL); |
51 } | 50 } |
52 | 51 |
53 | 52 |
54 HeapObjectIterator::HeapObjectIterator(PagedSpace* space, | 53 HeapObjectIterator::HeapObjectIterator(PagedSpace* space, |
(...skipping 78 matching lines...)
133 } | 132 } |
134 } | 133 } |
135 #endif | 134 #endif |
136 stop_page_ = space->last_page_; | 135 stop_page_ = space->last_page_; |
137 break; | 136 break; |
138 } | 137 } |
139 } | 138 } |
140 | 139 |
141 | 140 |
142 // ----------------------------------------------------------------------------- | 141 // ----------------------------------------------------------------------------- |
| 142 // Page |
| 143 |
| 144 #ifdef DEBUG |
| 145 Page::RSetState Page::rset_state_ = Page::IN_USE; |
| 146 #endif |
| 147 |
| 148 // ----------------------------------------------------------------------------- |
143 // CodeRange | 149 // CodeRange |
144 | 150 |
145 List<CodeRange::FreeBlock> CodeRange::free_list_(0); | 151 List<CodeRange::FreeBlock> CodeRange::free_list_(0); |
146 List<CodeRange::FreeBlock> CodeRange::allocation_list_(0); | 152 List<CodeRange::FreeBlock> CodeRange::allocation_list_(0); |
147 int CodeRange::current_allocation_block_index_ = 0; | 153 int CodeRange::current_allocation_block_index_ = 0; |
148 VirtualMemory* CodeRange::code_range_ = NULL; | 154 VirtualMemory* CodeRange::code_range_ = NULL; |
149 | 155 |
150 | 156 |
151 bool CodeRange::Setup(const size_t requested) { | 157 bool CodeRange::Setup(const size_t requested) { |
152 ASSERT(code_range_ == NULL); | 158 ASSERT(code_range_ == NULL); |
(...skipping 358 matching lines...)
511 size_t chunk_size = chunks_[chunk_id].size(); | 517 size_t chunk_size = chunks_[chunk_id].size(); |
512 Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize); | 518 Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize); |
513 ASSERT(pages_in_chunk <= | 519 ASSERT(pages_in_chunk <= |
514 ((OffsetFrom(high) - OffsetFrom(low)) / Page::kPageSize)); | 520 ((OffsetFrom(high) - OffsetFrom(low)) / Page::kPageSize)); |
515 #endif | 521 #endif |
516 | 522 |
517 Address page_addr = low; | 523 Address page_addr = low; |
518 for (int i = 0; i < pages_in_chunk; i++) { | 524 for (int i = 0; i < pages_in_chunk; i++) { |
519 Page* p = Page::FromAddress(page_addr); | 525 Page* p = Page::FromAddress(page_addr); |
520 p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id; | 526 p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id; |
521 p->InvalidateWatermark(true); | |
522 p->SetIsLargeObjectPage(false); | 527 p->SetIsLargeObjectPage(false); |
523 p->SetAllocationWatermark(p->ObjectAreaStart()); | |
524 p->SetCachedAllocationWatermark(p->ObjectAreaStart()); | |
525 page_addr += Page::kPageSize; | 528 page_addr += Page::kPageSize; |
526 } | 529 } |
527 | 530 |
528 // Set the next page of the last page to 0. | 531 // Set the next page of the last page to 0. |
529 Page* last_page = Page::FromAddress(page_addr - Page::kPageSize); | 532 Page* last_page = Page::FromAddress(page_addr - Page::kPageSize); |
530 last_page->opaque_header = OffsetFrom(0) | chunk_id; | 533 last_page->opaque_header = OffsetFrom(0) | chunk_id; |
531 | 534 |
532 return Page::FromAddress(low); | 535 return Page::FromAddress(low); |
533 } | 536 } |
534 | 537 |
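The opaque_header assignments above work because pages are Page::kPageSize aligned, so the low bits of a page address are always zero and can carry the owning chunk id alongside the next-page address. A rough sketch of the packing, assuming 8 KB pages (the constants are stand-ins, not the real Page layout):

    // Sketch: pack "next page address" and "chunk id" into one word.
    #include <cassert>
    #include <cstdint>

    const uintptr_t kPageSizeSketch = 8 * 1024;       // assumed page alignment
    const uintptr_t kChunkIdMask    = kPageSizeSketch - 1;

    uintptr_t PackOpaqueHeader(uintptr_t next_page_addr, int chunk_id) {
      assert((next_page_addr & kChunkIdMask) == 0);   // alignment frees the low bits
      assert(chunk_id >= 0 && static_cast<uintptr_t>(chunk_id) <= kChunkIdMask);
      return next_page_addr | static_cast<uintptr_t>(chunk_id);
    }

    uintptr_t NextPageAddress(uintptr_t header) { return header & ~kChunkIdMask; }
    int OwningChunkId(uintptr_t header) { return static_cast<int>(header & kChunkIdMask); }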
(...skipping 136 matching lines...)
671 | 674 |
672 if (prev->is_valid()) { | 675 if (prev->is_valid()) { |
673 SetNextPage(prev, Page::FromAddress(page_addr)); | 676 SetNextPage(prev, Page::FromAddress(page_addr)); |
674 } | 677 } |
675 | 678 |
676 for (int i = 0; i < pages_in_chunk; i++) { | 679 for (int i = 0; i < pages_in_chunk; i++) { |
677 Page* p = Page::FromAddress(page_addr); | 680 Page* p = Page::FromAddress(page_addr); |
678 p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id; | 681 p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id; |
679 page_addr += Page::kPageSize; | 682 page_addr += Page::kPageSize; |
680 | 683 |
681 p->InvalidateWatermark(true); | |
682 if (p->WasInUseBeforeMC()) { | 684 if (p->WasInUseBeforeMC()) { |
683 *last_page_in_use = p; | 685 *last_page_in_use = p; |
684 } | 686 } |
685 } | 687 } |
686 | 688 |
687 // Set the next page of the last page to 0. | 689 // Set the next page of the last page to 0. |
688 Page* last_page = Page::FromAddress(page_addr - Page::kPageSize); | 690 Page* last_page = Page::FromAddress(page_addr - Page::kPageSize); |
689 last_page->opaque_header = OffsetFrom(0) | chunk_id; | 691 last_page->opaque_header = OffsetFrom(0) | chunk_id; |
690 | 692 |
691 if (last_page->WasInUseBeforeMC()) { | 693 if (last_page->WasInUseBeforeMC()) { |
(...skipping 43 matching lines...)
735 if (!first_page_->is_valid()) return false; | 737 if (!first_page_->is_valid()) return false; |
736 } | 738 } |
737 | 739 |
738 // We are sure that the first page is valid and that we have at least one | 740 // We are sure that the first page is valid and that we have at least one |
739 // page. | 741 // page. |
740 ASSERT(first_page_->is_valid()); | 742 ASSERT(first_page_->is_valid()); |
741 ASSERT(num_pages > 0); | 743 ASSERT(num_pages > 0); |
742 accounting_stats_.ExpandSpace(num_pages * Page::kObjectAreaSize); | 744 accounting_stats_.ExpandSpace(num_pages * Page::kObjectAreaSize); |
743 ASSERT(Capacity() <= max_capacity_); | 745 ASSERT(Capacity() <= max_capacity_); |
744 | 746 |
745 // Sequentially clear region marks in the newly allocated | 747 // Sequentially initialize remembered sets in the newly allocated |
746 // pages and cache the current last page in the space. | 748 // pages and cache the current last page in the space. |
747 for (Page* p = first_page_; p->is_valid(); p = p->next_page()) { | 749 for (Page* p = first_page_; p->is_valid(); p = p->next_page()) { |
748 p->SetRegionMarks(Page::kAllRegionsCleanMarks); | 750 p->ClearRSet(); |
749 last_page_ = p; | 751 last_page_ = p; |
750 } | 752 } |
751 | 753 |
752 // Use first_page_ for allocation. | 754 // Use first_page_ for allocation. |
753 SetAllocationInfo(&allocation_info_, first_page_); | 755 SetAllocationInfo(&allocation_info_, first_page_); |
754 | 756 |
755 page_list_is_chunk_ordered_ = true; | 757 page_list_is_chunk_ordered_ = true; |
756 | 758 |
757 return true; | 759 return true; |
758 } | 760 } |
(...skipping 26 matching lines...)
785 Page* page = first_page_; | 787 Page* page = first_page_; |
786 while (page->is_valid()) { | 788 while (page->is_valid()) { |
787 MemoryAllocator::UnprotectChunkFromPage(page); | 789 MemoryAllocator::UnprotectChunkFromPage(page); |
788 page = MemoryAllocator::FindLastPageInSameChunk(page)->next_page(); | 790 page = MemoryAllocator::FindLastPageInSameChunk(page)->next_page(); |
789 } | 791 } |
790 } | 792 } |
791 | 793 |
792 #endif | 794 #endif |
793 | 795 |
794 | 796 |
795 void PagedSpace::MarkAllPagesClean() { | 797 void PagedSpace::ClearRSet() { |
796 PageIterator it(this, PageIterator::ALL_PAGES); | 798 PageIterator it(this, PageIterator::ALL_PAGES); |
797 while (it.has_next()) { | 799 while (it.has_next()) { |
798 it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks); | 800 it.next()->ClearRSet(); |
799 } | 801 } |
800 } | 802 } |
801 | 803 |
802 | 804 |
803 Object* PagedSpace::FindObject(Address addr) { | 805 Object* PagedSpace::FindObject(Address addr) { |
804 // Note: this function can only be called before or after mark-compact GC | 806 // Note: this function can only be called before or after mark-compact GC |
805 // because it accesses map pointers. | 807 // because it accesses map pointers. |
806 ASSERT(!MarkCompactCollector::in_use()); | 808 ASSERT(!MarkCompactCollector::in_use()); |
807 | 809 |
808 if (!Contains(addr)) return Failure::Exception(); | 810 if (!Contains(addr)) return Failure::Exception(); |
(...skipping 82 matching lines...)
891 ASSERT(current_page->next_page()->is_valid()); | 893 ASSERT(current_page->next_page()->is_valid()); |
892 // We do not add the top of page block for current page to the space's | 894 // We do not add the top of page block for current page to the space's |
893 // free list---the block may contain live objects so we cannot write | 895 // free list---the block may contain live objects so we cannot write |
894 // bookkeeping information to it. Instead, we will recover top of page | 896 // bookkeeping information to it. Instead, we will recover top of page |
895 // blocks when we move objects to their new locations. | 897 // blocks when we move objects to their new locations. |
896 // | 898 // |
897 // We do however write the allocation pointer to the page. The encoding | 899 // We do however write the allocation pointer to the page. The encoding |
898 // of forwarding addresses is as an offset in terms of live bytes, so we | 900 // of forwarding addresses is as an offset in terms of live bytes, so we |
899 // need quick access to the allocation top of each page to decode | 901 // need quick access to the allocation top of each page to decode |
900 // forwarding addresses. | 902 // forwarding addresses. |
901 current_page->SetAllocationWatermark(mc_forwarding_info_.top); | 903 current_page->mc_relocation_top = mc_forwarding_info_.top; |
902 current_page->next_page()->InvalidateWatermark(true); | |
903 SetAllocationInfo(&mc_forwarding_info_, current_page->next_page()); | 904 SetAllocationInfo(&mc_forwarding_info_, current_page->next_page()); |
904 return AllocateLinearly(&mc_forwarding_info_, size_in_bytes); | 905 return AllocateLinearly(&mc_forwarding_info_, size_in_bytes); |
905 } | 906 } |
906 | 907 |
907 | 908 |
908 bool PagedSpace::Expand(Page* last_page) { | 909 bool PagedSpace::Expand(Page* last_page) { |
909 ASSERT(max_capacity_ % Page::kObjectAreaSize == 0); | 910 ASSERT(max_capacity_ % Page::kObjectAreaSize == 0); |
910 ASSERT(Capacity() % Page::kObjectAreaSize == 0); | 911 ASSERT(Capacity() % Page::kObjectAreaSize == 0); |
911 | 912 |
912 if (Capacity() == max_capacity_) return false; | 913 if (Capacity() == max_capacity_) return false; |
913 | 914 |
914 ASSERT(Capacity() < max_capacity_); | 915 ASSERT(Capacity() < max_capacity_); |
915 // Last page must be valid and its next page is invalid. | 916 // Last page must be valid and its next page is invalid. |
916 ASSERT(last_page->is_valid() && !last_page->next_page()->is_valid()); | 917 ASSERT(last_page->is_valid() && !last_page->next_page()->is_valid()); |
917 | 918 |
918 int available_pages = (max_capacity_ - Capacity()) / Page::kObjectAreaSize; | 919 int available_pages = (max_capacity_ - Capacity()) / Page::kObjectAreaSize; |
919 if (available_pages <= 0) return false; | 920 if (available_pages <= 0) return false; |
920 | 921 |
921 int desired_pages = Min(available_pages, MemoryAllocator::kPagesPerChunk); | 922 int desired_pages = Min(available_pages, MemoryAllocator::kPagesPerChunk); |
922 Page* p = MemoryAllocator::AllocatePages(desired_pages, &desired_pages, this); | 923 Page* p = MemoryAllocator::AllocatePages(desired_pages, &desired_pages, this); |
923 if (!p->is_valid()) return false; | 924 if (!p->is_valid()) return false; |
924 | 925 |
925 accounting_stats_.ExpandSpace(desired_pages * Page::kObjectAreaSize); | 926 accounting_stats_.ExpandSpace(desired_pages * Page::kObjectAreaSize); |
926 ASSERT(Capacity() <= max_capacity_); | 927 ASSERT(Capacity() <= max_capacity_); |
927 | 928 |
928 MemoryAllocator::SetNextPage(last_page, p); | 929 MemoryAllocator::SetNextPage(last_page, p); |
929 | 930 |
930 // Sequentially clear region marks of new pages and cache the | 931 // Sequentially clear remembered set of new pages and cache the |
931 // new last page in the space. | 932 // new last page in the space. |
932 while (p->is_valid()) { | 933 while (p->is_valid()) { |
933 p->SetRegionMarks(Page::kAllRegionsCleanMarks); | 934 p->ClearRSet(); |
934 last_page_ = p; | 935 last_page_ = p; |
935 p = p->next_page(); | 936 p = p->next_page(); |
936 } | 937 } |
937 | 938 |
938 return true; | 939 return true; |
939 } | 940 } |
940 | 941 |
941 | 942 |
942 #ifdef DEBUG | 943 #ifdef DEBUG |
943 int PagedSpace::CountTotalPages() { | 944 int PagedSpace::CountTotalPages() { |
(...skipping 78 matching lines...)
1022 Page* top_page = Page::FromAllocationTop(allocation_info_.top); | 1023 Page* top_page = Page::FromAllocationTop(allocation_info_.top); |
1023 ASSERT(MemoryAllocator::IsPageInSpace(top_page, this)); | 1024 ASSERT(MemoryAllocator::IsPageInSpace(top_page, this)); |
1024 | 1025 |
1025 // Loop over all the pages. | 1026 // Loop over all the pages. |
1026 bool above_allocation_top = false; | 1027 bool above_allocation_top = false; |
1027 Page* current_page = first_page_; | 1028 Page* current_page = first_page_; |
1028 while (current_page->is_valid()) { | 1029 while (current_page->is_valid()) { |
1029 if (above_allocation_top) { | 1030 if (above_allocation_top) { |
1030 // We don't care what's above the allocation top. | 1031 // We don't care what's above the allocation top. |
1031 } else { | 1032 } else { |
| 1033 // Unless this is the last page in the space containing allocated |
| 1034 // objects, the allocation top should be at a constant offset from the |
| 1035 // object area end. |
1032 Address top = current_page->AllocationTop(); | 1036 Address top = current_page->AllocationTop(); |
1033 if (current_page == top_page) { | 1037 if (current_page == top_page) { |
1034 ASSERT(top == allocation_info_.top); | 1038 ASSERT(top == allocation_info_.top); |
1035 // The next page will be above the allocation top. | 1039 // The next page will be above the allocation top. |
1036 above_allocation_top = true; | 1040 above_allocation_top = true; |
| 1041 } else { |
| 1042 ASSERT(top == PageAllocationLimit(current_page)); |
1037 } | 1043 } |
1038 | 1044 |
1039 // It should be packed with objects from the bottom to the top. | 1045 // It should be packed with objects from the bottom to the top. |
1040 Address current = current_page->ObjectAreaStart(); | 1046 Address current = current_page->ObjectAreaStart(); |
1041 while (current < top) { | 1047 while (current < top) { |
1042 HeapObject* object = HeapObject::FromAddress(current); | 1048 HeapObject* object = HeapObject::FromAddress(current); |
1043 | 1049 |
1044 // The first word should be a map, and we expect all map pointers to | 1050 // The first word should be a map, and we expect all map pointers to |
1045 // be in map space. | 1051 // be in map space. |
1046 Map* map = object->map(); | 1052 Map* map = object->map(); |
1047 ASSERT(map->IsMap()); | 1053 ASSERT(map->IsMap()); |
1048 ASSERT(Heap::map_space()->Contains(map)); | 1054 ASSERT(Heap::map_space()->Contains(map)); |
1049 | 1055 |
1050 // Perform space-specific object verification. | 1056 // Perform space-specific object verification. |
1051 VerifyObject(object); | 1057 VerifyObject(object); |
1052 | 1058 |
1053 // The object itself should look OK. | 1059 // The object itself should look OK. |
1054 object->Verify(); | 1060 object->Verify(); |
1055 | 1061 |
1056 // All the interior pointers should be contained in the heap and | 1062 // All the interior pointers should be contained in the heap and |
1057 // have page regions covering intergenerational references should be | 1063 // have their remembered set bits set if required as determined |
1058 // marked dirty. | 1064 // by the visitor. |
1059 int size = object->Size(); | 1065 int size = object->Size(); |
1060 object->IterateBody(map->instance_type(), size, visitor); | 1066 object->IterateBody(map->instance_type(), size, visitor); |
1061 | 1067 |
1062 current += size; | 1068 current += size; |
1063 } | 1069 } |
1064 | 1070 |
1065 // The allocation pointer should not be in the middle of an object. | 1071 // The allocation pointer should not be in the middle of an object. |
1066 ASSERT(current == top); | 1072 ASSERT(current == top); |
1067 } | 1073 } |
1068 | 1074 |
(...skipping 38 matching lines...)
1107 return false; | 1113 return false; |
1108 } | 1114 } |
1109 if (!from_space_.Setup(start + maximum_semispace_capacity, | 1115 if (!from_space_.Setup(start + maximum_semispace_capacity, |
1110 initial_semispace_capacity, | 1116 initial_semispace_capacity, |
1111 maximum_semispace_capacity)) { | 1117 maximum_semispace_capacity)) { |
1112 return false; | 1118 return false; |
1113 } | 1119 } |
1114 | 1120 |
1115 start_ = start; | 1121 start_ = start; |
1116 address_mask_ = ~(size - 1); | 1122 address_mask_ = ~(size - 1); |
1117 object_mask_ = address_mask_ | kHeapObjectTagMask; | 1123 object_mask_ = address_mask_ | kHeapObjectTag; |
1118 object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag; | 1124 object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag; |
1119 | 1125 |
1120 allocation_info_.top = to_space_.low(); | 1126 allocation_info_.top = to_space_.low(); |
1121 allocation_info_.limit = to_space_.high(); | 1127 allocation_info_.limit = to_space_.high(); |
1122 mc_forwarding_info_.top = NULL; | 1128 mc_forwarding_info_.top = NULL; |
1123 mc_forwarding_info_.limit = NULL; | 1129 mc_forwarding_info_.limit = NULL; |
1124 | 1130 |
1125 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); | 1131 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); |
1126 return true; | 1132 return true; |
1127 } | 1133 } |
(...skipping 183 matching lines...)
1311 // otherwise. In the mark-compact collector, the memory region of the from | 1317 // otherwise. In the mark-compact collector, the memory region of the from |
1312 // space is used as the marking stack. It requires contiguous memory | 1318 // space is used as the marking stack. It requires contiguous memory |
1313 // addresses. | 1319 // addresses. |
1314 initial_capacity_ = initial_capacity; | 1320 initial_capacity_ = initial_capacity; |
1315 capacity_ = initial_capacity; | 1321 capacity_ = initial_capacity; |
1316 maximum_capacity_ = maximum_capacity; | 1322 maximum_capacity_ = maximum_capacity; |
1317 committed_ = false; | 1323 committed_ = false; |
1318 | 1324 |
1319 start_ = start; | 1325 start_ = start; |
1320 address_mask_ = ~(maximum_capacity - 1); | 1326 address_mask_ = ~(maximum_capacity - 1); |
1321 object_mask_ = address_mask_ | kHeapObjectTagMask; | 1327 object_mask_ = address_mask_ | kHeapObjectTag; |
1322 object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag; | 1328 object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag; |
1323 age_mark_ = start_; | 1329 age_mark_ = start_; |
1324 | 1330 |
1325 return Commit(); | 1331 return Commit(); |
1326 } | 1332 } |
1327 | 1333 |
1328 | 1334 |
1329 void SemiSpace::TearDown() { | 1335 void SemiSpace::TearDown() { |
1330 start_ = NULL; | 1336 start_ = NULL; |
1331 capacity_ = 0; | 1337 capacity_ = 0; |
(...skipping 289 matching lines...)
1621 ASSERT(size_in_bytes > 0); | 1627 ASSERT(size_in_bytes > 0); |
1622 ASSERT(IsAligned(size_in_bytes, kPointerSize)); | 1628 ASSERT(IsAligned(size_in_bytes, kPointerSize)); |
1623 | 1629 |
1624 // We write a map and possibly size information to the block. If the block | 1630 // We write a map and possibly size information to the block. If the block |
1625 // is big enough to be a ByteArray with at least one extra word (the next | 1631 // is big enough to be a ByteArray with at least one extra word (the next |
1626 // pointer), we set its map to be the byte array map and its size to an | 1632 // pointer), we set its map to be the byte array map and its size to an |
1627 // appropriate array length for the desired size from HeapObject::Size(). | 1633 // appropriate array length for the desired size from HeapObject::Size(). |
1628 // If the block is too small (eg, one or two words) to hold both a size | 1634 // If the block is too small (eg, one or two words) to hold both a size |
1629 // field and a next pointer, we give it a filler map that gives it the | 1635 // field and a next pointer, we give it a filler map that gives it the |
1630 // correct size. | 1636 // correct size. |
1631 if (size_in_bytes > ByteArray::kHeaderSize) { | 1637 if (size_in_bytes > ByteArray::kAlignedSize) { |
1632 set_map(Heap::raw_unchecked_byte_array_map()); | 1638 set_map(Heap::raw_unchecked_byte_array_map()); |
1633 // Can't use ByteArray::cast because it fails during deserialization. | 1639 // Can't use ByteArray::cast because it fails during deserialization. |
1634 ByteArray* this_as_byte_array = reinterpret_cast<ByteArray*>(this); | 1640 ByteArray* this_as_byte_array = reinterpret_cast<ByteArray*>(this); |
1635 this_as_byte_array->set_length(ByteArray::LengthFor(size_in_bytes)); | 1641 this_as_byte_array->set_length(ByteArray::LengthFor(size_in_bytes)); |
1636 } else if (size_in_bytes == kPointerSize) { | 1642 } else if (size_in_bytes == kPointerSize) { |
1637 set_map(Heap::raw_unchecked_one_pointer_filler_map()); | 1643 set_map(Heap::raw_unchecked_one_pointer_filler_map()); |
1638 } else if (size_in_bytes == 2 * kPointerSize) { | 1644 } else if (size_in_bytes == 2 * kPointerSize) { |
1639 set_map(Heap::raw_unchecked_two_pointer_filler_map()); | 1645 set_map(Heap::raw_unchecked_two_pointer_filler_map()); |
1640 } else { | 1646 } else { |
1641 UNREACHABLE(); | 1647 UNREACHABLE(); |
(...skipping 252 matching lines...)
1894 ASSERT(Waste() == 0); | 1900 ASSERT(Waste() == 0); |
1895 ASSERT(AvailableFree() == 0); | 1901 ASSERT(AvailableFree() == 0); |
1896 | 1902 |
1897 // Build the free list for the space. | 1903 // Build the free list for the space. |
1898 int computed_size = 0; | 1904 int computed_size = 0; |
1899 PageIterator it(this, PageIterator::PAGES_USED_BY_MC); | 1905 PageIterator it(this, PageIterator::PAGES_USED_BY_MC); |
1900 while (it.has_next()) { | 1906 while (it.has_next()) { |
1901 Page* p = it.next(); | 1907 Page* p = it.next(); |
1902 // Space below the relocation pointer is allocated. | 1908 // Space below the relocation pointer is allocated. |
1903 computed_size += | 1909 computed_size += |
1904 static_cast<int>(p->AllocationWatermark() - p->ObjectAreaStart()); | 1910 static_cast<int>(p->mc_relocation_top - p->ObjectAreaStart()); |
1905 if (it.has_next()) { | 1911 if (it.has_next()) { |
1906 // Free the space at the top of the page. | 1912 // Free the space at the top of the page. We cannot use |
| 1913 // p->mc_relocation_top after the call to Free (because Free will clear |
| 1914 // remembered set bits). |
1907 int extra_size = | 1915 int extra_size = |
1908 static_cast<int>(p->ObjectAreaEnd() - p->AllocationWatermark()); | 1916 static_cast<int>(p->ObjectAreaEnd() - p->mc_relocation_top); |
1909 if (extra_size > 0) { | 1917 if (extra_size > 0) { |
1910 int wasted_bytes = free_list_.Free(p->AllocationWatermark(), | 1918 int wasted_bytes = free_list_.Free(p->mc_relocation_top, extra_size); |
1911 extra_size); | |
1912 // The bytes we have just "freed" to add to the free list were | 1919 // The bytes we have just "freed" to add to the free list were |
1913 // already accounted as available. | 1920 // already accounted as available. |
1914 accounting_stats_.WasteBytes(wasted_bytes); | 1921 accounting_stats_.WasteBytes(wasted_bytes); |
1915 } | 1922 } |
1916 } | 1923 } |
1917 } | 1924 } |
1918 | 1925 |
1919 // Make sure the computed size - based on the used portion of the pages in | 1926 // Make sure the computed size - based on the used portion of the pages in |
1920 // use - matches the size obtained while computing forwarding addresses. | 1927 // use - matches the size obtained while computing forwarding addresses. |
1921 ASSERT(computed_size == Size()); | 1928 ASSERT(computed_size == Size()); |
(...skipping 27 matching lines...)
1949 MemoryAllocator::SetNextPage(prev, last->next_page()); | 1956 MemoryAllocator::SetNextPage(prev, last->next_page()); |
1950 } | 1957 } |
1951 | 1958 |
1952 // Attach it after the last page. | 1959 // Attach it after the last page. |
1953 MemoryAllocator::SetNextPage(last_page_, first); | 1960 MemoryAllocator::SetNextPage(last_page_, first); |
1954 last_page_ = last; | 1961 last_page_ = last; |
1955 MemoryAllocator::SetNextPage(last, NULL); | 1962 MemoryAllocator::SetNextPage(last, NULL); |
1956 | 1963 |
1957 // Clean them up. | 1964 // Clean them up. |
1958 do { | 1965 do { |
1959 first->InvalidateWatermark(true); | 1966 first->ClearRSet(); |
1960 first->SetAllocationWatermark(first->ObjectAreaStart()); | |
1961 first->SetCachedAllocationWatermark(first->ObjectAreaStart()); | |
1962 first->SetRegionMarks(Page::kAllRegionsCleanMarks); | |
1963 first = first->next_page(); | 1967 first = first->next_page(); |
1964 } while (first != NULL); | 1968 } while (first != NULL); |
1965 | 1969 |
1966 // Order of pages in this space might no longer be consistent with | 1970 // Order of pages in this space might no longer be consistent with |
1967 // order of pages in chunks. | 1971 // order of pages in chunks. |
1968 page_list_is_chunk_ordered_ = false; | 1972 page_list_is_chunk_ordered_ = false; |
1969 } | 1973 } |
1970 | 1974 |
1971 | 1975 |
1972 void PagedSpace::PrepareForMarkCompact(bool will_compact) { | 1976 void PagedSpace::PrepareForMarkCompact(bool will_compact) { |
(...skipping 19 matching lines...)
1992 MemoryAllocator::RelinkPageListInChunkOrder(this, | 1996 MemoryAllocator::RelinkPageListInChunkOrder(this, |
1993 &first_page_, | 1997 &first_page_, |
1994 &last_page_, | 1998 &last_page_, |
1995 &new_last_in_use); | 1999 &new_last_in_use); |
1996 ASSERT(new_last_in_use->is_valid()); | 2000 ASSERT(new_last_in_use->is_valid()); |
1997 | 2001 |
1998 if (new_last_in_use != last_in_use) { | 2002 if (new_last_in_use != last_in_use) { |
1999 // Current allocation top points to a page which is now in the middle | 2003 // Current allocation top points to a page which is now in the middle |
2000 // of page list. We should move allocation top forward to the new last | 2004 // of page list. We should move allocation top forward to the new last |
2001 // used page so various object iterators will continue to work properly. | 2005 // used page so various object iterators will continue to work properly. |
2002 last_in_use->SetAllocationWatermark(last_in_use->AllocationTop()); | |
2003 | 2006 |
2004 int size_in_bytes = static_cast<int>(PageAllocationLimit(last_in_use) - | 2007 int size_in_bytes = static_cast<int>(PageAllocationLimit(last_in_use) - |
2005 last_in_use->AllocationTop()); | 2008 last_in_use->AllocationTop()); |
2006 | 2009 |
2007 if (size_in_bytes > 0) { | 2010 if (size_in_bytes > 0) { |
2008 // There is still some space left on this page. Create a fake | 2011 // There is still some space left on this page. Create a fake |
2009 // object which will occupy all free space on this page. | 2012 // object which will occupy all free space on this page. |
2010 // Otherwise iterators would not be able to scan this page | 2013 // Otherwise iterators would not be able to scan this page |
2011 // correctly. | 2014 // correctly. |
2012 | 2015 |
(...skipping 12 matching lines...)
2025 PageIterator pages_in_use_iterator(this, PageIterator::PAGES_IN_USE); | 2028 PageIterator pages_in_use_iterator(this, PageIterator::PAGES_IN_USE); |
2026 while (pages_in_use_iterator.has_next()) { | 2029 while (pages_in_use_iterator.has_next()) { |
2027 Page* p = pages_in_use_iterator.next(); | 2030 Page* p = pages_in_use_iterator.next(); |
2028 if (!p->WasInUseBeforeMC()) { | 2031 if (!p->WasInUseBeforeMC()) { |
2029 // Empty page is in the middle of a sequence of used pages. | 2032 // Empty page is in the middle of a sequence of used pages. |
2030 // Create a fake object which will occupy all free space on this page. | 2033 // Create a fake object which will occupy all free space on this page. |
2031 // Otherwise iterators would not be able to scan this page correctly. | 2034 // Otherwise iterators would not be able to scan this page correctly. |
2032 int size_in_bytes = static_cast<int>(PageAllocationLimit(p) - | 2035 int size_in_bytes = static_cast<int>(PageAllocationLimit(p) - |
2033 p->ObjectAreaStart()); | 2036 p->ObjectAreaStart()); |
2034 | 2037 |
2035 p->SetAllocationWatermark(p->ObjectAreaStart()); | |
2036 Heap::CreateFillerObjectAt(p->ObjectAreaStart(), size_in_bytes); | 2038 Heap::CreateFillerObjectAt(p->ObjectAreaStart(), size_in_bytes); |
2037 } | 2039 } |
2038 } | 2040 } |
2039 | 2041 |
2040 page_list_is_chunk_ordered_ = true; | 2042 page_list_is_chunk_ordered_ = true; |
2041 } | 2043 } |
2042 } | 2044 } |
2043 } | 2045 } |
2044 | 2046 |
2045 | 2047 |
(...skipping 11 matching lines...)
2057 while (bytes_left_to_reserve > 0) { | 2059 while (bytes_left_to_reserve > 0) { |
2058 if (!reserved_page->next_page()->is_valid()) { | 2060 if (!reserved_page->next_page()->is_valid()) { |
2059 if (Heap::OldGenerationAllocationLimitReached()) return false; | 2061 if (Heap::OldGenerationAllocationLimitReached()) return false; |
2060 Expand(reserved_page); | 2062 Expand(reserved_page); |
2061 } | 2063 } |
2062 bytes_left_to_reserve -= Page::kPageSize; | 2064 bytes_left_to_reserve -= Page::kPageSize; |
2063 reserved_page = reserved_page->next_page(); | 2065 reserved_page = reserved_page->next_page(); |
2064 if (!reserved_page->is_valid()) return false; | 2066 if (!reserved_page->is_valid()) return false; |
2065 } | 2067 } |
2066 ASSERT(TopPageOf(allocation_info_)->next_page()->is_valid()); | 2068 ASSERT(TopPageOf(allocation_info_)->next_page()->is_valid()); |
2067 TopPageOf(allocation_info_)->next_page()->InvalidateWatermark(true); | |
2068 SetAllocationInfo(&allocation_info_, | 2069 SetAllocationInfo(&allocation_info_, |
2069 TopPageOf(allocation_info_)->next_page()); | 2070 TopPageOf(allocation_info_)->next_page()); |
2070 return true; | 2071 return true; |
2071 } | 2072 } |
2072 | 2073 |
2073 | 2074 |
2074 // You have to call this last, since the implementation from PagedSpace | 2075 // You have to call this last, since the implementation from PagedSpace |
2075 // doesn't know that memory was 'promised' to large object space. | 2076 // doesn't know that memory was 'promised' to large object space. |
2076 bool LargeObjectSpace::ReserveSpace(int bytes) { | 2077 bool LargeObjectSpace::ReserveSpace(int bytes) { |
2077 return Heap::OldGenerationSpaceAvailable() >= bytes; | 2078 return Heap::OldGenerationSpaceAvailable() >= bytes; |
(...skipping 14 matching lines...)
2092 } | 2093 } |
2093 | 2094 |
2094 // There is no next page in this space. Try free list allocation unless that | 2095 // There is no next page in this space. Try free list allocation unless that |
2095 // is currently forbidden. | 2096 // is currently forbidden. |
2096 if (!Heap::linear_allocation()) { | 2097 if (!Heap::linear_allocation()) { |
2097 int wasted_bytes; | 2098 int wasted_bytes; |
2098 Object* result = free_list_.Allocate(size_in_bytes, &wasted_bytes); | 2099 Object* result = free_list_.Allocate(size_in_bytes, &wasted_bytes); |
2099 accounting_stats_.WasteBytes(wasted_bytes); | 2100 accounting_stats_.WasteBytes(wasted_bytes); |
2100 if (!result->IsFailure()) { | 2101 if (!result->IsFailure()) { |
2101 accounting_stats_.AllocateBytes(size_in_bytes); | 2102 accounting_stats_.AllocateBytes(size_in_bytes); |
2102 | 2103 return HeapObject::cast(result); |
2103 HeapObject* obj = HeapObject::cast(result); | |
2104 Page* p = Page::FromAddress(obj->address()); | |
2105 | |
2106 if (obj->address() >= p->AllocationWatermark()) { | |
2107 p->SetAllocationWatermark(obj->address() + size_in_bytes); | |
2108 } | |
2109 | |
2110 return obj; | |
2111 } | 2104 } |
2112 } | 2105 } |
2113 | 2106 |
2114 // Free list allocation failed and there is no next page. Fail if we have | 2107 // Free list allocation failed and there is no next page. Fail if we have |
2115 // hit the old generation size limit that should cause a garbage | 2108 // hit the old generation size limit that should cause a garbage |
2116 // collection. | 2109 // collection. |
2117 if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) { | 2110 if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) { |
2118 return NULL; | 2111 return NULL; |
2119 } | 2112 } |
2120 | 2113 |
2121 // Try to expand the space and allocate in the new next page. | 2114 // Try to expand the space and allocate in the new next page. |
2122 ASSERT(!current_page->next_page()->is_valid()); | 2115 ASSERT(!current_page->next_page()->is_valid()); |
2123 if (Expand(current_page)) { | 2116 if (Expand(current_page)) { |
2124 return AllocateInNextPage(current_page, size_in_bytes); | 2117 return AllocateInNextPage(current_page, size_in_bytes); |
2125 } | 2118 } |
2126 | 2119 |
2127 // Finally, fail. | 2120 // Finally, fail. |
2128 return NULL; | 2121 return NULL; |
2129 } | 2122 } |
2130 | 2123 |
2131 | 2124 |
2132 void OldSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) { | 2125 void OldSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) { |
2133 current_page->SetAllocationWatermark(allocation_info_.top); | |
2134 int free_size = | 2126 int free_size = |
2135 static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top); | 2127 static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top); |
2136 if (free_size > 0) { | 2128 if (free_size > 0) { |
2137 int wasted_bytes = free_list_.Free(allocation_info_.top, free_size); | 2129 int wasted_bytes = free_list_.Free(allocation_info_.top, free_size); |
2138 accounting_stats_.WasteBytes(wasted_bytes); | 2130 accounting_stats_.WasteBytes(wasted_bytes); |
2139 } | 2131 } |
2140 } | 2132 } |
2141 | 2133 |
2142 | 2134 |
2143 void FixedSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) { | 2135 void FixedSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) { |
2144 current_page->SetAllocationWatermark(allocation_info_.top); | |
2145 int free_size = | 2136 int free_size = |
2146 static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top); | 2137 static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top); |
2147 // In the fixed space free list all the free list items have the right size. | 2138 // In the fixed space free list all the free list items have the right size. |
2148 // We use up the rest of the page while preserving this invariant. | 2139 // We use up the rest of the page while preserving this invariant. |
2149 while (free_size >= object_size_in_bytes_) { | 2140 while (free_size >= object_size_in_bytes_) { |
2150 free_list_.Free(allocation_info_.top); | 2141 free_list_.Free(allocation_info_.top); |
2151 allocation_info_.top += object_size_in_bytes_; | 2142 allocation_info_.top += object_size_in_bytes_; |
2152 free_size -= object_size_in_bytes_; | 2143 free_size -= object_size_in_bytes_; |
2153 accounting_stats_.WasteBytes(object_size_in_bytes_); | 2144 accounting_stats_.WasteBytes(object_size_in_bytes_); |
2154 } | 2145 } |
2155 } | 2146 } |
2156 | 2147 |
2157 | 2148 |
2158 // Add the block at the top of the page to the space's free list, set the | 2149 // Add the block at the top of the page to the space's free list, set the |
2159 // allocation info to the next page (assumed to be one), and allocate | 2150 // allocation info to the next page (assumed to be one), and allocate |
2160 // linearly there. | 2151 // linearly there. |
2161 HeapObject* OldSpace::AllocateInNextPage(Page* current_page, | 2152 HeapObject* OldSpace::AllocateInNextPage(Page* current_page, |
2162 int size_in_bytes) { | 2153 int size_in_bytes) { |
2163 ASSERT(current_page->next_page()->is_valid()); | 2154 ASSERT(current_page->next_page()->is_valid()); |
2164 current_page->next_page()->InvalidateWatermark(true); | |
2165 PutRestOfCurrentPageOnFreeList(current_page); | 2155 PutRestOfCurrentPageOnFreeList(current_page); |
2166 SetAllocationInfo(&allocation_info_, current_page->next_page()); | 2156 SetAllocationInfo(&allocation_info_, current_page->next_page()); |
2167 return AllocateLinearly(&allocation_info_, size_in_bytes); | 2157 return AllocateLinearly(&allocation_info_, size_in_bytes); |
2168 } | 2158 } |
2169 | 2159 |
2170 | 2160 |
2171 #ifdef DEBUG | 2161 #ifdef DEBUG |
2172 struct CommentStatistic { | 2162 struct CommentStatistic { |
2173 const char* comment; | 2163 const char* comment; |
2174 int size; | 2164 int size; |
(...skipping 124 matching lines...)
2299 } | 2289 } |
2300 } | 2290 } |
2301 } | 2291 } |
2302 | 2292 |
2303 | 2293 |
2304 void OldSpace::ReportStatistics() { | 2294 void OldSpace::ReportStatistics() { |
2305 int pct = Available() * 100 / Capacity(); | 2295 int pct = Available() * 100 / Capacity(); |
2306 PrintF(" capacity: %d, waste: %d, available: %d, %%%d\n", | 2296 PrintF(" capacity: %d, waste: %d, available: %d, %%%d\n", |
2307 Capacity(), Waste(), Available(), pct); | 2297 Capacity(), Waste(), Available(), pct); |
2308 | 2298 |
| 2299 // Report remembered set statistics. |
| 2300 int rset_marked_pointers = 0; |
| 2301 int rset_marked_arrays = 0; |
| 2302 int rset_marked_array_elements = 0; |
| 2303 int cross_gen_pointers = 0; |
| 2304 int cross_gen_array_elements = 0; |
| 2305 |
| 2306 PageIterator page_it(this, PageIterator::PAGES_IN_USE); |
| 2307 while (page_it.has_next()) { |
| 2308 Page* p = page_it.next(); |
| 2309 |
| 2310 for (Address rset_addr = p->RSetStart(); |
| 2311 rset_addr < p->RSetEnd(); |
| 2312 rset_addr += kIntSize) { |
| 2313 int rset = Memory::int_at(rset_addr); |
| 2314 if (rset != 0) { |
| 2315 // Bits were set |
| 2316 int intoff = |
| 2317 static_cast<int>(rset_addr - p->address() - Page::kRSetOffset); |
| 2318 int bitoff = 0; |
| 2319 for (; bitoff < kBitsPerInt; ++bitoff) { |
| 2320 if ((rset & (1 << bitoff)) != 0) { |
| 2321 int bitpos = intoff*kBitsPerByte + bitoff; |
| 2322 Address slot = p->OffsetToAddress(bitpos << kObjectAlignmentBits); |
| 2323 Object** obj = reinterpret_cast<Object**>(slot); |
| 2324 if (*obj == Heap::raw_unchecked_fixed_array_map()) { |
| 2325 rset_marked_arrays++; |
| 2326 FixedArray* fa = FixedArray::cast(HeapObject::FromAddress(slot)); |
| 2327 |
| 2328 rset_marked_array_elements += fa->length(); |
| 2329 // Manually inline FixedArray::IterateBody |
| 2330 Address elm_start = slot + FixedArray::kHeaderSize; |
| 2331 Address elm_stop = elm_start + fa->length() * kPointerSize; |
| 2332 for (Address elm_addr = elm_start; |
| 2333 elm_addr < elm_stop; elm_addr += kPointerSize) { |
| 2334 // Filter non-heap-object pointers |
| 2335 Object** elm_p = reinterpret_cast<Object**>(elm_addr); |
| 2336 if (Heap::InNewSpace(*elm_p)) |
| 2337 cross_gen_array_elements++; |
| 2338 } |
| 2339 } else { |
| 2340 rset_marked_pointers++; |
| 2341 if (Heap::InNewSpace(*obj)) |
| 2342 cross_gen_pointers++; |
| 2343 } |
| 2344 } |
| 2345 } |
| 2346 } |
| 2347 } |
| 2348 } |
| 2349 |
| 2350 pct = rset_marked_pointers == 0 ? |
| 2351 0 : cross_gen_pointers * 100 / rset_marked_pointers; |
| 2352 PrintF(" rset-marked pointers %d, to-new-space %d (%%%d)\n", |
| 2353 rset_marked_pointers, cross_gen_pointers, pct); |
| 2354 PrintF(" rset_marked arrays %d, ", rset_marked_arrays); |
| 2355 PrintF(" elements %d, ", rset_marked_array_elements); |
| 2356 pct = rset_marked_array_elements == 0 ? 0 |
| 2357 : cross_gen_array_elements * 100 / rset_marked_array_elements; |
| 2358 PrintF(" pointers to new space %d (%%%d)\n", cross_gen_array_elements, pct); |
| 2359 PrintF(" total rset-marked bits %d\n", |
| 2360 (rset_marked_pointers + rset_marked_arrays)); |
| 2361 pct = (rset_marked_pointers + rset_marked_array_elements) == 0 ? 0 |
| 2362 : (cross_gen_pointers + cross_gen_array_elements) * 100 / |
| 2363 (rset_marked_pointers + rset_marked_array_elements); |
| 2364 PrintF(" total rset pointers %d, true cross generation ones %d (%%%d)\n", |
| 2365 (rset_marked_pointers + rset_marked_array_elements), |
| 2366 (cross_gen_pointers + cross_gen_array_elements), |
| 2367 pct); |
| 2368 |
2309 ClearHistograms(); | 2369 ClearHistograms(); |
2310 HeapObjectIterator obj_it(this); | 2370 HeapObjectIterator obj_it(this); |
2311 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) | 2371 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) |
2312 CollectHistogramInfo(obj); | 2372 CollectHistogramInfo(obj); |
2313 ReportHistogram(true); | 2373 ReportHistogram(true); |
2314 } | 2374 } |
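The bit-to-slot arithmetic in the statistics loop above is easier to see in isolation: each remembered-set bit covers one object-aligned word of the page, so a bit's absolute position, shifted left by the alignment, is the byte offset of the slot it describes. A hedged sketch with stand-in constants (assuming a 32-bit build with 4-byte pointers and 4-byte remembered-set words):

    // Sketch: map a remembered-set bit back to the page offset of its slot.
    #include <cstdio>

    const int kIntSizeSketch             = 4;  // bytes per remembered-set word
    const int kBitsPerByteSketch         = 8;
    const int kObjectAlignmentBitsSketch = 2;  // log2(pointer size) on 32-bit

    // rset_word_index: which word of the remembered set; bit: 0..31 within it.
    int SlotOffsetInPage(int rset_word_index, int bit) {
      int intoff = rset_word_index * kIntSizeSketch;   // byte offset into the rset
      int bitpos = intoff * kBitsPerByteSketch + bit;  // absolute bit number
      return bitpos << kObjectAlignmentBitsSketch;     // one bit per aligned word
    }

    int main() {
      // Bit 4 of rset word 3 covers the slot at page offset (3*32 + 4) * 4 = 400.
      std::printf("slot offset = %d\n", SlotOffsetInPage(3, 4));
      return 0;
    }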
| 2375 |
| 2376 |
| 2377 // Dump the range of remembered set words between [start, end) corresponding |
| 2378 // to the pointers starting at object_p. The allocation_top is an object |
| 2379 // pointer which should not be read past. This is important for large object |
| 2380 // pages, where some bits in the remembered set range do not correspond to |
| 2381 // allocated addresses. |
| 2382 static void PrintRSetRange(Address start, Address end, Object** object_p, |
| 2383 Address allocation_top) { |
| 2384 Address rset_address = start; |
| 2385 |
| 2386 // If the range starts on an odd numbered word (eg, for large object extra |
| 2387 // remembered set ranges), print some spaces. |
| 2388 if ((reinterpret_cast<uintptr_t>(start) / kIntSize) % 2 == 1) { |
| 2389 PrintF(" "); |
| 2390 } |
| 2391 |
| 2392 // Loop over all the words in the range. |
| 2393 while (rset_address < end) { |
| 2394 uint32_t rset_word = Memory::uint32_at(rset_address); |
| 2395 int bit_position = 0; |
| 2396 |
| 2397 // Loop over all the bits in the word. |
| 2398 while (bit_position < kBitsPerInt) { |
| 2399 if (object_p == reinterpret_cast<Object**>(allocation_top)) { |
| 2400 // Print a bar at the allocation pointer. |
| 2401 PrintF("|"); |
| 2402 } else if (object_p > reinterpret_cast<Object**>(allocation_top)) { |
| 2403 // Do not dereference object_p past the allocation pointer. |
| 2404 PrintF("#"); |
| 2405 } else if ((rset_word & (1 << bit_position)) == 0) { |
| 2406 // Print a dot for zero bits. |
| 2407 PrintF("."); |
| 2408 } else if (Heap::InNewSpace(*object_p)) { |
| 2409 // Print an X for one bits for pointers to new space. |
| 2410 PrintF("X"); |
| 2411 } else { |
| 2412 // Print a circle for one bits for pointers to old space. |
| 2413 PrintF("o"); |
| 2414 } |
| 2415 |
| 2416 // Print a space after every 8th bit except the last. |
| 2417 if (bit_position % 8 == 7 && bit_position != (kBitsPerInt - 1)) { |
| 2418 PrintF(" "); |
| 2419 } |
| 2420 |
| 2421 // Advance to next bit. |
| 2422 bit_position++; |
| 2423 object_p++; |
| 2424 } |
| 2425 |
| 2426 // Print a newline after every odd numbered word, otherwise a space. |
| 2427 if ((reinterpret_cast<uintptr_t>(rset_address) / kIntSize) % 2 == 1) { |
| 2428 PrintF("\n"); |
| 2429 } else { |
| 2430 PrintF(" "); |
| 2431 } |
| 2432 |
| 2433 // Advance to next remembered set word. |
| 2434 rset_address += kIntSize; |
| 2435 } |
| 2436 } |
| 2437 |
| 2438 |
| 2439 void PagedSpace::DoPrintRSet(const char* space_name) { |
| 2440 PageIterator it(this, PageIterator::PAGES_IN_USE); |
| 2441 while (it.has_next()) { |
| 2442 Page* p = it.next(); |
| 2443 PrintF("%s page 0x%x:\n", space_name, p); |
| 2444 PrintRSetRange(p->RSetStart(), p->RSetEnd(), |
| 2445 reinterpret_cast<Object**>(p->ObjectAreaStart()), |
| 2446 p->AllocationTop()); |
| 2447 PrintF("\n"); |
| 2448 } |
| 2449 } |
| 2450 |
| 2451 |
| 2452 void OldSpace::PrintRSet() { DoPrintRSet("old"); } |
2315 #endif | 2453 #endif |
2316 | 2454 |
2317 // ----------------------------------------------------------------------------- | 2455 // ----------------------------------------------------------------------------- |
2318 // FixedSpace implementation | 2456 // FixedSpace implementation |
2319 | 2457 |
2320 void FixedSpace::PrepareForMarkCompact(bool will_compact) { | 2458 void FixedSpace::PrepareForMarkCompact(bool will_compact) { |
2321 // Call prepare of the super class. | 2459 // Call prepare of the super class. |
2322 PagedSpace::PrepareForMarkCompact(will_compact); | 2460 PagedSpace::PrepareForMarkCompact(will_compact); |
2323 | 2461 |
2324 if (will_compact) { | 2462 if (will_compact) { |
(...skipping 29 matching lines...) Expand all Loading... |
2354 // Update allocation_top of each page in use and compute waste. | 2492 // Update allocation_top of each page in use and compute waste. |
2355 int computed_size = 0; | 2493 int computed_size = 0; |
2356 PageIterator it(this, PageIterator::PAGES_USED_BY_MC); | 2494 PageIterator it(this, PageIterator::PAGES_USED_BY_MC); |
2357 while (it.has_next()) { | 2495 while (it.has_next()) { |
2358 Page* page = it.next(); | 2496 Page* page = it.next(); |
2359 Address page_top = page->AllocationTop(); | 2497 Address page_top = page->AllocationTop(); |
2360 computed_size += static_cast<int>(page_top - page->ObjectAreaStart()); | 2498 computed_size += static_cast<int>(page_top - page->ObjectAreaStart()); |
2361 if (it.has_next()) { | 2499 if (it.has_next()) { |
2362 accounting_stats_.WasteBytes( | 2500 accounting_stats_.WasteBytes( |
2363 static_cast<int>(page->ObjectAreaEnd() - page_top)); | 2501 static_cast<int>(page->ObjectAreaEnd() - page_top)); |
2364 page->SetAllocationWatermark(page_top); | |
2365 } | 2502 } |
2366 } | 2503 } |
2367 | 2504 |
2368 // Make sure the computed size - based on the used portion of the | 2505 // Make sure the computed size - based on the used portion of the |
2369 // pages in use - matches the size we adjust during allocation. | 2506 // pages in use - matches the size we adjust during allocation. |
2370 ASSERT(computed_size == Size()); | 2507 ASSERT(computed_size == Size()); |
2371 } | 2508 } |
2372 | 2509 |
2373 | 2510 |
2374 // Slow case for normal allocation. Try in order: (1) allocate in the next | 2511 // Slow case for normal allocation. Try in order: (1) allocate in the next |
2375 // page in the space, (2) allocate off the space's free list, (3) expand the | 2512 // page in the space, (2) allocate off the space's free list, (3) expand the |
2376 // space, (4) fail. | 2513 // space, (4) fail. |
2377 HeapObject* FixedSpace::SlowAllocateRaw(int size_in_bytes) { | 2514 HeapObject* FixedSpace::SlowAllocateRaw(int size_in_bytes) { |
2378 ASSERT_EQ(object_size_in_bytes_, size_in_bytes); | 2515 ASSERT_EQ(object_size_in_bytes_, size_in_bytes); |
2379 // Linear allocation in this space has failed. If there is another page | 2516 // Linear allocation in this space has failed. If there is another page |
2380 // in the space, move to that page and allocate there. This allocation | 2517 // in the space, move to that page and allocate there. This allocation |
2381 // should succeed. | 2518 // should succeed. |
2382 Page* current_page = TopPageOf(allocation_info_); | 2519 Page* current_page = TopPageOf(allocation_info_); |
2383 if (current_page->next_page()->is_valid()) { | 2520 if (current_page->next_page()->is_valid()) { |
2384 return AllocateInNextPage(current_page, size_in_bytes); | 2521 return AllocateInNextPage(current_page, size_in_bytes); |
2385 } | 2522 } |
2386 | 2523 |
2387 // There is no next page in this space. Try free list allocation unless | 2524 // There is no next page in this space. Try free list allocation unless |
2388 // that is currently forbidden. The fixed space free list implicitly assumes | 2525 // that is currently forbidden. The fixed space free list implicitly assumes |
2389 // that all free blocks are of the fixed size. | 2526 // that all free blocks are of the fixed size. |
2390 if (!Heap::linear_allocation()) { | 2527 if (!Heap::linear_allocation()) { |
2391 Object* result = free_list_.Allocate(); | 2528 Object* result = free_list_.Allocate(); |
2392 if (!result->IsFailure()) { | 2529 if (!result->IsFailure()) { |
2393 accounting_stats_.AllocateBytes(size_in_bytes); | 2530 accounting_stats_.AllocateBytes(size_in_bytes); |
2394 HeapObject* obj = HeapObject::cast(result); | 2531 return HeapObject::cast(result); |
2395 Page* p = Page::FromAddress(obj->address()); | |
2396 | |
2397 if (obj->address() >= p->AllocationWatermark()) { | |
2398 p->SetAllocationWatermark(obj->address() + size_in_bytes); | |
2399 } | |
2400 | |
2401 return obj; | |
2402 } | 2532 } |
2403 } | 2533 } |
2404 | 2534 |
2405 // Free list allocation failed and there is no next page. Fail if we have | 2535 // Free list allocation failed and there is no next page. Fail if we have |
2406 // hit the old generation size limit that should cause a garbage | 2536 // hit the old generation size limit that should cause a garbage |
2407 // collection. | 2537 // collection. |
2408 if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) { | 2538 if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) { |
2409 return NULL; | 2539 return NULL; |
2410 } | 2540 } |
2411 | 2541 |
2412 // Try to expand the space and allocate in the new next page. | 2542 // Try to expand the space and allocate in the new next page. |
2413 ASSERT(!current_page->next_page()->is_valid()); | 2543 ASSERT(!current_page->next_page()->is_valid()); |
2414 if (Expand(current_page)) { | 2544 if (Expand(current_page)) { |
2415 return AllocateInNextPage(current_page, size_in_bytes); | 2545 return AllocateInNextPage(current_page, size_in_bytes); |
2416 } | 2546 } |
2417 | 2547 |
2418 // Finally, fail. | 2548 // Finally, fail. |
2419 return NULL; | 2549 return NULL; |
2420 } | 2550 } |
2421 | 2551 |
2422 | 2552 |
2423 // Move to the next page (there is assumed to be one) and allocate there. | 2553 // Move to the next page (there is assumed to be one) and allocate there. |
2424 // The top of page block is always wasted, because it is too small to hold a | 2554 // The top of page block is always wasted, because it is too small to hold a |
2425 // map. | 2555 // map. |
2426 HeapObject* FixedSpace::AllocateInNextPage(Page* current_page, | 2556 HeapObject* FixedSpace::AllocateInNextPage(Page* current_page, |
2427 int size_in_bytes) { | 2557 int size_in_bytes) { |
2428 ASSERT(current_page->next_page()->is_valid()); | 2558 ASSERT(current_page->next_page()->is_valid()); |
2429 ASSERT(allocation_info_.top == PageAllocationLimit(current_page)); | 2559 ASSERT(allocation_info_.top == PageAllocationLimit(current_page)); |
2430 ASSERT_EQ(object_size_in_bytes_, size_in_bytes); | 2560 ASSERT_EQ(object_size_in_bytes_, size_in_bytes); |
2431 current_page->next_page()->InvalidateWatermark(true); | |
2432 current_page->SetAllocationWatermark(allocation_info_.top); | |
2433 accounting_stats_.WasteBytes(page_extra_); | 2561 accounting_stats_.WasteBytes(page_extra_); |
2434 SetAllocationInfo(&allocation_info_, current_page->next_page()); | 2562 SetAllocationInfo(&allocation_info_, current_page->next_page()); |
2435 return AllocateLinearly(&allocation_info_, size_in_bytes); | 2563 return AllocateLinearly(&allocation_info_, size_in_bytes); |
2436 } | 2564 } |
2437 | 2565 |
2438 | 2566 |
2439 #ifdef DEBUG | 2567 #ifdef DEBUG |
2440 void FixedSpace::ReportStatistics() { | 2568 void FixedSpace::ReportStatistics() { |
2441 int pct = Available() * 100 / Capacity(); | 2569 int pct = Available() * 100 / Capacity(); |
2442 PrintF(" capacity: %d, waste: %d, available: %d, %%%d\n", | 2570 PrintF(" capacity: %d, waste: %d, available: %d, %%%d\n", |
2443 Capacity(), Waste(), Available(), pct); | 2571 Capacity(), Waste(), Available(), pct); |
2444 | 2572 |
| 2573 // Report remembered set statistics. |
| 2574 int rset_marked_pointers = 0; |
| 2575 int cross_gen_pointers = 0; |
| 2576 |
| 2577 PageIterator page_it(this, PageIterator::PAGES_IN_USE); |
| 2578 while (page_it.has_next()) { |
| 2579 Page* p = page_it.next(); |
| 2580 |
| 2581 for (Address rset_addr = p->RSetStart(); |
| 2582 rset_addr < p->RSetEnd(); |
| 2583 rset_addr += kIntSize) { |
| 2584 int rset = Memory::int_at(rset_addr); |
| 2585 if (rset != 0) { |
| 2586 // Bits were set |
| 2587 int intoff = |
| 2588 static_cast<int>(rset_addr - p->address() - Page::kRSetOffset); |
| 2589 int bitoff = 0; |
| 2590 for (; bitoff < kBitsPerInt; ++bitoff) { |
| 2591 if ((rset & (1 << bitoff)) != 0) { |
| 2592 int bitpos = intoff*kBitsPerByte + bitoff; |
| 2593 Address slot = p->OffsetToAddress(bitpos << kObjectAlignmentBits); |
| 2594 Object** obj = reinterpret_cast<Object**>(slot); |
| 2595 rset_marked_pointers++; |
| 2596 if (Heap::InNewSpace(*obj)) |
| 2597 cross_gen_pointers++; |
| 2598 } |
| 2599 } |
| 2600 } |
| 2601 } |
| 2602 } |
| 2603 |
| 2604 pct = rset_marked_pointers == 0 ? |
| 2605 0 : cross_gen_pointers * 100 / rset_marked_pointers; |
| 2606 PrintF(" rset-marked pointers %d, to-new-space %d (%%%d)\n", |
| 2607 rset_marked_pointers, cross_gen_pointers, pct); |
| 2608 |
2445 ClearHistograms(); | 2609 ClearHistograms(); |
2446 HeapObjectIterator obj_it(this); | 2610 HeapObjectIterator obj_it(this); |
2447 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) | 2611 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) |
2448 CollectHistogramInfo(obj); | 2612 CollectHistogramInfo(obj); |
2449 ReportHistogram(false); | 2613 ReportHistogram(false); |
2450 } | 2614 } |
| 2615 |
| 2616 |
| 2617 void FixedSpace::PrintRSet() { DoPrintRSet(name_); } |
2451 #endif | 2618 #endif |
2452 | 2619 |
2453 | 2620 |
2454 // ----------------------------------------------------------------------------- | 2621 // ----------------------------------------------------------------------------- |
2455 // MapSpace implementation | 2622 // MapSpace implementation |
2456 | 2623 |
2457 void MapSpace::PrepareForMarkCompact(bool will_compact) { | 2624 void MapSpace::PrepareForMarkCompact(bool will_compact) { |
2458 // Call prepare of the super class. | 2625 // Call prepare of the super class. |
2459 FixedSpace::PrepareForMarkCompact(will_compact); | 2626 FixedSpace::PrepareForMarkCompact(will_compact); |
2460 | 2627 |
(...skipping 158 matching lines...)
2619 if (chunk == NULL) { | 2786 if (chunk == NULL) { |
2620 return Failure::RetryAfterGC(requested_size, identity()); | 2787 return Failure::RetryAfterGC(requested_size, identity()); |
2621 } | 2788 } |
2622 | 2789 |
2623 size_ += static_cast<int>(chunk_size); | 2790 size_ += static_cast<int>(chunk_size); |
2624 page_count_++; | 2791 page_count_++; |
2625 chunk->set_next(first_chunk_); | 2792 chunk->set_next(first_chunk_); |
2626 chunk->set_size(chunk_size); | 2793 chunk->set_size(chunk_size); |
2627 first_chunk_ = chunk; | 2794 first_chunk_ = chunk; |
2628 | 2795 |
2629 // Initialize page header. | 2796 // Set the object address and size in the page header and clear its |
| 2797 // remembered set. |
2630 Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize)); | 2798 Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize)); |
2631 Address object_address = page->ObjectAreaStart(); | 2799 Address object_address = page->ObjectAreaStart(); |
2632 // Clear the low order bit of the second word in the page to flag it as a | 2800 // Clear the low order bit of the second word in the page to flag it as a |
2633 // large object page. If the chunk_size happened to be written there, its | 2801 // large object page. If the chunk_size happened to be written there, its |
2634 // low order bit should already be clear. | 2802 // low order bit should already be clear. |
2635 ASSERT((chunk_size & 0x1) == 0); | 2803 ASSERT((chunk_size & 0x1) == 0); |
2636 page->SetIsLargeObjectPage(true); | 2804 page->SetIsLargeObjectPage(true); |
2637 page->SetRegionMarks(Page::kAllRegionsCleanMarks); | 2805 page->ClearRSet(); |
| 2806 int extra_bytes = requested_size - object_size; |
| 2807 if (extra_bytes > 0) { |
| 2808 // The extra memory for the remembered set should be cleared. |
| 2809 memset(object_address + object_size, 0, extra_bytes); |
| 2810 } |
| 2811 |
2638 return HeapObject::FromAddress(object_address); | 2812 return HeapObject::FromAddress(object_address); |
2639 } | 2813 } |
2640 | 2814 |
2641 | 2815 |
2642 Object* LargeObjectSpace::AllocateRawCode(int size_in_bytes) { | 2816 Object* LargeObjectSpace::AllocateRawCode(int size_in_bytes) { |
2643 ASSERT(0 < size_in_bytes); | 2817 ASSERT(0 < size_in_bytes); |
2644 return AllocateRawInternal(size_in_bytes, | 2818 return AllocateRawInternal(size_in_bytes, |
2645 size_in_bytes, | 2819 size_in_bytes, |
2646 EXECUTABLE); | 2820 EXECUTABLE); |
2647 } | 2821 } |
2648 | 2822 |
2649 | 2823 |
2650 Object* LargeObjectSpace::AllocateRawFixedArray(int size_in_bytes) { | 2824 Object* LargeObjectSpace::AllocateRawFixedArray(int size_in_bytes) { |
2651 ASSERT(0 < size_in_bytes); | 2825 ASSERT(0 < size_in_bytes); |
2652 return AllocateRawInternal(size_in_bytes, | 2826 int extra_rset_bytes = ExtraRSetBytesFor(size_in_bytes); |
| 2827 return AllocateRawInternal(size_in_bytes + extra_rset_bytes, |
2653 size_in_bytes, | 2828 size_in_bytes, |
2654 NOT_EXECUTABLE); | 2829 NOT_EXECUTABLE); |
2655 } | 2830 } |
2656 | 2831 |
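AllocateRawFixedArray above requests size_in_bytes plus ExtraRSetBytesFor(size_in_bytes): a large fixed array carries its own remembered-set tail, one bit per pointer-sized slot rounded up to whole 32-bit words. A sketch of the arithmetic that sizing implies; the real helper is defined in the header and may subtract the portion already covered by the page's normal remembered set:

// Illustrative constants for a 32-bit build.
const int kIllustrativePointerSize = 4;
const int kIllustrativeBitsPerInt = 32;
const int kIllustrativeBitsPerByte = 8;

// One remembered-set bit per pointer slot, rounded up to a whole word.
inline int ExtraRSetBytesSketch(int array_bytes) {
  int slots = array_bytes / kIllustrativePointerSize;
  int words = (slots + kIllustrativeBitsPerInt - 1) / kIllustrativeBitsPerInt;
  return words * kIllustrativeBitsPerInt / kIllustrativeBitsPerByte;
}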
2657 | 2832 |
2658 Object* LargeObjectSpace::AllocateRaw(int size_in_bytes) { | 2833 Object* LargeObjectSpace::AllocateRaw(int size_in_bytes) { |
2659 ASSERT(0 < size_in_bytes); | 2834 ASSERT(0 < size_in_bytes); |
2660 return AllocateRawInternal(size_in_bytes, | 2835 return AllocateRawInternal(size_in_bytes, |
2661 size_in_bytes, | 2836 size_in_bytes, |
2662 NOT_EXECUTABLE); | 2837 NOT_EXECUTABLE); |
2663 } | 2838 } |
2664 | 2839 |
2665 | 2840 |
2666 // GC support | 2841 // GC support |
2667 Object* LargeObjectSpace::FindObject(Address a) { | 2842 Object* LargeObjectSpace::FindObject(Address a) { |
2668 for (LargeObjectChunk* chunk = first_chunk_; | 2843 for (LargeObjectChunk* chunk = first_chunk_; |
2669 chunk != NULL; | 2844 chunk != NULL; |
2670 chunk = chunk->next()) { | 2845 chunk = chunk->next()) { |
2671 Address chunk_address = chunk->address(); | 2846 Address chunk_address = chunk->address(); |
2672 if (chunk_address <= a && a < chunk_address + chunk->size()) { | 2847 if (chunk_address <= a && a < chunk_address + chunk->size()) { |
2673 return chunk->GetObject(); | 2848 return chunk->GetObject(); |
2674 } | 2849 } |
2675 } | 2850 } |
2676 return Failure::Exception(); | 2851 return Failure::Exception(); |
2677 } | 2852 } |
2678 | 2853 |
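FindObject above is a plain linear walk of the chunk list: the first chunk whose [address, address + size) range contains the query address wins, and a miss is reported as Failure::Exception(). A standalone sketch with a simplified chunk type standing in for LargeObjectChunk:

#include <cstdint>
#include <cstddef>

struct ChunkSketch {        // simplified stand-in for LargeObjectChunk
  uint8_t* address;
  size_t size;
  ChunkSketch* next;
};

ChunkSketch* FindContainingChunk(ChunkSketch* first, uint8_t* a) {
  for (ChunkSketch* chunk = first; chunk != nullptr; chunk = chunk->next) {
    if (chunk->address <= a && a < chunk->address + chunk->size) {
      return chunk;   // the real code returns chunk->GetObject()
    }
  }
  return nullptr;     // the real code returns Failure::Exception()
}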
2679 void LargeObjectSpace::IterateDirtyRegions(ObjectSlotCallback copy_object) { | 2854 |
| 2855 void LargeObjectSpace::ClearRSet() { |
| 2856 ASSERT(Page::is_rset_in_use()); |
| 2857 |
| 2858 LargeObjectIterator it(this); |
| 2859 for (HeapObject* object = it.next(); object != NULL; object = it.next()) { |
| 2860 // We only have code, sequential strings, or fixed arrays in large |
| 2861 // object space, and only fixed arrays need remembered set support. |
| 2862 if (object->IsFixedArray()) { |
| 2863       // Clear the normal remembered set region of the page. |
| 2864 Page* page = Page::FromAddress(object->address()); |
| 2865 page->ClearRSet(); |
| 2866 |
| 2867 // Clear the extra remembered set. |
| 2868 int size = object->Size(); |
| 2869 int extra_rset_bytes = ExtraRSetBytesFor(size); |
| 2870 memset(object->address() + size, 0, extra_rset_bytes); |
| 2871 } |
| 2872 } |
| 2873 } |
| 2874 |
| 2875 |
| 2876 void LargeObjectSpace::IterateRSet(ObjectSlotCallback copy_object_func) { |
| 2877 ASSERT(Page::is_rset_in_use()); |
| 2878 |
| 2879 static void* lo_rset_histogram = StatsTable::CreateHistogram( |
| 2880 "V8.RSetLO", |
| 2881 0, |
| 2882 // Keeping this histogram's buckets the same as the paged space histogram. |
| 2883 Page::kObjectAreaSize / kPointerSize, |
| 2884 30); |
| 2885 |
2680 LargeObjectIterator it(this); | 2886 LargeObjectIterator it(this); |
2681 for (HeapObject* object = it.next(); object != NULL; object = it.next()) { | 2887 for (HeapObject* object = it.next(); object != NULL; object = it.next()) { |
2682 // We only have code, sequential strings, or fixed arrays in large | 2888 // We only have code, sequential strings, or fixed arrays in large |
2683 // object space, and only fixed arrays can possibly contain pointers to | 2889 // object space, and only fixed arrays can possibly contain pointers to |
2684 // the young generation. | 2890 // the young generation. |
2685 if (object->IsFixedArray()) { | 2891 if (object->IsFixedArray()) { |
| 2892 // Iterate the normal page remembered set range. |
2686 Page* page = Page::FromAddress(object->address()); | 2893 Page* page = Page::FromAddress(object->address()); |
2687 uint32_t marks = page->GetRegionMarks(); | 2894 Address object_end = object->address() + object->Size(); |
2688 uint32_t newmarks = Page::kAllRegionsCleanMarks; | 2895 int count = Heap::IterateRSetRange(page->ObjectAreaStart(), |
| 2896 Min(page->ObjectAreaEnd(), object_end), |
| 2897 page->RSetStart(), |
| 2898 copy_object_func); |
2689 | 2899 |
2690 if (marks != Page::kAllRegionsCleanMarks) { | 2900 // Iterate the extra array elements. |
2691 // For a large page a single dirty mark corresponds to several | 2901 if (object_end > page->ObjectAreaEnd()) { |
2692 // regions (modulo 32). So we treat a large page as a sequence of | 2902 count += Heap::IterateRSetRange(page->ObjectAreaEnd(), object_end, |
2693 // normal pages of size Page::kPageSize having same dirty marks | 2903 object_end, copy_object_func); |
2694 // and subsequently iterate dirty regions on each of these pages. | 2904 } |
2695 Address start = object->address(); | 2905 if (lo_rset_histogram != NULL) { |
2696 Address end = page->ObjectAreaEnd(); | 2906 StatsTable::AddHistogramSample(lo_rset_histogram, count); |
2697 Address object_end = start + object->Size(); | |
2698 | |
2699 // Iterate regions of the first normal page covering object. | |
2700 uint32_t first_region_number = page->GetRegionNumberForAddress(start); | |
2701 newmarks |= | |
2702 Heap::IterateDirtyRegions(marks >> first_region_number, | |
2703 start, | |
2704 end, | |
2705 &Heap::IteratePointersInDirtyRegion, | |
2706 copy_object) << first_region_number; | |
2707 | |
2708 start = end; | |
2709 end = start + Page::kPageSize; | |
2710 while (end <= object_end) { | |
2711 // Iterate next 32 regions. | |
2712 newmarks |= | |
2713 Heap::IterateDirtyRegions(marks, | |
2714 start, | |
2715 end, | |
2716 &Heap::IteratePointersInDirtyRegion, | |
2717 copy_object); | |
2718 start = end; | |
2719 end = start + Page::kPageSize; | |
2720 } | |
2721 | |
2722 if (start != object_end) { | |
2723 // Iterate the last piece of an object which is less than | |
2724 // Page::kPageSize. | |
2725 newmarks |= | |
2726 Heap::IterateDirtyRegions(marks, | |
2727 start, | |
2728 object_end, | |
2729 &Heap::IteratePointersInDirtyRegion, | |
2730 copy_object); | |
2731 } | |
2732 | |
2733 page->SetRegionMarks(newmarks); | |
2734 } | 2907 } |
2735 } | 2908 } |
2736 } | 2909 } |
2737 } | 2910 } |
2738 | 2911 |
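The dirty-region variant in the left column treats a large page as a run of Page-sized windows that all reuse the same 32 region marks: the first window is bounded by the page's object-area end, later windows advance by Page::kPageSize, and a final short window covers the remainder. A sketch of that windowing loop, with a callback standing in for the Heap::IterateDirtyRegions calls and an illustrative page size:

#include <cstdint>
#include <algorithm>

const uintptr_t kIllustrativeWindowSize = 8 * 1024;  // stand-in for Page::kPageSize

// visit(start, end) stands in for one Heap::IterateDirtyRegions call.
void ForEachWindow(uint8_t* first_window_end,
                   uint8_t* object_start,
                   uint8_t* object_end,
                   void (*visit)(uint8_t*, uint8_t*)) {
  uint8_t* start = object_start;
  uint8_t* end = std::min(first_window_end, object_end);
  while (start < object_end) {
    visit(start, end);                                   // one window at a time
    start = end;
    end = std::min(start + kIllustrativeWindowSize, object_end);
  }
}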
2739 | 2912 |
2740 void LargeObjectSpace::FreeUnmarkedObjects() { | 2913 void LargeObjectSpace::FreeUnmarkedObjects() { |
2741 LargeObjectChunk* previous = NULL; | 2914 LargeObjectChunk* previous = NULL; |
2742 LargeObjectChunk* current = first_chunk_; | 2915 LargeObjectChunk* current = first_chunk_; |
2743 while (current != NULL) { | 2916 while (current != NULL) { |
(...skipping 71 matching lines...) |
2815 | 2988 |
2816 // Byte arrays and strings don't have interior pointers. | 2989 // Byte arrays and strings don't have interior pointers. |
2817 if (object->IsCode()) { | 2990 if (object->IsCode()) { |
2818 VerifyPointersVisitor code_visitor; | 2991 VerifyPointersVisitor code_visitor; |
2819 object->IterateBody(map->instance_type(), | 2992 object->IterateBody(map->instance_type(), |
2820 object->Size(), | 2993 object->Size(), |
2821 &code_visitor); | 2994 &code_visitor); |
2822 } else if (object->IsFixedArray()) { | 2995 } else if (object->IsFixedArray()) { |
2823     // We loop over fixed arrays ourselves, rather than using the visitor, | 2996     // We loop over fixed arrays ourselves, rather than using the visitor, |
2824 // because the visitor doesn't support the start/offset iteration | 2997 // because the visitor doesn't support the start/offset iteration |
2825 // needed for IsRegionDirty. | 2998 // needed for IsRSetSet. |
2826 FixedArray* array = FixedArray::cast(object); | 2999 FixedArray* array = FixedArray::cast(object); |
2827 for (int j = 0; j < array->length(); j++) { | 3000 for (int j = 0; j < array->length(); j++) { |
2828 Object* element = array->get(j); | 3001 Object* element = array->get(j); |
2829 if (element->IsHeapObject()) { | 3002 if (element->IsHeapObject()) { |
2830 HeapObject* element_object = HeapObject::cast(element); | 3003 HeapObject* element_object = HeapObject::cast(element); |
2831 ASSERT(Heap::Contains(element_object)); | 3004 ASSERT(Heap::Contains(element_object)); |
2832 ASSERT(element_object->map()->IsMap()); | 3005 ASSERT(element_object->map()->IsMap()); |
2833 if (Heap::InNewSpace(element_object)) { | 3006 if (Heap::InNewSpace(element_object)) { |
2834 Address array_addr = object->address(); | 3007 ASSERT(Page::IsRSetSet(object->address(), |
2835 Address element_addr = array_addr + FixedArray::kHeaderSize + | 3008 FixedArray::kHeaderSize + j * kPointerSize)); |
2836 j * kPointerSize; | |
2837 | |
2838 ASSERT(Page::FromAddress(array_addr)->IsRegionDirty(element_addr)); | |
2839 } | 3009 } |
2840 } | 3010 } |
2841 } | 3011 } |
2842 } | 3012 } |
2843 } | 3013 } |
2844 } | 3014 } |
2845 | 3015 |
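The verification loop above relies on one piece of address arithmetic: element j of a FixedArray lives at the array address plus FixedArray::kHeaderSize plus j * kPointerSize, and the remembered-set bit (or dirty-region mark, in the left column) covering that slot must be set whenever the element points into new space. A sketch of the slot computation with illustrative constants:

#include <cstdint>

const int kIllustrativeSlotSize = 4;         // pointer size on a 32-bit build
const int kIllustrativeArrayHeaderSize = 8;  // stand-in for FixedArray::kHeaderSize

// Address of element j; the verifier asserts the covering rset bit is set
// whenever the element stored here points into new space.
inline uint8_t* ElementSlot(uint8_t* array_address, int j) {
  return array_address + kIllustrativeArrayHeaderSize + j * kIllustrativeSlotSize;
}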
2846 | 3016 |
2847 void LargeObjectSpace::Print() { | 3017 void LargeObjectSpace::Print() { |
2848 LargeObjectIterator it(this); | 3018 LargeObjectIterator it(this); |
(...skipping 20 matching lines...) |
2869 | 3039 |
2870 void LargeObjectSpace::CollectCodeStatistics() { | 3040 void LargeObjectSpace::CollectCodeStatistics() { |
2871 LargeObjectIterator obj_it(this); | 3041 LargeObjectIterator obj_it(this); |
2872 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) { | 3042 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) { |
2873 if (obj->IsCode()) { | 3043 if (obj->IsCode()) { |
2874 Code* code = Code::cast(obj); | 3044 Code* code = Code::cast(obj); |
2875 code_kind_statistics[code->kind()] += code->Size(); | 3045 code_kind_statistics[code->kind()] += code->Size(); |
2876 } | 3046 } |
2877 } | 3047 } |
2878 } | 3048 } |
| 3049 |
| 3050 |
| 3051 void LargeObjectSpace::PrintRSet() { |
| 3052 LargeObjectIterator it(this); |
| 3053 for (HeapObject* object = it.next(); object != NULL; object = it.next()) { |
| 3054 if (object->IsFixedArray()) { |
| 3055 Page* page = Page::FromAddress(object->address()); |
| 3056 |
| 3057 Address allocation_top = object->address() + object->Size(); |
| 3058 PrintF("large page 0x%x:\n", page); |
| 3059 PrintRSetRange(page->RSetStart(), page->RSetEnd(), |
| 3060 reinterpret_cast<Object**>(object->address()), |
| 3061 allocation_top); |
| 3062 int extra_array_bytes = object->Size() - Page::kObjectAreaSize; |
| 3063 int extra_rset_bits = RoundUp(extra_array_bytes / kPointerSize, |
| 3064 kBitsPerInt); |
| 3065 PrintF("------------------------------------------------------------" |
| 3066 "-----------\n"); |
| 3067 PrintRSetRange(allocation_top, |
| 3068 allocation_top + extra_rset_bits / kBitsPerByte, |
| 3069 reinterpret_cast<Object**>(object->address() |
| 3070 + Page::kObjectAreaSize), |
| 3071 allocation_top); |
| 3072 PrintF("\n"); |
| 3073 } |
| 3074 } |
| 3075 } |
2879 #endif // DEBUG | 3076 #endif // DEBUG |
2880 | 3077 |
2881 } } // namespace v8::internal | 3078 } } // namespace v8::internal |