OLD | NEW |
1 // Copyright 2006-2008 the V8 project authors. All rights reserved. | 1 // Copyright 2006-2008 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 855 matching lines...)
866 | 866 |
867 | 867 |
868 void PagedSpace::MarkAllPagesClean() { | 868 void PagedSpace::MarkAllPagesClean() { |
869 PageIterator it(this, PageIterator::ALL_PAGES); | 869 PageIterator it(this, PageIterator::ALL_PAGES); |
870 while (it.has_next()) { | 870 while (it.has_next()) { |
871 it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks); | 871 it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks); |
872 } | 872 } |
873 } | 873 } |
874 | 874 |
875 | 875 |
876 Object* PagedSpace::FindObject(Address addr) { | 876 MaybeObject* PagedSpace::FindObject(Address addr) { |
877 // Note: this function can only be called before or after mark-compact GC | 877 // Note: this function can only be called before or after mark-compact GC |
878 // because it accesses map pointers. | 878 // because it accesses map pointers. |
879 ASSERT(!MarkCompactCollector::in_use()); | 879 ASSERT(!MarkCompactCollector::in_use()); |
880 | 880 |
881 if (!Contains(addr)) return Failure::Exception(); | 881 if (!Contains(addr)) return Failure::Exception(); |
882 | 882 |
883 Page* p = Page::FromAddress(addr); | 883 Page* p = Page::FromAddress(addr); |
884 ASSERT(IsUsed(p)); | 884 ASSERT(IsUsed(p)); |
885 Address cur = p->ObjectAreaStart(); | 885 Address cur = p->ObjectAreaStart(); |
886 Address end = p->AllocationTop(); | 886 Address end = p->AllocationTop(); |
(...skipping 910 matching lines...)
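FindObject's return type moves from Object* to MaybeObject* here, so the "address not in this space" case (Failure::Exception()) now travels through the failure-aware type instead of an Object* that callers had to probe with IsFailure(). A minimal caller-side sketch of the new contract, using only calls that appear in this CL (ToObject, HeapObject::cast); `space` and `addr` are hypothetical locals and the snippet assumes the surrounding V8 internal headers rather than compiling on its own:

MaybeObject* maybe_found = space->FindObject(addr);
Object* found;
if (!maybe_found->ToObject(&found)) {
  // Failure::Exception() signals that addr does not belong to this space.
  return maybe_found;
}
HeapObject* object = HeapObject::cast(found);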
1797 // Insert other blocks at the head of an exact free list. | 1797 // Insert other blocks at the head of an exact free list. |
1798 int index = size_in_bytes >> kPointerSizeLog2; | 1798 int index = size_in_bytes >> kPointerSizeLog2; |
1799 node->set_next(free_[index].head_node_); | 1799 node->set_next(free_[index].head_node_); |
1800 free_[index].head_node_ = node->address(); | 1800 free_[index].head_node_ = node->address(); |
1801 available_ += size_in_bytes; | 1801 available_ += size_in_bytes; |
1802 needs_rebuild_ = true; | 1802 needs_rebuild_ = true; |
1803 return 0; | 1803 return 0; |
1804 } | 1804 } |
1805 | 1805 |
1806 | 1806 |
1807 Object* OldSpaceFreeList::Allocate(int size_in_bytes, int* wasted_bytes) { | 1807 MaybeObject* OldSpaceFreeList::Allocate(int size_in_bytes, int* wasted_bytes) { |
1808 ASSERT(0 < size_in_bytes); | 1808 ASSERT(0 < size_in_bytes); |
1809 ASSERT(size_in_bytes <= kMaxBlockSize); | 1809 ASSERT(size_in_bytes <= kMaxBlockSize); |
1810 ASSERT(IsAligned(size_in_bytes, kPointerSize)); | 1810 ASSERT(IsAligned(size_in_bytes, kPointerSize)); |
1811 | 1811 |
1812 if (needs_rebuild_) RebuildSizeList(); | 1812 if (needs_rebuild_) RebuildSizeList(); |
1813 int index = size_in_bytes >> kPointerSizeLog2; | 1813 int index = size_in_bytes >> kPointerSizeLog2; |
1814 // Check for a perfect fit. | 1814 // Check for a perfect fit. |
1815 if (free_[index].head_node_ != NULL) { | 1815 if (free_[index].head_node_ != NULL) { |
1816 FreeListNode* node = FreeListNode::FromAddress(free_[index].head_node_); | 1816 FreeListNode* node = FreeListNode::FromAddress(free_[index].head_node_); |
1817 // If this was the last block of its size, remove the size. | 1817 // If this was the last block of its size, remove the size. |
(...skipping 99 matching lines...)
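Both the Free path above and the Allocate path here address the exact free lists by size in words: index = size_in_bytes >> kPointerSizeLog2, and free_[index].head_node_ heads the list whose blocks are exactly that many pointers long. A small illustrative computation, assuming an 8-byte-pointer target where kPointerSizeLog2 is 3 (on 4-byte-pointer targets it is 2); the concrete block size is made up:

int size_in_bytes = 64;                         // hypothetical block size
int index = size_in_bytes >> kPointerSizeLog2;  // 64 >> 3 == 8
// free_[8].head_node_ then heads the list of 64-byte (8-word) blocks,
// which is why a non-NULL head is a perfect fit for this request.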
1917 if (head_ == NULL) { | 1917 if (head_ == NULL) { |
1918 tail_ = head_ = node->address(); | 1918 tail_ = head_ = node->address(); |
1919 } else { | 1919 } else { |
1920 FreeListNode::FromAddress(tail_)->set_next(node->address()); | 1920 FreeListNode::FromAddress(tail_)->set_next(node->address()); |
1921 tail_ = node->address(); | 1921 tail_ = node->address(); |
1922 } | 1922 } |
1923 available_ += object_size_; | 1923 available_ += object_size_; |
1924 } | 1924 } |
1925 | 1925 |
1926 | 1926 |
1927 Object* FixedSizeFreeList::Allocate() { | 1927 MaybeObject* FixedSizeFreeList::Allocate() { |
1928 if (head_ == NULL) { | 1928 if (head_ == NULL) { |
1929 return Failure::RetryAfterGC(owner_); | 1929 return Failure::RetryAfterGC(owner_); |
1930 } | 1930 } |
1931 | 1931 |
1932 ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep. | 1932 ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep. |
1933 FreeListNode* node = FreeListNode::FromAddress(head_); | 1933 FreeListNode* node = FreeListNode::FromAddress(head_); |
1934 head_ = node->next(); | 1934 head_ = node->next(); |
1935 available_ -= object_size_; | 1935 available_ -= object_size_; |
1936 return node; | 1936 return node; |
1937 } | 1937 } |
(...skipping 242 matching lines...)
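FixedSizeFreeList::Allocate also returns MaybeObject* now; an empty list yields Failure::RetryAfterGC(owner_), which tells the caller to collect in the owning space and try again rather than treating the failure as an error. A hedged sketch of that retry shape, where `fixed_free_list` and CollectGarbageInOwnerSpace() are hypothetical stand-ins for the real retry machinery in the Heap allocation wrappers, not code from this file:

MaybeObject* maybe = fixed_free_list->Allocate();
Object* result;
if (!maybe->ToObject(&result)) {
  CollectGarbageInOwnerSpace();         // hypothetical GC trigger
  maybe = fixed_free_list->Allocate();  // retry once after collecting
}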
2180 // object area size). | 2180 // object area size). |
2181 Page* current_page = TopPageOf(allocation_info_); | 2181 Page* current_page = TopPageOf(allocation_info_); |
2182 if (current_page->next_page()->is_valid()) { | 2182 if (current_page->next_page()->is_valid()) { |
2183 return AllocateInNextPage(current_page, size_in_bytes); | 2183 return AllocateInNextPage(current_page, size_in_bytes); |
2184 } | 2184 } |
2185 | 2185 |
2186 // There is no next page in this space. Try free list allocation unless that | 2186 // There is no next page in this space. Try free list allocation unless that |
2187 // is currently forbidden. | 2187 // is currently forbidden. |
2188 if (!Heap::linear_allocation()) { | 2188 if (!Heap::linear_allocation()) { |
2189 int wasted_bytes; | 2189 int wasted_bytes; |
2190 Object* result = free_list_.Allocate(size_in_bytes, &wasted_bytes); | 2190 Object* result; |
| 2191 MaybeObject* maybe = free_list_.Allocate(size_in_bytes, &wasted_bytes); |
2191 accounting_stats_.WasteBytes(wasted_bytes); | 2192 accounting_stats_.WasteBytes(wasted_bytes); |
2192 if (!result->IsFailure()) { | 2193 if (maybe->ToObject(&result)) { |
2193 accounting_stats_.AllocateBytes(size_in_bytes); | 2194 accounting_stats_.AllocateBytes(size_in_bytes); |
2194 | 2195 |
2195 HeapObject* obj = HeapObject::cast(result); | 2196 HeapObject* obj = HeapObject::cast(result); |
2196 Page* p = Page::FromAddress(obj->address()); | 2197 Page* p = Page::FromAddress(obj->address()); |
2197 | 2198 |
2198 if (obj->address() >= p->AllocationWatermark()) { | 2199 if (obj->address() >= p->AllocationWatermark()) { |
2199 // There should be no hole between the allocation watermark | 2200 // There should be no hole between the allocation watermark |
2200 // and allocated object address. | 2201 // and allocated object address. |
2201 // Memory above the allocation watermark was not swept and | 2202 // Memory above the allocation watermark was not swept and |
2202 // might contain garbage pointers to new space. | 2203 // might contain garbage pointers to new space. |
(...skipping 285 matching lines...)
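The rewritten free-list path above is the core pattern of this CL: keep the raw MaybeObject* and a separate Object*, charge the wasted bytes unconditionally, then let ToObject() both test for failure and extract the object in one step. Below is a self-contained toy model of that ToObject() contract; ToyObject and ToyMaybe are hypothetical stand-ins for V8's Object and MaybeObject, and the NULL-means-failure encoding is only a modelling convenience:

#include <cassert>
#include <cstddef>

struct ToyObject {};

struct ToyMaybe {
  ToyObject* value;  // non-NULL only for successful results
  bool ToObject(ToyObject** out) {
    if (value == NULL) return false;  // models Failure::RetryAfterGC etc.
    *out = value;
    return true;
  }
};

int main() {
  ToyObject obj;
  ToyMaybe ok = { &obj };
  ToyMaybe failed = { NULL };
  ToyObject* out = NULL;
  assert(ok.ToObject(&out) && out == &obj);  // success: out is filled in
  assert(!failed.ToObject(&out));            // failure: caller bails out
  return 0;
}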
2488 // should succeed. | 2489 // should succeed. |
2489 Page* current_page = TopPageOf(allocation_info_); | 2490 Page* current_page = TopPageOf(allocation_info_); |
2490 if (current_page->next_page()->is_valid()) { | 2491 if (current_page->next_page()->is_valid()) { |
2491 return AllocateInNextPage(current_page, size_in_bytes); | 2492 return AllocateInNextPage(current_page, size_in_bytes); |
2492 } | 2493 } |
2493 | 2494 |
2494 // There is no next page in this space. Try free list allocation unless | 2495 // There is no next page in this space. Try free list allocation unless |
2495 // that is currently forbidden. The fixed space free list implicitly assumes | 2496 // that is currently forbidden. The fixed space free list implicitly assumes |
2496 // that all free blocks are of the fixed size. | 2497 // that all free blocks are of the fixed size. |
2497 if (!Heap::linear_allocation()) { | 2498 if (!Heap::linear_allocation()) { |
2498 Object* result = free_list_.Allocate(); | 2499 Object* result; |
2499 if (!result->IsFailure()) { | 2500 MaybeObject* maybe = free_list_.Allocate(); |
| 2501 if (maybe->ToObject(&result)) { |
2500 accounting_stats_.AllocateBytes(size_in_bytes); | 2502 accounting_stats_.AllocateBytes(size_in_bytes); |
2501 HeapObject* obj = HeapObject::cast(result); | 2503 HeapObject* obj = HeapObject::cast(result); |
2502 Page* p = Page::FromAddress(obj->address()); | 2504 Page* p = Page::FromAddress(obj->address()); |
2503 | 2505 |
2504 if (obj->address() >= p->AllocationWatermark()) { | 2506 if (obj->address() >= p->AllocationWatermark()) { |
2505 // There should be no hole between the allocation watermark | 2507 // There should be no hole between the allocation watermark |
2506 // and allocated object address. | 2508 // and allocated object address. |
2507 // Memory above the allocation watermark was not swept and | 2509 // Memory above the allocation watermark was not swept and |
2508 // might contain garbage pointers to new space. | 2510 // might contain garbage pointers to new space. |
2509 ASSERT(obj->address() == p->AllocationWatermark()); | 2511 ASSERT(obj->address() == p->AllocationWatermark()); |
(...skipping 228 matching lines...)
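FixedSpace::SlowAllocateRaw mirrors the OldSpace version, minus the wasted-bytes bookkeeping: every block on a fixed-size free list is exactly object_size_ bytes, so there is nothing to waste. The two allocation shapes after this CL, side by side (a sketch; `old_space_list` and `fixed_list` are hypothetical locals and the snippet assumes the internal headers):

int wasted_bytes;
MaybeObject* a = old_space_list.Allocate(size_in_bytes, &wasted_bytes);
MaybeObject* b = fixed_list.Allocate();  // no waste: all blocks are one size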
2738 bool is_code = chunk->GetObject()->IsCode(); | 2740 bool is_code = chunk->GetObject()->IsCode(); |
2739 MemoryAllocator::Unprotect(chunk->address(), chunk->size(), | 2741 MemoryAllocator::Unprotect(chunk->address(), chunk->size(), |
2740 is_code ? EXECUTABLE : NOT_EXECUTABLE); | 2742 is_code ? EXECUTABLE : NOT_EXECUTABLE); |
2741 chunk = chunk->next(); | 2743 chunk = chunk->next(); |
2742 } | 2744 } |
2743 } | 2745 } |
2744 | 2746 |
2745 #endif | 2747 #endif |
2746 | 2748 |
2747 | 2749 |
2748 Object* LargeObjectSpace::AllocateRawInternal(int requested_size, | 2750 MaybeObject* LargeObjectSpace::AllocateRawInternal(int requested_size, |
2749 int object_size, | 2751 int object_size, |
2750 Executability executable) { | 2752 Executability executable) { |
2751 ASSERT(0 < object_size && object_size <= requested_size); | 2753 ASSERT(0 < object_size && object_size <= requested_size); |
2752 | 2754 |
2753 // Check if we want to force a GC before growing the old space further. | 2755 // Check if we want to force a GC before growing the old space further. |
2754 // If so, fail the allocation. | 2756 // If so, fail the allocation. |
2755 if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) { | 2757 if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) { |
2756 return Failure::RetryAfterGC(identity()); | 2758 return Failure::RetryAfterGC(identity()); |
2757 } | 2759 } |
2758 | 2760 |
2759 size_t chunk_size; | 2761 size_t chunk_size; |
2760 LargeObjectChunk* chunk = | 2762 LargeObjectChunk* chunk = |
(...skipping 15 matching lines...)
2776 // large object page. If the chunk_size happened to be written there, its | 2778 // large object page. If the chunk_size happened to be written there, its |
2777 // low order bit should already be clear. | 2779 // low order bit should already be clear. |
2778 ASSERT((chunk_size & 0x1) == 0); | 2780 ASSERT((chunk_size & 0x1) == 0); |
2779 page->SetIsLargeObjectPage(true); | 2781 page->SetIsLargeObjectPage(true); |
2780 page->SetIsPageExecutable(executable); | 2782 page->SetIsPageExecutable(executable); |
2781 page->SetRegionMarks(Page::kAllRegionsCleanMarks); | 2783 page->SetRegionMarks(Page::kAllRegionsCleanMarks); |
2782 return HeapObject::FromAddress(object_address); | 2784 return HeapObject::FromAddress(object_address); |
2783 } | 2785 } |
2784 | 2786 |
2785 | 2787 |
2786 Object* LargeObjectSpace::AllocateRawCode(int size_in_bytes) { | 2788 MaybeObject* LargeObjectSpace::AllocateRawCode(int size_in_bytes) { |
2787 ASSERT(0 < size_in_bytes); | 2789 ASSERT(0 < size_in_bytes); |
2788 return AllocateRawInternal(size_in_bytes, | 2790 return AllocateRawInternal(size_in_bytes, |
2789 size_in_bytes, | 2791 size_in_bytes, |
2790 EXECUTABLE); | 2792 EXECUTABLE); |
2791 } | 2793 } |
2792 | 2794 |
2793 | 2795 |
2794 Object* LargeObjectSpace::AllocateRawFixedArray(int size_in_bytes) { | 2796 MaybeObject* LargeObjectSpace::AllocateRawFixedArray(int size_in_bytes) { |
2795 ASSERT(0 < size_in_bytes); | 2797 ASSERT(0 < size_in_bytes); |
2796 return AllocateRawInternal(size_in_bytes, | 2798 return AllocateRawInternal(size_in_bytes, |
2797 size_in_bytes, | 2799 size_in_bytes, |
2798 NOT_EXECUTABLE); | 2800 NOT_EXECUTABLE); |
2799 } | 2801 } |
2800 | 2802 |
2801 | 2803 |
2802 Object* LargeObjectSpace::AllocateRaw(int size_in_bytes) { | 2804 MaybeObject* LargeObjectSpace::AllocateRaw(int size_in_bytes) { |
2803 ASSERT(0 < size_in_bytes); | 2805 ASSERT(0 < size_in_bytes); |
2804 return AllocateRawInternal(size_in_bytes, | 2806 return AllocateRawInternal(size_in_bytes, |
2805 size_in_bytes, | 2807 size_in_bytes, |
2806 NOT_EXECUTABLE); | 2808 NOT_EXECUTABLE); |
2807 } | 2809 } |
2808 | 2810 |
2809 | 2811 |
2810 // GC support | 2812 // GC support |
2811 Object* LargeObjectSpace::FindObject(Address a) { | 2813 MaybeObject* LargeObjectSpace::FindObject(Address a) { |
2812 for (LargeObjectChunk* chunk = first_chunk_; | 2814 for (LargeObjectChunk* chunk = first_chunk_; |
2813 chunk != NULL; | 2815 chunk != NULL; |
2814 chunk = chunk->next()) { | 2816 chunk = chunk->next()) { |
2815 Address chunk_address = chunk->address(); | 2817 Address chunk_address = chunk->address(); |
2816 if (chunk_address <= a && a < chunk_address + chunk->size()) { | 2818 if (chunk_address <= a && a < chunk_address + chunk->size()) { |
2817 return chunk->GetObject(); | 2819 return chunk->GetObject(); |
2818 } | 2820 } |
2819 } | 2821 } |
2820 return Failure::Exception(); | 2822 return Failure::Exception(); |
2821 } | 2823 } |
(...skipping 218 matching lines...)
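All three public large-object entry points keep the same shape after the MaybeObject* change and differ only in executability, funnelling into AllocateRawInternal; FindObject likewise reports a miss as Failure::Exception(). A short usage sketch, with `lo_space` and `size` as hypothetical locals, assuming the headers touched by this CL:

MaybeObject* code_obj  = lo_space->AllocateRawCode(size);        // EXECUTABLE chunk
MaybeObject* array_obj = lo_space->AllocateRawFixedArray(size);  // NOT_EXECUTABLE
MaybeObject* other_obj = lo_space->AllocateRaw(size);            // NOT_EXECUTABLE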
3040 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) { | 3042 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) { |
3041 if (obj->IsCode()) { | 3043 if (obj->IsCode()) { |
3042 Code* code = Code::cast(obj); | 3044 Code* code = Code::cast(obj); |
3043 code_kind_statistics[code->kind()] += code->Size(); | 3045 code_kind_statistics[code->kind()] += code->Size(); |
3044 } | 3046 } |
3045 } | 3047 } |
3046 } | 3048 } |
3047 #endif // DEBUG | 3049 #endif // DEBUG |
3048 | 3050 |
3049 } } // namespace v8::internal | 3051 } } // namespace v8::internal |