| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 114 matching lines...) |
| 125 | 125 |
| 126 CodeRange::CodeRange(Isolate* isolate) | 126 CodeRange::CodeRange(Isolate* isolate) |
| 127 : isolate_(isolate), | 127 : isolate_(isolate), |
| 128 code_range_(NULL), | 128 code_range_(NULL), |
| 129 free_list_(0), | 129 free_list_(0), |
| 130 allocation_list_(0), | 130 allocation_list_(0), |
| 131 current_allocation_block_index_(0) { | 131 current_allocation_block_index_(0) { |
| 132 } | 132 } |
| 133 | 133 |
| 134 | 134 |
| 135 bool CodeRange::Setup(const size_t requested) { | 135 bool CodeRange::SetUp(const size_t requested) { |
| 136 ASSERT(code_range_ == NULL); | 136 ASSERT(code_range_ == NULL); |
| 137 | 137 |
| 138 code_range_ = new VirtualMemory(requested); | 138 code_range_ = new VirtualMemory(requested); |
| 139 CHECK(code_range_ != NULL); | 139 CHECK(code_range_ != NULL); |
| 140 if (!code_range_->IsReserved()) { | 140 if (!code_range_->IsReserved()) { |
| 141 delete code_range_; | 141 delete code_range_; |
| 142 code_range_ = NULL; | 142 code_range_ = NULL; |
| 143 return false; | 143 return false; |
| 144 } | 144 } |
| 145 | 145 |
| (...skipping 115 matching lines...) |
| 261 | 261 |
| 262 MemoryAllocator::MemoryAllocator(Isolate* isolate) | 262 MemoryAllocator::MemoryAllocator(Isolate* isolate) |
| 263 : isolate_(isolate), | 263 : isolate_(isolate), |
| 264 capacity_(0), | 264 capacity_(0), |
| 265 capacity_executable_(0), | 265 capacity_executable_(0), |
| 266 size_(0), | 266 size_(0), |
| 267 size_executable_(0) { | 267 size_executable_(0) { |
| 268 } | 268 } |
| 269 | 269 |
| 270 | 270 |
| 271 bool MemoryAllocator::Setup(intptr_t capacity, intptr_t capacity_executable) { | 271 bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) { |
| 272 capacity_ = RoundUp(capacity, Page::kPageSize); | 272 capacity_ = RoundUp(capacity, Page::kPageSize); |
| 273 capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize); | 273 capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize); |
| 274 ASSERT_GE(capacity_, capacity_executable_); | 274 ASSERT_GE(capacity_, capacity_executable_); |
| 275 | 275 |
| 276 size_ = 0; | 276 size_ = 0; |
| 277 size_executable_ = 0; | 277 size_executable_ = 0; |
| 278 | 278 |
| 279 return true; | 279 return true; |
| 280 } | 280 } |
| 281 | 281 |
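Note: `MemoryAllocator::SetUp` rounds both capacities up to whole pages (lines 272-273) before asserting that the total capacity covers the executable capacity. A minimal sketch of that rounding, assuming a power-of-two page size (the concrete `Page::kPageSize` value is defined elsewhere and the mask-based formula below is one standard implementation, not necessarily V8's exact helper):

```cpp
#include <cassert>
#include <cstdint>

// Hypothetical stand-in for Page::kPageSize; the real constant lives in
// spaces.h and is a power of two.
constexpr intptr_t kPageSize = 1 << 13;  // 8 KB, for illustration only

// Bump x up to the next multiple of a power-of-two alignment.
constexpr intptr_t RoundUp(intptr_t x, intptr_t align) {
  return (x + align - 1) & ~(align - 1);
}

int main() {
  assert(RoundUp(1, kPageSize) == kPageSize);          // partial page -> one page
  assert(RoundUp(kPageSize, kPageSize) == kPageSize);  // exact multiple unchanged
  assert(RoundUp(kPageSize + 1, kPageSize) == 2 * kPageSize);
  return 0;
}
```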
| (...skipping 369 matching lines...) |
| 651 // ----------------------------------------------------------------------------- | 651 // ----------------------------------------------------------------------------- |
| 652 // PagedSpace implementation | 652 // PagedSpace implementation |
| 653 | 653 |
| 654 PagedSpace::PagedSpace(Heap* heap, | 654 PagedSpace::PagedSpace(Heap* heap, |
| 655 intptr_t max_capacity, | 655 intptr_t max_capacity, |
| 656 AllocationSpace id, | 656 AllocationSpace id, |
| 657 Executability executable) | 657 Executability executable) |
| 658 : Space(heap, id, executable), | 658 : Space(heap, id, executable), |
| 659 free_list_(this), | 659 free_list_(this), |
| 660 was_swept_conservatively_(false), | 660 was_swept_conservatively_(false), |
| 661 first_unswept_page_(Page::FromAddress(NULL)) { | 661 first_unswept_page_(Page::FromAddress(NULL)), |
| 662 unswept_free_bytes_(0) { |
| 662 max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize) | 663 max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize) |
| 663 * Page::kObjectAreaSize; | 664 * Page::kObjectAreaSize; |
| 664 accounting_stats_.Clear(); | 665 accounting_stats_.Clear(); |
| 665 | 666 |
| 666 allocation_info_.top = NULL; | 667 allocation_info_.top = NULL; |
| 667 allocation_info_.limit = NULL; | 668 allocation_info_.limit = NULL; |
| 668 | 669 |
| 669 anchor_.InitializeAsAnchor(this); | 670 anchor_.InitializeAsAnchor(this); |
| 670 } | 671 } |
| 671 | 672 |
| 672 | 673 |
| 673 bool PagedSpace::Setup() { | 674 bool PagedSpace::SetUp() { |
| 674 return true; | 675 return true; |
| 675 } | 676 } |
| 676 | 677 |
| 677 | 678 |
| 678 bool PagedSpace::HasBeenSetup() { | 679 bool PagedSpace::HasBeenSetUp() { |
| 679 return true; | 680 return true; |
| 680 } | 681 } |
| 681 | 682 |
| 682 | 683 |
| 683 void PagedSpace::TearDown() { | 684 void PagedSpace::TearDown() { |
| 684 PageIterator iterator(this); | 685 PageIterator iterator(this); |
| 685 while (iterator.has_next()) { | 686 while (iterator.has_next()) { |
| 686 heap()->isolate()->memory_allocator()->Free(iterator.next()); | 687 heap()->isolate()->memory_allocator()->Free(iterator.next()); |
| 687 } | 688 } |
| 688 anchor_.set_next_page(&anchor_); | 689 anchor_.set_next_page(&anchor_); |
| (...skipping 177 matching lines...) |
| 866 } | 867 } |
| 867 ASSERT(allocation_pointer_found_in_space); | 868 ASSERT(allocation_pointer_found_in_space); |
| 868 } | 869 } |
| 869 #endif | 870 #endif |
| 870 | 871 |
| 871 | 872 |
| 872 // ----------------------------------------------------------------------------- | 873 // ----------------------------------------------------------------------------- |
| 873 // NewSpace implementation | 874 // NewSpace implementation |
| 874 | 875 |
| 875 | 876 |
| 876 bool NewSpace::Setup(int reserved_semispace_capacity, | 877 bool NewSpace::SetUp(int reserved_semispace_capacity, |
| 877 int maximum_semispace_capacity) { | 878 int maximum_semispace_capacity) { |
| 878 // Setup new space based on the preallocated memory block defined by | 879 // Set up new space based on the preallocated memory block defined by |
| 879 // start and size. The provided space is divided into two semi-spaces. | 880 // start and size. The provided space is divided into two semi-spaces. |
| 880 // To support fast containment testing in the new space, the size of | 881 // To support fast containment testing in the new space, the size of |
| 881 // this chunk must be a power of two and it must be aligned to its size. | 882 // this chunk must be a power of two and it must be aligned to its size. |
| 882 int initial_semispace_capacity = heap()->InitialSemiSpaceSize(); | 883 int initial_semispace_capacity = heap()->InitialSemiSpaceSize(); |
| 883 | 884 |
| 884 size_t size = 2 * reserved_semispace_capacity; | 885 size_t size = 2 * reserved_semispace_capacity; |
| 885 Address base = | 886 Address base = |
| 886 heap()->isolate()->memory_allocator()->ReserveAlignedMemory( | 887 heap()->isolate()->memory_allocator()->ReserveAlignedMemory( |
| 887 size, size, &reservation_); | 888 size, size, &reservation_); |
| 888 if (base == NULL) return false; | 889 if (base == NULL) return false; |
| 889 | 890 |
| 890 chunk_base_ = base; | 891 chunk_base_ = base; |
| 891 chunk_size_ = static_cast<uintptr_t>(size); | 892 chunk_size_ = static_cast<uintptr_t>(size); |
| 892 LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_)); | 893 LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_)); |
| 893 | 894 |
| 894 ASSERT(initial_semispace_capacity <= maximum_semispace_capacity); | 895 ASSERT(initial_semispace_capacity <= maximum_semispace_capacity); |
| 895 ASSERT(IsPowerOf2(maximum_semispace_capacity)); | 896 ASSERT(IsPowerOf2(maximum_semispace_capacity)); |
| 896 | 897 |
| 897 // Allocate and setup the histogram arrays if necessary. | 898 // Allocate and set up the histogram arrays if necessary. |
| 898 allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1); | 899 allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1); |
| 899 promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1); | 900 promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1); |
| 900 | 901 |
| 901 #define SET_NAME(name) allocated_histogram_[name].set_name(#name); \ | 902 #define SET_NAME(name) allocated_histogram_[name].set_name(#name); \ |
| 902 promoted_histogram_[name].set_name(#name); | 903 promoted_histogram_[name].set_name(#name); |
| 903 INSTANCE_TYPE_LIST(SET_NAME) | 904 INSTANCE_TYPE_LIST(SET_NAME) |
| 904 #undef SET_NAME | 905 #undef SET_NAME |
| 905 | 906 |
| 906 ASSERT(reserved_semispace_capacity == heap()->ReservedSemiSpaceSize()); | 907 ASSERT(reserved_semispace_capacity == heap()->ReservedSemiSpaceSize()); |
| 907 ASSERT(static_cast<intptr_t>(chunk_size_) >= | 908 ASSERT(static_cast<intptr_t>(chunk_size_) >= |
| 908 2 * heap()->ReservedSemiSpaceSize()); | 909 2 * heap()->ReservedSemiSpaceSize()); |
| 909 ASSERT(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0)); | 910 ASSERT(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0)); |
| 910 | 911 |
| 911 if (!to_space_.Setup(chunk_base_, | 912 if (!to_space_.SetUp(chunk_base_, |
| 912 initial_semispace_capacity, | 913 initial_semispace_capacity, |
| 913 maximum_semispace_capacity)) { | 914 maximum_semispace_capacity)) { |
| 914 return false; | 915 return false; |
| 915 } | 916 } |
| 916 if (!from_space_.Setup(chunk_base_ + reserved_semispace_capacity, | 917 if (!from_space_.SetUp(chunk_base_ + reserved_semispace_capacity, |
| 917 initial_semispace_capacity, | 918 initial_semispace_capacity, |
| 918 maximum_semispace_capacity)) { | 919 maximum_semispace_capacity)) { |
| 919 return false; | 920 return false; |
| 920 } | 921 } |
| 921 | 922 |
| 922 start_ = chunk_base_; | 923 start_ = chunk_base_; |
| 923 address_mask_ = ~(2 * reserved_semispace_capacity - 1); | 924 address_mask_ = ~(2 * reserved_semispace_capacity - 1); |
| 924 object_mask_ = address_mask_ | kHeapObjectTagMask; | 925 object_mask_ = address_mask_ | kHeapObjectTagMask; |
| 925 object_expected_ = reinterpret_cast<uintptr_t>(start_) | kHeapObjectTag; | 926 object_expected_ = reinterpret_cast<uintptr_t>(start_) | kHeapObjectTag; |
| 926 | 927 |
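Note: the power-of-two and alignment requirement stated in the comment near line 881 is what makes `address_mask_` (line 924) work: because the chunk's size is a power of two and its base is aligned to that size, a single mask-and-compare decides whether an address lies in the new space, with no range comparison. A sketch of the resulting containment test, with both constants invented for illustration (the real values come from the reservation):

```cpp
#include <cassert>
#include <cstdint>

int main() {
  // Assume a 4 MB chunk (2 * 2 MB reserved semispaces), base aligned to 4 MB.
  const uintptr_t chunk_size = 4u << 20;
  const uintptr_t start = 0x10000000;  // aligned: start % chunk_size == 0
  const uintptr_t address_mask = ~(chunk_size - 1);

  // Fast containment test: an address is inside the chunk iff masking off
  // the low bits yields the chunk base.
  auto contains = [&](uintptr_t addr) { return (addr & address_mask) == start; };

  assert(contains(start));                   // first byte
  assert(contains(start + chunk_size - 1));  // last byte
  assert(!contains(start + chunk_size));     // one past the end
  assert(!contains(start - 1));              // one before the start
  return 0;
}
```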
| (...skipping 214 matching lines...) |
| 1141 ASSERT_EQ(from_space_.id(), kFromSpace); | 1142 ASSERT_EQ(from_space_.id(), kFromSpace); |
| 1142 ASSERT_EQ(to_space_.id(), kToSpace); | 1143 ASSERT_EQ(to_space_.id(), kToSpace); |
| 1143 from_space_.Verify(); | 1144 from_space_.Verify(); |
| 1144 to_space_.Verify(); | 1145 to_space_.Verify(); |
| 1145 } | 1146 } |
| 1146 #endif | 1147 #endif |
| 1147 | 1148 |
| 1148 // ----------------------------------------------------------------------------- | 1149 // ----------------------------------------------------------------------------- |
| 1149 // SemiSpace implementation | 1150 // SemiSpace implementation |
| 1150 | 1151 |
| 1151 bool SemiSpace::Setup(Address start, | 1152 bool SemiSpace::SetUp(Address start, |
| 1152 int initial_capacity, | 1153 int initial_capacity, |
| 1153 int maximum_capacity) { | 1154 int maximum_capacity) { |
| 1154 // Creates a space in the young generation. The constructor does not | 1155 // Creates a space in the young generation. The constructor does not |
| 1155 // allocate memory from the OS. A SemiSpace is given a contiguous chunk of | 1156 // allocate memory from the OS. A SemiSpace is given a contiguous chunk of |
| 1156 // memory of size 'capacity' when set up, and does not grow or shrink | 1157 // memory of size 'capacity' when set up, and does not grow or shrink |
| 1157 // otherwise. In the mark-compact collector, the memory region of the from | 1158 // otherwise. In the mark-compact collector, the memory region of the from |
| 1158 // space is used as the marking stack. It requires contiguous memory | 1159 // space is used as the marking stack. It requires contiguous memory |
| 1159 // addresses. | 1160 // addresses. |
| 1160 ASSERT(maximum_capacity >= Page::kPageSize); | 1161 ASSERT(maximum_capacity >= Page::kPageSize); |
| 1161 initial_capacity_ = RoundDown(initial_capacity, Page::kPageSize); | 1162 initial_capacity_ = RoundDown(initial_capacity, Page::kPageSize); |
| (...skipping 893 matching lines...) |
| 2055 Bitmap::Clear(p); | 2056 Bitmap::Clear(p); |
| 2056 if (FLAG_gc_verbose) { | 2057 if (FLAG_gc_verbose) { |
| 2057 PrintF("Sweeping 0x%" V8PRIxPTR " lazily abandoned.\n", | 2058 PrintF("Sweeping 0x%" V8PRIxPTR " lazily abandoned.\n", |
| 2058 reinterpret_cast<intptr_t>(p)); | 2059 reinterpret_cast<intptr_t>(p)); |
| 2059 } | 2060 } |
| 2060 } | 2061 } |
| 2061 p = p->next_page(); | 2062 p = p->next_page(); |
| 2062 } while (p != anchor()); | 2063 } while (p != anchor()); |
| 2063 } | 2064 } |
| 2064 first_unswept_page_ = Page::FromAddress(NULL); | 2065 first_unswept_page_ = Page::FromAddress(NULL); |
| 2066 unswept_free_bytes_ = 0; |
| 2065 | 2067 |
| 2066 // Clear the free list before a full GC---it will be rebuilt afterward. | 2068 // Clear the free list before a full GC---it will be rebuilt afterward. |
| 2067 free_list_.Reset(); | 2069 free_list_.Reset(); |
| 2068 } | 2070 } |
| 2069 | 2071 |
| 2070 | 2072 |
| 2071 bool PagedSpace::ReserveSpace(int size_in_bytes) { | 2073 bool PagedSpace::ReserveSpace(int size_in_bytes) { |
| 2072 ASSERT(size_in_bytes <= Page::kMaxHeapObjectSize); | 2074 ASSERT(size_in_bytes <= Page::kMaxHeapObjectSize); |
| 2073 ASSERT(size_in_bytes == RoundSizeDownToObjectAlignment(size_in_bytes)); | 2075 ASSERT(size_in_bytes == RoundSizeDownToObjectAlignment(size_in_bytes)); |
| 2074 Address current_top = allocation_info_.top; | 2076 Address current_top = allocation_info_.top; |
| (...skipping 28 matching lines...) |
| 2103 | 2105 |
| 2104 intptr_t freed_bytes = 0; | 2106 intptr_t freed_bytes = 0; |
| 2105 Page* p = first_unswept_page_; | 2107 Page* p = first_unswept_page_; |
| 2106 do { | 2108 do { |
| 2107 Page* next_page = p->next_page(); | 2109 Page* next_page = p->next_page(); |
| 2108 if (ShouldBeSweptLazily(p)) { | 2110 if (ShouldBeSweptLazily(p)) { |
| 2109 if (FLAG_gc_verbose) { | 2111 if (FLAG_gc_verbose) { |
| 2110 PrintF("Sweeping 0x%" V8PRIxPTR " lazily advanced.\n", | 2112 PrintF("Sweeping 0x%" V8PRIxPTR " lazily advanced.\n", |
| 2111 reinterpret_cast<intptr_t>(p)); | 2113 reinterpret_cast<intptr_t>(p)); |
| 2112 } | 2114 } |
| 2115 unswept_free_bytes_ -= (Page::kObjectAreaSize - p->LiveBytes()); |
| 2113 freed_bytes += MarkCompactCollector::SweepConservatively(this, p); | 2116 freed_bytes += MarkCompactCollector::SweepConservatively(this, p); |
| 2114 } | 2117 } |
| 2115 p = next_page; | 2118 p = next_page; |
| 2116 } while (p != anchor() && freed_bytes < bytes_to_sweep); | 2119 } while (p != anchor() && freed_bytes < bytes_to_sweep); |
| 2117 | 2120 |
| 2118 if (p == anchor()) { | 2121 if (p == anchor()) { |
| 2119 first_unswept_page_ = Page::FromAddress(NULL); | 2122 first_unswept_page_ = Page::FromAddress(NULL); |
| 2120 } else { | 2123 } else { |
| 2121 first_unswept_page_ = p; | 2124 first_unswept_page_ = p; |
| 2122 } | 2125 } |
| (...skipping 278 matching lines...) |
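Note: the new `unswept_free_bytes_` counter (initialized in the `PagedSpace` constructor at line 662, reset at line 2066, decremented at line 2115) tracks how much reclaimable memory still sits in pages awaiting lazy sweeping. Each unswept page contributes its dead bytes, `Page::kObjectAreaSize - p->LiveBytes()`, and that amount is subtracted just before the page is actually swept; the increment side is presumably in the mark-compact collector, outside this hunk. A minimal model of the invariant, with simplified types standing in for the real `Page`/`PagedSpace` classes and a made-up area size:

```cpp
#include <cassert>
#include <vector>

constexpr int kObjectAreaSize = 8 * 1024;  // hypothetical, not V8's value

struct Page {
  int live_bytes;  // bytes marked live by the collector
};

struct PagedSpace {
  std::vector<Page*> unswept_pages;
  int unswept_free_bytes = 0;

  // When a page is queued for lazy sweeping, its dead bytes are added...
  void QueueUnswept(Page* p) {
    unswept_pages.push_back(p);
    unswept_free_bytes += kObjectAreaSize - p->live_bytes;
  }

  // ...and removed again right before the page is actually swept, mirroring
  // the subtraction at line 2115 above.
  void SweepOne() {
    Page* p = unswept_pages.back();
    unswept_pages.pop_back();
    unswept_free_bytes -= kObjectAreaSize - p->live_bytes;
    // ... conservative sweeping of p would happen here ...
  }
};

int main() {
  PagedSpace space;
  Page a{1024}, b{4096};
  space.QueueUnswept(&a);
  space.QueueUnswept(&b);
  assert(space.unswept_free_bytes == (8192 - 1024) + (8192 - 4096));
  space.SweepOne();
  space.SweepOne();
  assert(space.unswept_free_bytes == 0);  // invariant: counter drains to zero
  return 0;
}
```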
| 2401 intptr_t max_capacity, | 2404 intptr_t max_capacity, |
| 2402 AllocationSpace id) | 2405 AllocationSpace id) |
| 2403 : Space(heap, id, NOT_EXECUTABLE), // Managed on a per-allocation basis | 2406 : Space(heap, id, NOT_EXECUTABLE), // Managed on a per-allocation basis |
| 2404 max_capacity_(max_capacity), | 2407 max_capacity_(max_capacity), |
| 2405 first_page_(NULL), | 2408 first_page_(NULL), |
| 2406 size_(0), | 2409 size_(0), |
| 2407 page_count_(0), | 2410 page_count_(0), |
| 2408 objects_size_(0) {} | 2411 objects_size_(0) {} |
| 2409 | 2412 |
| 2410 | 2413 |
| 2411 bool LargeObjectSpace::Setup() { | 2414 bool LargeObjectSpace::SetUp() { |
| 2412 first_page_ = NULL; | 2415 first_page_ = NULL; |
| 2413 size_ = 0; | 2416 size_ = 0; |
| 2414 page_count_ = 0; | 2417 page_count_ = 0; |
| 2415 objects_size_ = 0; | 2418 objects_size_ = 0; |
| 2416 return true; | 2419 return true; |
| 2417 } | 2420 } |
| 2418 | 2421 |
| 2419 | 2422 |
| 2420 void LargeObjectSpace::TearDown() { | 2423 void LargeObjectSpace::TearDown() { |
| 2421 while (first_page_ != NULL) { | 2424 while (first_page_ != NULL) { |
| 2422 LargePage* page = first_page_; | 2425 LargePage* page = first_page_; |
| 2423 first_page_ = first_page_->next_page(); | 2426 first_page_ = first_page_->next_page(); |
| 2424 LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address())); | 2427 LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address())); |
| 2425 | 2428 |
| 2426 ObjectSpace space = static_cast<ObjectSpace>(1 << identity()); | 2429 ObjectSpace space = static_cast<ObjectSpace>(1 << identity()); |
| 2427 heap()->isolate()->memory_allocator()->PerformAllocationCallback( | 2430 heap()->isolate()->memory_allocator()->PerformAllocationCallback( |
| 2428 space, kAllocationActionFree, page->size()); | 2431 space, kAllocationActionFree, page->size()); |
| 2429 heap()->isolate()->memory_allocator()->Free(page); | 2432 heap()->isolate()->memory_allocator()->Free(page); |
| 2430 } | 2433 } |
| 2431 Setup(); | 2434 SetUp(); |
| 2432 } | 2435 } |
| 2433 | 2436 |
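Note: `LargeObjectSpace::TearDown` finishes by calling `SetUp()` (line 2434 above), reusing the initializer to re-establish the empty state (`first_page_ == NULL`, zeroed counters) rather than duplicating those assignments. A tiny illustration of the idiom, with a hypothetical miniature type:

```cpp
#include <cassert>

// Hypothetical miniature of the reset idiom: TearDown releases resources,
// then delegates to SetUp() so the empty-state invariants live in one place.
struct Space {
  int page_count = -1;  // deliberately dirty before SetUp()

  bool SetUp() {
    page_count = 0;  // single source of truth for "empty space"
    return true;
  }

  void TearDown() {
    // ... free all pages here ...
    SetUp();  // re-establish the empty state
  }
};

int main() {
  Space s;
  s.SetUp();
  s.page_count = 3;  // pretend three large pages were allocated
  s.TearDown();
  assert(s.page_count == 0);
  return 0;
}
```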
| 2434 | 2437 |
| 2435 MaybeObject* LargeObjectSpace::AllocateRaw(int object_size, | 2438 MaybeObject* LargeObjectSpace::AllocateRaw(int object_size, |
| 2436 Executability executable) { | 2439 Executability executable) { |
| 2437 // Check if we want to force a GC before growing the old space further. | 2440 // Check if we want to force a GC before growing the old space further. |
| 2438 // If so, fail the allocation. | 2441 // If so, fail the allocation. |
| 2439 if (!heap()->always_allocate() && | 2442 if (!heap()->always_allocate() && |
| 2440 heap()->OldGenerationAllocationLimitReached()) { | 2443 heap()->OldGenerationAllocationLimitReached()) { |
| 2441 return Failure::RetryAfterGC(identity()); | 2444 return Failure::RetryAfterGC(identity()); |
| (...skipping 216 matching lines...) |
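Note: `AllocateRaw` (line 2441 onward) deliberately fails with `Failure::RetryAfterGC` once the old-generation allocation limit is reached, so the caller triggers a collection and retries instead of the space growing without bound. A hedged sketch of that caller-side protocol, with the real `MaybeObject`/`Failure` machinery reduced to an enum for illustration:

```cpp
#include <cstdio>

// Simplified model of the retry-after-GC allocation protocol.
enum class AllocResult { kSuccess, kRetryAfterGC };

AllocResult AllocateRaw(bool limit_reached) {
  if (limit_reached) return AllocResult::kRetryAfterGC;  // force a GC first
  return AllocResult::kSuccess;
}

int main() {
  bool limit_reached = true;
  for (int attempt = 0; attempt < 2; ++attempt) {
    if (AllocateRaw(limit_reached) == AllocResult::kSuccess) {
      std::puts("allocated");
      return 0;
    }
    // The caller reacts to the retry token by collecting garbage, then retrying.
    std::puts("collecting garbage, then retrying");
    limit_reached = false;  // pretend the GC freed enough memory
  }
  return 1;
}
```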
| 2658 object->ShortPrint(); | 2661 object->ShortPrint(); |
| 2659 PrintF("\n"); | 2662 PrintF("\n"); |
| 2660 } | 2663 } |
| 2661 printf(" --------------------------------------\n"); | 2664 printf(" --------------------------------------\n"); |
| 2662 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); | 2665 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); |
| 2663 } | 2666 } |
| 2664 | 2667 |
| 2665 #endif // DEBUG | 2668 #endif // DEBUG |
| 2666 | 2669 |
| 2667 } } // namespace v8::internal | 2670 } } // namespace v8::internal |