OLD | NEW |
---|---|
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 1911 matching lines...) | |
1922 ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize); | 1922 ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize); |
1923 Memory::Address_at(address() + kNextOffset) = | 1923 Memory::Address_at(address() + kNextOffset) = |
1924 reinterpret_cast<Address>(next); | 1924 reinterpret_cast<Address>(next); |
1925 } else { | 1925 } else { |
1926 Memory::Address_at(address() + kPointerSize) = | 1926 Memory::Address_at(address() + kPointerSize) = |
1927 reinterpret_cast<Address>(next); | 1927 reinterpret_cast<Address>(next); |
1928 } | 1928 } |
1929 } | 1929 } |
1930 | 1930 |
1931 | 1931 |
1932 FreeList::FreeList(PagedSpace* owner) | 1932 void FreeListCategory::Reset() { |
1933 : owner_(owner), heap_(owner->heap()) { | 1933 top_ = NULL; |
1934 Reset(); | 1934 end_ = NULL; |
1935 available_ = 0; | |
1935 } | 1936 } |
1936 | 1937 |
1937 | 1938 |
1938 void FreeList::Reset() { | 1939 void FreeListCategory:: |
1939 available_ = 0; | 1940 ConcatenateListsConcurrentAdd(FreeListCategory* category) { |
Michael Starzinger (2012/11/29 14:31:20): Let's break the line after the opening parenthesis
Hannes Payer (out of office) (2012/11/29 15:55:02): Done.
| |
1940 small_list_ = NULL; | 1941 FreeListNode* current_top; |
1941 medium_list_ = NULL; | 1942 FreeListNode* category_top = category->top(); |
1942 large_list_ = NULL; | 1943 FreeListNode* category_end = category->end(); |
1943 huge_list_ = NULL; | 1944 |
1945 do { | |
1946 current_top = top_; | |
1947 if (current_top == NULL) { | |
1948 end_ = category_end; | |
1949 top_ = category_top; | |
1950 break; | |
1951 } | |
Michael Starzinger (2012/11/29 14:31:20): The adjustment category_end->next to current_top i
Hannes Payer (out of office) (2012/11/29 15:55:02): Done.
| |
1952 } while (current_top != top_ || | |
1953 NoBarrier_CompareAndSwap( | |
1954 reinterpret_cast<AtomicWord*>(&top_), | |
1955 reinterpret_cast<AtomicWord>(current_top), | |
1956 reinterpret_cast<AtomicWord>(category_top)) != | |
1957 reinterpret_cast<AtomicWord>(current_top)); | |
1958 NoBarrier_AtomicIncrement(reinterpret_cast<AtomicWord*>(&available_), | |
1959 category->available()); | |
1960 category->Reset(); | |
1944 } | 1961 } |
1945 | 1962 |
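The retry loop above splices one category's entire free list onto another without taking a lock, and the review note asks for category_end->next to be adjusted to the observed current_top before the swap. A minimal standalone sketch of that splice pattern, using std::atomic in place of V8's NoBarrier_CompareAndSwap and a placeholder Node type (it leaves out the end_ and available_ bookkeeping the V8 code also performs):

  #include <atomic>

  struct Node { Node* next = nullptr; };

  struct LockFreeList {
    std::atomic<Node*> top{nullptr};

    // Splice the privately owned chain [chain_top .. chain_end] onto the head.
    // chain_end->next is re-linked to the observed head on every attempt;
    // otherwise a successful CAS would cut the existing list off.
    void Splice(Node* chain_top, Node* chain_end) {
      Node* observed = top.load(std::memory_order_relaxed);
      do {
        chain_end->next = observed;
      } while (!top.compare_exchange_weak(observed, chain_top,
                                          std::memory_order_release,
                                          std::memory_order_relaxed));
    }
  };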
1946 | 1963 |
1947 int FreeList::Free(Address start, int size_in_bytes) { | 1964 void FreeListCategory:: |
1948 if (size_in_bytes == 0) return 0; | 1965 ConcatenateListsConcurrentRemove(FreeListCategory* category) { |
Michael Starzinger (2012/11/29 14:31:20): Likewise.
Hannes Payer (out of office) (2012/11/29 15:55:02): Done.
| |
1949 FreeListNode* node = FreeListNode::FromAddress(start); | 1966 if (category->top() == NULL) { |
1950 node->set_size(heap_, size_in_bytes); | 1967 return; |
1968 } | |
1951 | 1969 |
1952 // Early return to drop too-small blocks on the floor. | 1970 FreeListNode* category_top; |
1953 if (size_in_bytes < kSmallListMin) return size_in_bytes; | 1971 FreeListNode* category_end; |
1972 FreeListNode** category_top_address = category->GetTopAddress(); | |
1973 FreeListNode** category_end_address = category->GetEndAddress(); | |
1974 int* category_available_address = category->GetAvailableAddress(); | |
1975 int category_available; | |
1954 | 1976 |
1955 // Insert other blocks at the head of a free list of the appropriate | 1977 do { |
1956 // magnitude. | 1978 category_top = category->top(); |
Michael Starzinger (2012/11/29 14:31:20): This requires the memory model to ensure that we d
Hannes Payer (out of office) (2012/11/29 15:55:02): Done.
| |
1957 if (size_in_bytes <= kSmallListMax) { | 1979 category_end = category->end(); |
1958 node->set_next(small_list_); | 1980 category_available = category->available(); |
1959 small_list_ = node; | 1981 } while (category_top != category->top() || |
1960 } else if (size_in_bytes <= kMediumListMax) { | 1982 NoBarrier_CompareAndSwap( |
1961 node->set_next(medium_list_); | 1983 reinterpret_cast<AtomicWord*>(category_top_address), |
1962 medium_list_ = node; | 1984 reinterpret_cast<AtomicWord>(category_top), 0) != |
1963 } else if (size_in_bytes <= kLargeListMax) { | 1985 reinterpret_cast<AtomicWord>(category_top)); |
1964 node->set_next(large_list_); | 1986 NoBarrier_CompareAndSwap( |
1965 large_list_ = node; | 1987 reinterpret_cast<AtomicWord*>(category_end_address), |
1966 } else { | 1988 reinterpret_cast<AtomicWord>(category_end), 0); |
Michael Starzinger (2012/11/29 14:31:20): Since we don't need the category->end to be in a c
Hannes Payer (out of office) (2012/11/29 15:55:02): Done.
| |
1967 node->set_next(huge_list_); | 1989 NoBarrier_AtomicIncrement( |
1968 huge_list_ = node; | 1990 reinterpret_cast<AtomicWord*>(category_available_address), |
1969 } | 1991 -category_available); |
1970 available_ += size_in_bytes; | 1992 |
1971 ASSERT(IsVeryLong() || available_ == SumFreeLists()); | 1993 category_end->set_next(top_); |
1972 return 0; | 1994 top_ = category_top; |
1995 available_ += category_available; | |
1973 } | 1996 } |
1974 | 1997 |
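ConcatenateListsConcurrentRemove above snapshots the source category's top, end, and available and then swaps the source top to zero; the truncated review notes touch on the memory model and on whether category->end needs its own compare-and-swap. One simpler way to express the "take the whole list in one shot" step, shown purely as a sketch with std::atomic and a placeholder Node type rather than V8's primitives, is a single atomic exchange; after it the detached chain is private to the caller, so its end and total size can be recomputed locally without racing other threads:

  #include <atomic>

  struct Node { Node* next = nullptr; };

  struct LockFreeList {
    std::atomic<Node*> top{nullptr};

    // Detach the whole chain in one step; the caller now owns every node that
    // was reachable from the old head.
    Node* TakeAll() {
      return top.exchange(nullptr, std::memory_order_acq_rel);
    }
  };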
1975 | 1998 |
1976 FreeListNode* FreeList::PickNodeFromList(FreeListNode** list, int* node_size) { | 1999 intptr_t FreeListCategory::CountFreeListItemsInList(Page* p) { |
1977 FreeListNode* node = *list; | 2000 intptr_t sum = 0; |
2001 FreeListNode* n = top_; | |
2002 while (n != NULL) { | |
2003 if (Page::FromAddress(n->address()) == p) { | |
2004 FreeSpace* free_space = reinterpret_cast<FreeSpace*>(n); | |
2005 sum += free_space->Size(); | |
2006 } | |
2007 n = n->next(); | |
2008 } | |
2009 return sum; | |
2010 } | |
2011 | |
2012 | |
2013 intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) { | |
2014 intptr_t sum = 0; | |
2015 FreeListNode** n = &top_; | |
2016 while (*n != NULL) { | |
2017 if (Page::FromAddress((*n)->address()) == p) { | |
2018 FreeSpace* free_space = reinterpret_cast<FreeSpace*>(*n); | |
2019 sum += free_space->Size(); | |
2020 *n = (*n)->next(); | |
2021 } else { | |
2022 n = (*n)->next_address(); | |
2023 } | |
2024 } | |
Michael Starzinger (2012/11/29 14:31:20): We need to adjust the end pointer in case you evic
Hannes Payer (out of office) (2012/11/29 15:55:02): Done.
| |
2025 available_ -= sum; | |
2026 return sum; | |
2027 } | |
2028 | |
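EvictFreeListItemsInList above unlinks every node that lives on page p, and the review note asks for the end pointer to be adjusted during eviction as well. A generic sketch of that pattern, with a placeholder Node type and a predicate standing in for the page check (not the V8 classes):

  struct Node { Node* next = nullptr; };

  // Remove every node matching the predicate from a singly linked list that
  // also tracks its tail, keeping the tail pointed at the last surviving node
  // (or null when the list becomes empty) so it never dangles.
  template <typename Pred>
  void RemoveIf(Node** head, Node** tail, Pred should_remove) {
    Node** cursor = head;
    Node* last_kept = nullptr;
    while (*cursor != nullptr) {
      Node* node = *cursor;
      if (should_remove(node)) {
        *cursor = node->next;   // unlink; the cursor stays on the same slot
      } else {
        last_kept = node;       // this node survives and is the last kept so far
        cursor = &node->next;
      }
    }
    *tail = last_kept;
  }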
2029 | |
2030 FreeListNode* FreeListCategory::PickNodeFromList(int *node_size) { | |
2031 FreeListNode* node = top_; | |
1978 | 2032 |
1979 if (node == NULL) return NULL; | 2033 if (node == NULL) return NULL; |
1980 | 2034 |
1981 while (node != NULL && | 2035 while (node != NULL && |
1982 Page::FromAddress(node->address())->IsEvacuationCandidate()) { | 2036 Page::FromAddress(node->address())->IsEvacuationCandidate()) { |
1983 available_ -= node->Size(); | 2037 available_ -= node->Size(); |
1984 node = node->next(); | 2038 node = node->next(); |
1985 } | 2039 } |
1986 | 2040 |
1987 if (node != NULL) { | 2041 if (node != NULL) { |
2042 set_top(node->next()); | |
1988 *node_size = node->Size(); | 2043 *node_size = node->Size(); |
1989 *list = node->next(); | 2044 available_ -= *node_size; |
1990 } else { | 2045 } else { |
1991 *list = NULL; | 2046 set_top(NULL); |
2047 } | |
2048 | |
2049 if (top() == NULL) { | |
2050 set_end(NULL); | |
1992 } | 2051 } |
1993 | 2052 |
1994 return node; | 2053 return node; |
1995 } | 2054 } |
1996 | 2055 |
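PickNodeFromList above pops the first usable node while skipping blocks on evacuation candidates, keeps available_ in sync, and clears end_ once the list drains. The basic pop on its own, with the evacuation check omitted and placeholder types instead of the V8 classes:

  struct Node { int size = 0; Node* next = nullptr; };

  // Pop the first node, keep the byte counter in sync, and clear the tail when
  // the list becomes empty so that a later push can re-establish it.
  Node* PopFront(Node** head, Node** tail, int* available_bytes) {
    Node* node = *head;
    if (node == nullptr) return nullptr;
    *head = node->next;
    *available_bytes -= node->size;
    if (*head == nullptr) *tail = nullptr;
    return node;
  }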
1997 | 2056 |
2057 void FreeListCategory::Free(FreeListNode* node, int size_in_bytes) { | |
2058 node->set_next(top_); | |
2059 top_ = node; | |
2060 if (end_ == NULL) { | |
2061 end_ = node; | |
2062 } | |
2063 available_ += size_in_bytes; | |
2064 } | |
2065 | |
2066 | |
2067 void FreeListCategory::RepairFreeList(Heap* heap) { | |
2068 FreeListNode* n = top_; | |
2069 while (n != NULL) { | |
2070 Map** map_location = reinterpret_cast<Map**>(n->address()); | |
2071 if (*map_location == NULL) { | |
2072 *map_location = heap->free_space_map(); | |
2073 } else { | |
2074 ASSERT(*map_location == heap->free_space_map()); | |
2075 } | |
2076 n = n->next(); | |
2077 } | |
2078 } | |
2079 | |
2080 | |
2081 FreeList::FreeList(PagedSpace* owner) | |
2082 : owner_(owner), heap_(owner->heap()) { | |
2083 Reset(); | |
2084 } | |
2085 | |
2086 | |
2087 void FreeList::Reset() { | |
2088 small_list_.Reset(); | |
2089 medium_list_.Reset(); | |
2090 large_list_.Reset(); | |
2091 huge_list_.Reset(); | |
2092 } | |
2093 | |
2094 | |
2095 int FreeList::Free(Address start, int size_in_bytes) { | |
2096 if (size_in_bytes == 0) return 0; | |
2097 | |
2098 FreeListNode* node = FreeListNode::FromAddress(start); | |
2099 node->set_size(heap_, size_in_bytes); | |
2100 | |
2101 // Early return to drop too-small blocks on the floor. | |
2102 if (size_in_bytes < kSmallListMin) return size_in_bytes; | |
2103 | |
2104 // Insert other blocks at the head of a free list of the appropriate | |
2105 // magnitude. | |
2106 if (size_in_bytes <= kSmallListMax) { | |
2107 small_list_.Free(node, size_in_bytes); | |
2108 } else if (size_in_bytes <= kMediumListMax) { | |
2109 medium_list_.Free(node, size_in_bytes); | |
2110 } else if (size_in_bytes <= kLargeListMax) { | |
2111 large_list_.Free(node, size_in_bytes); | |
2112 } else { | |
2113 huge_list_.Free(node, size_in_bytes); | |
2114 } | |
2115 | |
2116 ASSERT(IsVeryLong() || available() == SumFreeLists()); | |
2117 return 0; | |
2118 } | |
2119 | |
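FreeList::Free above drops blocks smaller than kSmallListMin on the floor, returning their size to the caller, and otherwise pushes the node onto the first category whose limit covers it. The size-class dispatch in isolation, with made-up limits passed in as parameters rather than V8's kSmallListMax/kMediumListMax/kLargeListMax constants:

  enum SizeClass { kSmallClass, kMediumClass, kLargeClass, kHugeClass };

  // Map a block size to the free list category it would be pushed onto.
  SizeClass ClassFor(int size_in_bytes,
                     int small_max, int medium_max, int large_max) {
    if (size_in_bytes <= small_max) return kSmallClass;
    if (size_in_bytes <= medium_max) return kMediumClass;
    if (size_in_bytes <= large_max) return kLargeClass;
    return kHugeClass;
  }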
2120 | |
1998 FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) { | 2121 FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) { |
1999 FreeListNode* node = NULL; | 2122 FreeListNode* node = NULL; |
2000 | 2123 |
2001 if (size_in_bytes <= kSmallAllocationMax) { | 2124 if (size_in_bytes <= kSmallAllocationMax) { |
2002 node = PickNodeFromList(&small_list_, node_size); | 2125 node = small_list_.PickNodeFromList(node_size); |
2003 if (node != NULL) return node; | 2126 if (node != NULL) return node; |
2004 } | 2127 } |
2005 | 2128 |
2006 if (size_in_bytes <= kMediumAllocationMax) { | 2129 if (size_in_bytes <= kMediumAllocationMax) { |
2007 node = PickNodeFromList(&medium_list_, node_size); | 2130 node = medium_list_.PickNodeFromList(node_size); |
2008 if (node != NULL) return node; | 2131 if (node != NULL) return node; |
2009 } | 2132 } |
2010 | 2133 |
2011 if (size_in_bytes <= kLargeAllocationMax) { | 2134 if (size_in_bytes <= kLargeAllocationMax) { |
2012 node = PickNodeFromList(&large_list_, node_size); | 2135 node = large_list_.PickNodeFromList(node_size); |
2013 if (node != NULL) return node; | 2136 if (node != NULL) return node; |
2014 } | 2137 } |
2015 | 2138 |
2016 for (FreeListNode** cur = &huge_list_; | 2139 int huge_list_available = huge_list_.available(); |
2140 for (FreeListNode** cur = huge_list_.GetTopAddress(); | |
2017 *cur != NULL; | 2141 *cur != NULL; |
2018 cur = (*cur)->next_address()) { | 2142 cur = (*cur)->next_address()) { |
2019 FreeListNode* cur_node = *cur; | 2143 FreeListNode* cur_node = *cur; |
2020 while (cur_node != NULL && | 2144 while (cur_node != NULL && |
2021 Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) { | 2145 Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) { |
2022 available_ -= reinterpret_cast<FreeSpace*>(cur_node)->Size(); | 2146 huge_list_available -= reinterpret_cast<FreeSpace*>(cur_node)->Size(); |
2023 cur_node = cur_node->next(); | 2147 cur_node = cur_node->next(); |
2024 } | 2148 } |
2025 | 2149 |
2026 *cur = cur_node; | 2150 *cur = cur_node; |
2027 if (cur_node == NULL) break; | 2151 if (cur_node == NULL) { |
2152 huge_list_.set_end(NULL); | |
2153 break; | |
2154 } | |
2028 | 2155 |
2029 ASSERT((*cur)->map() == HEAP->raw_unchecked_free_space_map()); | 2156 ASSERT((*cur)->map() == HEAP->raw_unchecked_free_space_map()); |
2030 FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur); | 2157 FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur); |
2031 int size = cur_as_free_space->Size(); | 2158 int size = cur_as_free_space->Size(); |
2032 if (size >= size_in_bytes) { | 2159 if (size >= size_in_bytes) { |
2033 // Large enough node found. Unlink it from the list. | 2160 // Large enough node found. Unlink it from the list. |
2034 node = *cur; | 2161 node = *cur; |
2162 *cur = node->next(); | |
Michael Starzinger (2012/11/29 14:31:20): We also need to reset the end pointer in case we p
Hannes Payer (out of office) (2012/11/29 15:55:02): Done.
| |
2035 *node_size = size; | 2163 *node_size = size; |
2036 *cur = node->next(); | 2164 huge_list_available -= size; |
2037 break; | 2165 break; |
2038 } | 2166 } |
2039 } | 2167 } |
2040 | 2168 |
2169 huge_list_.set_available(huge_list_available); | |
2170 ASSERT(IsVeryLong() || available() == SumFreeLists()); | |
2171 | |
2041 return node; | 2172 return node; |
2042 } | 2173 } |
2043 | 2174 |
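The huge-list scan in FindNodeFor walks the list through next_address() so that both evacuation candidates and the chosen block can be unlinked without a separate "previous" pointer, and the new code also clears the huge list's end pointer when the scan runs off the end of the list. The pointer-to-pointer first-fit idiom on its own, as a sketch with a placeholder Block type:

  struct Block { int size = 0; Block* next = nullptr; };

  // First-fit search: advance a pointer-to-pointer cursor so the matching
  // block can be unlinked in place and handed to the caller.
  Block* FindFirstFit(Block** head, int wanted_size) {
    for (Block** cursor = head; *cursor != nullptr; cursor = &(*cursor)->next) {
      if ((*cursor)->size >= wanted_size) {
        Block* found = *cursor;
        *cursor = found->next;
        return found;
      }
    }
    return nullptr;
  }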
2044 | 2175 |
2045 // Allocation on the old space free list. If it succeeds then a new linear | 2176 // Allocation on the old space free list. If it succeeds then a new linear |
2046 // allocation space has been set up with the top and limit of the space. If | 2177 // allocation space has been set up with the top and limit of the space. If |
2047 // the allocation fails then NULL is returned, and the caller can perform a GC | 2178 // the allocation fails then NULL is returned, and the caller can perform a GC |
2048 // or allocate a new page before retrying. | 2179 // or allocate a new page before retrying. |
2049 HeapObject* FreeList::Allocate(int size_in_bytes) { | 2180 HeapObject* FreeList::Allocate(int size_in_bytes) { |
2050 ASSERT(0 < size_in_bytes); | 2181 ASSERT(0 < size_in_bytes); |
2051 ASSERT(size_in_bytes <= kMaxBlockSize); | 2182 ASSERT(size_in_bytes <= kMaxBlockSize); |
2052 ASSERT(IsAligned(size_in_bytes, kPointerSize)); | 2183 ASSERT(IsAligned(size_in_bytes, kPointerSize)); |
2053 // Don't free list allocate if there is linear space available. | 2184 // Don't free list allocate if there is linear space available. |
2054 ASSERT(owner_->limit() - owner_->top() < size_in_bytes); | 2185 ASSERT(owner_->limit() - owner_->top() < size_in_bytes); |
2055 | 2186 |
2056 int new_node_size = 0; | 2187 int new_node_size = 0; |
2057 FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size); | 2188 FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size); |
2058 if (new_node == NULL) return NULL; | 2189 if (new_node == NULL) return NULL; |
2059 | 2190 |
2060 available_ -= new_node_size; | |
2061 ASSERT(IsVeryLong() || available_ == SumFreeLists()); | |
2062 | 2191 |
2063 int bytes_left = new_node_size - size_in_bytes; | 2192 int bytes_left = new_node_size - size_in_bytes; |
2064 ASSERT(bytes_left >= 0); | 2193 ASSERT(bytes_left >= 0); |
2065 | 2194 |
2066 int old_linear_size = static_cast<int>(owner_->limit() - owner_->top()); | 2195 int old_linear_size = static_cast<int>(owner_->limit() - owner_->top()); |
2067 // Mark the old linear allocation area with a free space map so it can be | 2196 // Mark the old linear allocation area with a free space map so it can be |
2068 // skipped when scanning the heap. This also puts it back in the free list | 2197 // skipped when scanning the heap. This also puts it back in the free list |
2069 // if it is big enough. | 2198 // if it is big enough. |
2070 owner_->Free(owner_->top(), old_linear_size); | 2199 owner_->Free(owner_->top(), old_linear_size); |
2071 | 2200 |
(...skipping 37 matching lines...) | |
2109 } else { | 2238 } else { |
2110 // TODO(gc) Try not freeing linear allocation region when bytes_left | 2239 // TODO(gc) Try not freeing linear allocation region when bytes_left |
2111 // are zero. | 2240 // are zero. |
2112 owner_->SetTop(NULL, NULL); | 2241 owner_->SetTop(NULL, NULL); |
2113 } | 2242 } |
2114 | 2243 |
2115 return new_node; | 2244 return new_node; |
2116 } | 2245 } |
2117 | 2246 |
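As the comment above Allocate says, a NULL result means the caller can perform a GC or allocate a new page before retrying. A hedged sketch of such a retry loop; the SpaceLike parameter and its AllocateRaw/AddPage/CollectGarbage calls are placeholder names for illustration, not the V8 API:

  // Try to allocate, growing the space or collecting garbage between attempts.
  template <typename SpaceLike>
  void* AllocateWithRetry(SpaceLike* space, int size_in_bytes, int max_attempts) {
    for (int attempt = 0; attempt < max_attempts; ++attempt) {
      void* result = space->AllocateRaw(size_in_bytes);
      if (result != nullptr) return result;
      if (!space->AddPage()) space->CollectGarbage();
    }
    return nullptr;  // still failing: the caller has to report out-of-memory
  }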
2118 | 2247 |
2119 static intptr_t CountFreeListItemsInList(FreeListNode* n, Page* p) { | |
2120 intptr_t sum = 0; | |
2121 while (n != NULL) { | |
2122 if (Page::FromAddress(n->address()) == p) { | |
2123 FreeSpace* free_space = reinterpret_cast<FreeSpace*>(n); | |
2124 sum += free_space->Size(); | |
2125 } | |
2126 n = n->next(); | |
2127 } | |
2128 return sum; | |
2129 } | |
2130 | |
2131 | |
2132 void FreeList::CountFreeListItems(Page* p, SizeStats* sizes) { | 2248 void FreeList::CountFreeListItems(Page* p, SizeStats* sizes) { |
2133 sizes->huge_size_ = CountFreeListItemsInList(huge_list_, p); | 2249 sizes->huge_size_ = huge_list_.CountFreeListItemsInList(p); |
2134 if (sizes->huge_size_ < p->area_size()) { | 2250 if (sizes->huge_size_ < p->area_size()) { |
2135 sizes->small_size_ = CountFreeListItemsInList(small_list_, p); | 2251 sizes->small_size_ = small_list_.CountFreeListItemsInList(p); |
2136 sizes->medium_size_ = CountFreeListItemsInList(medium_list_, p); | 2252 sizes->medium_size_ = medium_list_.CountFreeListItemsInList(p); |
2137 sizes->large_size_ = CountFreeListItemsInList(large_list_, p); | 2253 sizes->large_size_ = large_list_.CountFreeListItemsInList(p); |
2138 } else { | 2254 } else { |
2139 sizes->small_size_ = 0; | 2255 sizes->small_size_ = 0; |
2140 sizes->medium_size_ = 0; | 2256 sizes->medium_size_ = 0; |
2141 sizes->large_size_ = 0; | 2257 sizes->large_size_ = 0; |
2142 } | 2258 } |
2143 } | 2259 } |
2144 | 2260 |
2145 | 2261 |
2146 static intptr_t EvictFreeListItemsInList(FreeListNode** n, Page* p) { | |
2147 intptr_t sum = 0; | |
2148 while (*n != NULL) { | |
2149 if (Page::FromAddress((*n)->address()) == p) { | |
2150 FreeSpace* free_space = reinterpret_cast<FreeSpace*>(*n); | |
2151 sum += free_space->Size(); | |
2152 *n = (*n)->next(); | |
2153 } else { | |
2154 n = (*n)->next_address(); | |
2155 } | |
2156 } | |
2157 return sum; | |
2158 } | |
2159 | |
2160 | |
2161 intptr_t FreeList::EvictFreeListItems(Page* p) { | 2262 intptr_t FreeList::EvictFreeListItems(Page* p) { |
2162 intptr_t sum = EvictFreeListItemsInList(&huge_list_, p); | 2263 intptr_t sum = huge_list_.EvictFreeListItemsInList(p); |
2163 | 2264 |
2164 if (sum < p->area_size()) { | 2265 if (sum < p->area_size()) { |
2165 sum += EvictFreeListItemsInList(&small_list_, p) + | 2266 sum += small_list_.EvictFreeListItemsInList(p) + |
2166 EvictFreeListItemsInList(&medium_list_, p) + | 2267 medium_list_.EvictFreeListItemsInList(p) + |
2167 EvictFreeListItemsInList(&large_list_, p); | 2268 large_list_.EvictFreeListItemsInList(p); |
2168 } | 2269 } |
2169 | 2270 |
2170 available_ -= static_cast<int>(sum); | |
2171 | |
2172 return sum; | 2271 return sum; |
2173 } | 2272 } |
2174 | 2273 |
2175 | 2274 |
2275 void FreeList::RepairLists(Heap* heap) { | |
2276 small_list_.RepairFreeList(heap); | |
2277 medium_list_.RepairFreeList(heap); | |
2278 large_list_.RepairFreeList(heap); | |
2279 huge_list_.RepairFreeList(heap); | |
2280 } | |
2281 | |
2282 | |
2176 #ifdef DEBUG | 2283 #ifdef DEBUG |
2177 intptr_t FreeList::SumFreeList(FreeListNode* cur) { | 2284 intptr_t FreeListCategory::SumFreeList() { |
2178 intptr_t sum = 0; | 2285 intptr_t sum = 0; |
2286 FreeListNode* cur = top_; | |
2179 while (cur != NULL) { | 2287 while (cur != NULL) { |
2180 ASSERT(cur->map() == HEAP->raw_unchecked_free_space_map()); | 2288 ASSERT(cur->map() == HEAP->raw_unchecked_free_space_map()); |
2181 FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur); | 2289 FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur); |
2182 sum += cur_as_free_space->Size(); | 2290 sum += cur_as_free_space->Size(); |
2183 cur = cur->next(); | 2291 cur = cur->next(); |
2184 } | 2292 } |
2185 return sum; | 2293 return sum; |
2186 } | 2294 } |
2187 | 2295 |
2188 | 2296 |
2189 static const int kVeryLongFreeList = 500; | 2297 static const int kVeryLongFreeList = 500; |
2190 | 2298 |
2191 | 2299 |
2192 int FreeList::FreeListLength(FreeListNode* cur) { | 2300 int FreeListCategory::FreeListLength() { |
2193 int length = 0; | 2301 int length = 0; |
2302 FreeListNode* cur = top_; | |
2194 while (cur != NULL) { | 2303 while (cur != NULL) { |
2195 length++; | 2304 length++; |
2196 cur = cur->next(); | 2305 cur = cur->next(); |
2197 if (length == kVeryLongFreeList) return length; | 2306 if (length == kVeryLongFreeList) return length; |
2198 } | 2307 } |
2199 return length; | 2308 return length; |
2200 } | 2309 } |
2201 | 2310 |
2202 | 2311 |
2203 bool FreeList::IsVeryLong() { | 2312 bool FreeList::IsVeryLong() { |
2204 if (FreeListLength(small_list_) == kVeryLongFreeList) return true; | 2313 if (small_list_.FreeListLength() == kVeryLongFreeList) return true; |
2205 if (FreeListLength(medium_list_) == kVeryLongFreeList) return true; | 2314 if (medium_list_.FreeListLength() == kVeryLongFreeList) return true; |
2206 if (FreeListLength(large_list_) == kVeryLongFreeList) return true; | 2315 if (large_list_.FreeListLength() == kVeryLongFreeList) return true; |
2207 if (FreeListLength(huge_list_) == kVeryLongFreeList) return true; | 2316 if (huge_list_.FreeListLength() == kVeryLongFreeList) return true; |
2208 return false; | 2317 return false; |
2209 } | 2318 } |
2210 | 2319 |
2211 | 2320 |
2212 // This can take a very long time because it is linear in the number of entries | 2321 // This can take a very long time because it is linear in the number of entries |
2213 // on the free list, so it should not be called if FreeListLength returns | 2322 // on the free list, so it should not be called if FreeListLength returns |
2214 // kVeryLongFreeList. | 2323 // kVeryLongFreeList. |
2215 intptr_t FreeList::SumFreeLists() { | 2324 intptr_t FreeList::SumFreeLists() { |
2216 intptr_t sum = SumFreeList(small_list_); | 2325 intptr_t sum = small_list_.SumFreeList(); |
2217 sum += SumFreeList(medium_list_); | 2326 sum += medium_list_.SumFreeList(); |
2218 sum += SumFreeList(large_list_); | 2327 sum += large_list_.SumFreeList(); |
2219 sum += SumFreeList(huge_list_); | 2328 sum += huge_list_.SumFreeList(); |
2220 return sum; | 2329 return sum; |
2221 } | 2330 } |
2222 #endif | 2331 #endif |
2223 | 2332 |
2224 | 2333 |
2225 // ----------------------------------------------------------------------------- | 2334 // ----------------------------------------------------------------------------- |
2226 // OldSpace implementation | 2335 // OldSpace implementation |
2227 | 2336 |
2228 bool NewSpace::ReserveSpace(int bytes) { | 2337 bool NewSpace::ReserveSpace(int bytes) { |
2229 // We can't reliably unpack a partial snapshot that needs more new space | 2338 // We can't reliably unpack a partial snapshot that needs more new space |
(...skipping 67 matching lines...) | |
2297 // Mark the old linear allocation area with a free space so it can be | 2406 // Mark the old linear allocation area with a free space so it can be |
2298 // skipped when scanning the heap. This also puts it back in the free list | 2407 // skipped when scanning the heap. This also puts it back in the free list |
2299 // if it is big enough. | 2408 // if it is big enough. |
2300 Free(top(), old_linear_size); | 2409 Free(top(), old_linear_size); |
2301 | 2410 |
2302 SetTop(new_area->address(), new_area->address() + size_in_bytes); | 2411 SetTop(new_area->address(), new_area->address() + size_in_bytes); |
2303 return true; | 2412 return true; |
2304 } | 2413 } |
2305 | 2414 |
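ReserveSpace above uses the same trick as Allocate: the unused remainder of the current linear allocation area is handed back through Free() so it can be skipped when scanning the heap, and then top and limit are pointed at the freshly reserved block. The two steps in isolation, as a sketch with a placeholder LinearArea struct and a callback standing in for the space's Free():

  struct LinearArea { char* top = nullptr; char* limit = nullptr; };

  // Return the unused tail of the old linear area to the free list, then make
  // the new block the current linear allocation window.
  template <typename FreeFn>
  void InstallNewLinearArea(LinearArea* area, FreeFn give_back,
                            char* new_start, int new_size) {
    int old_linear_size = static_cast<int>(area->limit - area->top);
    give_back(area->top, old_linear_size);
    area->top = new_start;
    area->limit = new_start + new_size;
  }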
2306 | 2415 |
2307 static void RepairFreeList(Heap* heap, FreeListNode* n) { | |
2308 while (n != NULL) { | |
2309 Map** map_location = reinterpret_cast<Map**>(n->address()); | |
2310 if (*map_location == NULL) { | |
2311 *map_location = heap->free_space_map(); | |
2312 } else { | |
2313 ASSERT(*map_location == heap->free_space_map()); | |
2314 } | |
2315 n = n->next(); | |
2316 } | |
2317 } | |
2318 | |
2319 | |
2320 void FreeList::RepairLists(Heap* heap) { | |
2321 RepairFreeList(heap, small_list_); | |
2322 RepairFreeList(heap, medium_list_); | |
2323 RepairFreeList(heap, large_list_); | |
2324 RepairFreeList(heap, huge_list_); | |
2325 } | |
2326 | |
2327 | |
2328 // After we have booted, we have created a map which represents free space | 2416 // After we have booted, we have created a map which represents free space |
2329 // on the heap. If there was already a free list then the elements on it | 2417 // on the heap. If there was already a free list then the elements on it |
2330 // were created with the wrong FreeSpaceMap (normally NULL), so we need to | 2418 // were created with the wrong FreeSpaceMap (normally NULL), so we need to |
2331 // fix them. | 2419 // fix them. |
2332 void PagedSpace::RepairFreeListsAfterBoot() { | 2420 void PagedSpace::RepairFreeListsAfterBoot() { |
2333 free_list_.RepairLists(heap()); | 2421 free_list_.RepairLists(heap()); |
2334 } | 2422 } |
2335 | 2423 |
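The comment above explains why the repair is needed: free-list elements created before the free-space map exists carry the wrong map word (normally NULL), so each category's RepairFreeList walks its list and fixes them. The repair walk as a standalone sketch with placeholder Node/Map types rather than the V8 classes:

  struct Map;
  struct Node { Map* map = nullptr; Node* next = nullptr; };

  // Install the free-space map into every node that was created before the
  // map existed; nodes created later already carry the right map.
  void RepairList(Node* n, Map* free_space_map) {
    for (; n != nullptr; n = n->next) {
      if (n->map == nullptr) n->map = free_space_map;
    }
  }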
2336 | 2424 |
2337 // You have to call this last, since the implementation from PagedSpace | 2425 // You have to call this last, since the implementation from PagedSpace |
(...skipping 612 matching lines...) | |
2950 object->ShortPrint(); | 3038 object->ShortPrint(); |
2951 PrintF("\n"); | 3039 PrintF("\n"); |
2952 } | 3040 } |
2953 printf(" --------------------------------------\n"); | 3041 printf(" --------------------------------------\n"); |
2954 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); | 3042 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); |
2955 } | 3043 } |
2956 | 3044 |
2957 #endif // DEBUG | 3045 #endif // DEBUG |
2958 | 3046 |
2959 } } // namespace v8::internal | 3047 } } // namespace v8::internal |