OLD | NEW |
1 // Copyright 2006-2008 the V8 project authors. All rights reserved. | 1 // Copyright 2006-2008 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 127 matching lines...)
138 | 138 |
139 | 139 |
140 // ----------------------------------------------------------------------------- | 140 // ----------------------------------------------------------------------------- |
141 // Page | 141 // Page |
142 | 142 |
143 #ifdef DEBUG | 143 #ifdef DEBUG |
144 Page::RSetState Page::rset_state_ = Page::IN_USE; | 144 Page::RSetState Page::rset_state_ = Page::IN_USE; |
145 #endif | 145 #endif |
146 | 146 |
147 // ----------------------------------------------------------------------------- | 147 // ----------------------------------------------------------------------------- |
148 // CodeRange | |
149 | |
150 List<CodeRange::FreeBlock> CodeRange::free_list_(0); | |
151 List<CodeRange::FreeBlock> CodeRange::allocation_list_(0); | |
152 int CodeRange::current_allocation_block_index_ = 0; | |
153 VirtualMemory* CodeRange::code_range_ = NULL; | |
154 | |
155 | |
156 bool CodeRange::Setup(const size_t requested) { | |
157 ASSERT(code_range_ == NULL); | |
158 | |
159 code_range_ = new VirtualMemory(requested); | |
160 CHECK(code_range_ != NULL); | |
161 if (!code_range_->IsReserved()) { | |
162 delete code_range_; | |
163 code_range_ = NULL; | |
164 return false; | |
165 } | |
166 | |
167 // We are sure that we have mapped a block of requested addresses. | |
168 ASSERT(code_range_->size() == requested); | |
169 LOG(NewEvent("CodeRange", code_range_->address(), requested)); | |
170 allocation_list_.Add(FreeBlock(code_range_->address(), code_range_->size())); | |
171 current_allocation_block_index_ = 0; | |
172 return true; | |
173 } | |
174 | |
175 | |
176 int CodeRange::CompareFreeBlockAddress(const FreeBlock* left, | |
177 const FreeBlock* right) { | |
178 // The entire point of CodeRange is that the difference between two | |
179 // addresses in the range can be represented as a signed 32-bit int, | |
180 // so the cast is semantically correct. | |
181 return static_cast<int>(left->start - right->start); | |
182 } | |
183 | |
184 | |
185 void CodeRange::GetNextAllocationBlock(size_t requested) { | |
186 for (current_allocation_block_index_++; | |
187 current_allocation_block_index_ < allocation_list_.length(); | |
188 current_allocation_block_index_++) { | |
189 if (requested <= allocation_list_[current_allocation_block_index_].size) { | |
190 return; // Found a large enough allocation block. | |
191 } | |
192 } | |
193 | |
194 // Sort and merge the free blocks on the free list and the allocation list. | |
195 free_list_.AddAll(allocation_list_); | |
196 allocation_list_.Clear(); | |
197 free_list_.Sort(&CompareFreeBlockAddress); | |
198 for (int i = 0; i < free_list_.length();) { | |
199 FreeBlock merged = free_list_[i]; | |
200 i++; | |
201 // Add adjacent free blocks to the current merged block. | |
202 while (i < free_list_.length() && | |
203 free_list_[i].start == merged.start + merged.size) { | |
204 merged.size += free_list_[i].size; | |
205 i++; | |
206 } | |
207 if (merged.size > 0) { | |
208 allocation_list_.Add(merged); | |
209 } | |
210 } | |
211 free_list_.Clear(); | |
212 | |
213 for (current_allocation_block_index_ = 0; | |
214 current_allocation_block_index_ < allocation_list_.length(); | |
215 current_allocation_block_index_++) { | |
216 if (requested <= allocation_list_[current_allocation_block_index_].size) { | |
217 return; // Found a large enough allocation block. | |
218 } | |
219 } | |
220 | |
221 // Code range is full or too fragmented. | |
222 V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock"); | |
223 } | |
224 | |
225 | |
226 | |
227 void* CodeRange::AllocateRawMemory(const size_t requested, size_t* allocated) { | |
228 ASSERT(current_allocation_block_index_ < allocation_list_.length()); | |
229 if (requested > allocation_list_[current_allocation_block_index_].size) { | |
230 // Find an allocation block large enough. This function call may | |
231 // call V8::FatalProcessOutOfMemory if it cannot find a large enough block. | |
232 GetNextAllocationBlock(requested); | |
233 } | |
234 // Commit the requested memory at the start of the current allocation block. | |
235 *allocated = RoundUp(requested, Page::kPageSize); | |
236 FreeBlock current = allocation_list_[current_allocation_block_index_]; | |
237 if (*allocated >= current.size - Page::kPageSize) { | |
238 // Don't leave a small free block, useless for a large object or chunk. | |
239 *allocated = current.size; | |
240 } | |
241 ASSERT(*allocated <= current.size); | |
242 if (!code_range_->Commit(current.start, *allocated, true)) { | |
243 *allocated = 0; | |
244 return NULL; | |
245 } | |
246 allocation_list_[current_allocation_block_index_].start += *allocated; | |
247 allocation_list_[current_allocation_block_index_].size -= *allocated; | |
248 if (*allocated == current.size) { | |
249 GetNextAllocationBlock(0); // This block is used up, get the next one. | |
250 } | |
251 return current.start; | |
252 } | |
253 | |
254 | |
255 void CodeRange::FreeRawMemory(void* address, size_t length) { | |
256 free_list_.Add(FreeBlock(address, length)); | |
257 code_range_->Uncommit(address, length); | |
258 } | |
259 | |
260 | |
261 void CodeRange::TearDown() { | |
262 delete code_range_; // Frees all memory in the virtual memory range. | |
263 code_range_ = NULL; | |
264 free_list_.Free(); | |
265 allocation_list_.Free(); | |
266 } | |
267 | |
268 | |
269 // ----------------------------------------------------------------------------- | |
270 // MemoryAllocator | 148 // MemoryAllocator |
271 // | 149 // |
272 int MemoryAllocator::capacity_ = 0; | 150 int MemoryAllocator::capacity_ = 0; |
273 int MemoryAllocator::size_ = 0; | 151 int MemoryAllocator::size_ = 0; |
274 | 152 |
275 VirtualMemory* MemoryAllocator::initial_chunk_ = NULL; | 153 VirtualMemory* MemoryAllocator::initial_chunk_ = NULL; |
276 | 154 |
277 // 270 is an estimate based on the static default heap size of a pair of 256K | 155 // 270 is an estimate based on the static default heap size of a pair of 256K |
278 // semispaces and a 64M old generation. | 156 // semispaces and a 64M old generation. |
279 const int kEstimatedNumberOfChunks = 270; | 157 const int kEstimatedNumberOfChunks = 270; |
(...skipping 61 matching lines...)
341 capacity_ = 0; | 219 capacity_ = 0; |
342 size_ = 0; | 220 size_ = 0; |
343 max_nof_chunks_ = 0; | 221 max_nof_chunks_ = 0; |
344 } | 222 } |
345 | 223 |
346 | 224 |
347 void* MemoryAllocator::AllocateRawMemory(const size_t requested, | 225 void* MemoryAllocator::AllocateRawMemory(const size_t requested, |
348 size_t* allocated, | 226 size_t* allocated, |
349 Executability executable) { | 227 Executability executable) { |
350 if (size_ + static_cast<int>(requested) > capacity_) return NULL; | 228 if (size_ + static_cast<int>(requested) > capacity_) return NULL; |
351 void* mem; | 229 |
352 if (executable == EXECUTABLE && CodeRange::exists()) { | 230 void* mem = OS::Allocate(requested, allocated, executable == EXECUTABLE); |
353 mem = CodeRange::AllocateRawMemory(requested, allocated); | |
354 } else { | |
355 mem = OS::Allocate(requested, allocated, (executable == EXECUTABLE)); | |
356 } | |
357 int alloced = *allocated; | 231 int alloced = *allocated; |
358 size_ += alloced; | 232 size_ += alloced; |
359 Counters::memory_allocated.Increment(alloced); | 233 Counters::memory_allocated.Increment(alloced); |
360 return mem; | 234 return mem; |
361 } | 235 } |
362 | 236 |
363 | 237 |
364 void MemoryAllocator::FreeRawMemory(void* mem, size_t length) { | 238 void MemoryAllocator::FreeRawMemory(void* mem, size_t length) { |
365 if (CodeRange::contains(static_cast<Address>(mem))) { | 239 OS::Free(mem, length); |
366 CodeRange::FreeRawMemory(mem, length); | |
367 } else { | |
368 OS::Free(mem, length); | |
369 } | |
370 Counters::memory_allocated.Decrement(length); | 240 Counters::memory_allocated.Decrement(length); |
371 size_ -= length; | 241 size_ -= length; |
372 ASSERT(size_ >= 0); | 242 ASSERT(size_ >= 0); |
373 } | 243 } |
374 | 244 |
375 | 245 |
376 void* MemoryAllocator::ReserveInitialChunk(const size_t requested) { | 246 void* MemoryAllocator::ReserveInitialChunk(const size_t requested) { |
377 ASSERT(initial_chunk_ == NULL); | 247 ASSERT(initial_chunk_ == NULL); |
378 | 248 |
379 initial_chunk_ = new VirtualMemory(requested); | 249 initial_chunk_ = new VirtualMemory(requested); |
(...skipping 2400 matching lines...)
2780 reinterpret_cast<Object**>(object->address() | 2650 reinterpret_cast<Object**>(object->address() |
2781 + Page::kObjectAreaSize), | 2651 + Page::kObjectAreaSize), |
2782 allocation_top); | 2652 allocation_top); |
2783 PrintF("\n"); | 2653 PrintF("\n"); |
2784 } | 2654 } |
2785 } | 2655 } |
2786 } | 2656 } |
2787 #endif // DEBUG | 2657 #endif // DEBUG |
2788 | 2658 |
2789 } } // namespace v8::internal | 2659 } } // namespace v8::internal |
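
For readers skimming the CodeRange code removed above, here is a minimal, self-contained sketch of the same free-block strategy: page-rounded allocation from the front of a current block, a free list for returned blocks, and an address-sorted merge pass when the current blocks run out. It is written against std::vector and made-up names (RangeAllocator, kPageSize) in place of V8's List, Page::kPageSize and VirtualMemory, so it illustrates the algorithm only, not V8's actual interfaces.

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

namespace sketch {

constexpr size_t kPageSize = 4096;  // stand-in for Page::kPageSize

struct FreeBlock {
  uintptr_t start;
  size_t size;
};

class RangeAllocator {
 public:
  // 'base' and 'size' describe an already-reserved address range
  // (the real code reserves it with VirtualMemory; here it is just numbers).
  RangeAllocator(uintptr_t base, size_t size) {
    allocation_list_.push_back(FreeBlock{base, size});
  }

  // Rounds the request up to the page size and takes it from the front of the
  // current allocation block, mirroring CodeRange::AllocateRawMemory.
  uintptr_t Allocate(size_t requested, size_t* allocated) {
    if (current_ >= allocation_list_.size() ||
        requested > allocation_list_[current_].size) {
      if (!GetNextAllocationBlock(requested)) {
        *allocated = 0;
        return 0;  // range is full or too fragmented
      }
    }
    FreeBlock current = allocation_list_[current_];  // copy, as in the original
    *allocated = RoundUp(requested);
    if (*allocated >= current.size - kPageSize) {
      *allocated = current.size;  // don't leave a sub-page tail behind
    }
    assert(*allocated <= current.size);
    allocation_list_[current_].start += *allocated;
    allocation_list_[current_].size -= *allocated;
    if (*allocated == current.size) {
      GetNextAllocationBlock(0);  // block used up; move to the next one
    }
    return current.start;
  }

  // Freed blocks are parked on a free list until the allocation list runs dry.
  void Free(uintptr_t start, size_t size) {
    free_list_.push_back(FreeBlock{start, size});
  }

 private:
  static size_t RoundUp(size_t n) {
    return (n + kPageSize - 1) / kPageSize * kPageSize;
  }

  // Same idea as CodeRange::GetNextAllocationBlock: scan forward for a block
  // that fits; if none does, merge the free list back in (sorted by address,
  // coalescing neighbours) and scan again.
  bool GetNextAllocationBlock(size_t requested) {
    for (++current_; current_ < allocation_list_.size(); ++current_) {
      if (requested <= allocation_list_[current_].size) return true;
    }
    free_list_.insert(free_list_.end(), allocation_list_.begin(),
                      allocation_list_.end());
    allocation_list_.clear();
    std::sort(free_list_.begin(), free_list_.end(),
              [](const FreeBlock& a, const FreeBlock& b) {
                return a.start < b.start;
              });
    for (size_t i = 0; i < free_list_.size();) {
      FreeBlock merged = free_list_[i++];
      while (i < free_list_.size() &&
             free_list_[i].start == merged.start + merged.size) {
        merged.size += free_list_[i++].size;  // absorb the adjacent block
      }
      if (merged.size > 0) allocation_list_.push_back(merged);
    }
    free_list_.clear();
    for (current_ = 0; current_ < allocation_list_.size(); ++current_) {
      if (requested <= allocation_list_[current_].size) return true;
    }
    return false;  // the real code calls V8::FatalProcessOutOfMemory here
  }

  std::vector<FreeBlock> allocation_list_;
  std::vector<FreeBlock> free_list_;
  size_t current_ = 0;
};

}  // namespace sketch

The sketch returns false instead of aborting when the range is exhausted, and it commits nothing; the committing of pages via VirtualMemory and the OS-level reservation are exactly the parts this change moves out of spaces.cc.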