OLD | NEW |
1 // Copyright 2006-2008 the V8 project authors. All rights reserved. | 1 // Copyright 2006-2008 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 127 matching lines...)
138 | 138 |
139 | 139 |
140 // ----------------------------------------------------------------------------- | 140 // ----------------------------------------------------------------------------- |
141 // Page | 141 // Page |
142 | 142 |
143 #ifdef DEBUG | 143 #ifdef DEBUG |
144 Page::RSetState Page::rset_state_ = Page::IN_USE; | 144 Page::RSetState Page::rset_state_ = Page::IN_USE; |
145 #endif | 145 #endif |
146 | 146 |
147 // ----------------------------------------------------------------------------- | 147 // ----------------------------------------------------------------------------- |
| 148 // CodeRange |
| 149 |
| 150 List<CodeRange::FreeBlock> CodeRange::free_list_(0); |
| 151 List<CodeRange::FreeBlock> CodeRange::allocation_list_(0); |
| 152 int CodeRange::current_allocation_block_index_ = 0; |
| 153 VirtualMemory* CodeRange::code_range_ = NULL; |
| 154 |
| 155 |
| 156 bool CodeRange::Setup(const size_t requested) { |
| 157 ASSERT(code_range_ == NULL); |
| 158 |
| 159 code_range_ = new VirtualMemory(requested); |
| 160 CHECK(code_range_ != NULL); |
| 161 if (!code_range_->IsReserved()) { |
| 162 delete code_range_; |
| 163 code_range_ = NULL; |
| 164 return false; |
| 165 } |
| 166 |
| 167 // We are sure that we have mapped a block of requested addresses. |
| 168 ASSERT(code_range_->size() == requested); |
| 169 LOG(NewEvent("CodeRange", code_range_->address(), requested)); |
| 170 allocation_list_.Add(FreeBlock(code_range_->address(), code_range_->size())); |
| 171 current_allocation_block_index_ = 0; |
| 172 return true; |
| 173 } |
| 174 |
| 175 |
| 176 int CodeRange::CompareFreeBlockAddress(const FreeBlock* left, |
| 177 const FreeBlock* right) { |
| 178 // The entire point of CodeRange is that the difference between two |
| 179 // addresses in the range can be represented as a signed 32-bit int, |
| 180 // so the cast is semantically correct. |
| 181 return static_cast<int>(left->start - right->start); |
| 182 } |
| 183 |
| 184 |
| 185 void CodeRange::GetNextAllocationBlock(size_t requested) { |
| 186 for (current_allocation_block_index_++; |
| 187 current_allocation_block_index_ < allocation_list_.length(); |
| 188 current_allocation_block_index_++) { |
| 189 if (requested <= allocation_list_[current_allocation_block_index_].size) { |
| 190 return; // Found a large enough allocation block. |
| 191 } |
| 192 } |
| 193 |
| 194 // Sort and merge the free blocks on the free list and the allocation list. |
| 195 free_list_.AddAll(allocation_list_); |
| 196 allocation_list_.Clear(); |
| 197 free_list_.Sort(&CompareFreeBlockAddress); |
| 198 for (int i = 0; i < free_list_.length();) { |
| 199 FreeBlock merged = free_list_[i]; |
| 200 i++; |
| 201 // Add adjacent free blocks to the current merged block. |
| 202 while (i < free_list_.length() && |
| 203 free_list_[i].start == merged.start + merged.size) { |
| 204 merged.size += free_list_[i].size; |
| 205 i++; |
| 206 } |
| 207 if (merged.size > 0) { |
| 208 allocation_list_.Add(merged); |
| 209 } |
| 210 } |
| 211 free_list_.Clear(); |
| 212 |
| 213 for (current_allocation_block_index_ = 0; |
| 214 current_allocation_block_index_ < allocation_list_.length(); |
| 215 current_allocation_block_index_++) { |
| 216 if (requested <= allocation_list_[current_allocation_block_index_].size) { |
| 217 return; // Found a large enough allocation block. |
| 218 } |
| 219 } |
| 220 |
| 221 // Code range is full or too fragmented. |
| 222 V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock"); |
| 223 } |
| 224 |
| 225 |
| 226 |
| 227 void* CodeRange::AllocateRawMemory(const size_t requested, size_t* allocated) { |
| 228 ASSERT(current_allocation_block_index_ < allocation_list_.length()); |
| 229 if (requested > allocation_list_[current_allocation_block_index_].size) { |
| 230 // Find an allocation block large enough. This function call may |
| 231 // call V8::FatalProcessOutOfMemory if it cannot find a large enough block. |
| 232 GetNextAllocationBlock(requested); |
| 233 } |
| 234 // Commit the requested memory at the start of the current allocation block. |
| 235 *allocated = RoundUp(requested, Page::kPageSize); |
| 236 FreeBlock current = allocation_list_[current_allocation_block_index_]; |
| 237 if (*allocated >= current.size - Page::kPageSize) { |
| 238 // Don't leave a small free block, useless for a large object or chunk. |
| 239 *allocated = current.size; |
| 240 } |
| 241 ASSERT(*allocated <= current.size); |
| 242 if (!code_range_->Commit(current.start, *allocated, true)) { |
| 243 *allocated = 0; |
| 244 return NULL; |
| 245 } |
| 246 allocation_list_[current_allocation_block_index_].start += *allocated; |
| 247 allocation_list_[current_allocation_block_index_].size -= *allocated; |
| 248 if (*allocated == current.size) { |
| 249 GetNextAllocationBlock(0); // This block is used up, get the next one. |
| 250 } |
| 251 return current.start; |
| 252 } |
| 253 |
| 254 |
| 255 void CodeRange::FreeRawMemory(void* address, size_t length) { |
| 256 free_list_.Add(FreeBlock(address, length)); |
| 257 code_range_->Uncommit(address, length); |
| 258 } |
| 259 |
| 260 |
| 261 void CodeRange::TearDown() { |
| 262 delete code_range_; // Frees all memory in the virtual memory range. |
| 263 code_range_ = NULL; |
| 264 free_list_.Free(); |
| 265 allocation_list_.Free(); |
| 266 } |
| 267 |
| 268 |
| 269 // ----------------------------------------------------------------------------- |
148 // MemoryAllocator | 270 // MemoryAllocator |
149 // | 271 // |
150 int MemoryAllocator::capacity_ = 0; | 272 int MemoryAllocator::capacity_ = 0; |
151 int MemoryAllocator::size_ = 0; | 273 int MemoryAllocator::size_ = 0; |
152 | 274 |
153 VirtualMemory* MemoryAllocator::initial_chunk_ = NULL; | 275 VirtualMemory* MemoryAllocator::initial_chunk_ = NULL; |
154 | 276 |
155 // 270 is an estimate based on the static default heap size of a pair of 256K | 277 // 270 is an estimate based on the static default heap size of a pair of 256K |
156 // semispaces and a 64M old generation. | 278 // semispaces and a 64M old generation. |
157 const int kEstimatedNumberOfChunks = 270; | 279 const int kEstimatedNumberOfChunks = 270; |
(...skipping 61 matching lines...)
219 capacity_ = 0; | 341 capacity_ = 0; |
220 size_ = 0; | 342 size_ = 0; |
221 max_nof_chunks_ = 0; | 343 max_nof_chunks_ = 0; |
222 } | 344 } |
223 | 345 |
224 | 346 |
225 void* MemoryAllocator::AllocateRawMemory(const size_t requested, | 347 void* MemoryAllocator::AllocateRawMemory(const size_t requested, |
226 size_t* allocated, | 348 size_t* allocated, |
227 Executability executable) { | 349 Executability executable) { |
228 if (size_ + static_cast<int>(requested) > capacity_) return NULL; | 350 if (size_ + static_cast<int>(requested) > capacity_) return NULL; |
229 | 351 void* mem; |
230 void* mem = OS::Allocate(requested, allocated, executable == EXECUTABLE); | 352 if (executable == EXECUTABLE && CodeRange::exists()) { |
| 353 mem = CodeRange::AllocateRawMemory(requested, allocated); |
| 354 } else { |
| 355 mem = OS::Allocate(requested, allocated, (executable == EXECUTABLE)); |
| 356 } |
231 int alloced = *allocated; | 357 int alloced = *allocated; |
232 size_ += alloced; | 358 size_ += alloced; |
233 Counters::memory_allocated.Increment(alloced); | 359 Counters::memory_allocated.Increment(alloced); |
234 return mem; | 360 return mem; |
235 } | 361 } |
236 | 362 |
237 | 363 |
238 void MemoryAllocator::FreeRawMemory(void* mem, size_t length) { | 364 void MemoryAllocator::FreeRawMemory(void* mem, size_t length) { |
239 OS::Free(mem, length); | 365 if (CodeRange::contains(static_cast<Address>(mem))) { |
| 366 CodeRange::FreeRawMemory(mem, length); |
| 367 } else { |
| 368 OS::Free(mem, length); |
| 369 } |
240 Counters::memory_allocated.Decrement(length); | 370 Counters::memory_allocated.Decrement(length); |
241 size_ -= length; | 371 size_ -= length; |
242 ASSERT(size_ >= 0); | 372 ASSERT(size_ >= 0); |
243 } | 373 } |
244 | 374 |
245 | 375 |
246 void* MemoryAllocator::ReserveInitialChunk(const size_t requested) { | 376 void* MemoryAllocator::ReserveInitialChunk(const size_t requested) { |
247 ASSERT(initial_chunk_ == NULL); | 377 ASSERT(initial_chunk_ == NULL); |
248 | 378 |
249 initial_chunk_ = new VirtualMemory(requested); | 379 initial_chunk_ = new VirtualMemory(requested); |
(...skipping 2400 matching lines...)
2650 reinterpret_cast<Object**>(object->address() | 2780 reinterpret_cast<Object**>(object->address() |
2651 + Page::kObjectAreaSize), | 2781 + Page::kObjectAreaSize), |
2652 allocation_top); | 2782 allocation_top); |
2653 PrintF("\n"); | 2783 PrintF("\n"); |
2654 } | 2784 } |
2655 } | 2785 } |
2656 } | 2786 } |
2657 #endif // DEBUG | 2787 #endif // DEBUG |
2658 | 2788 |
2659 } } // namespace v8::internal | 2789 } } // namespace v8::internal |
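
Note on the coalescing step in the new CodeRange::GetNextAllocationBlock above: when no block on the allocation list is large enough, the patch dumps the allocation list onto the free list, sorts it by start address, merges adjacent blocks, and reinstalls the merged blocks as the new allocation list. The following is a minimal, self-contained sketch of that merge step only, not V8's actual code: it uses std::vector, uintptr_t, and a hypothetical Block struct in place of v8::internal::List, Address (byte*), and CodeRange::FreeBlock.

// Simplified illustration of the free-block coalescing performed by
// CodeRange::GetNextAllocationBlock. Standalone types are hypothetical.
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

struct Block {
  uintptr_t start;
  size_t size;
};

// Sort blocks by start address and merge runs of blocks that are adjacent
// in the address space; returns the coalesced, sorted list.
std::vector<Block> Coalesce(std::vector<Block> free_blocks) {
  std::sort(free_blocks.begin(), free_blocks.end(),
            [](const Block& a, const Block& b) { return a.start < b.start; });
  std::vector<Block> merged_list;
  size_t i = 0;
  while (i < free_blocks.size()) {
    Block merged = free_blocks[i++];
    // Absorb every following block that starts exactly where the current
    // merged block ends.
    while (i < free_blocks.size() &&
           free_blocks[i].start == merged.start + merged.size) {
      merged.size += free_blocks[i].size;
      i++;
    }
    if (merged.size > 0) merged_list.push_back(merged);
  }
  return merged_list;
}

After this merge, the patch rescans the rebuilt allocation list for a block of at least the requested size and calls V8::FatalProcessOutOfMemory if none exists, i.e. when the code range is full or too fragmented.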