OLD | NEW |
1 // Copyright 2006-2008 the V8 project authors. All rights reserved. | 1 // Copyright 2006-2008 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 122 matching lines...) |
133 #endif | 133 #endif |
134 stop_page_ = space->last_page_; | 134 stop_page_ = space->last_page_; |
135 break; | 135 break; |
136 } | 136 } |
137 } | 137 } |
138 | 138 |
139 | 139 |
140 // ----------------------------------------------------------------------------- | 140 // ----------------------------------------------------------------------------- |
141 // Page | 141 // Page |
142 | 142 |
143 #ifdef DEBUG | |
144 Page::RSetState Page::rset_state_ = Page::IN_USE; | |
145 #endif | |
146 | |
147 // ----------------------------------------------------------------------------- | 143 // ----------------------------------------------------------------------------- |
148 // CodeRange | 144 // CodeRange |
149 | 145 |
150 List<CodeRange::FreeBlock> CodeRange::free_list_(0); | 146 CodeRangeData::CodeRangeData() |
151 List<CodeRange::FreeBlock> CodeRange::allocation_list_(0); | 147 : free_list_(0), allocation_list_(0), current_allocation_block_index_(0), |
152 int CodeRange::current_allocation_block_index_ = 0; | 148 code_range_(NULL) { |
153 VirtualMemory* CodeRange::code_range_ = NULL; | 149 } |
154 | |
155 | 150 |
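This CL moves CodeRange's former statics into a per-context struct fetched via v8_context(); the struct itself is declared in a header outside this diff. A minimal sketch, with member names and types assumed solely from their usage below:

struct CodeRangeData {
  // A contiguous run of addresses inside the reserved code range.
  struct FreeBlock {
    FreeBlock(Address start_arg, size_t size_arg)
        : start(start_arg), size(size_arg) {}
    FreeBlock(void* start_arg, size_t size_arg)  // Used by FreeRawMemory.
        : start(static_cast<Address>(start_arg)), size(size_arg) {}
    Address start;
    size_t size;
  };
  List<FreeBlock> free_list_;        // Blocks returned by FreeRawMemory.
  List<FreeBlock> allocation_list_;  // Sorted, merged blocks to allocate from.
  int current_allocation_block_index_;
  VirtualMemory* code_range_;        // The reserved virtual address range.
  CodeRangeData();
};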
156 bool CodeRange::Setup(const size_t requested) { | 151 bool CodeRange::Setup(const size_t requested) { |
157 ASSERT(code_range_ == NULL); | 152 CodeRangeData& data = v8_context()->code_range_data_; |
| 153 ASSERT(data.code_range_ == NULL); |
158 | 154 |
159 code_range_ = new VirtualMemory(requested); | 155 data.code_range_ = new VirtualMemory(requested); |
160 CHECK(code_range_ != NULL); | 156 CHECK(data.code_range_ != NULL); |
161 if (!code_range_->IsReserved()) { | 157 if (!data.code_range_->IsReserved()) { |
162 delete code_range_; | 158 delete data.code_range_; |
163 code_range_ = NULL; | 159 data.code_range_ = NULL; |
164 return false; | 160 return false; |
165 } | 161 } |
166 | 162 |
167 // We are sure that we have mapped a block of requested addresses. | 163 // We are sure that we have mapped a block of requested addresses. |
168 ASSERT(code_range_->size() == requested); | 164 ASSERT(data.code_range_->size() == requested); |
169 LOG(NewEvent("CodeRange", code_range_->address(), requested)); | 165 LOG(NewEvent("CodeRange", data.code_range_->address(), requested)); |
170 allocation_list_.Add(FreeBlock(code_range_->address(), code_range_->size())); | 166 data.allocation_list_.Add( |
171 current_allocation_block_index_ = 0; | 167 CodeRangeData::FreeBlock( |
| 168 data.code_range_->address(), |
| 169 data.code_range_->size())); |
| 170 data.current_allocation_block_index_ = 0; |
172 return true; | 171 return true; |
173 } | 172 } |
174 | 173 |
175 | 174 |
176 int CodeRange::CompareFreeBlockAddress(const FreeBlock* left, | 175 int CodeRange::CompareFreeBlockAddress(const CodeRangeData::FreeBlock* left, |
177 const FreeBlock* right) { | 176 const CodeRangeData::FreeBlock* right) { |
178 // The entire point of CodeRange is that the difference between two | 177 // The entire point of CodeRange is that the difference between two |
179 // addresses in the range can be represented as a signed 32-bit int, | 178 // addresses in the range can be represented as a signed 32-bit int, |
180 // so the cast is semantically correct. | 179 // so the cast is semantically correct. |
181 return static_cast<int>(left->start - right->start); | 180 return static_cast<int>(left->start - right->start); |
182 } | 181 } |
183 | 182 |
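An illustration of the comment above, with invented values: the reserved range is sized so that the distance between any two addresses inside it fits a signed 32-bit int, even on a 64-bit host where a general pointer difference does not.

// left->start  = base + 512 * MB
// right->start = base + 16 * MB
// left->start - right->start = 496 MB, comfortably below 2^31,
// so static_cast<int> cannot truncate.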
184 | 183 |
185 void CodeRange::GetNextAllocationBlock(size_t requested) { | 184 void CodeRange::GetNextAllocationBlock(size_t requested) { |
186 for (current_allocation_block_index_++; | 185 CodeRangeData& data = v8_context()->code_range_data_; |
187 current_allocation_block_index_ < allocation_list_.length(); | 186 for (data.current_allocation_block_index_++; |
188 current_allocation_block_index_++) { | 187 data.current_allocation_block_index_ < data.allocation_list_.length(); |
189 if (requested <= allocation_list_[current_allocation_block_index_].size) { | 188 data.current_allocation_block_index_++) { |
| 189 if (requested <= |
| 190 data.allocation_list_[data.current_allocation_block_index_].size) { |
190 return; // Found a large enough allocation block. | 191 return; // Found a large enough allocation block. |
191 } | 192 } |
192 } | 193 } |
193 | 194 |
194 // Sort and merge the free blocks on the free list and the allocation list. | 195 // Sort and merge the free blocks on the free list and the allocation list. |
195 free_list_.AddAll(allocation_list_); | 196 data.free_list_.AddAll(data.allocation_list_); |
196 allocation_list_.Clear(); | 197 data.allocation_list_.Clear(); |
197 free_list_.Sort(&CompareFreeBlockAddress); | 198 data.free_list_.Sort(&CompareFreeBlockAddress); |
198 for (int i = 0; i < free_list_.length();) { | 199 for (int i = 0; i < data.free_list_.length();) { |
199 FreeBlock merged = free_list_[i]; | 200 CodeRangeData::FreeBlock merged = data.free_list_[i]; |
200 i++; | 201 i++; |
201 // Add adjacent free blocks to the current merged block. | 202 // Add adjacent free blocks to the current merged block. |
202 while (i < free_list_.length() && | 203 while (i < data.free_list_.length() && |
203 free_list_[i].start == merged.start + merged.size) { | 204 data.free_list_[i].start == merged.start + merged.size) { |
204 merged.size += free_list_[i].size; | 205 merged.size += data.free_list_[i].size; |
205 i++; | 206 i++; |
206 } | 207 } |
207 if (merged.size > 0) { | 208 if (merged.size > 0) { |
208 allocation_list_.Add(merged); | 209 data.allocation_list_.Add(merged); |
209 } | 210 } |
210 } | 211 } |
211 free_list_.Clear(); | 212 data.free_list_.Clear(); |
212 | 213 |
213 for (current_allocation_block_index_ = 0; | 214 for (data.current_allocation_block_index_ = 0; |
214 current_allocation_block_index_ < allocation_list_.length(); | 215 data.current_allocation_block_index_ < data.allocation_list_.length(); |
215 current_allocation_block_index_++) { | 216 data.current_allocation_block_index_++) { |
216 if (requested <= allocation_list_[current_allocation_block_index_].size) { | 217 if (requested <= |
| 218 data.allocation_list_[data.current_allocation_block_index_].size) { |
217 return; // Found a large enough allocation block. | 219 return; // Found a large enough allocation block. |
218 } | 220 } |
219 } | 221 } |
220 | 222 |
221 // Code range is full or too fragmented. | 223 // Code range is full or too fragmented. |
222 V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock"); | 224 V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock"); |
223 } | 225 } |
224 | 226 |
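For intuition about the sort-and-merge pass in GetNextAllocationBlock, a worked trace with invented block values:

// free_list_ after Sort(&CompareFreeBlockAddress), three blocks:
//   { start: 0x1000, size: 0x400 }
//   { start: 0x1400, size: 0x400 }   // adjacent: 0x1000 + 0x400 == 0x1400
//   { start: 0x2000, size: 0x1000 }  // not adjacent to the previous block
// The merge loop emits two entries onto allocation_list_:
//   { start: 0x1000, size: 0x800 }
//   { start: 0x2000, size: 0x1000 }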
225 | 227 |
226 | 228 |
227 void* CodeRange::AllocateRawMemory(const size_t requested, size_t* allocated) { | 229 void* CodeRange::AllocateRawMemory(const size_t requested, size_t* allocated) { |
228 ASSERT(current_allocation_block_index_ < allocation_list_.length()); | 230 CodeRangeData& data = v8_context()->code_range_data_; |
229 if (requested > allocation_list_[current_allocation_block_index_].size) { | 231 ASSERT(data.current_allocation_block_index_ < data.allocation_list_.length()); |
| 232 if (requested > |
| 233 data.allocation_list_[data.current_allocation_block_index_].size) { |
230 // Find an allocation block large enough. This function call may | 234 // Find an allocation block large enough. This function call may |
231 // call V8::FatalProcessOutOfMemory if it cannot find a large enough block. | 235 // call V8::FatalProcessOutOfMemory if it cannot find a large enough block. |
232 GetNextAllocationBlock(requested); | 236 GetNextAllocationBlock(requested); |
233 } | 237 } |
234 // Commit the requested memory at the start of the current allocation block. | 238 // Commit the requested memory at the start of the current allocation block. |
235 *allocated = RoundUp(requested, Page::kPageSize); | 239 *allocated = RoundUp(requested, Page::kPageSize); |
236 FreeBlock current = allocation_list_[current_allocation_block_index_]; | 240 CodeRangeData::FreeBlock current = |
| 241 data.allocation_list_[data.current_allocation_block_index_]; |
237 if (*allocated >= current.size - Page::kPageSize) { | 242 if (*allocated >= current.size - Page::kPageSize) { |
238 // Don't leave a small free block, useless for a large object or chunk. | 243 // Don't leave a small free block, useless for a large object or chunk. |
239 *allocated = current.size; | 244 *allocated = current.size; |
240 } | 245 } |
241 ASSERT(*allocated <= current.size); | 246 ASSERT(*allocated <= current.size); |
242 if (!code_range_->Commit(current.start, *allocated, true)) { | 247 if (!data.code_range_->Commit(current.start, *allocated, true)) { |
243 *allocated = 0; | 248 *allocated = 0; |
244 return NULL; | 249 return NULL; |
245 } | 250 } |
246 allocation_list_[current_allocation_block_index_].start += *allocated; | 251 data.allocation_list_[data.current_allocation_block_index_].start += |
247 allocation_list_[current_allocation_block_index_].size -= *allocated; | 252 *allocated; |
| 253 data.allocation_list_[data.current_allocation_block_index_].size -= |
| 254 *allocated; |
248 if (*allocated == current.size) { | 255 if (*allocated == current.size) { |
249 GetNextAllocationBlock(0); // This block is used up, get the next one. | 256 GetNextAllocationBlock(0); // This block is used up, get the next one. |
250 } | 257 } |
251 return current.start; | 258 return current.start; |
252 } | 259 } |
253 | 260 |
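A trace of the small-remainder rule in AllocateRawMemory above ("Don't leave a small free block"), with invented sizes:

// current.size = 5 * Page::kPageSize; requested rounds up to 4 pages:
//   4 pages >= (5 - 1) pages, so *allocated becomes the full 5 pages and
//   no 1-page fragment is left behind on the allocation list.
// With current.size = 6 pages instead, 4 >= 5 is false and a usable
// 2-page remainder stays at the front of the current block.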
254 | 261 |
255 void CodeRange::FreeRawMemory(void* address, size_t length) { | 262 void CodeRange::FreeRawMemory(void* address, size_t length) { |
256 free_list_.Add(FreeBlock(address, length)); | 263 CodeRangeData& data = v8_context()->code_range_data_; |
257 code_range_->Uncommit(address, length); | 264 data.free_list_.Add(CodeRangeData::FreeBlock(address, length)); |
| 265 data.code_range_->Uncommit(address, length); |
258 } | 266 } |
259 | 267 |
260 | 268 |
261 void CodeRange::TearDown() { | 269 void CodeRange::TearDown() { |
262 delete code_range_; // Frees all memory in the virtual memory range. | 270 CodeRangeData& data = v8_context()->code_range_data_; |
263 code_range_ = NULL; | 271 delete data.code_range_; // Frees all memory in the virtual memory range. |
264 free_list_.Free(); | 272 data.code_range_ = NULL; |
265 allocation_list_.Free(); | 273 data.free_list_.Free(); |
| 274 data.allocation_list_.Free(); |
266 } | 275 } |
267 | 276 |
268 | 277 |
269 // ----------------------------------------------------------------------------- | 278 // ----------------------------------------------------------------------------- |
270 // MemoryAllocator | 279 // MemoryAllocator |
271 // | 280 // |
272 int MemoryAllocator::capacity_ = 0; | |
273 int MemoryAllocator::size_ = 0; | |
274 | |
275 VirtualMemory* MemoryAllocator::initial_chunk_ = NULL; | |
276 | 281 |
277 // 270 is an estimate based on the static default heap size of a pair of 256K | 282 // 270 is an estimate based on the static default heap size of a pair of 256K |
278 // semispaces and a 64M old generation. | 283 // semispaces and a 64M old generation. |
279 const int kEstimatedNumberOfChunks = 270; | 284 const int kEstimatedNumberOfChunks = 270; |
280 List<MemoryAllocator::ChunkInfo> MemoryAllocator::chunks_( | |
281 kEstimatedNumberOfChunks); | |
282 List<int> MemoryAllocator::free_chunk_ids_(kEstimatedNumberOfChunks); | |
283 int MemoryAllocator::max_nof_chunks_ = 0; | |
284 int MemoryAllocator::top_ = 0; | |
285 | 285 |
| 286 MemoryAllocatorData::MemoryAllocatorData() |
 | 287 : capacity_(0), size_(0), initial_chunk_(NULL), chunks_(kEstimatedNumberOfChunks), |
 | 288 free_chunk_ids_(kEstimatedNumberOfChunks), max_nof_chunks_(0), top_(0) { |
| 289 } |
286 | 290 |
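Likewise, MemoryAllocatorData is declared outside this diff; a sketch inferred from the accesses in this file, member names and types assumed (note that the old statics preallocated both lists with kEstimatedNumberOfChunks, which the constructor above preserves):

struct MemoryAllocatorData {
  // Bookkeeping for one allocated chunk of pages.
  class ChunkInfo {
   public:
    void init(Address a, size_t s, PagedSpace* o) {
      address_ = a; size_ = s; owner_ = o;
    }
    Address address() { return address_; }
    size_t size() { return size_; }
   private:
    Address address_;
    size_t size_;
    PagedSpace* owner_;
  };
  int capacity_;                  // Maximum bytes this allocator may hand out.
  int size_;                      // Bytes currently allocated.
  VirtualMemory* initial_chunk_;
  List<ChunkInfo> chunks_;        // Indexed by chunk id.
  List<int> free_chunk_ids_;      // Stack of unused chunk ids; top_ is its top.
  int max_nof_chunks_;
  int top_;
  MemoryAllocatorData();
};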
287 void MemoryAllocator::Push(int free_chunk_id) { | 291 void MemoryAllocator::Push(int free_chunk_id) { |
288 ASSERT(max_nof_chunks_ > 0); | 292 MemoryAllocatorData& data = v8_context()->memory_allocator_data_; |
289 ASSERT(top_ < max_nof_chunks_); | 293 ASSERT(data.max_nof_chunks_ > 0); |
290 free_chunk_ids_[top_++] = free_chunk_id; | 294 ASSERT(data.top_ < data.max_nof_chunks_); |
| 295 data.free_chunk_ids_[data.top_++] = free_chunk_id; |
291 } | 296 } |
292 | 297 |
293 | 298 |
294 int MemoryAllocator::Pop() { | 299 int MemoryAllocator::Pop() { |
295 ASSERT(top_ > 0); | 300 MemoryAllocatorData& data = v8_context()->memory_allocator_data_; |
296 return free_chunk_ids_[--top_]; | 301 ASSERT(data.top_ > 0); |
| 302 return data.free_chunk_ids_[--data.top_]; |
297 } | 303 } |
298 | 304 |
299 | 305 |
300 bool MemoryAllocator::Setup(int capacity) { | 306 bool MemoryAllocator::Setup(int capacity) { |
301 capacity_ = RoundUp(capacity, Page::kPageSize); | 307 MemoryAllocatorData& data = v8_context()->memory_allocator_data_; |
| 308 data.capacity_ = RoundUp(capacity, Page::kPageSize); |
302 | 309 |
303 // Over-estimate the size of chunks_ array. It assumes the expansion of old | 310 // Over-estimate the size of chunks_ array. It assumes the expansion of old |
304 // space is always in the unit of a chunk (kChunkSize) except the last | 311 // space is always in the unit of a chunk (kChunkSize) except the last |
305 // expansion. | 312 // expansion. |
306 // | 313 // |
307 // Due to alignment, allocated space might be one page less than required | 314 // Due to alignment, allocated space might be one page less than required |
308 // number (kPagesPerChunk) of pages for old spaces. | 315 // number (kPagesPerChunk) of pages for old spaces. |
309 // | 316 // |
310 // Reserve two chunk ids for semispaces, one for map space, one for old | 317 // Reserve two chunk ids for semispaces, one for map space, one for old |
311 // space, and one for code space. | 318 // space, and one for code space. |
312 max_nof_chunks_ = (capacity_ / (kChunkSize - Page::kPageSize)) + 5; | 319 data.max_nof_chunks_ = (data.capacity_ / (kChunkSize - Page::kPageSize)) + 5; |
313 if (max_nof_chunks_ > kMaxNofChunks) return false; | 320 if (data.max_nof_chunks_ > kMaxNofChunks) return false; |
314 | 321 |
315 size_ = 0; | 322 data.size_ = 0; |
316 ChunkInfo info; // uninitialized element. | 323 MemoryAllocatorData::ChunkInfo info; // uninitialized element. |
317 for (int i = max_nof_chunks_ - 1; i >= 0; i--) { | 324 for (int i = data.max_nof_chunks_ - 1; i >= 0; i--) { |
318 chunks_.Add(info); | 325 data.chunks_.Add(info); |
319 free_chunk_ids_.Add(i); | 326 data.free_chunk_ids_.Add(i); |
320 } | 327 } |
321 top_ = max_nof_chunks_; | 328 data.top_ = data.max_nof_chunks_; |
322 return true; | 329 return true; |
323 } | 330 } |
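A worked instance of the over-estimate computed in Setup above (illustrative numbers, not necessarily V8's build-time constants):

// capacity_       = 64 MB
// kChunkSize      = 1 MB, Page::kPageSize = 8 KB
// max_nof_chunks_ = 64 MB / (1 MB - 8 KB) + 5
//                 = 67108864 / 1040384 + 5 = 64 + 5 = 69
// Each chunk is charged one page of alignment slop, and the 5 extra ids
// cover the two semispaces, map space, old space and code space noted above.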
324 | 331 |
325 | 332 |
326 void MemoryAllocator::TearDown() { | 333 void MemoryAllocator::TearDown() { |
327 for (int i = 0; i < max_nof_chunks_; i++) { | 334 MemoryAllocatorData& data = v8_context()->memory_allocator_data_; |
328 if (chunks_[i].address() != NULL) DeleteChunk(i); | 335 for (int i = 0; i < data.max_nof_chunks_; i++) { |
| 336 if (data.chunks_[i].address() != NULL) DeleteChunk(i); |
329 } | 337 } |
330 chunks_.Clear(); | 338 data.chunks_.Clear(); |
331 free_chunk_ids_.Clear(); | 339 data.free_chunk_ids_.Clear(); |
332 | 340 |
333 if (initial_chunk_ != NULL) { | 341 if (data.initial_chunk_ != NULL) { |
334 LOG(DeleteEvent("InitialChunk", initial_chunk_->address())); | 342 LOG(DeleteEvent("InitialChunk", data.initial_chunk_->address())); |
335 delete initial_chunk_; | 343 delete data.initial_chunk_; |
336 initial_chunk_ = NULL; | 344 data.initial_chunk_ = NULL; |
337 } | 345 } |
338 | 346 |
339 ASSERT(top_ == max_nof_chunks_); // all chunks are free | 347 ASSERT(data.top_ == data.max_nof_chunks_); // all chunks are free |
340 top_ = 0; | 348 data.top_ = 0; |
341 capacity_ = 0; | 349 data.capacity_ = 0; |
342 size_ = 0; | 350 data.size_ = 0; |
343 max_nof_chunks_ = 0; | 351 data.max_nof_chunks_ = 0; |
344 } | 352 } |
345 | 353 |
346 | 354 |
347 void* MemoryAllocator::AllocateRawMemory(const size_t requested, | 355 void* MemoryAllocator::AllocateRawMemory(const size_t requested, |
348 size_t* allocated, | 356 size_t* allocated, |
349 Executability executable) { | 357 Executability executable) { |
350 if (size_ + static_cast<int>(requested) > capacity_) return NULL; | 358 MemoryAllocatorData& data = v8_context()->memory_allocator_data_; |
| 359 if (data.size_ + static_cast<int>(requested) > data.capacity_) return NULL; |
351 void* mem; | 360 void* mem; |
352 if (executable == EXECUTABLE && CodeRange::exists()) { | 361 if (executable == EXECUTABLE && CodeRange::exists()) { |
353 mem = CodeRange::AllocateRawMemory(requested, allocated); | 362 mem = CodeRange::AllocateRawMemory(requested, allocated); |
354 } else { | 363 } else { |
355 mem = OS::Allocate(requested, allocated, (executable == EXECUTABLE)); | 364 mem = OS::Allocate(requested, allocated, (executable == EXECUTABLE)); |
356 } | 365 } |
357 int alloced = static_cast<int>(*allocated); | 366 int alloced = static_cast<int>(*allocated); |
358 size_ += alloced; | 367 data.size_ += alloced; |
359 Counters::memory_allocated.Increment(alloced); | 368 INCREMENT_COUNTER(memory_allocated, alloced); |
360 return mem; | 369 return mem; |
361 } | 370 } |
362 | 371 |
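INCREMENT_COUNTER and DECREMENT_COUNTER are introduced by this CL but defined elsewhere. One plausible shape, assuming they simply wrap the old Counters calls (pure assumption; the real macros may instead route through the current context's counters):

// Hypothetical expansion -- not the actual definitions from this CL.
#define INCREMENT_COUNTER(name, value) Counters::name.Increment(value)
#define DECREMENT_COUNTER(name, value) Counters::name.Decrement(value)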
363 | 372 |
364 void MemoryAllocator::FreeRawMemory(void* mem, size_t length) { | 373 void MemoryAllocator::FreeRawMemory(void* mem, size_t length) { |
365 if (CodeRange::contains(static_cast<Address>(mem))) { | 374 if (CodeRange::contains(static_cast<Address>(mem))) { |
366 CodeRange::FreeRawMemory(mem, length); | 375 CodeRange::FreeRawMemory(mem, length); |
367 } else { | 376 } else { |
368 OS::Free(mem, length); | 377 OS::Free(mem, length); |
369 } | 378 } |
370 Counters::memory_allocated.Decrement(static_cast<int>(length)); | 379 DECREMENT_COUNTER(memory_allocated, static_cast<int>(length)); |
371 size_ -= static_cast<int>(length); | 380 MemoryAllocatorData& data = v8_context()->memory_allocator_data_; |
372 ASSERT(size_ >= 0); | 381 data.size_ -= static_cast<int>(length); |
| 382 ASSERT(data.size_ >= 0); |
373 } | 383 } |
374 | 384 |
375 | 385 |
376 void* MemoryAllocator::ReserveInitialChunk(const size_t requested) { | 386 void* MemoryAllocator::ReserveInitialChunk(const size_t requested) { |
377 ASSERT(initial_chunk_ == NULL); | 387 MemoryAllocatorData& data = v8_context()->memory_allocator_data_; |
| 388 ASSERT(data.initial_chunk_ == NULL); |
378 | 389 |
379 initial_chunk_ = new VirtualMemory(requested); | 390 data.initial_chunk_ = new VirtualMemory(requested); |
380 CHECK(initial_chunk_ != NULL); | 391 CHECK(data.initial_chunk_ != NULL); |
381 if (!initial_chunk_->IsReserved()) { | 392 if (!data.initial_chunk_->IsReserved()) { |
382 delete initial_chunk_; | 393 delete data.initial_chunk_; |
383 initial_chunk_ = NULL; | 394 data.initial_chunk_ = NULL; |
384 return NULL; | 395 return NULL; |
385 } | 396 } |
386 | 397 |
387 // We are sure that we have mapped a block of requested addresses. | 398 // We are sure that we have mapped a block of requested addresses. |
388 ASSERT(initial_chunk_->size() == requested); | 399 ASSERT(data.initial_chunk_->size() == requested); |
389 LOG(NewEvent("InitialChunk", initial_chunk_->address(), requested)); | 400 LOG(NewEvent("InitialChunk", data.initial_chunk_->address(), requested)); |
390 size_ += static_cast<int>(requested); | 401 data.size_ += static_cast<int>(requested); |
391 return initial_chunk_->address(); | 402 return data.initial_chunk_->address(); |
392 } | 403 } |
393 | 404 |
394 | 405 |
395 static int PagesInChunk(Address start, size_t size) { | 406 static int PagesInChunk(Address start, size_t size) { |
396 // The first page starts on the first page-aligned address from start onward | 407 // The first page starts on the first page-aligned address from start onward |
397 // and the last page ends on the last page-aligned address before | 408 // and the last page ends on the last page-aligned address before |
398 // start+size. Page::kPageSize is a power of two so we can divide by | 409 // start+size. Page::kPageSize is a power of two so we can divide by |
399 // shifting. | 410 // shifting. |
400 return static_cast<int>((RoundDown(start + size, Page::kPageSize) | 411 return static_cast<int>((RoundDown(start + size, Page::kPageSize) |
401 - RoundUp(start, Page::kPageSize)) >> Page::kPageSizeBits); | 412 - RoundUp(start, Page::kPageSize)) >> Page::kPageSizeBits); |
402 } | 413 } |
403 | 414 |
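A worked example of the page-count computation above (invented addresses, assuming 8 KB pages, i.e. Page::kPageSizeBits == 13):

// start = 0x3F00, size = 0x8000, Page::kPageSize = 0x2000:
//   RoundUp(start, kPageSize)          = 0x4000  // first whole page begins
//   RoundDown(start + size, kPageSize) = 0xA000  // last whole page ends
//   (0xA000 - 0x4000) >> 13            = 3 pages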
404 | 415 |
405 Page* MemoryAllocator::AllocatePages(int requested_pages, int* allocated_pages, | 416 Page* MemoryAllocator::AllocatePages(int requested_pages, int* allocated_pages, |
406 PagedSpace* owner) { | 417 PagedSpace* owner) { |
407 if (requested_pages <= 0) return Page::FromAddress(NULL); | 418 if (requested_pages <= 0) return Page::FromAddress(NULL); |
408 size_t chunk_size = requested_pages * Page::kPageSize; | 419 size_t chunk_size = requested_pages * Page::kPageSize; |
409 | 420 |
| 421 MemoryAllocatorData& data = v8_context()->memory_allocator_data_; |
410 // There is not enough space to guarantee the desired number of pages can | 422 // There is not enough space to guarantee the desired number of pages can |
411 // be allocated. | 423 // be allocated. |
412 if (size_ + static_cast<int>(chunk_size) > capacity_) { | 424 if (data.size_ + static_cast<int>(chunk_size) > data.capacity_) { |
413 // Request as many pages as we can. | 425 // Request as many pages as we can. |
414 chunk_size = capacity_ - size_; | 426 chunk_size = data.capacity_ - data.size_; |
415 requested_pages = static_cast<int>(chunk_size >> Page::kPageSizeBits); | 427 requested_pages = static_cast<int>(chunk_size >> Page::kPageSizeBits); |
416 | 428 |
417 if (requested_pages <= 0) return Page::FromAddress(NULL); | 429 if (requested_pages <= 0) return Page::FromAddress(NULL); |
418 } | 430 } |
419 void* chunk = AllocateRawMemory(chunk_size, &chunk_size, owner->executable()); | 431 void* chunk = AllocateRawMemory(chunk_size, &chunk_size, owner->executable()); |
420 if (chunk == NULL) return Page::FromAddress(NULL); | 432 if (chunk == NULL) return Page::FromAddress(NULL); |
421 LOG(NewEvent("PagedChunk", chunk, chunk_size)); | 433 LOG(NewEvent("PagedChunk", chunk, chunk_size)); |
422 | 434 |
423 *allocated_pages = PagesInChunk(static_cast<Address>(chunk), chunk_size); | 435 *allocated_pages = PagesInChunk(static_cast<Address>(chunk), chunk_size); |
424 if (*allocated_pages == 0) { | 436 if (*allocated_pages == 0) { |
425 FreeRawMemory(chunk, chunk_size); | 437 FreeRawMemory(chunk, chunk_size); |
426 LOG(DeleteEvent("PagedChunk", chunk)); | 438 LOG(DeleteEvent("PagedChunk", chunk)); |
427 return Page::FromAddress(NULL); | 439 return Page::FromAddress(NULL); |
428 } | 440 } |
429 | 441 |
430 int chunk_id = Pop(); | 442 int chunk_id = Pop(); |
431 chunks_[chunk_id].init(static_cast<Address>(chunk), chunk_size, owner); | 443 data.chunks_[chunk_id].init(static_cast<Address>(chunk), chunk_size, owner); |
432 | 444 |
433 return InitializePagesInChunk(chunk_id, *allocated_pages, owner); | 445 return InitializePagesInChunk(chunk_id, *allocated_pages, owner); |
434 } | 446 } |
435 | 447 |
436 | 448 |
437 Page* MemoryAllocator::CommitPages(Address start, size_t size, | 449 Page* MemoryAllocator::CommitPages(Address start, size_t size, |
438 PagedSpace* owner, int* num_pages) { | 450 PagedSpace* owner, int* num_pages) { |
| 451 MemoryAllocatorData& data = v8_context()->memory_allocator_data_; |
439 ASSERT(start != NULL); | 452 ASSERT(start != NULL); |
440 *num_pages = PagesInChunk(start, size); | 453 *num_pages = PagesInChunk(start, size); |
441 ASSERT(*num_pages > 0); | 454 ASSERT(*num_pages > 0); |
442 ASSERT(initial_chunk_ != NULL); | 455 ASSERT(data.initial_chunk_ != NULL); |
443 ASSERT(InInitialChunk(start)); | 456 ASSERT(InInitialChunk(start)); |
444 ASSERT(InInitialChunk(start + size - 1)); | 457 ASSERT(InInitialChunk(start + size - 1)); |
445 if (!initial_chunk_->Commit(start, size, owner->executable() == EXECUTABLE)) { | 458 if (!data.initial_chunk_->Commit(start, size, |
 | 459 owner->executable() == EXECUTABLE)) {
446 return Page::FromAddress(NULL); | 460 return Page::FromAddress(NULL); |
447 } | 461 } |
448 Counters::memory_allocated.Increment(static_cast<int>(size)); | 462 INCREMENT_COUNTER(memory_allocated, static_cast<int>(size)); |
449 | 463 |
450 // So long as we correctly overestimated the number of chunks we should not | 464 // So long as we correctly overestimated the number of chunks we should not |
451 // run out of chunk ids. | 465 // run out of chunk ids. |
452 CHECK(!OutOfChunkIds()); | 466 CHECK(!OutOfChunkIds()); |
453 int chunk_id = Pop(); | 467 int chunk_id = Pop(); |
454 chunks_[chunk_id].init(start, size, owner); | 468 data.chunks_[chunk_id].init(start, size, owner); |
455 return InitializePagesInChunk(chunk_id, *num_pages, owner); | 469 return InitializePagesInChunk(chunk_id, *num_pages, owner); |
456 } | 470 } |
457 | 471 |
458 | 472 |
459 bool MemoryAllocator::CommitBlock(Address start, | 473 bool MemoryAllocator::CommitBlock(Address start, |
460 size_t size, | 474 size_t size, |
461 Executability executable) { | 475 Executability executable) { |
| 476 MemoryAllocatorData& data = v8_context()->memory_allocator_data_; |
462 ASSERT(start != NULL); | 477 ASSERT(start != NULL); |
463 ASSERT(size > 0); | 478 ASSERT(size > 0); |
464 ASSERT(initial_chunk_ != NULL); | 479 ASSERT(data.initial_chunk_ != NULL); |
465 ASSERT(InInitialChunk(start)); | 480 ASSERT(InInitialChunk(start)); |
466 ASSERT(InInitialChunk(start + size - 1)); | 481 ASSERT(InInitialChunk(start + size - 1)); |
467 | 482 |
468 if (!initial_chunk_->Commit(start, size, executable)) return false; | 483 if (!data.initial_chunk_->Commit(start, size, executable)) return false; |
469 Counters::memory_allocated.Increment(static_cast<int>(size)); | 484 INCREMENT_COUNTER(memory_allocated, static_cast<int>(size)); |
470 return true; | 485 return true; |
471 } | 486 } |
472 | 487 |
473 bool MemoryAllocator::UncommitBlock(Address start, size_t size) { | 488 bool MemoryAllocator::UncommitBlock(Address start, size_t size) { |
| 489 MemoryAllocatorData& data = v8_context()->memory_allocator_data_; |
474 ASSERT(start != NULL); | 490 ASSERT(start != NULL); |
475 ASSERT(size > 0); | 491 ASSERT(size > 0); |
476 ASSERT(initial_chunk_ != NULL); | 492 ASSERT(data.initial_chunk_ != NULL); |
477 ASSERT(InInitialChunk(start)); | 493 ASSERT(InInitialChunk(start)); |
478 ASSERT(InInitialChunk(start + size - 1)); | 494 ASSERT(InInitialChunk(start + size - 1)); |
479 | 495 |
480 if (!initial_chunk_->Uncommit(start, size)) return false; | 496 if (!data.initial_chunk_->Uncommit(start, size)) return false; |
481 Counters::memory_allocated.Decrement(static_cast<int>(size)); | 497 DECREMENT_COUNTER(memory_allocated, static_cast<int>(size)); |
482 return true; | 498 return true; |
483 } | 499 } |
484 | 500 |
485 Page* MemoryAllocator::InitializePagesInChunk(int chunk_id, int pages_in_chunk, | 501 Page* MemoryAllocator::InitializePagesInChunk(int chunk_id, int pages_in_chunk, |
486 PagedSpace* owner) { | 502 PagedSpace* owner) { |
487 ASSERT(IsValidChunk(chunk_id)); | 503 ASSERT(IsValidChunk(chunk_id)); |
488 ASSERT(pages_in_chunk > 0); | 504 ASSERT(pages_in_chunk > 0); |
489 | 505 |
490 Address chunk_start = chunks_[chunk_id].address(); | 506 MemoryAllocatorData& data = v8_context()->memory_allocator_data_; |
| 507 Address chunk_start = data.chunks_[chunk_id].address(); |
491 | 508 |
492 Address low = RoundUp(chunk_start, Page::kPageSize); | 509 Address low = RoundUp(chunk_start, Page::kPageSize); |
493 | 510 |
494 #ifdef DEBUG | 511 #ifdef DEBUG |
495 size_t chunk_size = chunks_[chunk_id].size(); | 512 size_t chunk_size = data.chunks_[chunk_id].size(); |
496 Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize); | 513 Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize); |
497 ASSERT(pages_in_chunk <= | 514 ASSERT(pages_in_chunk <= |
498 ((OffsetFrom(high) - OffsetFrom(low)) / Page::kPageSize)); | 515 ((OffsetFrom(high) - OffsetFrom(low)) / Page::kPageSize)); |
499 #endif | 516 #endif |
500 | 517 |
501 Address page_addr = low; | 518 Address page_addr = low; |
502 for (int i = 0; i < pages_in_chunk; i++) { | 519 for (int i = 0; i < pages_in_chunk; i++) { |
503 Page* p = Page::FromAddress(page_addr); | 520 Page* p = Page::FromAddress(page_addr); |
504 p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id; | 521 p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id; |
505 p->is_normal_page = 1; | 522 p->is_normal_page = 1; |
(...skipping 34 matching lines...) |
540 | 557 |
541 // Free the current chunk. | 558 // Free the current chunk. |
542 DeleteChunk(chunk_id); | 559 DeleteChunk(chunk_id); |
543 } | 560 } |
544 | 561 |
545 return page_to_return; | 562 return page_to_return; |
546 } | 563 } |
547 | 564 |
548 | 565 |
549 void MemoryAllocator::DeleteChunk(int chunk_id) { | 566 void MemoryAllocator::DeleteChunk(int chunk_id) { |
| 567 MemoryAllocatorData& data = v8_context()->memory_allocator_data_; |
550 ASSERT(IsValidChunk(chunk_id)); | 568 ASSERT(IsValidChunk(chunk_id)); |
551 | 569 |
552 ChunkInfo& c = chunks_[chunk_id]; | 570 MemoryAllocatorData::ChunkInfo& c = data.chunks_[chunk_id]; |
553 | 571 |
554 // We cannot free a chunk contained in the initial chunk because it was not | 572 // We cannot free a chunk contained in the initial chunk because it was not |
555 // allocated with AllocateRawMemory. Instead we uncommit the virtual | 573 // allocated with AllocateRawMemory. Instead we uncommit the virtual |
556 // memory. | 574 // memory. |
557 if (InInitialChunk(c.address())) { | 575 if (InInitialChunk(c.address())) { |
558 // TODO(1240712): VirtualMemory::Uncommit has a return value which | 576 // TODO(1240712): VirtualMemory::Uncommit has a return value which |
559 // is ignored here. | 577 // is ignored here. |
560 initial_chunk_->Uncommit(c.address(), c.size()); | 578 data.initial_chunk_->Uncommit(c.address(), c.size()); |
561 Counters::memory_allocated.Decrement(static_cast<int>(c.size())); | 579 DECREMENT_COUNTER(memory_allocated, static_cast<int>(c.size())); |
562 } else { | 580 } else { |
563 LOG(DeleteEvent("PagedChunk", c.address())); | 581 LOG(DeleteEvent("PagedChunk", c.address())); |
564 FreeRawMemory(c.address(), c.size()); | 582 FreeRawMemory(c.address(), c.size()); |
565 } | 583 } |
566 c.init(NULL, 0, NULL); | 584 c.init(NULL, 0, NULL); |
567 Push(chunk_id); | 585 Push(chunk_id); |
568 } | 586 } |
569 | 587 |
570 | 588 |
571 Page* MemoryAllocator::FindFirstPageInSameChunk(Page* p) { | 589 Page* MemoryAllocator::FindFirstPageInSameChunk(Page* p) { |
572 int chunk_id = GetChunkId(p); | 590 int chunk_id = GetChunkId(p); |
573 ASSERT(IsValidChunk(chunk_id)); | 591 ASSERT(IsValidChunk(chunk_id)); |
574 | 592 |
575 Address low = RoundUp(chunks_[chunk_id].address(), Page::kPageSize); | 593 Address low = RoundUp( |
| 594 v8_context()->memory_allocator_data_.chunks_[chunk_id].address(), |
| 595 Page::kPageSize); |
576 return Page::FromAddress(low); | 596 return Page::FromAddress(low); |
577 } | 597 } |
578 | 598 |
579 | 599 |
580 Page* MemoryAllocator::FindLastPageInSameChunk(Page* p) { | 600 Page* MemoryAllocator::FindLastPageInSameChunk(Page* p) { |
581 int chunk_id = GetChunkId(p); | 601 int chunk_id = GetChunkId(p); |
582 ASSERT(IsValidChunk(chunk_id)); | 602 ASSERT(IsValidChunk(chunk_id)); |
583 | 603 |
584 Address chunk_start = chunks_[chunk_id].address(); | 604 MemoryAllocatorData& data = v8_context()->memory_allocator_data_; |
585 size_t chunk_size = chunks_[chunk_id].size(); | 605 Address chunk_start = data.chunks_[chunk_id].address(); |
| 606 size_t chunk_size = data.chunks_[chunk_id].size(); |
586 | 607 |
587 Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize); | 608 Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize); |
588 ASSERT(chunk_start <= p->address() && p->address() < high); | 609 ASSERT(chunk_start <= p->address() && p->address() < high); |
589 | 610 |
590 return Page::FromAddress(high - Page::kPageSize); | 611 return Page::FromAddress(high - Page::kPageSize); |
591 } | 612 } |
592 | 613 |
593 | 614 |
594 #ifdef DEBUG | 615 #ifdef DEBUG |
595 void MemoryAllocator::ReportStatistics() { | 616 void MemoryAllocator::ReportStatistics() { |
596 float pct = static_cast<float>(capacity_ - size_) / capacity_; | 617 MemoryAllocatorData& data = v8_context()->memory_allocator_data_; |
| 618 float pct = static_cast<float>(data.capacity_ - data.size_) / data.capacity_; |
597 PrintF(" capacity: %d, used: %d, available: %%%d\n\n", | 619 PrintF(" capacity: %d, used: %d, available: %%%d\n\n", |
598 capacity_, size_, static_cast<int>(pct*100)); | 620 data.capacity_, data.size_, static_cast<int>(pct*100)); |
599 } | 621 } |
600 #endif | 622 #endif |
601 | 623 |
602 | 624 |
603 // ----------------------------------------------------------------------------- | 625 // ----------------------------------------------------------------------------- |
604 // PagedSpace implementation | 626 // PagedSpace implementation |
605 | 627 |
606 PagedSpace::PagedSpace(int max_capacity, | 628 PagedSpace::PagedSpace(int max_capacity, |
607 AllocationSpace id, | 629 AllocationSpace id, |
608 Executability executable) | 630 Executability executable) |
(...skipping 2182 matching lines...) |
2791 reinterpret_cast<Object**>(object->address() | 2813 reinterpret_cast<Object**>(object->address() |
2792 + Page::kObjectAreaSize), | 2814 + Page::kObjectAreaSize), |
2793 allocation_top); | 2815 allocation_top); |
2794 PrintF("\n"); | 2816 PrintF("\n"); |
2795 } | 2817 } |
2796 } | 2818 } |
2797 } | 2819 } |
2798 #endif // DEBUG | 2820 #endif // DEBUG |
2799 | 2821 |
2800 } } // namespace v8::internal | 2822 } } // namespace v8::internal |