OLD | NEW |
1 // Copyright 2006-2008 the V8 project authors. All rights reserved. | 1 // Copyright 2006-2008 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 256 matching lines...)
267 } | 267 } |
268 | 268 |
269 | 269 |
270 // ----------------------------------------------------------------------------- | 270 // ----------------------------------------------------------------------------- |
271 // MemoryAllocator | 271 // MemoryAllocator |
272 // | 272 // |
273 int MemoryAllocator::capacity_ = 0; | 273 int MemoryAllocator::capacity_ = 0; |
274 int MemoryAllocator::size_ = 0; | 274 int MemoryAllocator::size_ = 0; |
275 int MemoryAllocator::size_executable_ = 0; | 275 int MemoryAllocator::size_executable_ = 0; |
276 | 276 |
| 277 List<MemoryAllocator::MemoryAllocationCallbackRegistration> |
| 278 MemoryAllocator::memory_allocation_callbacks_; |
| 279 |
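For reference, the registration record stored in this new list is presumably declared in spaces.h along the following lines; this is a sketch only, with the constructor and field names inferred from their uses later in this patch:

  struct MemoryAllocationCallbackRegistration {
    MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
                                         ObjectSpace space,
                                         AllocationAction action)
        : callback(callback), space(space), action(action) {}
    MemoryAllocationCallback callback;  // embedder-supplied hook
    ObjectSpace space;                  // bitmask of spaces to observe
    AllocationAction action;            // bitmask of actions to observe
  };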
277 VirtualMemory* MemoryAllocator::initial_chunk_ = NULL; | 280 VirtualMemory* MemoryAllocator::initial_chunk_ = NULL; |
278 | 281 |
279 // 270 is an estimate based on the static default heap size of a pair of 256K | 282 // 270 is an estimate based on the static default heap size of a pair of 256K |
280 // semispaces and a 64M old generation. | 283 // semispaces and a 64M old generation. |
281 const int kEstimatedNumberOfChunks = 270; | 284 const int kEstimatedNumberOfChunks = 270; |
282 List<MemoryAllocator::ChunkInfo> MemoryAllocator::chunks_( | 285 List<MemoryAllocator::ChunkInfo> MemoryAllocator::chunks_( |
283 kEstimatedNumberOfChunks); | 286 kEstimatedNumberOfChunks); |
284 List<int> MemoryAllocator::free_chunk_ids_(kEstimatedNumberOfChunks); | 287 List<int> MemoryAllocator::free_chunk_ids_(kEstimatedNumberOfChunks); |
285 int MemoryAllocator::max_nof_chunks_ = 0; | 288 int MemoryAllocator::max_nof_chunks_ = 0; |
286 int MemoryAllocator::top_ = 0; | 289 int MemoryAllocator::top_ = 0; |
287 | 290 |
288 | 291 |
289 void MemoryAllocator::Push(int free_chunk_id) { | 292 void MemoryAllocator::Push(int free_chunk_id) { |
290 ASSERT(max_nof_chunks_ > 0); | 293 ASSERT(max_nof_chunks_ > 0); |
291 ASSERT(top_ < max_nof_chunks_); | 294 ASSERT(top_ < max_nof_chunks_); |
292 free_chunk_ids_[top_++] = free_chunk_id; | 295 free_chunk_ids_[top_++] = free_chunk_id; |
293 } | 296 } |
294 | 297 |
295 | 298 |
296 int MemoryAllocator::Pop() { | 299 int MemoryAllocator::Pop() { |
297 ASSERT(top_ > 0); | 300 ASSERT(top_ > 0); |
298 return free_chunk_ids_[--top_]; | 301 return free_chunk_ids_[--top_]; |
299 } | 302 } |
300 | 303 |
301 | 304 |
302 void *executable_memory_histogram = NULL; | |
303 | |
304 bool MemoryAllocator::Setup(int capacity) { | 305 bool MemoryAllocator::Setup(int capacity) { |
305 capacity_ = RoundUp(capacity, Page::kPageSize); | 306 capacity_ = RoundUp(capacity, Page::kPageSize); |
306 | 307 |
307 // Over-estimate the size of chunks_ array. It assumes the expansion of old | 308 // Over-estimate the size of chunks_ array. It assumes the expansion of old |
308 // space is always in the unit of a chunk (kChunkSize) except the last | 309 // space is always in the unit of a chunk (kChunkSize) except the last |
309 // expansion. | 310 // expansion. |
310 // | 311 // |
311 // Due to alignment, allocated space might be one page less than the | 312 // Due to alignment, allocated space might be one page less than the |
312 // required number (kPagesPerChunk) of pages for old spaces. | 313 // required number (kPagesPerChunk) of pages for old spaces. |
313 // | 314 // |
314 // Reserve two chunk ids for semispaces, one for map space, one for old | 315 // Reserve two chunk ids for semispaces, one for map space, one for old |
315 // space, and one for code space. | 316 // space, and one for code space. |
316 max_nof_chunks_ = (capacity_ / (kChunkSize - Page::kPageSize)) + 5; | 317 max_nof_chunks_ = (capacity_ / (kChunkSize - Page::kPageSize)) + 5; |
317 if (max_nof_chunks_ > kMaxNofChunks) return false; | 318 if (max_nof_chunks_ > kMaxNofChunks) return false; |
318 | 319 |
319 size_ = 0; | 320 size_ = 0; |
320 size_executable_ = 0; | 321 size_executable_ = 0; |
321 executable_memory_histogram = | |
322 StatsTable::CreateHistogram("V8.ExecutableMemoryMax", 0, MB * 512, 50); | |
323 ChunkInfo info; // uninitialized element. | 322 ChunkInfo info; // uninitialized element. |
324 for (int i = max_nof_chunks_ - 1; i >= 0; i--) { | 323 for (int i = max_nof_chunks_ - 1; i >= 0; i--) { |
325 chunks_.Add(info); | 324 chunks_.Add(info); |
326 free_chunk_ids_.Add(i); | 325 free_chunk_ids_.Add(i); |
327 } | 326 } |
328 top_ = max_nof_chunks_; | 327 top_ = max_nof_chunks_; |
329 return true; | 328 return true; |
330 } | 329 } |
331 | 330 |
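As a worked check of the bound computed in Setup (constants assumed for illustration; the real values live in spaces.h):

  // Assuming kChunkSize = 256 KB and Page::kPageSize = 8 KB, with a
  // capacity_ of 64 MB = 65536 KB:
  //   usable bytes per chunk >= kChunkSize - kPageSize = 248 KB
  //   max_nof_chunks_ = 65536 / 248 + 5 = 264 + 5 = 269
  // which lines up with the kEstimatedNumberOfChunks value of 270 above.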
332 | 331 |
(...skipping 26 matching lines...)
359 } | 358 } |
360 void* mem; | 359 void* mem; |
361 if (executable == EXECUTABLE && CodeRange::exists()) { | 360 if (executable == EXECUTABLE && CodeRange::exists()) { |
362 mem = CodeRange::AllocateRawMemory(requested, allocated); | 361 mem = CodeRange::AllocateRawMemory(requested, allocated); |
363 } else { | 362 } else { |
364 mem = OS::Allocate(requested, allocated, (executable == EXECUTABLE)); | 363 mem = OS::Allocate(requested, allocated, (executable == EXECUTABLE)); |
365 } | 364 } |
366 int alloced = static_cast<int>(*allocated); | 365 int alloced = static_cast<int>(*allocated); |
367 size_ += alloced; | 366 size_ += alloced; |
368 | 367 |
369 if (executable == EXECUTABLE) { | 368 if (executable == EXECUTABLE) size_executable_ += alloced; |
370 size_executable_ += alloced; | |
371 static int size_executable_max_observed_ = 0; | |
372 if (size_executable_max_observed_ < size_executable_) { | |
373 size_executable_max_observed_ = size_executable_; | |
374 StatsTable::AddHistogramSample(executable_memory_histogram, | |
375 size_executable_); | |
376 } | |
377 } | |
378 #ifdef DEBUG | 369 #ifdef DEBUG |
379 ZapBlock(reinterpret_cast<Address>(mem), alloced); | 370 ZapBlock(reinterpret_cast<Address>(mem), alloced); |
380 #endif | 371 #endif |
381 Counters::memory_allocated.Increment(alloced); | 372 Counters::memory_allocated.Increment(alloced); |
382 return mem; | 373 return mem; |
383 } | 374 } |
384 | 375 |
385 | 376 |
386 void MemoryAllocator::FreeRawMemory(void* mem, | 377 void MemoryAllocator::FreeRawMemory(void* mem, |
387 size_t length, | 378 size_t length, |
388 Executability executable) { | 379 Executability executable) { |
389 #ifdef DEBUG | 380 #ifdef DEBUG |
390 ZapBlock(reinterpret_cast<Address>(mem), length); | 381 ZapBlock(reinterpret_cast<Address>(mem), length); |
391 #endif | 382 #endif |
392 if (CodeRange::contains(static_cast<Address>(mem))) { | 383 if (CodeRange::contains(static_cast<Address>(mem))) { |
393 CodeRange::FreeRawMemory(mem, length); | 384 CodeRange::FreeRawMemory(mem, length); |
394 } else { | 385 } else { |
395 OS::Free(mem, length); | 386 OS::Free(mem, length); |
396 } | 387 } |
397 Counters::memory_allocated.Decrement(static_cast<int>(length)); | 388 Counters::memory_allocated.Decrement(static_cast<int>(length)); |
398 size_ -= static_cast<int>(length); | 389 size_ -= static_cast<int>(length); |
399 if (executable == EXECUTABLE) size_executable_ -= static_cast<int>(length); | 390 if (executable == EXECUTABLE) size_executable_ -= static_cast<int>(length); |
| 391 |
400 ASSERT(size_ >= 0); | 392 ASSERT(size_ >= 0); |
401 } | 393 } |
402 | 394 |
403 | 395 |
| 396 void MemoryAllocator::PerformAllocationCallback(ObjectSpace space, |
| 397 AllocationAction action, |
| 398 size_t size) { |
| 399 for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) { |
| 400 MemoryAllocationCallbackRegistration registration = |
| 401 memory_allocation_callbacks_[i]; |
| 402 if ((registration.space & space) == space && |
| 403 (registration.action & action) == action) |
| 404 registration.callback(space, action, static_cast<int>(size)); |
| 405 } |
| 406 } |
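The two mask tests above make a registration fire only when its filters cover the event, which also lets a single registration observe several spaces at once; an illustration, assuming the ObjectSpace constants are one-bit flags with kObjectSpaceAll as their union:

  // registration.space == kObjectSpaceAll, event space == kObjectSpaceLoSpace:
  //   (kObjectSpaceAll & kObjectSpaceLoSpace) == kObjectSpaceLoSpace -> fires
  // registration.space == kObjectSpaceCodeSpace, same event:
  //   (kObjectSpaceCodeSpace & kObjectSpaceLoSpace) == 0             -> skipped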
| 407 |
| 408 |
| 409 bool MemoryAllocator::MemoryAllocationCallbackRegistered( |
| 410 MemoryAllocationCallback callback) { |
| 411 for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) { |
| 412 if (memory_allocation_callbacks_[i].callback == callback) return true; |
| 413 } |
| 414 return false; |
| 415 } |
| 416 |
| 417 |
| 418 void MemoryAllocator::AddMemoryAllocationCallback( |
| 419 MemoryAllocationCallback callback, |
| 420 ObjectSpace space, |
| 421 AllocationAction action) { |
| 422 ASSERT(callback != NULL); |
| 423 MemoryAllocationCallbackRegistration registration(callback, space, action); |
| 424 ASSERT(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback)); |
| 425 memory_allocation_callbacks_.Add(registration); |
| 426 } |
| 427 |
| 428 |
| 429 void MemoryAllocator::RemoveMemoryAllocationCallback( |
| 430 MemoryAllocationCallback callback) { |
| 431 ASSERT(callback != NULL); |
| 432 for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) { |
| 433 if (memory_allocation_callbacks_[i].callback == callback) { |
| 434 memory_allocation_callbacks_.Remove(i); |
| 435 return; |
| 436 } |
| 437 } |
| 438 UNREACHABLE(); |
| 439 } |
| 440 |
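A minimal embedder-side sketch of how the hooks registered above would be used, assuming this change is also exposed through a public entry point (the V8::AddMemoryAllocationCallback name and its wiring are assumptions here, not shown in this patch):

  static int outstanding_bytes = 0;

  // Matches the callback shape invoked by PerformAllocationCallback above.
  static void OnV8MemoryEvent(ObjectSpace space, AllocationAction action,
                              int size) {
    // Allocate events add, free events subtract; a balanced shutdown
    // (e.g. LargeObjectSpace::TearDown below) drains this back to zero.
    outstanding_bytes += (action == kAllocationActionAllocate) ? size : -size;
  }

  void InstallMemoryHook() {
    V8::AddMemoryAllocationCallback(OnV8MemoryEvent,
                                    kObjectSpaceAll,
                                    kAllocationActionAll);
  }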
404 void* MemoryAllocator::ReserveInitialChunk(const size_t requested) { | 441 void* MemoryAllocator::ReserveInitialChunk(const size_t requested) { |
405 ASSERT(initial_chunk_ == NULL); | 442 ASSERT(initial_chunk_ == NULL); |
406 | 443 |
407 initial_chunk_ = new VirtualMemory(requested); | 444 initial_chunk_ = new VirtualMemory(requested); |
408 CHECK(initial_chunk_ != NULL); | 445 CHECK(initial_chunk_ != NULL); |
409 if (!initial_chunk_->IsReserved()) { | 446 if (!initial_chunk_->IsReserved()) { |
410 delete initial_chunk_; | 447 delete initial_chunk_; |
411 initial_chunk_ = NULL; | 448 initial_chunk_ = NULL; |
412 return NULL; | 449 return NULL; |
413 } | 450 } |
(...skipping 37 matching lines...)
451 *allocated_pages = PagesInChunk(static_cast<Address>(chunk), chunk_size); | 488 *allocated_pages = PagesInChunk(static_cast<Address>(chunk), chunk_size); |
452 if (*allocated_pages == 0) { | 489 if (*allocated_pages == 0) { |
453 FreeRawMemory(chunk, chunk_size, owner->executable()); | 490 FreeRawMemory(chunk, chunk_size, owner->executable()); |
454 LOG(DeleteEvent("PagedChunk", chunk)); | 491 LOG(DeleteEvent("PagedChunk", chunk)); |
455 return Page::FromAddress(NULL); | 492 return Page::FromAddress(NULL); |
456 } | 493 } |
457 | 494 |
458 int chunk_id = Pop(); | 495 int chunk_id = Pop(); |
459 chunks_[chunk_id].init(static_cast<Address>(chunk), chunk_size, owner); | 496 chunks_[chunk_id].init(static_cast<Address>(chunk), chunk_size, owner); |
460 | 497 |
| 498 ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity()); |
| 499 PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size); |
461 return InitializePagesInChunk(chunk_id, *allocated_pages, owner); | 500 return InitializePagesInChunk(chunk_id, *allocated_pages, owner); |
462 } | 501 } |
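The shift in the new hunk assumes the public ObjectSpace constants are one-bit masks indexed by the internal AllocationSpace enum, so the owner's identity converts directly into its mask; illustrative values (assumed, not shown in this patch):

  // NEW_SPACE == 0  ->  kObjectSpaceNewSpace == 1 << 0
  // LO_SPACE  == 6  ->  kObjectSpaceLoSpace  == 1 << 6
  // hence static_cast<ObjectSpace>(1 << owner->identity()) yields the mask.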
463 | 502 |
464 | 503 |
465 Page* MemoryAllocator::CommitPages(Address start, size_t size, | 504 Page* MemoryAllocator::CommitPages(Address start, size_t size, |
466 PagedSpace* owner, int* num_pages) { | 505 PagedSpace* owner, int* num_pages) { |
467 ASSERT(start != NULL); | 506 ASSERT(start != NULL); |
468 *num_pages = PagesInChunk(start, size); | 507 *num_pages = PagesInChunk(start, size); |
469 ASSERT(*num_pages > 0); | 508 ASSERT(*num_pages > 0); |
470 ASSERT(initial_chunk_ != NULL); | 509 ASSERT(initial_chunk_ != NULL); |
(...skipping 138 matching lines...)
609 // We cannot free a chunk contained in the initial chunk because it was not | 648 // We cannot free a chunk contained in the initial chunk because it was not |
610 // allocated with AllocateRawMemory. Instead we uncommit the virtual | 649 // allocated with AllocateRawMemory. Instead we uncommit the virtual |
611 // memory. | 650 // memory. |
612 if (InInitialChunk(c.address())) { | 651 if (InInitialChunk(c.address())) { |
613 // TODO(1240712): VirtualMemory::Uncommit has a return value which | 652 // TODO(1240712): VirtualMemory::Uncommit has a return value which |
614 // is ignored here. | 653 // is ignored here. |
615 initial_chunk_->Uncommit(c.address(), c.size()); | 654 initial_chunk_->Uncommit(c.address(), c.size()); |
616 Counters::memory_allocated.Decrement(static_cast<int>(c.size())); | 655 Counters::memory_allocated.Decrement(static_cast<int>(c.size())); |
617 } else { | 656 } else { |
618 LOG(DeleteEvent("PagedChunk", c.address())); | 657 LOG(DeleteEvent("PagedChunk", c.address())); |
| 658 ObjectSpace space = static_cast<ObjectSpace>(1 << c.owner()->identity()); |
| 659 size_t size = c.size(); |
619 FreeRawMemory(c.address(), c.size(), c.owner()->executable()); | 660 FreeRawMemory(c.address(), c.size(), c.owner()->executable()); |
| 661 PerformAllocationCallback(space, kAllocationActionFree, size); |
620 } | 662 } |
621 c.init(NULL, 0, NULL); | 663 c.init(NULL, 0, NULL); |
622 Push(chunk_id); | 664 Push(chunk_id); |
623 } | 665 } |
624 | 666 |
625 | 667 |
626 Page* MemoryAllocator::FindFirstPageInSameChunk(Page* p) { | 668 Page* MemoryAllocator::FindFirstPageInSameChunk(Page* p) { |
627 int chunk_id = GetChunkId(p); | 669 int chunk_id = GetChunkId(p); |
628 ASSERT(IsValidChunk(chunk_id)); | 670 ASSERT(IsValidChunk(chunk_id)); |
629 | 671 |
(...skipping 1977 matching lines...)
2607 void* mem = MemoryAllocator::AllocateRawMemory(requested, | 2649 void* mem = MemoryAllocator::AllocateRawMemory(requested, |
2608 chunk_size, | 2650 chunk_size, |
2609 executable); | 2651 executable); |
2610 if (mem == NULL) return NULL; | 2652 if (mem == NULL) return NULL; |
2611 LOG(NewEvent("LargeObjectChunk", mem, *chunk_size)); | 2653 LOG(NewEvent("LargeObjectChunk", mem, *chunk_size)); |
2612 if (*chunk_size < requested) { | 2654 if (*chunk_size < requested) { |
2613 MemoryAllocator::FreeRawMemory(mem, *chunk_size, executable); | 2655 MemoryAllocator::FreeRawMemory(mem, *chunk_size, executable); |
2614 LOG(DeleteEvent("LargeObjectChunk", mem)); | 2656 LOG(DeleteEvent("LargeObjectChunk", mem)); |
2615 return NULL; | 2657 return NULL; |
2616 } | 2658 } |
| 2659 ObjectSpace space = |
| 2660 (executable == EXECUTABLE) ? kObjectSpaceCodeSpace : kObjectSpaceLoSpace; |
| 2661 MemoryAllocator::PerformAllocationCallback(space, |
| 2662 kAllocationActionAllocate, |
| 2663 *chunk_size); |
2617 return reinterpret_cast<LargeObjectChunk*>(mem); | 2664 return reinterpret_cast<LargeObjectChunk*>(mem); |
2618 } | 2665 } |
2619 | 2666 |
2620 | 2667 |
2621 int LargeObjectChunk::ChunkSizeFor(int size_in_bytes) { | 2668 int LargeObjectChunk::ChunkSizeFor(int size_in_bytes) { |
2622 int os_alignment = static_cast<int>(OS::AllocateAlignment()); | 2669 int os_alignment = static_cast<int>(OS::AllocateAlignment()); |
2623 if (os_alignment < Page::kPageSize) | 2670 if (os_alignment < Page::kPageSize) |
2624 size_in_bytes += (Page::kPageSize - os_alignment); | 2671 size_in_bytes += (Page::kPageSize - os_alignment); |
2625 return size_in_bytes + Page::kObjectStartOffset; | 2672 return size_in_bytes + Page::kObjectStartOffset; |
2626 } | 2673 } |
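A worked instance of the padding logic (alignment values assumed for illustration; OS::AllocateAlignment() is platform-dependent):

  // size_in_bytes = 100000, OS::AllocateAlignment() = 4 KB, kPageSize = 8 KB:
  //   pad   = kPageSize - os_alignment = 8192 - 4096 = 4096
  //   total = 100000 + 4096 + Page::kObjectStartOffset
  // The page-minus-alignment slack guarantees that a page-aligned object
  // start still fits inside whatever chunk the OS hands back.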
(...skipping 17 matching lines...)
2644 | 2691 |
2645 | 2692 |
2646 void LargeObjectSpace::TearDown() { | 2693 void LargeObjectSpace::TearDown() { |
2647 while (first_chunk_ != NULL) { | 2694 while (first_chunk_ != NULL) { |
2648 LargeObjectChunk* chunk = first_chunk_; | 2695 LargeObjectChunk* chunk = first_chunk_; |
2649 first_chunk_ = first_chunk_->next(); | 2696 first_chunk_ = first_chunk_->next(); |
2650 LOG(DeleteEvent("LargeObjectChunk", chunk->address())); | 2697 LOG(DeleteEvent("LargeObjectChunk", chunk->address())); |
2651 Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize)); | 2698 Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize)); |
2652 Executability executable = | 2699 Executability executable = |
2653 page->IsPageExecutable() ? EXECUTABLE : NOT_EXECUTABLE; | 2700 page->IsPageExecutable() ? EXECUTABLE : NOT_EXECUTABLE; |
| 2701 ObjectSpace space = kObjectSpaceLoSpace; |
| 2702 if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace; |
| 2703 size_t size = chunk->size(); |
2654 MemoryAllocator::FreeRawMemory(chunk->address(), | 2704 MemoryAllocator::FreeRawMemory(chunk->address(), |
2655 chunk->size(), | 2705 chunk->size(), |
2656 executable); | 2706 executable); |
| 2707 MemoryAllocator::PerformAllocationCallback(space, kAllocationActionFree, |
| 2708 size); |
2657 } | 2709 } |
2658 | 2710 |
2659 size_ = 0; | 2711 size_ = 0; |
2660 page_count_ = 0; | 2712 page_count_ = 0; |
2661 } | 2713 } |
2662 | 2714 |
2663 | 2715 |
2664 #ifdef ENABLE_HEAP_PROTECTION | 2716 #ifdef ENABLE_HEAP_PROTECTION |
2665 | 2717 |
2666 void LargeObjectSpace::Protect() { | 2718 void LargeObjectSpace::Protect() { |
(...skipping 193 matching lines...)
2860 if (previous == NULL) { | 2912 if (previous == NULL) { |
2861 first_chunk_ = current; | 2913 first_chunk_ = current; |
2862 } else { | 2914 } else { |
2863 previous->set_next(current); | 2915 previous->set_next(current); |
2864 } | 2916 } |
2865 | 2917 |
2866 // Free the chunk. | 2918 // Free the chunk. |
2867 MarkCompactCollector::ReportDeleteIfNeeded(object); | 2919 MarkCompactCollector::ReportDeleteIfNeeded(object); |
2868 size_ -= static_cast<int>(chunk_size); | 2920 size_ -= static_cast<int>(chunk_size); |
2869 page_count_--; | 2921 page_count_--; |
| 2922 ObjectSpace space = kObjectSpaceLoSpace; |
| 2923 if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace; |
2870 MemoryAllocator::FreeRawMemory(chunk_address, chunk_size, executable); | 2924 MemoryAllocator::FreeRawMemory(chunk_address, chunk_size, executable); |
| 2925 MemoryAllocator::PerformAllocationCallback(space, kAllocationActionFree, |
| 2926 chunk_size); |
2871 LOG(DeleteEvent("LargeObjectChunk", chunk_address)); | 2927 LOG(DeleteEvent("LargeObjectChunk", chunk_address)); |
2872 } | 2928 } |
2873 } | 2929 } |
2874 } | 2930 } |
2875 | 2931 |
2876 | 2932 |
2877 bool LargeObjectSpace::Contains(HeapObject* object) { | 2933 bool LargeObjectSpace::Contains(HeapObject* object) { |
2878 Address address = object->address(); | 2934 Address address = object->address(); |
2879 if (Heap::new_space()->Contains(address)) { | 2935 if (Heap::new_space()->Contains(address)) { |
2880 return false; | 2936 return false; |
(...skipping 95 matching lines...)
2976 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) { | 3032 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) { |
2977 if (obj->IsCode()) { | 3033 if (obj->IsCode()) { |
2978 Code* code = Code::cast(obj); | 3034 Code* code = Code::cast(obj); |
2979 code_kind_statistics[code->kind()] += code->Size(); | 3035 code_kind_statistics[code->kind()] += code->Size(); |
2980 } | 3036 } |
2981 } | 3037 } |
2982 } | 3038 } |
2983 #endif // DEBUG | 3039 #endif // DEBUG |
2984 | 3040 |
2985 } } // namespace v8::internal | 3041 } } // namespace v8::internal |