OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/heap/spaces.h" | 5 #include "src/heap/spaces.h" |
6 | 6 |
7 #include "src/base/bits.h" | 7 #include "src/base/bits.h" |
8 #include "src/base/platform/platform.h" | 8 #include "src/base/platform/platform.h" |
9 #include "src/full-codegen/full-codegen.h" | 9 #include "src/full-codegen/full-codegen.h" |
10 #include "src/heap/mark-compact.h" | 10 #include "src/heap/mark-compact.h" |
(...skipping 307 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
318 | 318 |
319 size_ = 0; | 319 size_ = 0; |
320 size_executable_ = 0; | 320 size_executable_ = 0; |
321 | 321 |
322 return true; | 322 return true; |
323 } | 323 } |
324 | 324 |
325 | 325 |
326 void MemoryAllocator::TearDown() { | 326 void MemoryAllocator::TearDown() { |
327 // Check that spaces were torn down before MemoryAllocator. | 327 // Check that spaces were torn down before MemoryAllocator. |
328 DCHECK(size_.Value() == 0); | 328 DCHECK(size_ == 0); |
329 // TODO(gc) this will be true again when we fix FreeMemory. | 329 // TODO(gc) this will be true again when we fix FreeMemory. |
330 // DCHECK(size_executable_ == 0); | 330 // DCHECK(size_executable_ == 0); |
331 capacity_ = 0; | 331 capacity_ = 0; |
332 capacity_executable_ = 0; | 332 capacity_executable_ = 0; |
333 } | 333 } |
334 | 334 |
335 | 335 |
336 bool MemoryAllocator::CommitMemory(Address base, size_t size, | 336 bool MemoryAllocator::CommitMemory(Address base, size_t size, |
337 Executability executable) { | 337 Executability executable) { |
338 if (!base::VirtualMemory::CommitRegion(base, size, | 338 if (!base::VirtualMemory::CommitRegion(base, size, |
339 executable == EXECUTABLE)) { | 339 executable == EXECUTABLE)) { |
340 return false; | 340 return false; |
341 } | 341 } |
342 UpdateAllocatedSpaceLimits(base, base + size); | 342 UpdateAllocatedSpaceLimits(base, base + size); |
343 return true; | 343 return true; |
344 } | 344 } |
345 | 345 |
346 | 346 |
347 void MemoryAllocator::FreeNewSpaceMemory(Address addr, | 347 void MemoryAllocator::FreeNewSpaceMemory(Address addr, |
348 base::VirtualMemory* reservation, | 348 base::VirtualMemory* reservation, |
349 Executability executable) { | 349 Executability executable) { |
350 LOG(isolate_, DeleteEvent("NewSpace", addr)); | 350 LOG(isolate_, DeleteEvent("NewSpace", addr)); |
351 | 351 |
352 DCHECK(reservation->IsReserved()); | 352 DCHECK(reservation->IsReserved()); |
353 const intptr_t size = static_cast<intptr_t>(reservation->size()); | 353 const size_t size = reservation->size(); |
354 DCHECK(size_.Value() >= size); | 354 DCHECK(size_ >= size); |
355 size_.Increment(-size); | 355 size_ -= size; |
356 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size)); | 356 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size)); |
357 FreeMemory(reservation, NOT_EXECUTABLE); | 357 FreeMemory(reservation, NOT_EXECUTABLE); |
358 } | 358 } |
359 | 359 |
360 | 360 |
361 void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation, | 361 void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation, |
362 Executability executable) { | 362 Executability executable) { |
363 // TODO(gc) make code_range part of memory allocator? | 363 // TODO(gc) make code_range part of memory allocator? |
364 // Code which is part of the code-range does not have its own VirtualMemory. | 364 // Code which is part of the code-range does not have its own VirtualMemory. |
365 DCHECK(isolate_->code_range() == NULL || | 365 DCHECK(isolate_->code_range() == NULL || |
(...skipping 22 matching lines...) Expand all Loading... |
388 DCHECK(result); | 388 DCHECK(result); |
389 } | 389 } |
390 } | 390 } |
391 | 391 |
392 | 392 |
393 Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment, | 393 Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment, |
394 base::VirtualMemory* controller) { | 394 base::VirtualMemory* controller) { |
395 base::VirtualMemory reservation(size, alignment); | 395 base::VirtualMemory reservation(size, alignment); |
396 | 396 |
397 if (!reservation.IsReserved()) return NULL; | 397 if (!reservation.IsReserved()) return NULL; |
398 size_.Increment(static_cast<intptr_t>(reservation.size())); | 398 size_ += reservation.size(); |
399 Address base = | 399 Address base = |
400 RoundUp(static_cast<Address>(reservation.address()), alignment); | 400 RoundUp(static_cast<Address>(reservation.address()), alignment); |
401 controller->TakeControl(&reservation); | 401 controller->TakeControl(&reservation); |
402 return base; | 402 return base; |
403 } | 403 } |
404 | 404 |
405 | 405 |
406 Address MemoryAllocator::AllocateAlignedMemory( | 406 Address MemoryAllocator::AllocateAlignedMemory( |
407 size_t reserve_size, size_t commit_size, size_t alignment, | 407 size_t reserve_size, size_t commit_size, size_t alignment, |
408 Executability executable, base::VirtualMemory* controller) { | 408 Executability executable, base::VirtualMemory* controller) { |
(...skipping 77 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
486 chunk->size_ = size; | 486 chunk->size_ = size; |
487 chunk->area_start_ = area_start; | 487 chunk->area_start_ = area_start; |
488 chunk->area_end_ = area_end; | 488 chunk->area_end_ = area_end; |
489 chunk->flags_ = 0; | 489 chunk->flags_ = 0; |
490 chunk->set_owner(owner); | 490 chunk->set_owner(owner); |
491 chunk->InitializeReservedMemory(); | 491 chunk->InitializeReservedMemory(); |
492 chunk->slots_buffer_ = NULL; | 492 chunk->slots_buffer_ = NULL; |
493 chunk->skip_list_ = NULL; | 493 chunk->skip_list_ = NULL; |
494 chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity; | 494 chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity; |
495 chunk->progress_bar_ = 0; | 495 chunk->progress_bar_ = 0; |
496 chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base)); | 496 chunk->high_water_mark_ = static_cast<intptr_t>(area_start - base); |
497 chunk->set_parallel_sweeping(SWEEPING_DONE); | 497 chunk->set_parallel_sweeping(SWEEPING_DONE); |
498 chunk->mutex_ = NULL; | 498 chunk->mutex_ = NULL; |
499 chunk->available_in_small_free_list_ = 0; | 499 chunk->available_in_small_free_list_ = 0; |
500 chunk->available_in_medium_free_list_ = 0; | 500 chunk->available_in_medium_free_list_ = 0; |
501 chunk->available_in_large_free_list_ = 0; | 501 chunk->available_in_large_free_list_ = 0; |
502 chunk->available_in_huge_free_list_ = 0; | 502 chunk->available_in_huge_free_list_ = 0; |
503 chunk->non_available_small_blocks_ = 0; | 503 chunk->non_available_small_blocks_ = 0; |
504 chunk->ResetLiveBytes(); | 504 chunk->ResetLiveBytes(); |
505 Bitmap::Clear(chunk); | 505 Bitmap::Clear(chunk); |
506 chunk->initialize_scan_on_scavenge(false); | 506 chunk->initialize_scan_on_scavenge(false); |
(...skipping 125 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
632 // | Reserved but not committed | | 632 // | Reserved but not committed | |
633 // +----------------------------+<- base + chunk_size | 633 // +----------------------------+<- base + chunk_size |
634 // | 634 // |
635 | 635 |
636 if (executable == EXECUTABLE) { | 636 if (executable == EXECUTABLE) { |
637 chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size, | 637 chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size, |
638 base::OS::CommitPageSize()) + | 638 base::OS::CommitPageSize()) + |
639 CodePageGuardSize(); | 639 CodePageGuardSize(); |
640 | 640 |
641 // Check executable memory limit. | 641 // Check executable memory limit. |
642 if ((size_executable_.Value() + static_cast<intptr_t>(chunk_size)) > | 642 if ((size_executable_ + chunk_size) > capacity_executable_) { |
643 capacity_executable_) { | |
644 LOG(isolate_, StringEvent("MemoryAllocator::AllocateRawMemory", | 643 LOG(isolate_, StringEvent("MemoryAllocator::AllocateRawMemory", |
645 "V8 Executable Allocation capacity exceeded")); | 644 "V8 Executable Allocation capacity exceeded")); |
646 return NULL; | 645 return NULL; |
647 } | 646 } |
648 | 647 |
649 // Size of header (not executable) plus area (executable). | 648 // Size of header (not executable) plus area (executable). |
650 size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size, | 649 size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size, |
651 base::OS::CommitPageSize()); | 650 base::OS::CommitPageSize()); |
652 // Allocate executable memory either from code range or from the | 651 // Allocate executable memory either from code range or from the |
653 // OS. | 652 // OS. |
654 #ifdef V8_TARGET_ARCH_MIPS64 | 653 #ifdef V8_TARGET_ARCH_MIPS64 |
655 // Use code range only for large object space on mips64 to keep address | 654 // Use code range only for large object space on mips64 to keep address |
656 // range within 256-MB memory region. | 655 // range within 256-MB memory region. |
657 if (isolate_->code_range() != NULL && isolate_->code_range()->valid() && | 656 if (isolate_->code_range() != NULL && isolate_->code_range()->valid() && |
658 reserve_area_size > CodePageAreaSize()) { | 657 reserve_area_size > CodePageAreaSize()) { |
659 #else | 658 #else |
660 if (isolate_->code_range() != NULL && isolate_->code_range()->valid()) { | 659 if (isolate_->code_range() != NULL && isolate_->code_range()->valid()) { |
661 #endif | 660 #endif |
662 base = isolate_->code_range()->AllocateRawMemory(chunk_size, commit_size, | 661 base = isolate_->code_range()->AllocateRawMemory(chunk_size, commit_size, |
663 &chunk_size); | 662 &chunk_size); |
664 DCHECK( | 663 DCHECK( |
665 IsAligned(reinterpret_cast<intptr_t>(base), MemoryChunk::kAlignment)); | 664 IsAligned(reinterpret_cast<intptr_t>(base), MemoryChunk::kAlignment)); |
666 if (base == NULL) return NULL; | 665 if (base == NULL) return NULL; |
667 size_.Increment(static_cast<intptr_t>(chunk_size)); | 666 size_ += chunk_size; |
668 // Update executable memory size. | 667 // Update executable memory size. |
669 size_executable_.Increment(static_cast<intptr_t>(chunk_size)); | 668 size_executable_ += chunk_size; |
670 } else { | 669 } else { |
671 base = AllocateAlignedMemory(chunk_size, commit_size, | 670 base = AllocateAlignedMemory(chunk_size, commit_size, |
672 MemoryChunk::kAlignment, executable, | 671 MemoryChunk::kAlignment, executable, |
673 &reservation); | 672 &reservation); |
674 if (base == NULL) return NULL; | 673 if (base == NULL) return NULL; |
675 // Update executable memory size. | 674 // Update executable memory size. |
676 size_executable_.Increment(static_cast<intptr_t>(chunk_size)); | 675 size_executable_ += reservation.size(); |
677 } | 676 } |
678 | 677 |
679 if (Heap::ShouldZapGarbage()) { | 678 if (Heap::ShouldZapGarbage()) { |
680 ZapBlock(base, CodePageGuardStartOffset()); | 679 ZapBlock(base, CodePageGuardStartOffset()); |
681 ZapBlock(base + CodePageAreaStartOffset(), commit_area_size); | 680 ZapBlock(base + CodePageAreaStartOffset(), commit_area_size); |
682 } | 681 } |
683 | 682 |
684 area_start = base + CodePageAreaStartOffset(); | 683 area_start = base + CodePageAreaStartOffset(); |
685 area_end = area_start + commit_area_size; | 684 area_end = area_start + commit_area_size; |
686 } else { | 685 } else { |
(...skipping 66 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
753 LOG(isolate_, DeleteEvent("MemoryChunk", chunk)); | 752 LOG(isolate_, DeleteEvent("MemoryChunk", chunk)); |
754 if (chunk->owner() != NULL) { | 753 if (chunk->owner() != NULL) { |
755 ObjectSpace space = | 754 ObjectSpace space = |
756 static_cast<ObjectSpace>(1 << chunk->owner()->identity()); | 755 static_cast<ObjectSpace>(1 << chunk->owner()->identity()); |
757 PerformAllocationCallback(space, kAllocationActionFree, chunk->size()); | 756 PerformAllocationCallback(space, kAllocationActionFree, chunk->size()); |
758 } | 757 } |
759 | 758 |
760 isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk), | 759 isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk), |
761 chunk->IsEvacuationCandidate()); | 760 chunk->IsEvacuationCandidate()); |
762 | 761 |
763 intptr_t size; | 762 size_t size; |
764 base::VirtualMemory* reservation = chunk->reserved_memory(); | 763 base::VirtualMemory* reservation = chunk->reserved_memory(); |
765 if (reservation->IsReserved()) { | 764 if (reservation->IsReserved()) { |
766 size = static_cast<intptr_t>(reservation->size()); | 765 size = reservation->size(); |
767 } else { | 766 } else { |
768 size = static_cast<intptr_t>(chunk->size()); | 767 size = chunk->size(); |
769 } | 768 } |
770 DCHECK(size_.Value() >= size); | 769 DCHECK(size_ >= size); |
771 size_.Increment(-size); | 770 size_ -= size; |
772 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size)); | 771 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size)); |
773 | 772 |
774 if (chunk->executable() == EXECUTABLE) { | 773 if (chunk->executable() == EXECUTABLE) { |
775 DCHECK(size_executable_.Value() >= size); | 774 DCHECK(size_executable_ >= size); |
776 size_executable_.Increment(-size); | 775 size_executable_ -= size; |
777 } | 776 } |
778 | 777 |
779 chunk->SetFlag(MemoryChunk::PRE_FREED); | 778 chunk->SetFlag(MemoryChunk::PRE_FREED); |
780 } | 779 } |
781 | 780 |
782 | 781 |
783 void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) { | 782 void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) { |
784 DCHECK(chunk->IsFlagSet(MemoryChunk::PRE_FREED)); | 783 DCHECK(chunk->IsFlagSet(MemoryChunk::PRE_FREED)); |
785 chunk->ReleaseAllocatedMemory(); | 784 chunk->ReleaseAllocatedMemory(); |
786 | 785 |
(...skipping 79 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
866 memory_allocation_callbacks_.Remove(i); | 865 memory_allocation_callbacks_.Remove(i); |
867 return; | 866 return; |
868 } | 867 } |
869 } | 868 } |
870 UNREACHABLE(); | 869 UNREACHABLE(); |
871 } | 870 } |
872 | 871 |
873 | 872 |
874 #ifdef DEBUG | 873 #ifdef DEBUG |
875 void MemoryAllocator::ReportStatistics() { | 874 void MemoryAllocator::ReportStatistics() { |
876 intptr_t size = Size(); | 875 float pct = static_cast<float>(capacity_ - size_) / capacity_; |
877 float pct = static_cast<float>(capacity_ - size) / capacity_; | |
878 PrintF(" capacity: %" V8_PTR_PREFIX | 876 PrintF(" capacity: %" V8_PTR_PREFIX |
879 "d" | 877 "d" |
880 ", used: %" V8_PTR_PREFIX | 878 ", used: %" V8_PTR_PREFIX |
881 "d" | 879 "d" |
882 ", available: %%%d\n\n", | 880 ", available: %%%d\n\n", |
883 capacity_, size, static_cast<int>(pct * 100)); | 881 capacity_, size_, static_cast<int>(pct * 100)); |
884 } | 882 } |
885 #endif | 883 #endif |
886 | 884 |
887 | 885 |
888 int MemoryAllocator::CodePageGuardStartOffset() { | 886 int MemoryAllocator::CodePageGuardStartOffset() { |
889 // We are guarding code pages: the first OS page after the header | 887 // We are guarding code pages: the first OS page after the header |
890 // will be protected as non-writable. | 888 // will be protected as non-writable. |
891 return RoundUp(Page::kObjectStartOffset, base::OS::CommitPageSize()); | 889 return RoundUp(Page::kObjectStartOffset, base::OS::CommitPageSize()); |
892 } | 890 } |
893 | 891 |
(...skipping 2293 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3187 object->ShortPrint(); | 3185 object->ShortPrint(); |
3188 PrintF("\n"); | 3186 PrintF("\n"); |
3189 } | 3187 } |
3190 printf(" --------------------------------------\n"); | 3188 printf(" --------------------------------------\n"); |
3191 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); | 3189 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); |
3192 } | 3190 } |
3193 | 3191 |
3194 #endif // DEBUG | 3192 #endif // DEBUG |
3195 } // namespace internal | 3193 } // namespace internal |
3196 } // namespace v8 | 3194 } // namespace v8 |
OLD | NEW |