| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/heap/spaces.h" | 5 #include "src/heap/spaces.h" |
| 6 | 6 |
| 7 #include <utility> | 7 #include <utility> |
| 8 | 8 |
| 9 #include "src/base/bits.h" | 9 #include "src/base/bits.h" |
| 10 #include "src/base/platform/platform.h" | 10 #include "src/base/platform/platform.h" |
| (...skipping 428 matching lines...) |
| 439 USE(result); | 439 USE(result); |
| 440 DCHECK(result); | 440 DCHECK(result); |
| 441 } | 441 } |
| 442 } | 442 } |
| 443 | 443 |
| 444 Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment, | 444 Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment, |
| 445 base::VirtualMemory* controller) { | 445 base::VirtualMemory* controller) { |
| 446 base::VirtualMemory reservation(size, alignment); | 446 base::VirtualMemory reservation(size, alignment); |
| 447 | 447 |
| 448 if (!reservation.IsReserved()) return NULL; | 448 if (!reservation.IsReserved()) return NULL; |
| 449 size_.Increment(static_cast<intptr_t>(reservation.size())); | 449 size_.Increment(reservation.size()); |
| 450 Address base = | 450 Address base = |
| 451 RoundUp(static_cast<Address>(reservation.address()), alignment); | 451 RoundUp(static_cast<Address>(reservation.address()), alignment); |
| 452 controller->TakeControl(&reservation); | 452 controller->TakeControl(&reservation); |
| 453 return base; | 453 return base; |
| 454 } | 454 } |
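
The recurring change in this patch is replacing `Increment(-static_cast<intptr_t>(x))` on signed counters with `Decrement(x)` on unsigned `size_t` counters. A minimal sketch of that pattern is below; the class is hypothetical and only illustrates the idea, it is not V8's actual `base::AtomicNumber` from src/base/atomic-utils.h:

```cpp
// Sketch of an unsigned atomic byte counter with an explicit Decrement,
// mirroring how size_ and size_executable_ are used in this patch.
#include <atomic>
#include <cassert>
#include <cstddef>

class AtomicSizeCounter {
 public:
  void Increment(size_t delta) { value_.fetch_add(delta); }
  void Decrement(size_t delta) {
    // With size_t there is no negating a delta; underflow would wrap to a
    // huge value, so guard against it in debug builds.
    assert(value_.load() >= delta);
    value_.fetch_sub(delta);
  }
  size_t Value() const { return value_.load(); }

 private:
  std::atomic<size_t> value_{0};
};
```
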
| 455 | 455 |
| 456 Address MemoryAllocator::AllocateAlignedMemory( | 456 Address MemoryAllocator::AllocateAlignedMemory( |
| 457 size_t reserve_size, size_t commit_size, size_t alignment, | 457 size_t reserve_size, size_t commit_size, size_t alignment, |
| 458 Executability executable, base::VirtualMemory* controller) { | 458 Executability executable, base::VirtualMemory* controller) { |
| 459 DCHECK(commit_size <= reserve_size); | 459 DCHECK(commit_size <= reserve_size); |
| (...skipping 214 matching lines...) |
| 674 // | Reserved but not committed | | 674 // | Reserved but not committed | |
| 675 // +----------------------------+<- base + chunk_size | 675 // +----------------------------+<- base + chunk_size |
| 676 // | 676 // |
| 677 | 677 |
| 678 if (executable == EXECUTABLE) { | 678 if (executable == EXECUTABLE) { |
| 679 chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size, | 679 chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size, |
| 680 base::OS::CommitPageSize()) + | 680 base::OS::CommitPageSize()) + |
| 681 CodePageGuardSize(); | 681 CodePageGuardSize(); |
| 682 | 682 |
| 683 // Check executable memory limit. | 683 // Check executable memory limit. |
| 684 if ((size_executable_.Value() + static_cast<intptr_t>(chunk_size)) > | 684 if ((size_executable_.Value() + chunk_size) > capacity_executable_) { |
| 685 capacity_executable_) { | |
| 686 LOG(isolate_, StringEvent("MemoryAllocator::AllocateRawMemory", | 685 LOG(isolate_, StringEvent("MemoryAllocator::AllocateRawMemory", |
| 687 "V8 Executable Allocation capacity exceeded")); | 686 "V8 Executable Allocation capacity exceeded")); |
| 688 return NULL; | 687 return NULL; |
| 689 } | 688 } |
| 690 | 689 |
| 691 // Size of header (not executable) plus area (executable). | 690 // Size of header (not executable) plus area (executable). |
| 692 size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size, | 691 size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size, |
| 693 base::OS::CommitPageSize()); | 692 base::OS::CommitPageSize()); |
| 694 // Allocate executable memory either from code range or from the | 693 // Allocate executable memory either from code range or from the |
| 695 // OS. | 694 // OS. |
| 696 #ifdef V8_TARGET_ARCH_MIPS64 | 695 #ifdef V8_TARGET_ARCH_MIPS64 |
| 697 // Use code range only for large object space on mips64 to keep address | 696 // Use code range only for large object space on mips64 to keep address |
| 698 // range within 256-MB memory region. | 697 // range within 256-MB memory region. |
| 699 if (code_range()->valid() && reserve_area_size > CodePageAreaSize()) { | 698 if (code_range()->valid() && reserve_area_size > CodePageAreaSize()) { |
| 700 #else | 699 #else |
| 701 if (code_range()->valid()) { | 700 if (code_range()->valid()) { |
| 702 #endif | 701 #endif |
| 703 base = | 702 base = |
| 704 code_range()->AllocateRawMemory(chunk_size, commit_size, &chunk_size); | 703 code_range()->AllocateRawMemory(chunk_size, commit_size, &chunk_size); |
| 705 DCHECK( | 704 DCHECK( |
| 706 IsAligned(reinterpret_cast<intptr_t>(base), MemoryChunk::kAlignment)); | 705 IsAligned(reinterpret_cast<intptr_t>(base), MemoryChunk::kAlignment)); |
| 707 if (base == NULL) return NULL; | 706 if (base == NULL) return NULL; |
| 708 size_.Increment(static_cast<intptr_t>(chunk_size)); | 707 size_.Increment(chunk_size); |
| 709 // Update executable memory size. | 708 // Update executable memory size. |
| 710 size_executable_.Increment(static_cast<intptr_t>(chunk_size)); | 709 size_executable_.Increment(chunk_size); |
| 711 } else { | 710 } else { |
| 712 base = AllocateAlignedMemory(chunk_size, commit_size, | 711 base = AllocateAlignedMemory(chunk_size, commit_size, |
| 713 MemoryChunk::kAlignment, executable, | 712 MemoryChunk::kAlignment, executable, |
| 714 &reservation); | 713 &reservation); |
| 715 if (base == NULL) return NULL; | 714 if (base == NULL) return NULL; |
| 716 // Update executable memory size. | 715 // Update executable memory size. |
| 717 size_executable_.Increment(static_cast<intptr_t>(reservation.size())); | 716 size_executable_.Increment(reservation.size()); |
| 718 } | 717 } |
| 719 | 718 |
| 720 if (Heap::ShouldZapGarbage()) { | 719 if (Heap::ShouldZapGarbage()) { |
| 721 ZapBlock(base, CodePageGuardStartOffset()); | 720 ZapBlock(base, CodePageGuardStartOffset()); |
| 722 ZapBlock(base + CodePageAreaStartOffset(), commit_area_size); | 721 ZapBlock(base + CodePageAreaStartOffset(), commit_area_size); |
| 723 } | 722 } |
| 724 | 723 |
| 725 area_start = base + CodePageAreaStartOffset(); | 724 area_start = base + CodePageAreaStartOffset(); |
| 726 area_end = area_start + commit_area_size; | 725 area_end = area_start + commit_area_size; |
| 727 } else { | 726 } else { |
| (...skipping 24 matching lines...) |
| 752 LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size)); | 751 LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size)); |
| 753 | 752 |
| 754 // We cannot use the last chunk in the address space because we would | 753 // We cannot use the last chunk in the address space because we would |
| 755 // overflow when comparing top and limit if this chunk is used for a | 754 // overflow when comparing top and limit if this chunk is used for a |
| 756 // linear allocation area. | 755 // linear allocation area. |
| 757 if ((reinterpret_cast<uintptr_t>(base) + chunk_size) == 0u) { | 756 if ((reinterpret_cast<uintptr_t>(base) + chunk_size) == 0u) { |
| 758 CHECK(!last_chunk_.IsReserved()); | 757 CHECK(!last_chunk_.IsReserved()); |
| 759 last_chunk_.TakeControl(&reservation); | 758 last_chunk_.TakeControl(&reservation); |
| 760 UncommitBlock(reinterpret_cast<Address>(last_chunk_.address()), | 759 UncommitBlock(reinterpret_cast<Address>(last_chunk_.address()), |
| 761 last_chunk_.size()); | 760 last_chunk_.size()); |
| 762 size_.Increment(-static_cast<intptr_t>(chunk_size)); | 761 size_.Decrement(chunk_size); |
| 763 if (executable == EXECUTABLE) { | 762 if (executable == EXECUTABLE) { |
| 764 size_executable_.Increment(-static_cast<intptr_t>(chunk_size)); | 763 size_executable_.Decrement(chunk_size); |
| 765 } | 764 } |
| 766 CHECK(last_chunk_.IsReserved()); | 765 CHECK(last_chunk_.IsReserved()); |
| 767 return AllocateChunk(reserve_area_size, commit_area_size, executable, | 766 return AllocateChunk(reserve_area_size, commit_area_size, executable, |
| 768 owner); | 767 owner); |
| 769 } | 768 } |
| 770 | 769 |
| 771 return MemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end, | 770 return MemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end, |
| 772 executable, owner, &reservation); | 771 executable, owner, &reservation); |
| 773 } | 772 } |
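
The "last chunk" special case above exists because unsigned address arithmetic wraps around. A tiny standalone sketch of the overflow the comment describes, with made-up addresses:

```cpp
// If a chunk ends exactly at the top of the address space, base + chunk_size
// wraps to 0, and a bump-pointer bounds check against `limit` breaks.
#include <cstdint>
#include <cstdio>

int main() {
  const uintptr_t base = ~uintptr_t{0} - 0xFFF;  // hypothetical chunk start
  const uintptr_t chunk_size = 0x1000;
  const uintptr_t limit = base + chunk_size;  // well-defined wrap to 0
  // Any `top <= limit` comparison now fails for every top inside the chunk.
  std::printf("limit = 0x%jx\n", static_cast<uintmax_t>(limit));  // prints 0
  return 0;
}
```
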
| 774 | 773 |
| (...skipping 55 matching lines...) |
| 830 // We do not allow partial shrink for code. | 829 // We do not allow partial shrink for code. |
| 831 DCHECK(chunk->executable() == NOT_EXECUTABLE); | 830 DCHECK(chunk->executable() == NOT_EXECUTABLE); |
| 832 | 831 |
| 833 intptr_t size; | 832 intptr_t size; |
| 834 base::VirtualMemory* reservation = chunk->reserved_memory(); | 833 base::VirtualMemory* reservation = chunk->reserved_memory(); |
| 835 DCHECK(reservation->IsReserved()); | 834 DCHECK(reservation->IsReserved()); |
| 836 size = static_cast<intptr_t>(reservation->size()); | 835 size = static_cast<intptr_t>(reservation->size()); |
| 837 | 836 |
| 838 size_t to_free_size = size - (start_free - chunk->address()); | 837 size_t to_free_size = size - (start_free - chunk->address()); |
| 839 | 838 |
| 840 DCHECK(size_.Value() >= static_cast<intptr_t>(to_free_size)); | 839 DCHECK(size_.Value() >= to_free_size); |
| 841 size_.Increment(-static_cast<intptr_t>(to_free_size)); | 840 size_.Decrement(to_free_size); |
| 842 isolate_->counters()->memory_allocated()->Decrement( | 841 isolate_->counters()->memory_allocated()->Decrement( |
| 843 static_cast<int>(to_free_size)); | 842 static_cast<int>(to_free_size)); |
| 844 chunk->set_size(size - to_free_size); | 843 chunk->set_size(size - to_free_size); |
| 845 | 844 |
| 846 reservation->ReleasePartial(start_free); | 845 reservation->ReleasePartial(start_free); |
| 847 } | 846 } |
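
For clarity, the `to_free_size` arithmetic in PartialFreeMemory releases the tail of the chunk starting at `start_free`. A worked example with hypothetical values:

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>

int main() {
  // Made-up values mirroring PartialFreeMemory's arithmetic.
  const uintptr_t chunk_address = 0x1000;  // chunk->address()
  const uintptr_t start_free = 0x3000;     // first byte to release
  const size_t size = 0x4000;              // reservation->size()
  const size_t to_free_size = size - (start_free - chunk_address);
  assert(to_free_size == 0x2000);          // tail [start_free, end) is freed
  assert(size - to_free_size == 0x2000);   // chunk now ends at start_free
  return 0;
}
```
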
| 848 | 847 |
| 849 void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) { | 848 void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) { |
| 850 DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED)); | 849 DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED)); |
| 851 LOG(isolate_, DeleteEvent("MemoryChunk", chunk)); | 850 LOG(isolate_, DeleteEvent("MemoryChunk", chunk)); |
| 852 | 851 |
| 853 isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk), | 852 isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk), |
| 854 chunk->IsEvacuationCandidate()); | 853 chunk->IsEvacuationCandidate()); |
| 855 | 854 |
| 856 intptr_t size; | |
| 857 base::VirtualMemory* reservation = chunk->reserved_memory(); | 855 base::VirtualMemory* reservation = chunk->reserved_memory(); |
| 858 if (reservation->IsReserved()) { | 856 const size_t size = |
| 859 size = static_cast<intptr_t>(reservation->size()); | 857 reservation->IsReserved() ? reservation->size() : chunk->size(); |
| 860 } else { | 858 DCHECK_GE(size_.Value(), static_cast<size_t>(size)); |
| 861 size = static_cast<intptr_t>(chunk->size()); | 859 size_.Decrement(size); |
| 862 } | |
| 863 DCHECK(size_.Value() >= size); | |
| 864 size_.Increment(-size); | |
| 865 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size)); | 860 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size)); |
| 866 | |
| 867 if (chunk->executable() == EXECUTABLE) { | 861 if (chunk->executable() == EXECUTABLE) { |
| 868 DCHECK(size_executable_.Value() >= size); | 862 DCHECK_GE(size_executable_.Value(), size); |
| 869 size_executable_.Increment(-size); | 863 size_executable_.Decrement(size); |
| 870 } | 864 } |
| 871 | 865 |
| 872 chunk->SetFlag(MemoryChunk::PRE_FREED); | 866 chunk->SetFlag(MemoryChunk::PRE_FREED); |
| 873 } | 867 } |
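
One small point on the rewrite above: `DCHECK(a >= b)` becomes `DCHECK_GE(a, b)`, which reports both operand values on failure rather than only the expression text. A hypothetical stand-in macro, shown only to illustrate the difference:

```cpp
#include <cstdio>
#include <cstdlib>

// Hypothetical stand-in for a DCHECK_GE-style macro: on failure it prints
// the two operand values, which a bare assert(a >= b) would not.
#define SKETCH_DCHECK_GE(a, b)                                         \
  do {                                                                 \
    if (!((a) >= (b))) {                                               \
      std::fprintf(stderr, "Check failed: %s >= %s (%zu vs. %zu)\n",   \
                   #a, #b, static_cast<size_t>(a),                     \
                   static_cast<size_t>(b));                            \
      std::abort();                                                    \
    }                                                                  \
  } while (0)

int main() {
  size_t total = 100, freed = 40;
  SKETCH_DCHECK_GE(total, freed);  // passes silently
  return 0;
}
```
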
| 874 | 868 |
| 875 | 869 |
| 876 void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) { | 870 void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) { |
| 877 DCHECK(chunk->IsFlagSet(MemoryChunk::PRE_FREED)); | 871 DCHECK(chunk->IsFlagSet(MemoryChunk::PRE_FREED)); |
| 878 chunk->ReleaseAllocatedMemory(); | 872 chunk->ReleaseAllocatedMemory(); |
| 879 | 873 |
| (...skipping 112 matching lines...) |
| 992 | 986 |
| 993 | 987 |
| 994 void MemoryAllocator::ZapBlock(Address start, size_t size) { | 988 void MemoryAllocator::ZapBlock(Address start, size_t size) { |
| 995 for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) { | 989 for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) { |
| 996 Memory::Address_at(start + s) = kZapValue; | 990 Memory::Address_at(start + s) = kZapValue; |
| 997 } | 991 } |
| 998 } | 992 } |
| 999 | 993 |
| 1000 #ifdef DEBUG | 994 #ifdef DEBUG |
| 1001 void MemoryAllocator::ReportStatistics() { | 995 void MemoryAllocator::ReportStatistics() { |
| 1002 intptr_t size = Size(); | 996 size_t size = Size(); |
| 1003 float pct = static_cast<float>(capacity_ - size) / capacity_; | 997 float pct = static_cast<float>(capacity_ - size) / capacity_; |
| 1004 PrintF(" capacity: %" V8PRIdPTR ", used: %" V8PRIdPTR | 998 PrintF(" capacity: %zu , used: %" V8PRIdPTR ", available: %%%d\n\n", |
| 1005 ", available: %%%d\n\n", | |
| 1006 capacity_, size, static_cast<int>(pct * 100)); | 999 capacity_, size, static_cast<int>(pct * 100)); |
| 1007 } | 1000 } |
| 1008 #endif | 1001 #endif |
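
Since `size` and `capacity_` are now `size_t`, the portable conversion is `%zu` for both; printing a `size_t` through a signed pointer-width macro such as `V8PRIdPTR` misreports large values. A standalone sketch with made-up numbers:

```cpp
#include <cstddef>
#include <cstdio>

int main() {
  // Made-up numbers; mirrors the ReportStatistics format above.
  const size_t capacity = 4096, used = 1024;
  const float pct = static_cast<float>(capacity - used) / capacity;
  // %zu is the standard conversion for size_t; %% prints a literal '%'.
  std::printf(" capacity: %zu, used: %zu, available: %%%d\n\n",
              capacity, used, static_cast<int>(pct * 100));
  return 0;
}
```
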
| 1009 | 1002 |
| 1010 | 1003 |
| 1011 int MemoryAllocator::CodePageGuardStartOffset() { | 1004 int MemoryAllocator::CodePageGuardStartOffset() { |
| 1012 // We are guarding code pages: the first OS page after the header | 1005 // We are guarding code pages: the first OS page after the header |
| 1013 // will be protected as non-writable. | 1006 // will be protected as non-writable. |
| 1014 return RoundUp(Page::kObjectStartOffset, base::OS::CommitPageSize()); | 1007 return RoundUp(Page::kObjectStartOffset, base::OS::CommitPageSize()); |
| 1015 } | 1008 } |
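
CodePageGuardStartOffset rounds the header up to a whole OS page because memory protection is applied at page granularity, so the guard region must begin on a page boundary. The rounding itself is the usual power-of-two trick; a sketch with a hypothetical helper name, assuming the alignment is a power of two as `base::OS::CommitPageSize()` is:

```cpp
#include <cstddef>

// RoundUp for power-of-two alignments such as an OS commit page size.
constexpr size_t RoundUpTo(size_t value, size_t alignment) {
  return (value + alignment - 1) & ~(alignment - 1);
}

static_assert(RoundUpTo(100, 4096) == 4096, "header pads out to one page");
static_assert(RoundUpTo(4096, 4096) == 4096, "aligned values are unchanged");
static_assert(RoundUpTo(4097, 4096) == 8192, "spills into the next page");
```
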
| (...skipping 2223 matching lines...) |
| 3239 object->ShortPrint(); | 3232 object->ShortPrint(); |
| 3240 PrintF("\n"); | 3233 PrintF("\n"); |
| 3241 } | 3234 } |
| 3242 printf(" --------------------------------------\n"); | 3235 printf(" --------------------------------------\n"); |
| 3243 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); | 3236 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); |
| 3244 } | 3237 } |
| 3245 | 3238 |
| 3246 #endif // DEBUG | 3239 #endif // DEBUG |
| 3247 } // namespace internal | 3240 } // namespace internal |
| 3248 } // namespace v8 | 3241 } // namespace v8 |