OLD | NEW |
---|---|
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/heap/spaces.h" | 5 #include "src/heap/spaces.h" |
6 | 6 |
7 #include <utility> | 7 #include <utility> |
8 | 8 |
9 #include "src/base/bits.h" | 9 #include "src/base/bits.h" |
10 #include "src/base/platform/platform.h" | 10 #include "src/base/platform/platform.h" |
11 #include "src/base/platform/semaphore.h" | 11 #include "src/base/platform/semaphore.h" |
12 #include "src/full-codegen/full-codegen.h" | 12 #include "src/full-codegen/full-codegen.h" |
13 #include "src/heap/array-buffer-tracker.h" | 13 #include "src/heap/array-buffer-tracker.h" |
14 #include "src/heap/slot-set.h" | 14 #include "src/heap/slot-set.h" |
15 #include "src/macro-assembler.h" | 15 #include "src/macro-assembler.h" |
16 #include "src/msan.h" | 16 #include "src/msan.h" |
17 #include "src/snapshot/snapshot.h" | 17 #include "src/snapshot/snapshot.h" |
18 #include "src/v8.h" | 18 #include "src/v8.h" |
19 | 19 |
20 namespace v8 { | 20 namespace v8 { |
21 namespace internal { | 21 namespace internal { |
22 | 22 |
23 | |
24 // ---------------------------------------------------------------------------- | 23 // ---------------------------------------------------------------------------- |
25 // HeapObjectIterator | 24 // HeapObjectIterator |
26 | 25 |
27 HeapObjectIterator::HeapObjectIterator(PagedSpace* space) | 26 HeapObjectIterator::HeapObjectIterator(PagedSpace* space) |
28 : cur_addr_(nullptr), | 27 : cur_addr_(nullptr), |
29 cur_end_(nullptr), | 28 cur_end_(nullptr), |
30 space_(space), | 29 space_(space), |
31 page_range_(space->anchor()->next_page(), space->anchor()), | 30 page_range_(space->anchor()->next_page(), space->anchor()), |
32 current_page_(page_range_.begin()) {} | 31 current_page_(page_range_.begin()) {} |
33 | 32 |
(...skipping 66 matching lines...) | |
100 } else { | 99 } else { |
101 return true; | 100 return true; |
102 } | 101 } |
103 } | 102 } |
104 | 103 |
105 if (requested <= kMinimumCodeRangeSize) { | 104 if (requested <= kMinimumCodeRangeSize) { |
106 requested = kMinimumCodeRangeSize; | 105 requested = kMinimumCodeRangeSize; |
107 } | 106 } |
108 | 107 |
109 const size_t reserved_area = | 108 const size_t reserved_area = |
110 kReservedCodeRangePages * base::OS::CommitPageSize(); | 109 kReservedCodeRangePages * MemoryAllocator::GetCommitPageSize(); |
111 if (requested < (kMaximalCodeRangeSize - reserved_area)) | 110 if (requested < (kMaximalCodeRangeSize - reserved_area)) |
112 requested += reserved_area; | 111 requested += reserved_area; |
113 | 112 |
114 DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize); | 113 DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize); |
115 | 114 |
116 code_range_ = new base::VirtualMemory( | 115 code_range_ = new base::VirtualMemory( |
117 requested, Max(kCodeRangeAreaAlignment, | 116 requested, Max(kCodeRangeAreaAlignment, |
118 static_cast<size_t>(base::OS::AllocateAlignment()))); | 117 static_cast<size_t>(base::OS::AllocateAlignment()))); |
119 CHECK(code_range_ != NULL); | 118 CHECK(code_range_ != NULL); |
120 if (!code_range_->IsReserved()) { | 119 if (!code_range_->IsReserved()) { |
(...skipping 418 matching lines...) | |
539 return chunk; | 538 return chunk; |
540 } | 539 } |
541 | 540 |
542 | 541 |
543 // Commit MemoryChunk area to the requested size. | 542 // Commit MemoryChunk area to the requested size. |
544 bool MemoryChunk::CommitArea(size_t requested) { | 543 bool MemoryChunk::CommitArea(size_t requested) { |
545 size_t guard_size = | 544 size_t guard_size = |
546 IsFlagSet(IS_EXECUTABLE) ? MemoryAllocator::CodePageGuardSize() : 0; | 545 IsFlagSet(IS_EXECUTABLE) ? MemoryAllocator::CodePageGuardSize() : 0; |
547 size_t header_size = area_start() - address() - guard_size; | 546 size_t header_size = area_start() - address() - guard_size; |
548 size_t commit_size = | 547 size_t commit_size = |
549 RoundUp(header_size + requested, base::OS::CommitPageSize()); | 548 RoundUp(header_size + requested, MemoryAllocator::GetCommitPageSize()); |
550 size_t committed_size = RoundUp(header_size + (area_end() - area_start()), | 549 size_t committed_size = RoundUp(header_size + (area_end() - area_start()), |
551 base::OS::CommitPageSize()); | 550 MemoryAllocator::GetCommitPageSize()); |
552 | 551 |
553 if (commit_size > committed_size) { | 552 if (commit_size > committed_size) { |
554 // Commit size should be less than or equal to the reserved size. | 553 // Commit size should be less than or equal to the reserved size. |
555 DCHECK(commit_size <= size() - 2 * guard_size); | 554 DCHECK(commit_size <= size() - 2 * guard_size); |
556 // Append the committed area. | 555 // Append the committed area. |
557 Address start = address() + committed_size + guard_size; | 556 Address start = address() + committed_size + guard_size; |
558 size_t length = commit_size - committed_size; | 557 size_t length = commit_size - committed_size; |
559 if (reservation_.IsReserved()) { | 558 if (reservation_.IsReserved()) { |
560 Executability executable = | 559 Executability executable = |
561 IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE; | 560 IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE; |
(...skipping 47 matching lines...) | |
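CommitArea above compares the requested commit size against the currently committed size only after rounding both up to the commit-page granularity. A self-contained sketch of that rounding arithmetic, with standalone helpers assumed here for illustration (V8's real RoundUp/RoundDown live in src/utils.h and handle the general case):

```cpp
#include <cassert>
#include <cstddef>

// Round x up or down to a power-of-two granularity, as done when sizing
// commit requests against the OS commit page size.
static size_t RoundUp(size_t x, size_t granularity) {
  assert((granularity & (granularity - 1)) == 0);  // power of two
  return (x + granularity - 1) & ~(granularity - 1);
}

static size_t RoundDown(size_t x, size_t granularity) {
  assert((granularity & (granularity - 1)) == 0);  // power of two
  return x & ~(granularity - 1);
}

// Example: with 4 KB commit pages, a 10000-byte request occupies three
// pages: RoundUp(10000, 4096) == 12288, RoundDown(10000, 4096) == 8192.
```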
609 void MemoryChunk::Unlink() { | 608 void MemoryChunk::Unlink() { |
610 MemoryChunk* next_element = next_chunk(); | 609 MemoryChunk* next_element = next_chunk(); |
611 MemoryChunk* prev_element = prev_chunk(); | 610 MemoryChunk* prev_element = prev_chunk(); |
612 next_element->set_prev_chunk(prev_element); | 611 next_element->set_prev_chunk(prev_element); |
613 prev_element->set_next_chunk(next_element); | 612 prev_element->set_next_chunk(next_element); |
614 set_prev_chunk(NULL); | 613 set_prev_chunk(NULL); |
615 set_next_chunk(NULL); | 614 set_next_chunk(NULL); |
616 } | 615 } |
617 | 616 |
618 void MemoryAllocator::ShrinkChunk(MemoryChunk* chunk, size_t bytes_to_shrink) { | 617 void MemoryAllocator::ShrinkChunk(MemoryChunk* chunk, size_t bytes_to_shrink) { |
619 DCHECK_GE(bytes_to_shrink, static_cast<size_t>(base::OS::CommitPageSize())); | 618 DCHECK_GE(bytes_to_shrink, static_cast<size_t>(GetCommitPageSize())); |
620 DCHECK_EQ(0, bytes_to_shrink % base::OS::CommitPageSize()); | 619 DCHECK_EQ(0, bytes_to_shrink % GetCommitPageSize()); |
621 Address free_start = chunk->area_end_ - bytes_to_shrink; | 620 Address free_start = chunk->area_end_ - bytes_to_shrink; |
622 // Don't adjust the size of the page. The area is just uncommitted but not | 621 // Don't adjust the size of the page. The area is just uncommitted but not |
623 // released. | 622 // released. |
624 chunk->area_end_ -= bytes_to_shrink; | 623 chunk->area_end_ -= bytes_to_shrink; |
625 UncommitBlock(free_start, bytes_to_shrink); | 624 UncommitBlock(free_start, bytes_to_shrink); |
626 if (chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) { | 625 if (chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) { |
627 if (chunk->reservation_.IsReserved()) | 626 if (chunk->reservation_.IsReserved()) |
628 chunk->reservation_.Guard(chunk->area_end_); | 627 chunk->reservation_.Guard(chunk->area_end_); |
629 else | 628 else |
630 base::OS::Guard(chunk->area_end_, base::OS::CommitPageSize()); | 629 base::OS::Guard(chunk->area_end_, GetCommitPageSize()); |
631 } | 630 } |
632 } | 631 } |
633 | 632 |
634 MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size, | 633 MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size, |
635 size_t commit_area_size, | 634 size_t commit_area_size, |
636 Executability executable, | 635 Executability executable, |
637 Space* owner) { | 636 Space* owner) { |
638 DCHECK_LE(commit_area_size, reserve_area_size); | 637 DCHECK_LE(commit_area_size, reserve_area_size); |
639 | 638 |
640 size_t chunk_size; | 639 size_t chunk_size; |
(...skipping 28 matching lines...) | |
669 // | Area | | 668 // | Area | |
670 // +----------------------------+<- area_end_ (area_start + commit_area_size) | 669 // +----------------------------+<- area_end_ (area_start + commit_area_size) |
671 // | Committed but not used | | 670 // | Committed but not used | |
672 // +----------------------------+<- aligned at OS page boundary | 671 // +----------------------------+<- aligned at OS page boundary |
673 // | Reserved but not committed | | 672 // | Reserved but not committed | |
674 // +----------------------------+<- base + chunk_size | 673 // +----------------------------+<- base + chunk_size |
675 // | 674 // |
676 | 675 |
677 if (executable == EXECUTABLE) { | 676 if (executable == EXECUTABLE) { |
678 chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size, | 677 chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size, |
679 base::OS::CommitPageSize()) + | 678 GetCommitPageSize()) + |
680 CodePageGuardSize(); | 679 CodePageGuardSize(); |
681 | 680 |
682 // Check executable memory limit. | 681 // Check executable memory limit. |
683 if ((size_executable_.Value() + chunk_size) > capacity_executable_) { | 682 if ((size_executable_.Value() + chunk_size) > capacity_executable_) { |
684 LOG(isolate_, StringEvent("MemoryAllocator::AllocateRawMemory", | 683 LOG(isolate_, StringEvent("MemoryAllocator::AllocateRawMemory", |
685 "V8 Executable Allocation capacity exceeded")); | 684 "V8 Executable Allocation capacity exceeded")); |
686 return NULL; | 685 return NULL; |
687 } | 686 } |
688 | 687 |
689 // Size of header (not executable) plus area (executable). | 688 // Size of header (not executable) plus area (executable). |
690 size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size, | 689 size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size, |
691 base::OS::CommitPageSize()); | 690 GetCommitPageSize()); |
692 // Allocate executable memory either from code range or from the | 691 // Allocate executable memory either from code range or from the |
693 // OS. | 692 // OS. |
694 #ifdef V8_TARGET_ARCH_MIPS64 | 693 #ifdef V8_TARGET_ARCH_MIPS64 |
695 // Use code range only for large object space on mips64 to keep address | 694 // Use code range only for large object space on mips64 to keep address |
696 // range within the 256-MB memory region. | 695 // range within the 256-MB memory region. |
697 if (code_range()->valid() && reserve_area_size > CodePageAreaSize()) { | 696 if (code_range()->valid() && reserve_area_size > CodePageAreaSize()) { |
698 #else | 697 #else |
699 if (code_range()->valid()) { | 698 if (code_range()->valid()) { |
700 #endif | 699 #endif |
701 base = | 700 base = |
(...skipping 15 matching lines...) | |
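To put the executable-chunk sizing above into numbers, here is a worked example under assumed constants (4 KB commit page, 8 KB CodePageAreaStartOffset, one trailing guard page; all values hypothetical, not V8's actual configuration):

```cpp
#include <cstddef>

constexpr size_t kPage = 4096;       // assumed commit page size
constexpr size_t kAreaStart = 8192;  // assumed CodePageAreaStartOffset()
constexpr size_t kGuard = kPage;     // assumed CodePageGuardSize()

constexpr size_t RoundUpPow2(size_t x, size_t g) { return (x + g - 1) & ~(g - 1); }

// A 100000-byte reserve area needs 110592 bytes of header plus area,
// plus the trailing guard page, for a 114688-byte chunk.
constexpr size_t kReserveAreaSize = 100000;
constexpr size_t kChunkSize =
    RoundUpPow2(kAreaStart + kReserveAreaSize, kPage) + kGuard;
static_assert(kChunkSize == 114688, "rounded body plus trailing guard");
```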
717 | 716 |
718 if (Heap::ShouldZapGarbage()) { | 717 if (Heap::ShouldZapGarbage()) { |
719 ZapBlock(base, CodePageGuardStartOffset()); | 718 ZapBlock(base, CodePageGuardStartOffset()); |
720 ZapBlock(base + CodePageAreaStartOffset(), commit_area_size); | 719 ZapBlock(base + CodePageAreaStartOffset(), commit_area_size); |
721 } | 720 } |
722 | 721 |
723 area_start = base + CodePageAreaStartOffset(); | 722 area_start = base + CodePageAreaStartOffset(); |
724 area_end = area_start + commit_area_size; | 723 area_end = area_start + commit_area_size; |
725 } else { | 724 } else { |
726 chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size, | 725 chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size, |
727 base::OS::CommitPageSize()); | 726 GetCommitPageSize()); |
728 size_t commit_size = | 727 size_t commit_size = |
729 RoundUp(MemoryChunk::kObjectStartOffset + commit_area_size, | 728 RoundUp(MemoryChunk::kObjectStartOffset + commit_area_size, |
730 base::OS::CommitPageSize()); | 729 GetCommitPageSize()); |
731 base = | 730 base = |
732 AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment, | 731 AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment, |
733 executable, &reservation); | 732 executable, &reservation); |
734 | 733 |
735 if (base == NULL) return NULL; | 734 if (base == NULL) return NULL; |
736 | 735 |
737 if (Heap::ShouldZapGarbage()) { | 736 if (Heap::ShouldZapGarbage()) { |
738 ZapBlock(base, Page::kObjectStartOffset + commit_area_size); | 737 ZapBlock(base, Page::kObjectStartOffset + commit_area_size); |
739 } | 738 } |
740 | 739 |
(...skipping 56 matching lines...) | |
797 // iterate even further. | 796 // iterate even further. |
798 while ((filler2->address() + filler2->Size()) != area_end()) { | 797 while ((filler2->address() + filler2->Size()) != area_end()) { |
799 filler2 = HeapObject::FromAddress(filler2->address() + filler2->Size()); | 798 filler2 = HeapObject::FromAddress(filler2->address() + filler2->Size()); |
800 DCHECK(filler2->IsFiller()); | 799 DCHECK(filler2->IsFiller()); |
801 } | 800 } |
802 DCHECK_EQ(filler->address(), filler2->address()); | 801 DCHECK_EQ(filler->address(), filler2->address()); |
803 #endif // DEBUG | 802 #endif // DEBUG |
804 | 803 |
805 size_t unused = RoundDown( | 804 size_t unused = RoundDown( |
806 static_cast<size_t>(area_end() - filler->address() - FreeSpace::kSize), | 805 static_cast<size_t>(area_end() - filler->address() - FreeSpace::kSize), |
807 base::OS::CommitPageSize()); | 806 MemoryAllocator::GetCommitPageSize()); |
808 if (unused > 0) { | 807 if (unused > 0) { |
809 if (FLAG_trace_gc_verbose) { | 808 if (FLAG_trace_gc_verbose) { |
810 PrintIsolate(heap()->isolate(), "Shrinking page %p: end %p -> %p\n", | 809 PrintIsolate(heap()->isolate(), "Shrinking page %p: end %p -> %p\n", |
811 reinterpret_cast<void*>(this), | 810 reinterpret_cast<void*>(this), |
812 reinterpret_cast<void*>(area_end()), | 811 reinterpret_cast<void*>(area_end()), |
813 reinterpret_cast<void*>(area_end() - unused)); | 812 reinterpret_cast<void*>(area_end() - unused)); |
814 } | 813 } |
815 heap()->CreateFillerObjectAt( | 814 heap()->CreateFillerObjectAt( |
816 filler->address(), | 815 filler->address(), |
817 static_cast<int>(area_end() - filler->address() - unused), | 816 static_cast<int>(area_end() - filler->address() - unused), |
(...skipping 177 matching lines...) | |
995 size_t size = Size(); | 994 size_t size = Size(); |
996 float pct = static_cast<float>(capacity_ - size) / capacity_; | 995 float pct = static_cast<float>(capacity_ - size) / capacity_; |
997 PrintF(" capacity: %zu , used: %" V8PRIdPTR ", available: %%%d\n\n", | 996 PrintF(" capacity: %zu , used: %" V8PRIdPTR ", available: %%%d\n\n", |
998 capacity_, size, static_cast<int>(pct * 100)); | 997 capacity_, size, static_cast<int>(pct * 100)); |
999 } | 998 } |
1000 #endif | 999 #endif |
1001 | 1000 |
1002 size_t MemoryAllocator::CodePageGuardStartOffset() { | 1001 size_t MemoryAllocator::CodePageGuardStartOffset() { |
1003 // We are guarding code pages: the first OS page after the header | 1002 // We are guarding code pages: the first OS page after the header |
1004 // will be protected as non-writable. | 1003 // will be protected as non-writable. |
1005 return RoundUp(Page::kObjectStartOffset, base::OS::CommitPageSize()); | 1004 return RoundUp(Page::kObjectStartOffset, GetCommitPageSize()); |
1006 } | 1005 } |
1007 | 1006 |
1008 size_t MemoryAllocator::CodePageGuardSize() { | 1007 size_t MemoryAllocator::CodePageGuardSize() { |
1009 return static_cast<int>(base::OS::CommitPageSize()); | 1008 return static_cast<int>(GetCommitPageSize()); |
1010 } | 1009 } |
1011 | 1010 |
1012 size_t MemoryAllocator::CodePageAreaStartOffset() { | 1011 size_t MemoryAllocator::CodePageAreaStartOffset() { |
1013 // We are guarding code pages: the first OS page after the header | 1012 // We are guarding code pages: the first OS page after the header |
1014 // will be protected as non-writable. | 1013 // will be protected as non-writable. |
1015 return CodePageGuardStartOffset() + CodePageGuardSize(); | 1014 return CodePageGuardStartOffset() + CodePageGuardSize(); |
1016 } | 1015 } |
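The offsets above fully determine the code-page layout: header page, then a non-writable guard page, then the executable area. A worked example under assumed values (4 KB commit page, 32-byte Page::kObjectStartOffset; both hypothetical, the real constants are platform- and build-dependent):

```cpp
#include <cstddef>

constexpr size_t kCommitPageSize = 4096;   // assumed
constexpr size_t kObjectStartOffset = 32;  // assumed, not V8's actual constant

constexpr size_t RoundUpPow2(size_t x, size_t g) { return (x + g - 1) & ~(g - 1); }

// The guard page starts at the first page boundary after the header, and
// the executable area begins one full commit page later.
constexpr size_t kGuardStart = RoundUpPow2(kObjectStartOffset, kCommitPageSize);
constexpr size_t kAreaStart = kGuardStart + kCommitPageSize;
static_assert(kGuardStart == 4096 && kAreaStart == 8192,
              "header page, then guard page, then code");
```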
1017 | 1016 |
1018 size_t MemoryAllocator::CodePageAreaEndOffset() { | 1017 size_t MemoryAllocator::CodePageAreaEndOffset() { |
1019 // We are guarding code pages: the last OS page will be protected as | 1018 // We are guarding code pages: the last OS page will be protected as |
1020 // non-writable. | 1019 // non-writable. |
1021 return Page::kPageSize - static_cast<int>(base::OS::CommitPageSize()); | 1020 return Page::kPageSize - static_cast<int>(GetCommitPageSize()); |
 | 1021 } |
 | 1022 |
 | 1023 intptr_t MemoryAllocator::GetCommitPageSize() { |
 | 1024 if (FLAG_v8_os_page_size != 0) |
Michael Lippautz 2016/11/14 16:04:08: DCHECK(IsPowerOfTwo32...)
 | 1025 return FLAG_v8_os_page_size; |
Michael Lippautz 2016/11/14 16:04:08: FLAG_v8_os_page_size * KB;
 | 1026 else |
 | 1027 return base::OS::CommitPageSize(); |
1022 } | 1028 } |
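Both inline comments ask for follow-ups: a power-of-two check on the flag, and interpreting the flag value in KB. A sketch of the new function with both suggestions applied — the exact predicate spelling and flag semantics are assumptions here, and the landed follow-up may differ:

```cpp
intptr_t MemoryAllocator::GetCommitPageSize() {
  if (FLAG_v8_os_page_size != 0) {
    // Assumed spelling of the suggested check; the flag is taken to be in KB.
    DCHECK(base::bits::IsPowerOfTwo32(FLAG_v8_os_page_size));
    return FLAG_v8_os_page_size * KB;
  }
  return base::OS::CommitPageSize();
}
```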
1023 | 1029 |
1024 | 1030 |
1025 bool MemoryAllocator::CommitExecutableMemory(base::VirtualMemory* vm, | 1031 bool MemoryAllocator::CommitExecutableMemory(base::VirtualMemory* vm, |
1026 Address start, size_t commit_size, | 1032 Address start, size_t commit_size, |
1027 size_t reserved_size) { | 1033 size_t reserved_size) { |
1028 // Commit page header (not executable). | 1034 // Commit page header (not executable). |
1029 Address header = start; | 1035 Address header = start; |
1030 size_t header_size = CodePageGuardStartOffset(); | 1036 size_t header_size = CodePageGuardStartOffset(); |
1031 if (vm->Commit(header, header_size, false)) { | 1037 if (vm->Commit(header, header_size, false)) { |
(...skipping 1853 matching lines...) | |
2885 #ifdef VERIFY_HEAP | 2891 #ifdef VERIFY_HEAP |
2886 void MapSpace::VerifyObject(HeapObject* object) { CHECK(object->IsMap()); } | 2892 void MapSpace::VerifyObject(HeapObject* object) { CHECK(object->IsMap()); } |
2887 #endif | 2893 #endif |
2888 | 2894 |
2889 Address LargePage::GetAddressToShrink() { | 2895 Address LargePage::GetAddressToShrink() { |
2890 HeapObject* object = GetObject(); | 2896 HeapObject* object = GetObject(); |
2891 if (executable() == EXECUTABLE) { | 2897 if (executable() == EXECUTABLE) { |
2892 return 0; | 2898 return 0; |
2893 } | 2899 } |
2894 size_t used_size = RoundUp((object->address() - address()) + object->Size(), | 2900 size_t used_size = RoundUp((object->address() - address()) + object->Size(), |
2895 base::OS::CommitPageSize()); | 2901 MemoryAllocator::GetCommitPageSize()); |
2896 if (used_size < CommittedPhysicalMemory()) { | 2902 if (used_size < CommittedPhysicalMemory()) { |
2897 return address() + used_size; | 2903 return address() + used_size; |
2898 } | 2904 } |
2899 return 0; | 2905 return 0; |
2900 } | 2906 } |
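GetAddressToShrink rounds the live object's extent up to the next commit-page boundary; anything committed beyond that boundary can be uncommitted. A small worked example with made-up numbers (4 KB commit page assumed):

```cpp
#include <cassert>
#include <cstddef>

int main() {
  const size_t kCommitPageSize = 4096;  // assumed
  const size_t object_offset = 256;     // object->address() - page address
  const size_t object_size = 10000;
  // used_size = RoundUp(256 + 10000, 4096) == 12288.
  const size_t used = ((object_offset + object_size) + kCommitPageSize - 1) &
                      ~(kCommitPageSize - 1);
  assert(used == 12288);
  // With 16384 bytes currently committed, the shrink address is
  // page base + 12288 and the final committed page can be released.
  const size_t committed = 16384;
  return used < committed ? 0 : 1;
}
```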
2901 | 2907 |
2902 void LargePage::ClearOutOfLiveRangeSlots(Address free_start) { | 2908 void LargePage::ClearOutOfLiveRangeSlots(Address free_start) { |
2903 RememberedSet<OLD_TO_NEW>::RemoveRange(this, free_start, area_end(), | 2909 RememberedSet<OLD_TO_NEW>::RemoveRange(this, free_start, area_end(), |
2904 SlotSet::FREE_EMPTY_BUCKETS); | 2910 SlotSet::FREE_EMPTY_BUCKETS); |
2905 RememberedSet<OLD_TO_OLD>::RemoveRange(this, free_start, area_end(), | 2911 RememberedSet<OLD_TO_OLD>::RemoveRange(this, free_start, area_end(), |
(...skipping 324 matching lines...) | |
3230 object->ShortPrint(); | 3236 object->ShortPrint(); |
3231 PrintF("\n"); | 3237 PrintF("\n"); |
3232 } | 3238 } |
3233 printf(" --------------------------------------\n"); | 3239 printf(" --------------------------------------\n"); |
3234 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); | 3240 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); |
3235 } | 3241 } |
3236 | 3242 |
3237 #endif // DEBUG | 3243 #endif // DEBUG |
3238 } // namespace internal | 3244 } // namespace internal |
3239 } // namespace v8 | 3245 } // namespace v8 |