Chromium Code Reviews

Unified Diff: src/heap/spaces.cc

Issue 2488403003: Add v8_os_page_size flag for cross compilation (Closed)
Patch Set: Created 4 years, 1 month ago
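The motivation, per the issue title, is cross compilation: the allocator rounds chunk and commit sizes to the commit page size throughout this file, so when building on a host whose OS page size differs from the target's, the host value would otherwise be baked into those computations. A small illustration of how the rounding differs (RoundUp below is a local stand-in for V8's helper, and the page sizes are example values only):

#include <cstddef>
#include <iostream>

// Local stand-in for v8::internal::RoundUp: round x up to a multiple of m.
static size_t RoundUp(size_t x, size_t m) { return (x + m - 1) / m * m; }

int main() {
  const size_t header_plus_area = 10000;  // illustrative size in bytes
  const size_t host_page = 4096;          // e.g. an x86-64 build machine
  const size_t target_page = 16384;       // e.g. a target with 16 KB pages
  std::cout << RoundUp(header_plus_area, host_page) << "\n";    // 12288
  std::cout << RoundUp(header_plus_area, target_page) << "\n";  // 16384
}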
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "src/heap/spaces.h"
 
 #include <utility>
 
 #include "src/base/bits.h"
 #include "src/base/platform/platform.h"
 #include "src/base/platform/semaphore.h"
 #include "src/full-codegen/full-codegen.h"
 #include "src/heap/array-buffer-tracker.h"
 #include "src/heap/slot-set.h"
 #include "src/macro-assembler.h"
 #include "src/msan.h"
 #include "src/snapshot/snapshot.h"
 #include "src/v8.h"
 
 namespace v8 {
 namespace internal {
 
+intptr_t GetCommitPageSize() {
Michael Lippautz 2016/11/14 09:24:44: For consistency, this should go as a static function.
+  if (FLAG_target_os_page_size != 0)
+    return FLAG_target_os_page_size;
+  else
+    return base::OS::CommitPageSize();
+}
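A minimal sketch of what the reviewer's suggestion might look like, with the helper exposed as a static member of MemoryAllocator; the placement, the simplified flag, and the stand-in OS type are assumptions for illustration, not the CL's actual change:

#include <cstdint>
#include <iostream>

namespace sketch {

intptr_t FLAG_target_os_page_size = 0;  // stand-in for the V8 runtime flag

struct OS {
  // Stand-in for base::OS::CommitPageSize() so the example is self-contained.
  static intptr_t CommitPageSize() { return 4096; }
};

class MemoryAllocator {
 public:
  // Hypothetical placement: expose the page-size override from one place.
  static intptr_t GetCommitPageSize() {
    // Prefer the configured target page size; fall back to the host OS.
    if (FLAG_target_os_page_size != 0) return FLAG_target_os_page_size;
    return OS::CommitPageSize();
  }
};

}  // namespace sketch

int main() {
  sketch::FLAG_target_os_page_size = 16384;  // e.g. cross compiling for a 16 KB-page target
  std::cout << sketch::MemoryAllocator::GetCommitPageSize() << "\n";  // prints 16384
}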
 
 // ----------------------------------------------------------------------------
 // HeapObjectIterator
 
 HeapObjectIterator::HeapObjectIterator(PagedSpace* space)
     : cur_addr_(nullptr),
       cur_end_(nullptr),
       space_(space),
       page_range_(space->anchor()->next_page(), space->anchor()),
       current_page_(page_range_.begin()) {}
(...skipping 66 matching lines...)
       requested = kMaximalCodeRangeSize;
     } else {
       return true;
     }
   }
 
   if (requested <= kMinimumCodeRangeSize) {
     requested = kMinimumCodeRangeSize;
   }
 
-  const size_t reserved_area =
-      kReservedCodeRangePages * base::OS::CommitPageSize();
+  const size_t reserved_area = kReservedCodeRangePages * GetCommitPageSize();
   if (requested < (kMaximalCodeRangeSize - reserved_area))
     requested += reserved_area;
 
   DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);
 
   code_range_ = new base::VirtualMemory(
       requested, Max(kCodeRangeAreaAlignment,
                      static_cast<size_t>(base::OS::AllocateAlignment())));
   CHECK(code_range_ != NULL);
   if (!code_range_->IsReserved()) {
(...skipping 417 matching lines...)
 
   return chunk;
 }
 
 
 // Commit MemoryChunk area to the requested size.
 bool MemoryChunk::CommitArea(size_t requested) {
   size_t guard_size =
       IsFlagSet(IS_EXECUTABLE) ? MemoryAllocator::CodePageGuardSize() : 0;
   size_t header_size = area_start() - address() - guard_size;
-  size_t commit_size =
-      RoundUp(header_size + requested, base::OS::CommitPageSize());
-  size_t committed_size = RoundUp(header_size + (area_end() - area_start()),
-                                  base::OS::CommitPageSize());
+  size_t commit_size = RoundUp(header_size + requested, GetCommitPageSize());
+  size_t committed_size =
+      RoundUp(header_size + (area_end() - area_start()), GetCommitPageSize());
 
   if (commit_size > committed_size) {
     // Commit size should be less or equal than the reserved size.
     DCHECK(commit_size <= size() - 2 * guard_size);
     // Append the committed area.
     Address start = address() + committed_size + guard_size;
     size_t length = commit_size - committed_size;
     if (reservation_.IsReserved()) {
       Executability executable =
           IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
(...skipping 47 matching lines...)
 void MemoryChunk::Unlink() {
   MemoryChunk* next_element = next_chunk();
   MemoryChunk* prev_element = prev_chunk();
   next_element->set_prev_chunk(prev_element);
   prev_element->set_next_chunk(next_element);
   set_prev_chunk(NULL);
   set_next_chunk(NULL);
 }
 
 void MemoryAllocator::ShrinkChunk(MemoryChunk* chunk, size_t bytes_to_shrink) {
-  DCHECK_GE(bytes_to_shrink, static_cast<size_t>(base::OS::CommitPageSize()));
-  DCHECK_EQ(0, bytes_to_shrink % base::OS::CommitPageSize());
+  DCHECK_GE(bytes_to_shrink, static_cast<size_t>(GetCommitPageSize()));
+  DCHECK_EQ(0, bytes_to_shrink % GetCommitPageSize());
   Address free_start = chunk->area_end_ - bytes_to_shrink;
   // Don't adjust the size of the page. The area is just uncomitted but not
   // released.
   chunk->area_end_ -= bytes_to_shrink;
   UncommitBlock(free_start, bytes_to_shrink);
   if (chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
     if (chunk->reservation_.IsReserved())
       chunk->reservation_.Guard(chunk->area_end_);
     else
-      base::OS::Guard(chunk->area_end_, base::OS::CommitPageSize());
+      base::OS::Guard(chunk->area_end_, GetCommitPageSize());
   }
 }
 
 MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
                                             size_t commit_area_size,
                                             Executability executable,
                                             Space* owner) {
   DCHECK_LE(commit_area_size, reserve_area_size);
 
   size_t chunk_size;
(...skipping 28 matching lines...)
 //   |            Area            |
 //   +----------------------------+<- area_end_ (area_start + commit_area_size)
 //   |  Committed but not used    |
 //   +----------------------------+<- aligned at OS page boundary
 //   | Reserved but not committed |
 //   +----------------------------+<- base + chunk_size
 //
 
   if (executable == EXECUTABLE) {
     chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size,
-                         base::OS::CommitPageSize()) +
+                         GetCommitPageSize()) +
                  CodePageGuardSize();
 
     // Check executable memory limit.
     if ((size_executable_.Value() + chunk_size) > capacity_executable_) {
       LOG(isolate_, StringEvent("MemoryAllocator::AllocateRawMemory",
                                 "V8 Executable Allocation capacity exceeded"));
       return NULL;
     }
 
     // Size of header (not executable) plus area (executable).
     size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size,
-                                 base::OS::CommitPageSize());
+                                 GetCommitPageSize());
     // Allocate executable memory either from code range or from the
     // OS.
 #ifdef V8_TARGET_ARCH_MIPS64
     // Use code range only for large object space on mips64 to keep address
     // range within 256-MB memory region.
     if (code_range()->valid() && reserve_area_size > CodePageAreaSize()) {
 #else
     if (code_range()->valid()) {
 #endif
       base =
(...skipping 15 matching lines...)
 
     if (Heap::ShouldZapGarbage()) {
       ZapBlock(base, CodePageGuardStartOffset());
       ZapBlock(base + CodePageAreaStartOffset(), commit_area_size);
     }
 
     area_start = base + CodePageAreaStartOffset();
     area_end = area_start + commit_area_size;
   } else {
     chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size,
-                         base::OS::CommitPageSize());
+                         GetCommitPageSize());
     size_t commit_size =
         RoundUp(MemoryChunk::kObjectStartOffset + commit_area_size,
-                base::OS::CommitPageSize());
+                GetCommitPageSize());
     base =
         AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
                               executable, &reservation);
 
     if (base == NULL) return NULL;
 
     if (Heap::ShouldZapGarbage()) {
       ZapBlock(base, Page::kObjectStartOffset + commit_area_size);
     }
 
(...skipping 56 matching lines...)
   // iterate even further.
   while ((filler2->address() + filler2->Size()) != area_end()) {
     filler2 = HeapObject::FromAddress(filler2->address() + filler2->Size());
     DCHECK(filler2->IsFiller());
   }
   DCHECK_EQ(filler->address(), filler2->address());
 #endif  // DEBUG
 
   size_t unused = RoundDown(
       static_cast<size_t>(area_end() - filler->address() - FreeSpace::kSize),
-      base::OS::CommitPageSize());
+      GetCommitPageSize());
   if (unused > 0) {
     if (FLAG_trace_gc_verbose) {
       PrintIsolate(heap()->isolate(), "Shrinking page %p: end %p -> %p\n",
                    reinterpret_cast<void*>(this),
                    reinterpret_cast<void*>(area_end()),
                    reinterpret_cast<void*>(area_end() - unused));
     }
     heap()->CreateFillerObjectAt(
         filler->address(),
         static_cast<int>(area_end() - filler->address() - unused),
(...skipping 177 matching lines...)
   size_t size = Size();
   float pct = static_cast<float>(capacity_ - size) / capacity_;
   PrintF("  capacity: %zu , used: %" V8PRIdPTR ", available: %%%d\n\n",
          capacity_, size, static_cast<int>(pct * 100));
 }
 #endif
 
 size_t MemoryAllocator::CodePageGuardStartOffset() {
   // We are guarding code pages: the first OS page after the header
   // will be protected as non-writable.
-  return RoundUp(Page::kObjectStartOffset, base::OS::CommitPageSize());
+  return RoundUp(Page::kObjectStartOffset, GetCommitPageSize());
 }
 
 size_t MemoryAllocator::CodePageGuardSize() {
-  return static_cast<int>(base::OS::CommitPageSize());
+  return static_cast<int>(GetCommitPageSize());
 }
 
 size_t MemoryAllocator::CodePageAreaStartOffset() {
   // We are guarding code pages: the first OS page after the header
   // will be protected as non-writable.
   return CodePageGuardStartOffset() + CodePageGuardSize();
 }
 
 size_t MemoryAllocator::CodePageAreaEndOffset() {
   // We are guarding code pages: the last OS page will be protected as
   // non-writable.
-  return Page::kPageSize - static_cast<int>(base::OS::CommitPageSize());
+  return Page::kPageSize - static_cast<int>(GetCommitPageSize());
 }
 
 
 bool MemoryAllocator::CommitExecutableMemory(base::VirtualMemory* vm,
                                              Address start, size_t commit_size,
                                              size_t reserved_size) {
   // Commit page header (not executable).
   Address header = start;
   size_t header_size = CodePageGuardStartOffset();
   if (vm->Commit(header, header_size, false)) {
(...skipping 1853 matching lines...)
 #ifdef VERIFY_HEAP
 void MapSpace::VerifyObject(HeapObject* object) { CHECK(object->IsMap()); }
 #endif
 
 Address LargePage::GetAddressToShrink() {
   HeapObject* object = GetObject();
   if (executable() == EXECUTABLE) {
     return 0;
   }
   size_t used_size = RoundUp((object->address() - address()) + object->Size(),
-                             base::OS::CommitPageSize());
+                             GetCommitPageSize());
   if (used_size < CommittedPhysicalMemory()) {
     return address() + used_size;
   }
   return 0;
 }
 
 void LargePage::ClearOutOfLiveRangeSlots(Address free_start) {
   RememberedSet<OLD_TO_NEW>::RemoveRange(this, free_start, area_end(),
                                          SlotSet::FREE_EMPTY_BUCKETS);
   RememberedSet<OLD_TO_OLD>::RemoveRange(this, free_start, area_end(),
(...skipping 324 matching lines...)
     object->ShortPrint();
     PrintF("\n");
   }
   printf(" --------------------------------------\n");
   printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
 }
 
 #endif  // DEBUG
 }  // namespace internal
 }  // namespace v8