Chromium Code Reviews

Unified diff: src/heap/spaces.cc

Issue 2488403003: Add v8_os_page_size flag for cross compilation (Closed)
Patch Set: merge with master | Created 4 years, 1 month ago
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "src/heap/spaces.h"

 #include <utility>

 #include "src/base/bits.h"
 #include "src/base/platform/platform.h"
(...skipping 89 matching lines...)
     } else {
       return true;
     }
   }

   if (requested <= kMinimumCodeRangeSize) {
     requested = kMinimumCodeRangeSize;
   }

   const size_t reserved_area =
-      kReservedCodeRangePages * base::OS::CommitPageSize();
+      kReservedCodeRangePages * MemoryAllocator::GetCommitPageSize();
   if (requested < (kMaximalCodeRangeSize - reserved_area))
     requested += reserved_area;

   DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);

   code_range_ = new base::VirtualMemory(
       requested, Max(kCodeRangeAreaAlignment,
                      static_cast<size_t>(base::OS::AllocateAlignment())));
   CHECK(code_range_ != NULL);
   if (!code_range_->IsReserved()) {
(...skipping 420 matching lines...)
   return chunk;
 }


 // Commit MemoryChunk area to the requested size.
 bool MemoryChunk::CommitArea(size_t requested) {
   size_t guard_size =
       IsFlagSet(IS_EXECUTABLE) ? MemoryAllocator::CodePageGuardSize() : 0;
   size_t header_size = area_start() - address() - guard_size;
   size_t commit_size =
-      RoundUp(header_size + requested, base::OS::CommitPageSize());
+      RoundUp(header_size + requested, MemoryAllocator::GetCommitPageSize());
   size_t committed_size = RoundUp(header_size + (area_end() - area_start()),
-                                  base::OS::CommitPageSize());
+                                  MemoryAllocator::GetCommitPageSize());

   if (commit_size > committed_size) {
     // Commit size should be less or equal than the reserved size.
     DCHECK(commit_size <= size() - 2 * guard_size);
     // Append the committed area.
     Address start = address() + committed_size + guard_size;
     size_t length = commit_size - committed_size;
     if (reservation_.IsReserved()) {
       Executability executable =
           IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
(...skipping 47 matching lines...)
 void MemoryChunk::Unlink() {
   MemoryChunk* next_element = next_chunk();
   MemoryChunk* prev_element = prev_chunk();
   next_element->set_prev_chunk(prev_element);
   prev_element->set_next_chunk(next_element);
   set_prev_chunk(NULL);
   set_next_chunk(NULL);
 }

 void MemoryAllocator::ShrinkChunk(MemoryChunk* chunk, size_t bytes_to_shrink) {
-  DCHECK_GE(bytes_to_shrink, static_cast<size_t>(base::OS::CommitPageSize()));
-  DCHECK_EQ(0u, bytes_to_shrink % base::OS::CommitPageSize());
+  DCHECK_GE(bytes_to_shrink, static_cast<size_t>(GetCommitPageSize()));
+  DCHECK_EQ(0u, bytes_to_shrink % GetCommitPageSize());
   Address free_start = chunk->area_end_ - bytes_to_shrink;
   // Don't adjust the size of the page. The area is just uncomitted but not
   // released.
   chunk->area_end_ -= bytes_to_shrink;
   UncommitBlock(free_start, bytes_to_shrink);
   if (chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
     if (chunk->reservation_.IsReserved())
       chunk->reservation_.Guard(chunk->area_end_);
     else
-      base::OS::Guard(chunk->area_end_, base::OS::CommitPageSize());
+      base::OS::Guard(chunk->area_end_, GetCommitPageSize());
   }
 }

 MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
                                             size_t commit_area_size,
                                             Executability executable,
                                             Space* owner) {
   DCHECK_LE(commit_area_size, reserve_area_size);

   size_t chunk_size;
(...skipping 28 matching lines...)
   // | Area |
   // +----------------------------+<- area_end_ (area_start + commit_area_size)
   // | Committed but not used |
   // +----------------------------+<- aligned at OS page boundary
   // | Reserved but not committed |
   // +----------------------------+<- base + chunk_size
   //

   if (executable == EXECUTABLE) {
     chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size,
-                         base::OS::CommitPageSize()) +
+                         GetCommitPageSize()) +
                  CodePageGuardSize();

     // Check executable memory limit.
     if ((size_executable_.Value() + chunk_size) > capacity_executable_) {
       LOG(isolate_, StringEvent("MemoryAllocator::AllocateRawMemory",
                                 "V8 Executable Allocation capacity exceeded"));
       return NULL;
     }

     // Size of header (not executable) plus area (executable).
     size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size,
-                                 base::OS::CommitPageSize());
+                                 GetCommitPageSize());
     // Allocate executable memory either from code range or from the
     // OS.
 #ifdef V8_TARGET_ARCH_MIPS64
     // Use code range only for large object space on mips64 to keep address
     // range within 256-MB memory region.
     if (code_range()->valid() && reserve_area_size > CodePageAreaSize()) {
 #else
     if (code_range()->valid()) {
 #endif
       base =
(...skipping 15 matching lines...)

     if (Heap::ShouldZapGarbage()) {
       ZapBlock(base, CodePageGuardStartOffset());
       ZapBlock(base + CodePageAreaStartOffset(), commit_area_size);
     }

     area_start = base + CodePageAreaStartOffset();
     area_end = area_start + commit_area_size;
   } else {
     chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size,
-                         base::OS::CommitPageSize());
+                         GetCommitPageSize());
     size_t commit_size =
         RoundUp(MemoryChunk::kObjectStartOffset + commit_area_size,
-                base::OS::CommitPageSize());
+                GetCommitPageSize());
     base =
         AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
                               executable, &reservation);

     if (base == NULL) return NULL;

     if (Heap::ShouldZapGarbage()) {
       ZapBlock(base, Page::kObjectStartOffset + commit_area_size);
     }

(...skipping 64 matching lines...)
   // iterate even further.
   while ((filler2->address() + filler2->Size()) != area_end()) {
     filler2 = HeapObject::FromAddress(filler2->address() + filler2->Size());
     DCHECK(filler2->IsFiller());
   }
   DCHECK_EQ(filler->address(), filler2->address());
 #endif  // DEBUG

   size_t unused = RoundDown(
       static_cast<size_t>(area_end() - filler->address() - FreeSpace::kSize),
-      base::OS::CommitPageSize());
+      MemoryAllocator::GetCommitPageSize());
   if (unused > 0) {
     if (FLAG_trace_gc_verbose) {
       PrintIsolate(heap()->isolate(), "Shrinking page %p: end %p -> %p\n",
                    reinterpret_cast<void*>(this),
                    reinterpret_cast<void*>(area_end()),
                    reinterpret_cast<void*>(area_end() - unused));
     }
     heap()->CreateFillerObjectAt(
         filler->address(),
         static_cast<int>(area_end() - filler->address() - unused),
(...skipping 177 matching lines...)
   size_t size = Size();
   float pct = static_cast<float>(capacity_ - size) / capacity_;
   PrintF(" capacity: %zu , used: %" V8PRIdPTR ", available: %%%d\n\n",
          capacity_, size, static_cast<int>(pct * 100));
 }
 #endif

 size_t MemoryAllocator::CodePageGuardStartOffset() {
   // We are guarding code pages: the first OS page after the header
   // will be protected as non-writable.
-  return RoundUp(Page::kObjectStartOffset, base::OS::CommitPageSize());
+  return RoundUp(Page::kObjectStartOffset, GetCommitPageSize());
 }

 size_t MemoryAllocator::CodePageGuardSize() {
-  return static_cast<int>(base::OS::CommitPageSize());
+  return static_cast<int>(GetCommitPageSize());
 }

 size_t MemoryAllocator::CodePageAreaStartOffset() {
   // We are guarding code pages: the first OS page after the header
   // will be protected as non-writable.
   return CodePageGuardStartOffset() + CodePageGuardSize();
 }

 size_t MemoryAllocator::CodePageAreaEndOffset() {
   // We are guarding code pages: the last OS page will be protected as
   // non-writable.
-  return Page::kPageSize - static_cast<int>(base::OS::CommitPageSize());
+  return Page::kPageSize - static_cast<int>(GetCommitPageSize());
+}
+
+intptr_t MemoryAllocator::GetCommitPageSize() {
+  if (FLAG_v8_os_page_size != 0) {
+    DCHECK(base::bits::IsPowerOfTwo32(FLAG_v8_os_page_size));
+    return FLAG_v8_os_page_size * KB;
+  } else {
+    return base::OS::CommitPageSize();
+  }
 }


 bool MemoryAllocator::CommitExecutableMemory(base::VirtualMemory* vm,
                                              Address start, size_t commit_size,
                                              size_t reserved_size) {
   // Commit page header (not executable).
   Address header = start;
   size_t header_size = CodePageGuardStartOffset();
   if (vm->Commit(header, header_size, false)) {
(...skipping 1844 matching lines...)
 #ifdef VERIFY_HEAP
 void MapSpace::VerifyObject(HeapObject* object) { CHECK(object->IsMap()); }
 #endif

 Address LargePage::GetAddressToShrink() {
   HeapObject* object = GetObject();
   if (executable() == EXECUTABLE) {
     return 0;
   }
   size_t used_size = RoundUp((object->address() - address()) + object->Size(),
-                             base::OS::CommitPageSize());
+                             MemoryAllocator::GetCommitPageSize());
   if (used_size < CommittedPhysicalMemory()) {
     return address() + used_size;
   }
   return 0;
 }

 void LargePage::ClearOutOfLiveRangeSlots(Address free_start) {
   RememberedSet<OLD_TO_NEW>::RemoveRange(this, free_start, area_end(),
                                          SlotSet::FREE_EMPTY_BUCKETS);
   RememberedSet<OLD_TO_OLD>::RemoveRange(this, free_start, area_end(),
(...skipping 324 matching lines...)
     object->ShortPrint();
     PrintF("\n");
   }
   printf(" --------------------------------------\n");
   printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
 }

 #endif  // DEBUG
 }  // namespace internal
 }  // namespace v8
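The substance of the change is the new MemoryAllocator::GetCommitPageSize() helper in the hunk above: the spots in this file that previously called base::OS::CommitPageSize() now go through it, so a build whose target page size differs from the build host's can pin the commit page size via FLAG_v8_os_page_size (a power-of-two value in KB; 0 keeps the OS-reported size). Below is a minimal standalone sketch of that selection logic for a POSIX host; kOsPageSizeKB and OsCommitPageSize() are hypothetical stand-ins for FLAG_v8_os_page_size and base::OS::CommitPageSize(), not V8 APIs.

// Sketch only: mirrors the page-size selection introduced by this patch.
#include <cassert>
#include <cstddef>
#include <cstdio>
#include <unistd.h>  // sysconf, POSIX hosts only

// Stand-in for FLAG_v8_os_page_size: page size in KB, 0 means "ask the OS".
static int kOsPageSizeKB = 0;

// Stand-in for base::OS::CommitPageSize().
static std::size_t OsCommitPageSize() {
  return static_cast<std::size_t>(sysconf(_SC_PAGESIZE));
}

// Stand-in for base::bits::IsPowerOfTwo32.
static bool IsPowerOfTwo(unsigned x) { return x != 0 && (x & (x - 1)) == 0; }

// Same shape as MemoryAllocator::GetCommitPageSize(): prefer the explicitly
// configured size (KB converted to bytes), otherwise fall back to the OS.
static std::size_t GetCommitPageSize() {
  if (kOsPageSizeKB != 0) {
    assert(IsPowerOfTwo(static_cast<unsigned>(kOsPageSizeKB)));
    return static_cast<std::size_t>(kOsPageSizeKB) * 1024;
  }
  return OsCommitPageSize();
}

int main() {
  std::printf("default commit page size:    %zu\n", GetCommitPageSize());
  kOsPageSizeKB = 64;  // e.g. cross compiling for a platform with 64 KB pages
  std::printf("configured commit page size: %zu\n", GetCommitPageSize());
}

Built with a plain C++ compiler, this prints the host's page size first and 65536 after the override, which is the behavior the flag is meant to give when the target's OS page size differs from the host's.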
