Chromium Code Reviews

Unified Diff: src/heap/spaces.h

Issue 1340923004: [heap] Concurrency support for heap book-keeping info (Closed)
Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Added MemoryAllocator's size_ and capacity_ fields Created 5 years, 3 months ago
  // Copyright 2011 the V8 project authors. All rights reserved.
  // Use of this source code is governed by a BSD-style license that can be
  // found in the LICENSE file.

  #ifndef V8_HEAP_SPACES_H_
  #define V8_HEAP_SPACES_H_

  #include "src/allocation.h"
+ #include "src/atomic-utils.h"
  #include "src/base/atomicops.h"
  #include "src/base/bits.h"
  #include "src/base/platform/mutex.h"
  #include "src/flags.h"
  #include "src/hashmap.h"
  #include "src/list.h"
  #include "src/objects.h"
  #include "src/utils.h"

  namespace v8 {
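The new include, src/atomic-utils.h, supplies the AtomicValue and AtomicNumber wrappers used throughout this patch. Judging only from the calls exercised in this diff (Value, SetValue, TrySetValue, Increment), their interface could be sketched with std::atomic as below. This is a stand-in for illustration, not V8's actual implementation; the memory-ordering choices are assumptions.

```cpp
#include <atomic>

// Illustration only: a plausible shape for the wrappers in
// src/atomic-utils.h, modeled with std::atomic.
template <typename T>
class AtomicValue {
 public:
  explicit AtomicValue(T initial) : value_(initial) {}
  T Value() { return value_.load(std::memory_order_acquire); }
  void SetValue(T new_value) {
    value_.store(new_value, std::memory_order_release);
  }
  // Returns true iff the stored value was still |old_value| and has been
  // replaced by |new_value| (a single compare-and-swap).
  bool TrySetValue(T old_value, T new_value) {
    return value_.compare_exchange_strong(old_value, new_value);
  }

 private:
  std::atomic<T> value_;
};

// Like AtomicValue, plus atomic arithmetic for counters.
template <typename T>
class AtomicNumber {
 public:
  AtomicNumber() : value_(0) {}
  T Value() { return value_.load(std::memory_order_acquire); }
  void SetValue(T new_value) {
    value_.store(new_value, std::memory_order_release);
  }
  void Increment(T increment) {
    value_.fetch_add(increment, std::memory_order_relaxed);
  }

 private:
  std::atomic<T> value_;
};
```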
(...skipping 534 matching lines...)
  static const size_t kSlotsBufferOffset =
      kLiveBytesOffset + kIntSize;  // int live_byte_count_

  static const size_t kWriteBarrierCounterOffset =
      kSlotsBufferOffset + kPointerSize  // SlotsBuffer* slots_buffer_;
      + kPointerSize;                    // SkipList* skip_list_;

  static const size_t kMinHeaderSize =
      kWriteBarrierCounterOffset +
      kIntptrSize         // intptr_t write_barrier_counter_
      + kIntSize          // int progress_bar_
-     + kIntSize          // int high_water_mark_
+     + kPointerSize      // AtomicValue high_water_mark_
      + kPointerSize      // base::Mutex* mutex_
      + kPointerSize      // base::AtomicWord parallel_sweeping_
-     + 5 * kIntSize      // int free-list statistics
+     + 5 * kPointerSize  // AtomicNumber free-list statistics
      + kPointerSize      // base::AtomicWord next_chunk_
      + kPointerSize;     // base::AtomicWord prev_chunk_

  // We add some more space to the computed header size to amount for missing
  // alignment requirements in our computation.
  // Try to get kHeaderSize properly aligned on 32-bit and 64-bit machines.
  static const size_t kHeaderSize = kMinHeaderSize + kIntSize;

  static const int kBodyOffset =
      CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);

  // The start offset of the object area in a page. Aligned to both maps and
(...skipping 86 matching lines...)
    DCHECK(slots_buffer_ == NULL);
    ClearFlag(EVACUATION_CANDIDATE);
  }

  Address area_start() { return area_start_; }
  Address area_end() { return area_end_; }
  int area_size() { return static_cast<int>(area_end() - area_start()); }
  bool CommitArea(size_t requested);

  // Approximate amount of physical memory committed for this chunk.
- size_t CommittedPhysicalMemory() { return high_water_mark_; }
+ size_t CommittedPhysicalMemory() { return high_water_mark_.Value(); }

  // Should be called when memory chunk is about to be freed.
  void ReleaseAllocatedMemory();

  static inline void UpdateHighWaterMark(Address mark) {
-   if (mark == NULL) return;
+   if (mark == nullptr) return;
    // Need to subtract one from the mark because when a chunk is full the
    // top points to the next address after the chunk, which effectively
    // belongs to another chunk. See the comment to Page::FromAllocationTop.
    MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
-   int new_mark = static_cast<int>(mark - chunk->address());
-   if (new_mark > chunk->high_water_mark_) {
-     chunk->high_water_mark_ = new_mark;
-   }
+   intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address());
+   intptr_t old_mark = 0;
+   do {
+     old_mark = chunk->high_water_mark_.Value();
+   } while ((new_mark > old_mark) &&
+            !chunk->high_water_mark_.TrySetValue(old_mark, new_mark));
  }
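The replacement loop in UpdateHighWaterMark is the standard lock-free pattern for maintaining a monotonic maximum: read, test, and publish with a compare-and-swap, retrying only when another thread raced in with a value that is still below ours. A minimal stand-alone sketch of the same pattern, using std::atomic and a hypothetical UpdateMaximum helper:

```cpp
#include <atomic>
#include <cstdint>

// Raise |maximum| to |candidate| only if |candidate| is larger.
// On CAS failure, compare_exchange_weak reloads |current| with the value
// another thread just published, so the loop re-tests before retrying.
void UpdateMaximum(std::atomic<intptr_t>* maximum, intptr_t candidate) {
  intptr_t current = maximum->load(std::memory_order_relaxed);
  while (candidate > current &&
         !maximum->compare_exchange_weak(current, candidate)) {
    // Retry with the refreshed |current|.
  }
}
```

Either the candidate stops being an improvement and the loop exits, or the CAS eventually succeeds; the water mark therefore never decreases.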

 protected:
  size_t size_;
  intptr_t flags_;

  // Start and end of allocatable memory on this chunk.
  Address area_start_;
  Address area_end_;

(...skipping 10 matching lines...)
  // Count of bytes marked black on page.
  int live_byte_count_;
  SlotsBuffer* slots_buffer_;
  SkipList* skip_list_;
  intptr_t write_barrier_counter_;
  // Used by the incremental marker to keep track of the scanning progress in
  // large objects that have a progress bar and are scanned in increments.
  int progress_bar_;
  // Assuming the initial allocation on a page is sequential,
  // count highest number of bytes ever allocated on the page.
- int high_water_mark_;
+ AtomicValue<intptr_t> high_water_mark_;

  base::Mutex* mutex_;
  base::AtomicWord parallel_sweeping_;

  // PagedSpace free-list statistics.
- int available_in_small_free_list_;
- int available_in_medium_free_list_;
- int available_in_large_free_list_;
- int available_in_huge_free_list_;
- int non_available_small_blocks_;
+ AtomicNumber<intptr_t> available_in_small_free_list_;
+ AtomicNumber<intptr_t> available_in_medium_free_list_;
+ AtomicNumber<intptr_t> available_in_large_free_list_;
+ AtomicNumber<intptr_t> available_in_huge_free_list_;
+ AtomicNumber<intptr_t> non_available_small_blocks_;

  // next_chunk_ holds a pointer of type MemoryChunk
  base::AtomicWord next_chunk_;
  // prev_chunk_ holds a pointer of type MemoryChunk
  base::AtomicWord prev_chunk_;

  static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
                                 Address area_start, Address area_end,
                                 Executability executable, Space* owner);

(...skipping 78 matching lines...)

  void InitializeAsAnchor(PagedSpace* owner);

  bool WasSwept() { return IsFlagSet(WAS_SWEPT); }
  void SetWasSwept() { SetFlag(WAS_SWEPT); }
  void ClearWasSwept() { ClearFlag(WAS_SWEPT); }

  void ResetFreeListStatistics();

  int LiveBytesFromFreeList() {
-   return area_size() - non_available_small_blocks_ -
-          available_in_small_free_list_ - available_in_medium_free_list_ -
-          available_in_large_free_list_ - available_in_huge_free_list_;
+   return static_cast<int>(
+       area_size() - non_available_small_blocks() -
+       available_in_small_free_list() - available_in_medium_free_list() -
+       available_in_large_free_list() - available_in_huge_free_list());
  }

- #define FRAGMENTATION_STATS_ACCESSORS(type, name) \
-   type name() { return name##_; }                 \
-   void set_##name(type name) { name##_ = name; }  \
-   void add_##name(type name) { name##_ += name; }
+ #define FRAGMENTATION_STATS_ACCESSORS(type, name)        \
+   type name() { return name##_.Value(); }                \
+   void set_##name(type name) { name##_.SetValue(name); } \
+   void add_##name(type name) { name##_.Increment(name); }

- FRAGMENTATION_STATS_ACCESSORS(int, non_available_small_blocks)
- FRAGMENTATION_STATS_ACCESSORS(int, available_in_small_free_list)
- FRAGMENTATION_STATS_ACCESSORS(int, available_in_medium_free_list)
- FRAGMENTATION_STATS_ACCESSORS(int, available_in_large_free_list)
- FRAGMENTATION_STATS_ACCESSORS(int, available_in_huge_free_list)
+ FRAGMENTATION_STATS_ACCESSORS(intptr_t, non_available_small_blocks)
+ FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_small_free_list)
+ FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_medium_free_list)
+ FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_large_free_list)
+ FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_huge_free_list)

  #undef FRAGMENTATION_STATS_ACCESSORS
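Since the accessors are generated by token pasting, one concrete expansion may help. After this patch, FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_small_free_list) expands to:

```cpp
intptr_t available_in_small_free_list() {
  return available_in_small_free_list_.Value();
}
void set_available_in_small_free_list(intptr_t available_in_small_free_list) {
  available_in_small_free_list_.SetValue(available_in_small_free_list);
}
void add_available_in_small_free_list(intptr_t available_in_small_free_list) {
  available_in_small_free_list_.Increment(available_in_small_free_list);
}
```

That is, every read, write, and increment of the free-list statistics now routes through the atomic wrapper instead of touching a plain int field.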
848 852
849 #ifdef DEBUG 853 #ifdef DEBUG
850 void Print(); 854 void Print();
851 #endif // DEBUG 855 #endif // DEBUG
852 856
853 friend class MemoryAllocator; 857 friend class MemoryAllocator;
854 }; 858 };
855 859
(...skipping 265 matching lines...)
  // bookkeeping and calls the allocation callback.
  void PreFreeMemory(MemoryChunk* chunk);

  // FreeMemory can be called concurrently when PreFree was executed before.
  void PerformFreeMemory(MemoryChunk* chunk);

  // Free is a wrapper method, which calls PreFree and PerformFreeMemory
  // together.
  void Free(MemoryChunk* chunk);

+ // Returns allocated spaces in bytes.
+ intptr_t Size() { return size_.Value(); }
+
+ // Returns allocated executable spaces in bytes.
+ intptr_t SizeExecutable() { return size_executable_.Value(); }
+
  // Returns the maximum available bytes of heaps.
- intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }
-
- // Returns allocated spaces in bytes.
- intptr_t Size() { return size_; }
+ intptr_t Available() {
+   intptr_t size = Size();
+   return capacity_ < size ? 0 : capacity_ - size;
+ }

Hannes Payer (out of office), 2015/09/15 13:35:42:
    drop intptr_t size = Size(); and make it a one-liner

Michael Lippautz, 2015/09/15 13:44:47:
    As discussed offline, there are 2 uses of the value.

  // Returns the maximum available executable bytes of heaps.
  intptr_t AvailableExecutable() {
-   if (capacity_executable_ < size_executable_) return 0;
-   return capacity_executable_ - size_executable_;
+   intptr_t executable_size = SizeExecutable();
+   if (capacity_executable_ < executable_size) return 0;
+   return capacity_executable_ - executable_size;
  }

- // Returns allocated executable spaces in bytes.
- intptr_t SizeExecutable() { return size_executable_; }
-
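The review exchange above concerns why the new Available() reads size_ exactly once. With a concurrently updated counter, two separate loads can observe different values, so the comparison could pass against one value while the subtraction uses another. A sketch of the two variants, reusing the surrounding class's capacity_ and Size(); the "racy" version is hypothetical, shown only for contrast:

```cpp
// Racy variant: two independent atomic loads of size_. If another thread
// increments size_ between them, the guard may pass while the subtraction
// uses a different, larger value and produces a bogus result.
intptr_t AvailableRacy() {
  return capacity_ < Size() ? 0 : capacity_ - Size();
}

// What the patch does: load once, then both the comparison and the
// subtraction work on the same snapshot.
intptr_t AvailableSafe() {
  intptr_t size = Size();
  return capacity_ < size ? 0 : capacity_ - size;
}
```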
  // Returns maximum available bytes that the old space can have.
  intptr_t MaxAvailable() {
    return (Available() / Page::kPageSize) * Page::kMaxRegularHeapObjectSize;
  }

  // Returns an indication of whether a pointer is in a space that has
  // been allocated by this MemoryAllocator.
- V8_INLINE bool IsOutsideAllocatedSpace(const void* address) const {
-   return address < lowest_ever_allocated_ ||
-          address >= highest_ever_allocated_;
+ V8_INLINE bool IsOutsideAllocatedSpace(const void* address) {
+   return address < lowest_ever_allocated_.Value() ||
+          address >= highest_ever_allocated_.Value();
  }

  #ifdef DEBUG
  // Reports statistic info of the space.
  void ReportStatistics();
  #endif

  // Returns a MemoryChunk in which the memory region from commit_area_size to
  // reserve_area_size of the chunk area is reserved but not committed, it
  // could be committed later by calling MemoryChunk::CommitArea.
(...skipping 59 matching lines...)
  }

  MUST_USE_RESULT bool CommitExecutableMemory(base::VirtualMemory* vm,
                                              Address start, size_t commit_size,
                                              size_t reserved_size);

 private:
  Isolate* isolate_;

  // Maximum space size in bytes.
- size_t capacity_;
+ intptr_t capacity_;
  // Maximum subset of capacity_ that can be executable
- size_t capacity_executable_;
+ intptr_t capacity_executable_;

  // Allocated space size in bytes.
- size_t size_;
+ AtomicNumber<intptr_t> size_;
  // Allocated executable space size in bytes.
- size_t size_executable_;
+ AtomicNumber<intptr_t> size_executable_;

  // We keep the lowest and highest addresses allocated as a quick way
  // of determining that pointers are outside the heap. The estimate is
  // conservative, i.e. not all addrsses in 'allocated' space are allocated
  // to our heap. The range is [lowest, highest[, inclusive on the low end
  // and exclusive on the high end.
- void* lowest_ever_allocated_;
- void* highest_ever_allocated_;
+ AtomicValue<void*> lowest_ever_allocated_;
+ AtomicValue<void*> highest_ever_allocated_;

  struct MemoryAllocationCallbackRegistration {
    MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
                                         ObjectSpace space,
                                         AllocationAction action)
        : callback(callback), space(space), action(action) {}
    MemoryAllocationCallback callback;
    ObjectSpace space;
    AllocationAction action;
  };

  // A List of callback that are triggered when memory is allocated or free'd
  List<MemoryAllocationCallbackRegistration> memory_allocation_callbacks_;

  // Initializes pages in a chunk. Returns the first page address.
  // This function and GetChunkId() are provided for the mark-compact
  // collector to rebuild page headers in the from space, which is
  // used as a marking stack and its page headers are destroyed.
  Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
                               PagedSpace* owner);

  void UpdateAllocatedSpaceLimits(void* low, void* high) {
-   lowest_ever_allocated_ = Min(lowest_ever_allocated_, low);
-   highest_ever_allocated_ = Max(highest_ever_allocated_, high);
+   // The use of atomic primitives does not guarantee correctness (wrt.
+   // desired semantics) by default. The loop here ensures that we update the
+   // values only if they did not change in between.
+   void* ptr = nullptr;
+   do {
+     ptr = lowest_ever_allocated_.Value();
+   } while ((low < ptr) && !lowest_ever_allocated_.TrySetValue(ptr, low));
+   do {
+     ptr = highest_ever_allocated_.Value();
+   } while ((high > ptr) && !highest_ever_allocated_.TrySetValue(ptr, high));
  }
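UpdateAllocatedSpaceLimits applies the same compare-and-swap loop twice, once for a monotonic minimum and once for a monotonic maximum, so the tracked [lowest_ever_allocated_, highest_ever_allocated_) range only ever widens. A stand-alone sketch of the pattern, using std::atomic and a hypothetical UpdateLimits helper:

```cpp
#include <atomic>

// Conservatively widen the tracked [*lowest, *highest) range to cover
// [low, high). Each CAS publishes a bound only while it still improves on
// the value read in this iteration; on failure compare_exchange_weak
// refreshes |ptr| and the loop re-tests, exactly like the loops above.
void UpdateLimits(std::atomic<void*>* lowest, std::atomic<void*>* highest,
                  void* low, void* high) {
  void* ptr = lowest->load();
  while (low < ptr && !lowest->compare_exchange_weak(ptr, low)) {
    // Retry with refreshed |ptr|.
  }
  ptr = highest->load();
  while (high > ptr && !highest->compare_exchange_weak(ptr, high)) {
    // Retry with refreshed |ptr|.
  }
}
```

Because the bounds only grow, IsOutsideAllocatedSpace stays conservative: it may report an unrelated address as inside the range, but it never reports a heap address as outside.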

  DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
};


// -----------------------------------------------------------------------------
// Interface for heap object iterator to be implemented by all object space
// object iterators.
//
(...skipping 1636 matching lines...)
    count = 0;
  }
  // Must be small, since an iteration is used for lookup.
  static const int kMaxComments = 64;
};
#endif
}
}  // namespace v8::internal

#endif  // V8_HEAP_SPACES_H_