Chromium Code Reviews

Diff: src/heap/spaces.h

Issue 2783873002: [heap] Reland: Make SlotSet allocation thread-safe and refactor code. (Closed)
Patch Set: comment Created 3 years, 8 months ago
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #ifndef V8_HEAP_SPACES_H_
 #define V8_HEAP_SPACES_H_

 #include <list>
 #include <memory>
 #include <unordered_set>
(...skipping 113 matching lines...)
   kHuge,

   kFirstCategory = kTiniest,
   kLastCategory = kHuge,
   kNumberOfCategories = kLastCategory + 1,
   kInvalidCategory
 };

 enum FreeMode { kLinkCategory, kDoNotLinkCategory };

+enum RememberedSetType {
+  OLD_TO_NEW,
+  OLD_TO_OLD,
+  NUMBER_OF_REMEMBERED_SET_TYPES = OLD_TO_OLD + 1
+};
+
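The added RememberedSetType enum is what lets the rest of the patch collapse each old-to-new/old-to-old pair of members and getters into one array plus one templated accessor. A minimal standalone sketch of that pattern (Chunk and Set below are illustrative stand-ins, not the real MemoryChunk/SlotSet):

    enum RememberedSetType {
      OLD_TO_NEW,
      OLD_TO_OLD,
      NUMBER_OF_REMEMBERED_SET_TYPES = OLD_TO_OLD + 1
    };

    struct Set {};  // stand-in for SlotSet / TypedSlotSet

    class Chunk {  // stand-in for MemoryChunk
     public:
      // One getter, parameterized by the enum, replaces a pair of
      // type-specific getters; the enum value doubles as the array index.
      template <RememberedSetType type>
      Set* set() { return sets_[type]; }

     private:
      Set* sets_[NUMBER_OF_REMEMBERED_SET_TYPES] = {nullptr, nullptr};
    };

    // Usage: Chunk c; Set* s = c.set<OLD_TO_NEW>();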
 // A free list category maintains a linked list of free memory blocks.
 class FreeListCategory {
  public:
   static const int kSize = kIntSize +      // FreeListCategoryType type_
                            kIntSize +      // padding for type_
                            kSizetSize +    // size_t available_
                            kPointerSize +  // FreeSpace* top_
                            kPointerSize +  // FreeListCategory* prev_
                            kPointerSize;   // FreeListCategory* next_

(...skipping 183 matching lines...)
       kSizeOffset          // NOLINT
       + kSizetSize         // size_t size
       + kIntptrSize        // Flags flags_
       + kPointerSize       // Address area_start_
       + kPointerSize       // Address area_end_
       + 2 * kPointerSize   // base::VirtualMemory reservation_
       + kPointerSize       // Address owner_
       + kPointerSize       // Heap* heap_
       + kIntptrSize        // intptr_t progress_bar_
       + kIntptrSize        // intptr_t live_byte_count_
-      + kPointerSize       // SlotSet* old_to_new_slots_
-      + kPointerSize       // SlotSet* old_to_old_slots_
-      + kPointerSize       // TypedSlotSet* typed_old_to_new_slots_
-      + kPointerSize       // TypedSlotSet* typed_old_to_old_slots_
+      + kPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES  // SlotSet* array
+      + kPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES  // TypedSlotSet* array
       + kPointerSize       // SkipList* skip_list_
       + kPointerSize       // AtomicValue high_water_mark_
       + kPointerSize       // base::Mutex* mutex_
       + kPointerSize       // base::AtomicWord concurrent_sweeping_
       + 2 * kSizetSize     // AtomicNumber free-list statistics
       + kPointerSize       // AtomicValue next_chunk_
       + kPointerSize       // AtomicValue prev_chunk_
       + FreeListCategory::kSize * kNumberOfCategories
       // FreeListCategory categories_[kNumberOfCategories]
       + kPointerSize       // LocalArrayBufferTracker* local_tracker_
       + kIntptrSize        // intptr_t young_generation_live_byte_count_
       + kPointerSize;      // Bitmap* young_generation_bitmap_

   // We add some more space to the computed header size to amount for missing
   // alignment requirements in our computation.
   // Try to get kHeaderSize properly aligned on 32-bit and 64-bit machines.
   static const size_t kHeaderSize = kMinHeaderSize;
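Because NUMBER_OF_REMEMBERED_SET_TYPES is 2, the two new array terms add up to the same 4 * kPointerSize as the four per-type pointer terms they replace, so kMinHeaderSize does not change. A standalone sketch of that arithmetic (kPointerSize below is a stand-in for V8's constant, not the real definition):

    #include <cstddef>

    constexpr std::size_t kPointerSize = sizeof(void*);  // stand-in value

    enum RememberedSetType {
      OLD_TO_NEW,
      OLD_TO_OLD,
      NUMBER_OF_REMEMBERED_SET_TYPES = OLD_TO_OLD + 1
    };

    // Old layout: four individual pointers (SlotSet* x2, TypedSlotSet* x2).
    constexpr std::size_t kOldSlotPointerBytes = 4 * kPointerSize;
    // New layout: two arrays with NUMBER_OF_REMEMBERED_SET_TYPES entries each.
    constexpr std::size_t kNewSlotPointerBytes =
        2 * kPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES;
    static_assert(kOldSlotPointerBytes == kNewSlotPointerBytes,
                  "the refactor leaves the chunk header size unchanged");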
(...skipping 64 matching lines...)

   size_t size() const { return size_; }
   void set_size(size_t size) { size_ = size; }

   inline Heap* heap() const { return heap_; }

   inline SkipList* skip_list() { return skip_list_; }

   inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; }

-  inline SlotSet* old_to_new_slots() { return old_to_new_slots_.Value(); }
-  inline SlotSet* old_to_old_slots() { return old_to_old_slots_; }
-  inline TypedSlotSet* typed_old_to_new_slots() {
-    return typed_old_to_new_slots_.Value();
-  }
-  inline TypedSlotSet* typed_old_to_old_slots() {
-    return typed_old_to_old_slots_;
-  }
+  template <RememberedSetType type>
+  SlotSet* slot_set() {
+    return slot_set_[type].Value();
+  }
+
+  template <RememberedSetType type>
+  TypedSlotSet* typed_slot_set() {
+    return typed_slot_set_[type].Value();
+  }
+
   inline LocalArrayBufferTracker* local_tracker() { return local_tracker_; }

-  V8_EXPORT_PRIVATE void AllocateOldToNewSlots();
-  void ReleaseOldToNewSlots();
-  V8_EXPORT_PRIVATE void AllocateOldToOldSlots();
-  void ReleaseOldToOldSlots();
-  void AllocateTypedOldToNewSlots();
-  void ReleaseTypedOldToNewSlots();
-  void AllocateTypedOldToOldSlots();
-  void ReleaseTypedOldToOldSlots();
+  template <RememberedSetType type>
+  SlotSet* AllocateSlotSet();
+  template <RememberedSetType type>
+  void ReleaseSlotSet();
+  template <RememberedSetType type>
+  TypedSlotSet* AllocateTypedSlotSet();
+  template <RememberedSetType type>
+  void ReleaseTypedSlotSet();
   void AllocateLocalTracker();
   void ReleaseLocalTracker();
   void AllocateYoungGenerationBitmap();
   void ReleaseYoungGenerationBitmap();

   Address area_start() { return area_start_; }
   Address area_end() { return area_end_; }
   size_t area_size() { return static_cast<size_t>(area_end() - area_start()); }

   bool CommitArea(size_t requested);
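Call sites such as RememberedSet<type> in src/heap/remembered-set.h can now select the per-chunk set with a template argument instead of a type-specific getter. A hedged sketch of that call pattern; SlotSet::Insert, Page::kPageSize, and MemoryChunk::address() are assumed from the surrounding V8 code, not introduced by this patch:

    // Sketch only: record a slot in chunk's remembered set of the given type,
    // allocating the set lazily on first use.
    template <RememberedSetType type>
    void RecordSlot(MemoryChunk* chunk, Address slot_addr) {
      SlotSet* slot_set = chunk->slot_set<type>();
      if (slot_set == nullptr) {
        slot_set = chunk->AllocateSlotSet<type>();
      }
      // For large pages slot_set points at an array with one entry per
      // kPageSize bucket (see the member comment further down), so the offset
      // picks the bucket and the remainder is the in-bucket slot offset.
      uintptr_t offset = slot_addr - chunk->address();
      slot_set[offset / Page::kPageSize].Insert(offset % Page::kPageSize);
    }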
(...skipping 130 matching lines...)
   // Used by the incremental marker to keep track of the scanning progress in
   // large objects that have a progress bar and are scanned in increments.
   intptr_t progress_bar_;

   // Count of bytes marked black on page.
   intptr_t live_byte_count_;

   // A single slot set for small pages (of size kPageSize) or an array of slot
   // set for large pages. In the latter case the number of entries in the array
   // is ceil(size() / kPageSize).
-  base::AtomicValue<SlotSet*> old_to_new_slots_;
-  SlotSet* old_to_old_slots_;
-  base::AtomicValue<TypedSlotSet*> typed_old_to_new_slots_;
-  TypedSlotSet* typed_old_to_old_slots_;
+  base::AtomicValue<SlotSet*> slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
+  base::AtomicValue<TypedSlotSet*>
+      typed_slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];

   SkipList* skip_list_;

   // Assuming the initial allocation on a page is sequential,
   // count highest number of bytes ever allocated on the page.
   base::AtomicValue<intptr_t> high_water_mark_;

   base::Mutex* mutex_;

   base::AtomicValue<ConcurrentSweepingState> concurrent_sweeping_;
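All of the slot-set pointers are now held in base::AtomicValue, which is what makes lazy allocation safe when several threads race to create the same set: each racer allocates, tries to publish with a compare-and-swap, and discards its copy if another thread won. A standalone sketch of that pattern using std::atomic (an assumption about the approach, not the actual AllocateSlotSet implementation):

    #include <atomic>

    struct SlotSetStub {};  // stand-in for v8::internal::SlotSet

    // One entry of a slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES]-style array.
    std::atomic<SlotSetStub*> slot_set_entry{nullptr};

    // Returns the published set, allocating it if no other thread has yet.
    SlotSetStub* AllocateSlotSetOnce() {
      SlotSetStub* existing = slot_set_entry.load(std::memory_order_acquire);
      if (existing != nullptr) return existing;  // already published
      SlotSetStub* fresh = new SlotSetStub();
      SlotSetStub* expected = nullptr;
      if (slot_set_entry.compare_exchange_strong(expected, fresh,
                                                 std::memory_order_acq_rel)) {
        return fresh;  // this thread won the race and published its set
      }
      delete fresh;    // another thread won; use the pointer it published
      return expected;
    }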
(...skipping 2340 matching lines...)
   PageIterator old_iterator_;
   PageIterator code_iterator_;
   PageIterator map_iterator_;
   LargePageIterator lo_iterator_;
 };

 }  // namespace internal
 }  // namespace v8

 #endif  // V8_HEAP_SPACES_H_
