Chromium Code Reviews

Diff: src/heap/spaces.h

Issue 2773093002: Revert "[heap] Make SlotSet allocation thread-safe and refactor code." (Closed)
Patch Set: Created 3 years, 9 months ago
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #ifndef V8_HEAP_SPACES_H_
 #define V8_HEAP_SPACES_H_

 #include <list>
 #include <memory>
 #include <unordered_set>
(...skipping 113 matching lines...)
   kHuge,

   kFirstCategory = kTiniest,
   kLastCategory = kHuge,
   kNumberOfCategories = kLastCategory + 1,
   kInvalidCategory
 };

 enum FreeMode { kLinkCategory, kDoNotLinkCategory };

-enum RememberedSetType {
-  OLD_TO_NEW,
-  OLD_TO_OLD,
-  NUMBER_OF_REMEMBERED_SET_TYPES = OLD_TO_OLD + 1
-};
-
 // A free list category maintains a linked list of free memory blocks.
 class FreeListCategory {
  public:
   static const int kSize = kIntSize +      // FreeListCategoryType type_
                            kIntSize +      // padding for type_
                            kSizetSize +    // size_t available_
                            kPointerSize +  // FreeSpace* top_
                            kPointerSize +  // FreeListCategory* prev_
                            kPointerSize;   // FreeListCategory* next_

(...skipping 187 matching lines...)
       kSizeOffset         // NOLINT
       + kSizetSize        // size_t size
       + kIntptrSize       // Flags flags_
       + kPointerSize      // Address area_start_
       + kPointerSize      // Address area_end_
       + 2 * kPointerSize  // base::VirtualMemory reservation_
       + kPointerSize      // Address owner_
       + kPointerSize      // Heap* heap_
       + kIntptrSize       // intptr_t progress_bar_
       + kIntptrSize       // intptr_t live_byte_count_
-      + kPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES  // SlotSet* array
-      + kPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES  // TypedSlotSet* array
+      + kPointerSize      // SlotSet* old_to_new_slots_
+      + kPointerSize      // SlotSet* old_to_old_slots_
+      + kPointerSize      // TypedSlotSet* typed_old_to_new_slots_
+      + kPointerSize      // TypedSlotSet* typed_old_to_old_slots_
       + kPointerSize      // SkipList* skip_list_
       + kPointerSize      // AtomicValue high_water_mark_
       + kPointerSize      // base::Mutex* mutex_
       + kPointerSize      // base::AtomicWord concurrent_sweeping_
       + 2 * kSizetSize    // AtomicNumber free-list statistics
       + kPointerSize      // AtomicValue next_chunk_
       + kPointerSize      // AtomicValue prev_chunk_
       + FreeListCategory::kSize * kNumberOfCategories
       // FreeListCategory categories_[kNumberOfCategories]
       + kPointerSize      // LocalArrayBufferTracker* local_tracker_
       + kIntptrSize       // intptr_t young_generation_live_byte_count_
       + kPointerSize;     // Bitmap* young_generation_bitmap_

   // We add some more space to the computed header size to amount for missing
   // alignment requirements in our computation.
   // Try to get kHeaderSize properly aligned on 32-bit and 64-bit machines.
   static const size_t kHeaderSize = kMinHeaderSize;
(...skipping 94 matching lines...)

   size_t size() const { return size_; }
   void set_size(size_t size) { size_ = size; }

   inline Heap* heap() const { return heap_; }

   inline SkipList* skip_list() { return skip_list_; }

   inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; }

-  template <RememberedSetType type>
-  SlotSet* slot_set() {
-    return slot_set_[type].Value();
-  }
-
-  template <RememberedSetType type>
-  TypedSlotSet* typed_slot_set() {
-    return typed_slot_set_[type].Value();
-  }
-
+  inline SlotSet* old_to_new_slots() { return old_to_new_slots_.Value(); }
+  inline SlotSet* old_to_old_slots() { return old_to_old_slots_; }
+  inline TypedSlotSet* typed_old_to_new_slots() {
+    return typed_old_to_new_slots_.Value();
+  }
+  inline TypedSlotSet* typed_old_to_old_slots() {
+    return typed_old_to_old_slots_;
+  }
   inline LocalArrayBufferTracker* local_tracker() { return local_tracker_; }

-  template <RememberedSetType type>
-  SlotSet* AllocateSlotSet();
-  template <RememberedSetType type>
-  void ReleaseSlotSet();
-  template <RememberedSetType type>
-  TypedSlotSet* AllocateTypedSlotSet();
-  template <RememberedSetType type>
-  void ReleaseTypedSlotSet();
+  V8_EXPORT_PRIVATE void AllocateOldToNewSlots();
+  void ReleaseOldToNewSlots();
+  V8_EXPORT_PRIVATE void AllocateOldToOldSlots();
+  void ReleaseOldToOldSlots();
+  void AllocateTypedOldToNewSlots();
+  void ReleaseTypedOldToNewSlots();
+  void AllocateTypedOldToOldSlots();
+  void ReleaseTypedOldToOldSlots();
   void AllocateLocalTracker();
   void ReleaseLocalTracker();
   void AllocateYoungGenerationBitmap();
   void ReleaseYoungGenerationBitmap();

   Address area_start() { return area_start_; }
   Address area_end() { return area_end_; }
   size_t area_size() { return static_cast<size_t>(area_end() - area_start()); }

   bool CommitArea(size_t requested);
(...skipping 151 matching lines...)
   // Used by the incremental marker to keep track of the scanning progress in
   // large objects that have a progress bar and are scanned in increments.
   intptr_t progress_bar_;

   // Count of bytes marked black on page.
   intptr_t live_byte_count_;

   // A single slot set for small pages (of size kPageSize) or an array of slot
   // set for large pages. In the latter case the number of entries in the array
   // is ceil(size() / kPageSize).
-  base::AtomicValue<SlotSet*> slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
-  base::AtomicValue<TypedSlotSet*>
-      typed_slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
+  base::AtomicValue<SlotSet*> old_to_new_slots_;
+  SlotSet* old_to_old_slots_;
+  base::AtomicValue<TypedSlotSet*> typed_old_to_new_slots_;
+  TypedSlotSet* typed_old_to_old_slots_;

   SkipList* skip_list_;

   // Assuming the initial allocation on a page is sequential,
   // count highest number of bytes ever allocated on the page.
   base::AtomicValue<intptr_t> high_water_mark_;

   base::Mutex* mutex_;

   base::AtomicValue<ConcurrentSweepingState> concurrent_sweeping_;
(...skipping 2295 matching lines...)
   PageIterator old_iterator_;
   PageIterator code_iterator_;
   PageIterator map_iterator_;
   LargePageIterator lo_iterator_;
 };

 }  // namespace internal
 }  // namespace v8

 #endif  // V8_HEAP_SPACES_H_
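
Not part of the patch, just an illustrative sketch of the two chunk-header shapes this CL switches between, reduced to plain pointers so it compiles on its own. SlotSet, the two struct names and the field initializers here are simplified stand-ins, not the real V8 types. The left/OLD column above keeps one enum-indexed array behind a templated accessor; the right/NEW column restores one named member and one named accessor per remembered-set type.

#include <cstddef>

// Simplified stand-in for v8::internal::SlotSet (the real class lives in
// src/heap/slot-set.h).
struct SlotSet {};

// Pre-revert shape (left/OLD column): the remembered-set type is a template
// parameter indexing a single array of slot-set pointers.
enum RememberedSetType {
  OLD_TO_NEW,
  OLD_TO_OLD,
  NUMBER_OF_REMEMBERED_SET_TYPES = OLD_TO_OLD + 1
};

struct ChunkWithSlotSetArray {
  SlotSet* slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES] = {nullptr, nullptr};

  template <RememberedSetType type>
  SlotSet* slot_set() {
    return slot_set_[type];
  }
};

// Post-revert shape (right/NEW column): one named member and one named
// accessor per remembered-set type.
struct ChunkWithNamedSlots {
  SlotSet* old_to_new_slots_ = nullptr;
  SlotSet* old_to_old_slots_ = nullptr;

  SlotSet* old_to_new_slots() { return old_to_new_slots_; }
  SlotSet* old_to_old_slots() { return old_to_old_slots_; }
};

int main() {
  ChunkWithSlotSetArray array_chunk;
  ChunkWithNamedSlots named_chunk;
  // Call sites change from the templated form to the per-type form.
  SlotSet* before = array_chunk.slot_set<OLD_TO_NEW>();
  SlotSet* after = named_chunk.old_to_new_slots();
  return (before == nullptr && after == nullptr) ? 0 : 1;
}

The sketch ignores one detail visible in the diff: after the revert only the old-to-new sets stay behind base::AtomicValue wrappers, while old_to_old_slots_ and typed_old_to_old_slots_ become plain pointers.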
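
A second, compile-time-only sanity check on the kMinHeaderSize sums above: NUMBER_OF_REMEMBERED_SET_TYPES is 2, so the two array-sized entries on the pre-revert side (two pointers for the SlotSet* array plus two for the TypedSlotSet* array) reserve exactly as much space as the four individually listed pointers on the post-revert side. The constants below are local stand-ins for the V8 ones.

#include <cstddef>

// Local stand-ins for the constants used in the kMinHeaderSize sums above.
constexpr std::size_t kPointerSize = sizeof(void*);
constexpr std::size_t kNumRememberedSetTypes = 2;  // OLD_TO_NEW, OLD_TO_OLD

// Pre-revert: one SlotSet* array and one TypedSlotSet* array, each with
// NUMBER_OF_REMEMBERED_SET_TYPES entries.
constexpr std::size_t kPreRevertWords =
    2 * kPointerSize * kNumRememberedSetTypes;

// Post-revert: four individually named slot-set pointers.
constexpr std::size_t kPostRevertWords = 4 * kPointerSize;

static_assert(kPreRevertWords == kPostRevertWords,
              "the revert does not change the reserved header size");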
