Chromium Code Reviews

Unified Diff: src/heap/spaces.h

Issue 2024063002: Reland "[heap] Fine-grained JSArrayBuffer tracking" (Closed)
Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: bugfix and test added. Created 4 years, 6 months ago.
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #ifndef V8_HEAP_SPACES_H_
 #define V8_HEAP_SPACES_H_

 #include <list>

 #include "src/allocation.h"
 #include "src/base/atomic-utils.h"
 #include "src/base/atomicops.h"
 #include "src/base/bits.h"
 #include "src/base/platform/mutex.h"
 #include "src/flags.h"
 #include "src/hashmap.h"
+#include "src/heap/array-buffer-tracker.h"
 #include "src/list.h"
 #include "src/objects.h"
 #include "src/utils.h"

 namespace v8 {
 namespace internal {

 class AllocationInfo;
 class AllocationObserver;
 class CompactionSpace;
(...skipping 434 matching lines...)
   // not be performed on that page. Sweeper threads that are done with their
   // work will set this value and not touch the page anymore.
   // |kSweepingPending|: This page is ready for parallel sweeping.
   // |kSweepingInProgress|: This page is currently swept by a sweeper thread.
   enum ConcurrentSweepingState {
     kSweepingDone,
     kSweepingPending,
     kSweepingInProgress,
   };

+  enum ArrayBufferTrackerAccessMode { kDontCreate, kCreateIfNotPresent };
+
   // Every n write barrier invocations we go to runtime even though
   // we could have handled it in generated code. This lets us check
   // whether we have hit the limit and should do some more marking.
   static const int kWriteBarrierCounterGranularity = 500;

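The header tracks write_barrier_counter_ for this purpose. A minimal standalone sketch of the "go to runtime every n invocations" pattern the comment describes, with hypothetical names and counter handling (the real check is emitted in generated code):

    // Sketch only: models going to the runtime every n write barriers.
    struct WriteBarrierCounterSketch {
      static const int kGranularity = 500;  // kWriteBarrierCounterGranularity
      int counter = kGranularity;

      // Returns true when the slow (runtime) path should be taken.
      bool RecordWrite() {
        if (--counter == 0) {
          counter = kGranularity;
          return true;  // Hit the limit: do some more marking in the runtime.
        }
        return false;  // Fast path: handled entirely in generated code.
      }
    };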
   static const int kPointersToHereAreInterestingMask =
       1 << POINTERS_TO_HERE_ARE_INTERESTING;

   static const int kPointersFromHereAreInterestingMask =
       1 << POINTERS_FROM_HERE_ARE_INTERESTING;
(...skipping 35 matching lines...)
   static const size_t kMinHeaderSize =
       kWriteBarrierCounterOffset +
       kIntptrSize         // intptr_t write_barrier_counter_
       + kPointerSize      // AtomicValue high_water_mark_
       + kPointerSize      // base::Mutex* mutex_
       + kPointerSize      // base::AtomicWord concurrent_sweeping_
       + 2 * kPointerSize  // AtomicNumber free-list statistics
       + kPointerSize      // AtomicValue next_chunk_
       + kPointerSize      // AtomicValue prev_chunk_
       // FreeListCategory categories_[kNumberOfCategories]
-      + FreeListCategory::kSize * kNumberOfCategories;
+      + FreeListCategory::kSize * kNumberOfCategories +
+      kPointerSize;  // LocalArrayBufferTracker tracker_

   // We add some more space to the computed header size to account for missing
   // alignment requirements in our computation.
   // Try to get kHeaderSize properly aligned on 32-bit and 64-bit machines.
   static const size_t kHeaderSize = kMinHeaderSize;

   static const int kBodyOffset =
       CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);

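For the header arithmetic above: on a 64-bit build (kPointerSize == 8) the new tracker slot grows kMinHeaderSize by 8 bytes, and kBodyOffset then rounds the header plus marking bitmap up to code-pointer alignment. A small sketch of the round-up idiom such alignment macros typically use, assuming a power-of-two alignment (names here are illustrative, not V8's):

    #include <cstddef>

    // Round value up to the next multiple of a power-of-two alignment.
    constexpr std::size_t RoundUp(std::size_t value, std::size_t alignment) {
      return (value + alignment - 1) & ~(alignment - 1);
    }

    // E.g. a 513-byte header rounds up to the next 64-byte boundary.
    static_assert(RoundUp(513, 64) == 576, "rounds up to alignment");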
   // The start offset of the object area in a page. Aligned to both maps and
(...skipping 98 matching lines...)

   void AllocateOldToNewSlots();
   void ReleaseOldToNewSlots();
   void AllocateOldToOldSlots();
   void ReleaseOldToOldSlots();
   void AllocateTypedOldToNewSlots();
   void ReleaseTypedOldToNewSlots();
   void AllocateTypedOldToOldSlots();
   void ReleaseTypedOldToOldSlots();

+  template <ArrayBufferTrackerAccessMode tracker_access>
+  inline LocalArrayBufferTracker* local_tracker() {
+    LocalArrayBufferTracker* tracker = local_tracker_.Value();
+    if (tracker == nullptr && tracker_access == kCreateIfNotPresent) {
+      tracker = new LocalArrayBufferTracker(heap_);
+      if (!local_tracker_.TrySetValue(nullptr, tracker)) {
+        tracker = local_tracker_.Value();
+      }
+      DCHECK_NOT_NULL(tracker);
+    }
+    return tracker;
+  }
+
+  void ReleaseLocalTracker();
+
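The local_tracker() accessor above creates the tracker lazily and publishes it with a compare-and-swap (TrySetValue), so two threads racing on first access converge on a single instance; the loser adopts the winner's pointer. A self-contained sketch of the same pattern using std::atomic, with hypothetical Tracker/PageSketch names; the sketch also deletes the losing thread's allocation so it is not leaked:

    #include <atomic>

    struct Tracker {};  // Stand-in for LocalArrayBufferTracker.

    class PageSketch {
     public:
      // Returns the page's tracker, creating it on first use. Concurrent
      // callers race via compare_exchange and agree on one object.
      Tracker* GetOrCreateTracker() {
        Tracker* tracker = tracker_.load(std::memory_order_acquire);
        if (tracker == nullptr) {
          Tracker* fresh = new Tracker();
          Tracker* expected = nullptr;
          if (tracker_.compare_exchange_strong(expected, fresh,
                                               std::memory_order_acq_rel)) {
            tracker = fresh;  // Won the race; our object is now published.
          } else {
            delete fresh;        // Lost the race: free our allocation...
            tracker = expected;  // ...and adopt the winner's tracker.
          }
        }
        return tracker;
      }

     private:
      std::atomic<Tracker*> tracker_{nullptr};
    };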
   Address area_start() { return area_start_; }
   Address area_end() { return area_end_; }
   int area_size() { return static_cast<int>(area_end() - area_start()); }

   bool CommitArea(size_t requested);

   // Approximate amount of physical memory committed for this chunk.
   size_t CommittedPhysicalMemory() { return high_water_mark_.Value(); }

   Address HighWaterMark() { return address() + high_water_mark_.Value(); }
(...skipping 165 matching lines...)
   base::AtomicNumber<intptr_t> available_in_free_list_;
   base::AtomicNumber<intptr_t> wasted_memory_;

   // next_chunk_ holds a pointer of type MemoryChunk
   base::AtomicValue<MemoryChunk*> next_chunk_;
   // prev_chunk_ holds a pointer of type MemoryChunk
   base::AtomicValue<MemoryChunk*> prev_chunk_;

   FreeListCategory categories_[kNumberOfCategories];

+  base::AtomicValue<LocalArrayBufferTracker*> local_tracker_;
+
  private:
   void InitializeReservedMemory() { reservation_.Reset(); }

   friend class MemoryAllocator;
   friend class MemoryChunkValidator;
 };

 // -----------------------------------------------------------------------------
 // A page is a memory chunk of size 1MB. Large object pages may be larger.
 //
(...skipping 1433 matching lines...)
   // sweeper.
   virtual void RefillFreeList();

   FreeList* free_list() { return &free_list_; }

   base::Mutex* mutex() { return &space_mutex_; }

   inline void UnlinkFreeListCategories(Page* page);
   inline intptr_t RelinkFreeListCategories(Page* page);

+  // Callback signature:
+  //   void Callback(Page*);
+  template <typename Callback>
+  void ForAllPages(Callback callback) {
+    PageIterator it(this);
+    while (it.has_next()) {
+      callback(it.next());
+    }
+  }
+
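A hypothetical call site for the new ForAllPages helper, with a stand-in space so the example is self-contained (Page, SpaceSketch, and live_bytes are made up for illustration):

    #include <cstdio>
    #include <vector>

    struct Page { int live_bytes = 0; };  // Stand-in for V8's Page.

    struct SpaceSketch {
      std::vector<Page> pages;

      // Mirrors the ForAllPages shape above: apply callback to every page.
      template <typename Callback>
      void ForAllPages(Callback callback) {
        for (Page& p : pages) callback(&p);
      }
    };

    int main() {
      SpaceSketch space;
      space.pages.resize(3);
      // Callback signature: void(Page*), passed here as a lambda.
      space.ForAllPages([](Page* page) { page->live_bytes = 0; });
      std::printf("visited %zu pages\n", space.pages.size());
    }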
  protected:
   // PagedSpaces that should be included in snapshots have different, i.e.,
   // smaller, initial pages.
   virtual bool snapshotable() { return true; }

   bool HasPages() { return anchor_.next_page() != &anchor_; }

   // Cleans up the space, frees all pages in this space except those belonging
   // to the initial chunk, uncommits addresses in the initial chunk.
   void TearDown();
(...skipping 843 matching lines...)
     count = 0;
   }
   // Must be small, since an iteration is used for lookup.
   static const int kMaxComments = 64;
 };
 #endif
 }  // namespace internal
 }  // namespace v8

 #endif  // V8_HEAP_SPACES_H_
