| OLD | NEW | 
|---|---|
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. | 
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be | 
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. | 
| 4 | 4 | 
| 5 #ifndef V8_HEAP_SPACES_H_ | 5 #ifndef V8_HEAP_SPACES_H_ | 
| 6 #define V8_HEAP_SPACES_H_ | 6 #define V8_HEAP_SPACES_H_ | 
| 7 | 7 | 
| 8 #include <list> | 8 #include <list> | 
| 9 | 9 | 
| 10 #include "src/allocation.h" | 10 #include "src/allocation.h" | 
| 11 #include "src/base/atomic-utils.h" | 11 #include "src/base/atomic-utils.h" | 
| 12 #include "src/base/atomicops.h" | 12 #include "src/base/atomicops.h" | 
| 13 #include "src/base/bits.h" | 13 #include "src/base/bits.h" | 
| 14 #include "src/base/platform/mutex.h" | 14 #include "src/base/platform/mutex.h" | 
| 15 #include "src/flags.h" | 15 #include "src/flags.h" | 
| 16 #include "src/hashmap.h" | 16 #include "src/hashmap.h" | 
| 17 #include "src/heap/array-buffer-tracker.h" |  | 
| 18 #include "src/list.h" | 17 #include "src/list.h" | 
| 19 #include "src/objects.h" | 18 #include "src/objects.h" | 
| 20 #include "src/utils.h" | 19 #include "src/utils.h" | 
| 21 | 20 | 
| 22 namespace v8 { | 21 namespace v8 { | 
| 23 namespace internal { | 22 namespace internal { | 
| 24 | 23 | 
| 25 class AllocationInfo; | 24 class AllocationInfo; | 
| 26 class AllocationObserver; | 25 class AllocationObserver; | 
| 27 class CompactionSpace; | 26 class CompactionSpace; | 
| (...skipping 434 matching lines...) | | 
| 462   //   not be performed on that page. Sweeper threads that are done with their | 461   //   not be performed on that page. Sweeper threads that are done with their | 
| 463   //   work will set this value and not touch the page anymore. | 462   //   work will set this value and not touch the page anymore. | 
| 464   // |kSweepingPending|: This page is ready for parallel sweeping. | 463   // |kSweepingPending|: This page is ready for parallel sweeping. | 
| 465   // |kSweepingInProgress|: This page is currently swept by a sweeper thread. | 464   // |kSweepingInProgress|: This page is currently swept by a sweeper thread. | 
| 466   enum ConcurrentSweepingState { | 465   enum ConcurrentSweepingState { | 
| 467     kSweepingDone, | 466     kSweepingDone, | 
| 468     kSweepingPending, | 467     kSweepingPending, | 
| 469     kSweepingInProgress, | 468     kSweepingInProgress, | 
| 470   }; | 469   }; | 
| 471 | 470 | 
| 472   enum ArrayBufferTrackerAccessMode { kDontCreate, kCreateIfNotPresent }; |  | 
| 473 |  | 
| 474   // Every n write barrier invocations we go to runtime even though | 471   // Every n write barrier invocations we go to runtime even though | 
| 475   // we could have handled it in generated code.  This lets us check | 472   // we could have handled it in generated code.  This lets us check | 
| 476   // whether we have hit the limit and should do some more marking. | 473   // whether we have hit the limit and should do some more marking. | 
| 477   static const int kWriteBarrierCounterGranularity = 500; | 474   static const int kWriteBarrierCounterGranularity = 500; | 
| 478 | 475 | 
| 479   static const int kPointersToHereAreInterestingMask = | 476   static const int kPointersToHereAreInterestingMask = | 
| 480       1 << POINTERS_TO_HERE_ARE_INTERESTING; | 477       1 << POINTERS_TO_HERE_ARE_INTERESTING; | 
| 481 | 478 | 
| 482   static const int kPointersFromHereAreInterestingMask = | 479   static const int kPointersFromHereAreInterestingMask = | 
| 483       1 << POINTERS_FROM_HERE_ARE_INTERESTING; | 480       1 << POINTERS_FROM_HERE_ARE_INTERESTING; | 
| (...skipping 35 matching lines...) | | 
| 519   static const size_t kMinHeaderSize = | 516   static const size_t kMinHeaderSize = | 
| 520       kWriteBarrierCounterOffset + | 517       kWriteBarrierCounterOffset + | 
| 521       kIntptrSize         // intptr_t write_barrier_counter_ | 518       kIntptrSize         // intptr_t write_barrier_counter_ | 
| 522       + kPointerSize      // AtomicValue high_water_mark_ | 519       + kPointerSize      // AtomicValue high_water_mark_ | 
| 523       + kPointerSize      // base::Mutex* mutex_ | 520       + kPointerSize      // base::Mutex* mutex_ | 
| 524       + kPointerSize      // base::AtomicWord concurrent_sweeping_ | 521       + kPointerSize      // base::AtomicWord concurrent_sweeping_ | 
| 525       + 2 * kPointerSize  // AtomicNumber free-list statistics | 522       + 2 * kPointerSize  // AtomicNumber free-list statistics | 
| 526       + kPointerSize      // AtomicValue next_chunk_ | 523       + kPointerSize      // AtomicValue next_chunk_ | 
| 527       + kPointerSize      // AtomicValue prev_chunk_ | 524       + kPointerSize      // AtomicValue prev_chunk_ | 
| 528       // FreeListCategory categories_[kNumberOfCategories] | 525       // FreeListCategory categories_[kNumberOfCategories] | 
| 529       + FreeListCategory::kSize * kNumberOfCategories + | 526       + FreeListCategory::kSize * kNumberOfCategories; | 
| 530       kPointerSize;  // LocalArrayBufferTracker tracker_ |  | 
| 531 | 527 | 
| 532   // We add some more space to the computed header size to amount for missing | 528   // We add some more space to the computed header size to amount for missing | 
| 533   // alignment requirements in our computation. | 529   // alignment requirements in our computation. | 
| 534   // Try to get kHeaderSize properly aligned on 32-bit and 64-bit machines. | 530   // Try to get kHeaderSize properly aligned on 32-bit and 64-bit machines. | 
| 535   static const size_t kHeaderSize = kMinHeaderSize; | 531   static const size_t kHeaderSize = kMinHeaderSize; | 
| 536 | 532 | 
| 537   static const int kBodyOffset = | 533   static const int kBodyOffset = | 
| 538       CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize); | 534       CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize); | 
| 539 | 535 | 
| 540   // The start offset of the object area in a page. Aligned to both maps and | 536   // The start offset of the object area in a page. Aligned to both maps and | 
| (...skipping 98 matching lines...) | | 
| 639 | 635 | 
| 640   void AllocateOldToNewSlots(); | 636   void AllocateOldToNewSlots(); | 
| 641   void ReleaseOldToNewSlots(); | 637   void ReleaseOldToNewSlots(); | 
| 642   void AllocateOldToOldSlots(); | 638   void AllocateOldToOldSlots(); | 
| 643   void ReleaseOldToOldSlots(); | 639   void ReleaseOldToOldSlots(); | 
| 644   void AllocateTypedOldToNewSlots(); | 640   void AllocateTypedOldToNewSlots(); | 
| 645   void ReleaseTypedOldToNewSlots(); | 641   void ReleaseTypedOldToNewSlots(); | 
| 646   void AllocateTypedOldToOldSlots(); | 642   void AllocateTypedOldToOldSlots(); | 
| 647   void ReleaseTypedOldToOldSlots(); | 643   void ReleaseTypedOldToOldSlots(); | 
| 648 | 644 | 
| 649   template <ArrayBufferTrackerAccessMode tracker_access> |  | 
| 650   inline LocalArrayBufferTracker* local_tracker() { |  | 
| 651     LocalArrayBufferTracker* tracker = local_tracker_.Value(); |  | 
| 652     if (tracker == nullptr && tracker_access == kCreateIfNotPresent) { |  | 
| 653       tracker = new LocalArrayBufferTracker(heap_); |  | 
| 654       if (!local_tracker_.TrySetValue(nullptr, tracker)) { |  | 
| 655         tracker = local_tracker_.Value(); |  | 
| 656       } |  | 
| 657       DCHECK_NOT_NULL(tracker); |  | 
| 658     } |  | 
| 659     return tracker; |  | 
| 660   } |  | 
| 661 |  | 
| 662   void ReleaseLocalTracker(); |  | 
| 663 |  | 
| 664   Address area_start() { return area_start_; } | 645   Address area_start() { return area_start_; } | 
| 665   Address area_end() { return area_end_; } | 646   Address area_end() { return area_end_; } | 
| 666   int area_size() { return static_cast<int>(area_end() - area_start()); } | 647   int area_size() { return static_cast<int>(area_end() - area_start()); } | 
| 667 | 648 | 
| 668   bool CommitArea(size_t requested); | 649   bool CommitArea(size_t requested); | 
| 669 | 650 | 
| 670   // Approximate amount of physical memory committed for this chunk. | 651   // Approximate amount of physical memory committed for this chunk. | 
| 671   size_t CommittedPhysicalMemory() { return high_water_mark_.Value(); } | 652   size_t CommittedPhysicalMemory() { return high_water_mark_.Value(); } | 
| 672 | 653 | 
| 673   int progress_bar() { | 654   int progress_bar() { | 
| (...skipping 162 matching lines...) | | 
| 836   base::AtomicNumber<intptr_t> available_in_free_list_; | 817   base::AtomicNumber<intptr_t> available_in_free_list_; | 
| 837   base::AtomicNumber<intptr_t> wasted_memory_; | 818   base::AtomicNumber<intptr_t> wasted_memory_; | 
| 838 | 819 | 
| 839   // next_chunk_ holds a pointer of type MemoryChunk | 820   // next_chunk_ holds a pointer of type MemoryChunk | 
| 840   base::AtomicValue<MemoryChunk*> next_chunk_; | 821   base::AtomicValue<MemoryChunk*> next_chunk_; | 
| 841   // prev_chunk_ holds a pointer of type MemoryChunk | 822   // prev_chunk_ holds a pointer of type MemoryChunk | 
| 842   base::AtomicValue<MemoryChunk*> prev_chunk_; | 823   base::AtomicValue<MemoryChunk*> prev_chunk_; | 
| 843 | 824 | 
| 844   FreeListCategory categories_[kNumberOfCategories]; | 825   FreeListCategory categories_[kNumberOfCategories]; | 
| 845 | 826 | 
| 846   base::AtomicValue<LocalArrayBufferTracker*> local_tracker_; |  | 
| 847 |  | 
| 848  private: | 827  private: | 
| 849   void InitializeReservedMemory() { reservation_.Reset(); } | 828   void InitializeReservedMemory() { reservation_.Reset(); } | 
| 850 | 829 | 
| 851   friend class MemoryAllocator; | 830   friend class MemoryAllocator; | 
| 852   friend class MemoryChunkValidator; | 831   friend class MemoryChunkValidator; | 
| 853 }; | 832 }; | 
| 854 | 833 | 
| 855 // ----------------------------------------------------------------------------- | 834 // ----------------------------------------------------------------------------- | 
| 856 // A page is a memory chunk of a size 1MB. Large object pages may be larger. | 835 // A page is a memory chunk of a size 1MB. Large object pages may be larger. | 
| 857 // | 836 // | 
| (...skipping 1456 matching lines...) | | 
| 2314   // sweeper. | 2293   // sweeper. | 
| 2315   virtual void RefillFreeList(); | 2294   virtual void RefillFreeList(); | 
| 2316 | 2295 | 
| 2317   FreeList* free_list() { return &free_list_; } | 2296   FreeList* free_list() { return &free_list_; } | 
| 2318 | 2297 | 
| 2319   base::Mutex* mutex() { return &space_mutex_; } | 2298   base::Mutex* mutex() { return &space_mutex_; } | 
| 2320 | 2299 | 
| 2321   inline void UnlinkFreeListCategories(Page* page); | 2300   inline void UnlinkFreeListCategories(Page* page); | 
| 2322   inline intptr_t RelinkFreeListCategories(Page* page); | 2301   inline intptr_t RelinkFreeListCategories(Page* page); | 
| 2323 | 2302 | 
| 2324   // Callback signature: |  | 
| 2325   //   void Callback(Page*); |  | 
| 2326   template <typename Callback> |  | 
| 2327   void ForAllPages(Callback callback) { |  | 
| 2328     PageIterator it(this); |  | 
| 2329     while (it.has_next()) { |  | 
| 2330       callback(it.next()); |  | 
| 2331     } |  | 
| 2332   } |  | 
| 2333 |  | 
| 2334  protected: | 2303  protected: | 
| 2335   // PagedSpaces that should be included in snapshots have different, i.e., | 2304   // PagedSpaces that should be included in snapshots have different, i.e., | 
| 2336   // smaller, initial pages. | 2305   // smaller, initial pages. | 
| 2337   virtual bool snapshotable() { return true; } | 2306   virtual bool snapshotable() { return true; } | 
| 2338 | 2307 | 
| 2339   bool HasPages() { return anchor_.next_page() != &anchor_; } | 2308   bool HasPages() { return anchor_.next_page() != &anchor_; } | 
| 2340 | 2309 | 
| 2341   // Cleans up the space, frees all pages in this space except those belonging | 2310   // Cleans up the space, frees all pages in this space except those belonging | 
| 2342   // to the initial chunk, uncommits addresses in the initial chunk. | 2311   // to the initial chunk, uncommits addresses in the initial chunk. | 
| 2343   void TearDown(); | 2312   void TearDown(); | 
| (...skipping 843 matching lines...) | | 
| 3187     count = 0; | 3156     count = 0; | 
| 3188   } | 3157   } | 
| 3189   // Must be small, since an iteration is used for lookup. | 3158   // Must be small, since an iteration is used for lookup. | 
| 3190   static const int kMaxComments = 64; | 3159   static const int kMaxComments = 64; | 
| 3191 }; | 3160 }; | 
| 3192 #endif | 3161 #endif | 
| 3193 }  // namespace internal | 3162 }  // namespace internal | 
| 3194 }  // namespace v8 | 3163 }  // namespace v8 | 
| 3195 | 3164 | 
| 3196 #endif  // V8_HEAP_SPACES_H_ | 3165 #endif  // V8_HEAP_SPACES_H_ | 
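For context, the removed `local_tracker()` accessor lazily installs a per-chunk tracker exactly once, even when several threads race to create it, by attempting an atomic compare-and-swap from `nullptr` to a freshly allocated object. Below is a minimal, standalone sketch of that pattern using `std::atomic` instead of V8's `base::AtomicValue`; the names `Tracker`, `Chunk`, and `GetOrCreateTracker` are placeholders for illustration and are not part of the V8 code above.

```cpp
#include <atomic>

// Stand-in for the per-chunk payload. V8's real type is
// LocalArrayBufferTracker; this placeholder only keeps the sketch
// self-contained.
struct Tracker {};

class Chunk {
 public:
  ~Chunk() { delete tracker_.load(std::memory_order_relaxed); }

  // Returns the chunk's tracker, creating it on first use. When several
  // threads race, the compare-exchange lets exactly one installation
  // succeed; losers discard their allocation and adopt the winner's tracker.
  Tracker* GetOrCreateTracker() {
    Tracker* tracker = tracker_.load(std::memory_order_acquire);
    if (tracker == nullptr) {
      Tracker* fresh = new Tracker();
      Tracker* expected = nullptr;
      if (tracker_.compare_exchange_strong(expected, fresh,
                                           std::memory_order_acq_rel)) {
        tracker = fresh;
      } else {
        delete fresh;        // another thread installed its tracker first
        tracker = expected;  // compare_exchange_strong wrote the winner here
      }
    }
    return tracker;
  }

 private:
  std::atomic<Tracker*> tracker_{nullptr};
};
```

The accessor deleted in this patch follows the same shape: `base::AtomicValue<LocalArrayBufferTracker*>::TrySetValue(nullptr, tracker)` plays the role of the compare-exchange, and the `ArrayBufferTrackerAccessMode` template parameter chooses at compile time whether a missing tracker is created (`kCreateIfNotPresent`) or `nullptr` is returned (`kDontCreate`).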