OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #ifndef V8_HEAP_SPACES_H_ | 5 #ifndef V8_HEAP_SPACES_H_ |
6 #define V8_HEAP_SPACES_H_ | 6 #define V8_HEAP_SPACES_H_ |
7 | 7 |
8 #include <list> | 8 #include <list> |
9 #include <memory> | 9 #include <memory> |
10 #include <unordered_set> | 10 #include <unordered_set> |
(...skipping 292 matching lines...)
303 // not be performed on that page. Sweeper threads that are done with their | 303 // not be performed on that page. Sweeper threads that are done with their |
304 // work will set this value and not touch the page anymore. | 304 // work will set this value and not touch the page anymore. |
305 // |kSweepingPending|: This page is ready for parallel sweeping. | 305 // |kSweepingPending|: This page is ready for parallel sweeping. |
306 // |kSweepingInProgress|: This page is currently being swept by a sweeper thread. | 306 // |kSweepingInProgress|: This page is currently being swept by a sweeper thread. |
307 enum ConcurrentSweepingState { | 307 enum ConcurrentSweepingState { |
308 kSweepingDone, | 308 kSweepingDone, |
309 kSweepingPending, | 309 kSweepingPending, |
310 kSweepingInProgress, | 310 kSweepingInProgress, |
311 }; | 311 }; |
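
For orientation, a sweeper thread is expected to walk a page through these states roughly as in the sketch below. This is illustrative only, not code from this CL: TrySweepPage is a hypothetical helper, and it assumes the state is exposed through a base::AtomicValue<ConcurrentSweepingState> accessor (called concurrent_sweeping_state() here) offering the TrySetValue/SetValue operations from base/atomic-utils.h.

bool TrySweepPage(MemoryChunk* chunk) {
  // Claim the page: only one sweeper may take it from pending to in-progress.
  if (!chunk->concurrent_sweeping_state().TrySetValue(
          MemoryChunk::kSweepingPending, MemoryChunk::kSweepingInProgress)) {
    return false;  // Another thread already owns this page.
  }
  // ... sweep dead objects on the page into the free list ...
  // Publish completion; per the comment above, the sweeper must not touch
  // the page after this store.
  chunk->concurrent_sweeping_state().SetValue(MemoryChunk::kSweepingDone);
  return true;
}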
312 | 312 |
313 // Every n write barrier invocations we go to runtime even though | |
314 // we could have handled it in generated code. This lets us check | |
315 // whether we have hit the limit and should do some more marking. | |
316 static const int kWriteBarrierCounterGranularity = 500; | |
317 | |
318 static const intptr_t kAlignment = | 313 static const intptr_t kAlignment = |
319 (static_cast<uintptr_t>(1) << kPageSizeBits); | 314 (static_cast<uintptr_t>(1) << kPageSizeBits); |
320 | 315 |
321 static const intptr_t kAlignmentMask = kAlignment - 1; | 316 static const intptr_t kAlignmentMask = kAlignment - 1; |
322 | 317 |
323 static const intptr_t kSizeOffset = 0; | 318 static const intptr_t kSizeOffset = 0; |
324 | 319 |
325 static const intptr_t kFlagsOffset = kSizeOffset + kPointerSize; | 320 static const intptr_t kFlagsOffset = kSizeOffset + kPointerSize; |
326 | 321 |
327 static const size_t kWriteBarrierCounterOffset = | 322 static const size_t kMinHeaderSize = |
328 kSizeOffset + kPointerSize // size_t size | 323 kSizeOffset + kPointerSize // size_t size |
329 + kIntptrSize // Flags flags_ | 324 + kIntptrSize // Flags flags_ |
330 + kPointerSize // Address area_start_ | 325 + kPointerSize // Address area_start_ |
331 + kPointerSize // Address area_end_ | 326 + kPointerSize // Address area_end_ |
332 + 2 * kPointerSize // base::VirtualMemory reservation_ | 327 + 2 * kPointerSize // base::VirtualMemory reservation_ |
333 + kPointerSize // Address owner_ | 328 + kPointerSize // Address owner_ |
334 + kPointerSize // Heap* heap_ | 329 + kPointerSize // Heap* heap_ |
335 + kIntSize // int progress_bar_ | 330 + kIntSize // int progress_bar_ |
336 + kIntSize // int live_bytes_count_ | 331 + kIntSize // int live_bytes_count_ |
337 + kPointerSize // SlotSet* old_to_new_slots_; | 332 + kPointerSize // SlotSet* old_to_new_slots_ |
338 + kPointerSize // SlotSet* old_to_old_slots_; | 333 + kPointerSize // SlotSet* old_to_old_slots_ |
339 + kPointerSize // TypedSlotSet* typed_old_to_new_slots_; | 334 + kPointerSize // TypedSlotSet* typed_old_to_new_slots_ |
340 + kPointerSize // TypedSlotSet* typed_old_to_old_slots_; | 335 + kPointerSize // TypedSlotSet* typed_old_to_old_slots_ |
341 + kPointerSize; // SkipList* skip_list_; | 336 + kPointerSize // SkipList* skip_list_ |
342 | 337 + kPointerSize // AtomicValue high_water_mark_ |
343 static const size_t kMinHeaderSize = | 338 + kPointerSize // base::Mutex* mutex_ |
344 kWriteBarrierCounterOffset + | 339 + kPointerSize // base::AtomicWord concurrent_sweeping_ |
345 kIntptrSize // intptr_t write_barrier_counter_ | 340 + 2 * kPointerSize // AtomicNumber free-list statistics |
346 + kPointerSize // AtomicValue high_water_mark_ | 341 + kPointerSize // AtomicValue next_chunk_ |
347 + kPointerSize // base::Mutex* mutex_ | 342 + kPointerSize // AtomicValue prev_chunk_ |
348 + kPointerSize // base::AtomicWord concurrent_sweeping_ | |
349 + 2 * kPointerSize // AtomicNumber free-list statistics | |
350 + kPointerSize // AtomicValue next_chunk_ | |
351 + kPointerSize // AtomicValue prev_chunk_ | |
352 // FreeListCategory categories_[kNumberOfCategories] | 343 // FreeListCategory categories_[kNumberOfCategories] |
353 + FreeListCategory::kSize * kNumberOfCategories + | 344 + FreeListCategory::kSize * kNumberOfCategories + |
354 kPointerSize // LocalArrayBufferTracker* local_tracker_ | 345 kPointerSize // LocalArrayBufferTracker* local_tracker_ |
355 // std::unordered_set<Address>* black_area_end_marker_map_ | 346 // std::unordered_set<Address>* black_area_end_marker_map_ |
356 + kPointerSize; | 347 + kPointerSize; |
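
As a spot check on the field arithmetic above (illustrative, not part of the CL): on a 64-bit target with kPointerSize == kIntptrSize == 8 and kIntSize == 4, the new computation has 22 pointer/intptr-sized fields and 2 int fields, so the fixed part is 22 * 8 + 2 * 4 == 184 bytes plus the free-list categories. A hypothetical assertion of that sum would read:

// 64-bit only; mirrors the sum above, assuming no padding between fields.
static_assert(MemoryChunk::kMinHeaderSize ==
                  22 * 8 + 2 * 4 +
                      FreeListCategory::kSize * kNumberOfCategories,
              "unexpected 64-bit MemoryChunk header size");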
357 | 348 |
358 // We add some more space to the computed header size to account for missing | 349 // We add some more space to the computed header size to account for missing |
359 // alignment requirements in our computation. | 350 // alignment requirements in our computation. |
360 // Try to get kHeaderSize properly aligned on 32-bit and 64-bit machines. | 351 // Try to get kHeaderSize properly aligned on 32-bit and 64-bit machines. |
361 static const size_t kHeaderSize = kMinHeaderSize; | 352 static const size_t kHeaderSize = kMinHeaderSize; |
(...skipping 67 matching lines...)
429 DCHECK_LE(static_cast<unsigned>(live_byte_count_), size_); | 420 DCHECK_LE(static_cast<unsigned>(live_byte_count_), size_); |
430 return live_byte_count_; | 421 return live_byte_count_; |
431 } | 422 } |
432 | 423 |
433 void SetLiveBytes(int live_bytes) { | 424 void SetLiveBytes(int live_bytes) { |
434 DCHECK_GE(live_bytes, 0); | 425 DCHECK_GE(live_bytes, 0); |
435 DCHECK_LE(static_cast<size_t>(live_bytes), size_); | 426 DCHECK_LE(static_cast<size_t>(live_bytes), size_); |
436 live_byte_count_ = live_bytes; | 427 live_byte_count_ = live_bytes; |
437 } | 428 } |
438 | 429 |
439 int write_barrier_counter() { | |
440 return static_cast<int>(write_barrier_counter_); | |
441 } | |
442 | |
443 void set_write_barrier_counter(int counter) { | |
444 write_barrier_counter_ = counter; | |
445 } | |
446 | |
447 size_t size() const { return size_; } | 430 size_t size() const { return size_; } |
448 void set_size(size_t size) { size_ = size; } | 431 void set_size(size_t size) { size_ = size; } |
449 | 432 |
450 inline Heap* heap() const { return heap_; } | 433 inline Heap* heap() const { return heap_; } |
451 | 434 |
452 inline SkipList* skip_list() { return skip_list_; } | 435 inline SkipList* skip_list() { return skip_list_; } |
453 | 436 |
454 inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; } | 437 inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; } |
455 | 438 |
456 inline SlotSet* old_to_new_slots() { return old_to_new_slots_.Value(); } | 439 inline SlotSet* old_to_new_slots() { return old_to_new_slots_.Value(); } |
(...skipping 196 matching lines...)
653 // A single slot set for small pages (of size kPageSize) or an array of slot | 636 // A single slot set for small pages (of size kPageSize) or an array of slot |
654 // sets for large pages. In the latter case the number of entries in the array | 637 // sets for large pages. In the latter case the number of entries in the array |
655 // is ceil(size() / kPageSize). | 638 // is ceil(size() / kPageSize). |
656 base::AtomicValue<SlotSet*> old_to_new_slots_; | 639 base::AtomicValue<SlotSet*> old_to_new_slots_; |
657 SlotSet* old_to_old_slots_; | 640 SlotSet* old_to_old_slots_; |
658 base::AtomicValue<TypedSlotSet*> typed_old_to_new_slots_; | 641 base::AtomicValue<TypedSlotSet*> typed_old_to_new_slots_; |
659 TypedSlotSet* typed_old_to_old_slots_; | 642 TypedSlotSet* typed_old_to_old_slots_; |
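
Illustrative arithmetic for the sizing rule in the comment above (hypothetical helper, not in this CL): with, say, 512 KB pages, a 3 MB large-object chunk carries ceil(3 MB / 512 KB) == 6 slot sets.

// Number of SlotSet entries backing old_to_new_slots_ for a chunk.
size_t SlotSetCount(size_t chunk_size) {
  // ceil(chunk_size / kPageSize), computed in integer arithmetic.
  return (chunk_size + Page::kPageSize - 1) / Page::kPageSize;
}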
660 | 643 |
661 SkipList* skip_list_; | 644 SkipList* skip_list_; |
662 | 645 |
663 intptr_t write_barrier_counter_; | |
664 | |
665 // Assuming the initial allocation on a page is sequential, | 646 // Assuming the initial allocation on a page is sequential, |
666 // count the highest number of bytes ever allocated on the page. | 647 // count the highest number of bytes ever allocated on the page. |
667 base::AtomicValue<intptr_t> high_water_mark_; | 648 base::AtomicValue<intptr_t> high_water_mark_; |
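
A monotonic mark like this is typically raised with a lock-free retry loop, along the lines of the sketch below (illustrative; it ignores access control on high_water_mark_ for brevity, and the body of V8's own helper for this, MemoryChunk::UpdateHighWaterMark, is not shown in this CL):

void RaiseHighWaterMark(MemoryChunk* chunk, Address mark) {
  intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address());
  intptr_t old_mark = chunk->high_water_mark_.Value();
  // Only ever move the mark upward; retry if another thread races ahead.
  while (new_mark > old_mark &&
         !chunk->high_water_mark_.TrySetValue(old_mark, new_mark)) {
    old_mark = chunk->high_water_mark_.Value();
  }
}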
668 | 649 |
669 base::Mutex* mutex_; | 650 base::Mutex* mutex_; |
670 | 651 |
671 base::AtomicValue<ConcurrentSweepingState> concurrent_sweeping_; | 652 base::AtomicValue<ConcurrentSweepingState> concurrent_sweeping_; |
672 | 653 |
673 // PagedSpace free-list statistics. | 654 // PagedSpace free-list statistics. |
674 base::AtomicNumber<intptr_t> available_in_free_list_; | 655 base::AtomicNumber<intptr_t> available_in_free_list_; |
(...skipping 291 matching lines...)
966 size_t committed_; | 947 size_t committed_; |
967 size_t max_committed_; | 948 size_t max_committed_; |
968 | 949 |
969 DISALLOW_COPY_AND_ASSIGN(Space); | 950 DISALLOW_COPY_AND_ASSIGN(Space); |
970 }; | 951 }; |
971 | 952 |
972 | 953 |
973 class MemoryChunkValidator { | 954 class MemoryChunkValidator { |
974 // Computed offsets should match the compiler-generated ones. | 955 // Computed offsets should match the compiler-generated ones. |
975 STATIC_ASSERT(MemoryChunk::kSizeOffset == offsetof(MemoryChunk, size_)); | 956 STATIC_ASSERT(MemoryChunk::kSizeOffset == offsetof(MemoryChunk, size_)); |
976 STATIC_ASSERT(MemoryChunk::kWriteBarrierCounterOffset == | |
977 offsetof(MemoryChunk, write_barrier_counter_)); | |
978 | 957 |
979 // Validate our estimates on the header size. | 958 // Validate our estimates on the header size. |
980 STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize); | 959 STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize); |
981 STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize); | 960 STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize); |
982 STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize); | 961 STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize); |
983 }; | 962 }; |
984 | 963 |
985 | 964 |
986 // ---------------------------------------------------------------------------- | 965 // ---------------------------------------------------------------------------- |
987 // All heap objects containing executable code (code objects) must be allocated | 966 // All heap objects containing executable code (code objects) must be allocated |
(...skipping 1959 matching lines...)
2947 PageIterator old_iterator_; | 2926 PageIterator old_iterator_; |
2948 PageIterator code_iterator_; | 2927 PageIterator code_iterator_; |
2949 PageIterator map_iterator_; | 2928 PageIterator map_iterator_; |
2950 LargePageIterator lo_iterator_; | 2929 LargePageIterator lo_iterator_; |
2951 }; | 2930 }; |
2952 | 2931 |
2953 } // namespace internal | 2932 } // namespace internal |
2954 } // namespace v8 | 2933 } // namespace v8 |
2955 | 2934 |
2956 #endif // V8_HEAP_SPACES_H_ | 2935 #endif // V8_HEAP_SPACES_H_ |