| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #ifndef V8_HEAP_SPACES_H_ | 5 #ifndef V8_HEAP_SPACES_H_ |
| 6 #define V8_HEAP_SPACES_H_ | 6 #define V8_HEAP_SPACES_H_ |
| 7 | 7 |
| 8 #include "src/allocation.h" | 8 #include "src/allocation.h" |
| 9 #include "src/atomic-utils.h" | 9 #include "src/atomic-utils.h" |
| 10 #include "src/base/atomicops.h" | 10 #include "src/base/atomicops.h" |
| (...skipping 263 matching lines...) |
| 274 // be finalized. | 274 // be finalized. |
| 275 // |kCompactingAborted|: Parallel compaction has been aborted, which should | 275 // |kCompactingAborted|: Parallel compaction has been aborted, which should |
| 276 // for now only happen in OOM scenarios. | 276 // for now only happen in OOM scenarios. |
| 277 enum ParallelCompactingState { | 277 enum ParallelCompactingState { |
| 278 kCompactingDone, | 278 kCompactingDone, |
| 279 kCompactingInProgress, | 279 kCompactingInProgress, |
| 280 kCompactingFinalize, | 280 kCompactingFinalize, |
| 281 kCompactingAborted, | 281 kCompactingAborted, |
| 282 }; | 282 }; |
| 283 | 283 |
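Note: the compacting state is stored in an AtomicValue (see parallel_compaction_state() below), so compaction tasks can claim a page with a single compare-and-swap. A minimal sketch of how a task might claim a page, assuming AtomicValue in src/atomic-utils.h exposes a TrySetValue(old, new) CAS; the helper itself is hypothetical and not part of this patch:

    // Hypothetical helper, for illustration only.
    bool TryClaimPageForCompaction(MemoryChunk* chunk) {
      // Only one task may move the page out of kCompactingDone.
      return chunk->parallel_compaction_state().TrySetValue(
          MemoryChunk::kCompactingDone, MemoryChunk::kCompactingInProgress);
    }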
| 284 // |kSweepingDone|: The page state when sweeping is complete or sweeping must |
| 285 // not be performed on that page. |
| 286 // |kSweepingFinalize|: A sweeper thread is done sweeping this page and will |
| 287 // not touch the page memory anymore. |
| 288 // |kSweepingInProgress|: This page is currently swept by a sweeper thread. |
| 289 // |kSweepingPending|: This page is ready for parallel sweeping. |
| 290 enum ParallelSweepingState { |
| 291 kSweepingDone, |
| 292 kSweepingFinalize, |
| 293 kSweepingInProgress, |
| 294 kSweepingPending |
| 295 }; |
| 296 |
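Note that the declaration order matters here: SweepingCompleted() further down tests parallel_sweeping_state().Value() <= kSweepingFinalize, so the two states in which no sweeper will touch the page again must stay first. A sketch of the sweeper-side transition this enables, using the accessor introduced below (the helper name is hypothetical):

    // Hypothetical helper, not part of this patch.
    void MarkSweepingFinalized(MemoryChunk* chunk) {
      DCHECK_EQ(MemoryChunk::kSweepingInProgress,
                chunk->parallel_sweeping_state().Value());
      // After this store the sweeper must not touch the page memory again.
      chunk->parallel_sweeping_state().SetValue(MemoryChunk::kSweepingFinalize);
    }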
| 284 // Only works if the pointer is in the first kPageSize of the MemoryChunk. | 297 // Only works if the pointer is in the first kPageSize of the MemoryChunk. |
| 285 static MemoryChunk* FromAddress(Address a) { | 298 static MemoryChunk* FromAddress(Address a) { |
| 286 return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask); | 299 return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask); |
| 287 } | 300 } |
| 288 static const MemoryChunk* FromAddress(const byte* a) { | 301 static const MemoryChunk* FromAddress(const byte* a) { |
| 289 return reinterpret_cast<const MemoryChunk*>(OffsetFrom(a) & | 302 return reinterpret_cast<const MemoryChunk*>(OffsetFrom(a) & |
| 290 ~kAlignmentMask); | 303 ~kAlignmentMask); |
| 291 } | 304 } |
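Since every MemoryChunk starts at a kAlignment boundary, clearing the low bits of an interior pointer recovers the chunk header. A worked example, assuming a 1 MB alignment (kAlignmentMask == 0xFFFFF):

    // 0x2345678 & ~0xFFFFF == 0x2300000, the start of the owning chunk.
    Address addr = reinterpret_cast<Address>(0x2345678);
    MemoryChunk* chunk = MemoryChunk::FromAddress(addr);

As the comment above warns, this only holds for pointers within the first kPageSize of the chunk; large-object pages extend past a single alignment unit.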
| 292 | 305 |
| 293 // Only works for addresses in pointer spaces, not data or code spaces. | 306 // Only works for addresses in pointer spaces, not data or code spaces. |
| (...skipping 147 matching lines...) |
| 441 // Set or clear multiple flags at a time. The flags in the mask | 454 // Set or clear multiple flags at a time. The flags in the mask |
| 442 // are set to the value in "flags", the rest retain the current value | 455 // are set to the value in "flags", the rest retain the current value |
| 443 // in flags_. | 456 // in flags_. |
| 444 void SetFlags(intptr_t flags, intptr_t mask) { | 457 void SetFlags(intptr_t flags, intptr_t mask) { |
| 445 flags_ = (flags_ & ~mask) | (flags & mask); | 458 flags_ = (flags_ & ~mask) | (flags & mask); |
| 446 } | 459 } |
| 447 | 460 |
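SetFlags is a masked read-modify-write: bits inside mask take their value from flags, bits outside keep their current value. A small worked example (the flag values are made up for illustration):

    // flags_ == 0xA; set bit 2, clear bit 1, leave the rest untouched:
    // (0xA & ~0x6) | (0x5 & 0x6) == 0x8 | 0x4 == 0xC
    chunk->SetFlags(0x5, 0x6);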
| 448 // Return all current flags. | 461 // Return all current flags. |
| 449 intptr_t GetFlags() { return flags_; } | 462 intptr_t GetFlags() { return flags_; } |
| 450 | 463 |
| 451 | 464 AtomicValue<ParallelSweepingState>& parallel_sweeping_state() { |
| 452 // SWEEPING_DONE - The page state when sweeping is complete or sweeping must | 465 return parallel_sweeping_; |
| 453 // not be performed on that page. | |
| 454 // SWEEPING_FINALIZE - A sweeper thread is done sweeping this page and will | |
| 455 // not touch the page memory anymore. | |
| 456 // SWEEPING_IN_PROGRESS - This page is currently swept by a sweeper thread. | |
| 457 // SWEEPING_PENDING - This page is ready for parallel sweeping. | |
| 458 enum ParallelSweepingState { | |
| 459 SWEEPING_DONE, | |
| 460 SWEEPING_FINALIZE, | |
| 461 SWEEPING_IN_PROGRESS, | |
| 462 SWEEPING_PENDING | |
| 463 }; | |
| 464 | |
| 465 ParallelSweepingState parallel_sweeping() { | |
| 466 return static_cast<ParallelSweepingState>( | |
| 467 base::Acquire_Load(¶llel_sweeping_)); | |
| 468 } | |
| 469 | |
| 470 void set_parallel_sweeping(ParallelSweepingState state) { | |
| 471 base::Release_Store(¶llel_sweeping_, state); | |
| 472 } | 466 } |
| 473 | 467 |
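The raw Acquire_Load/Release_Store accessor pair on the left is folded into the AtomicValue wrapper, mirroring parallel_compaction_state() below. A simplified sketch of what the wrapper provides; the real class lives in src/atomic-utils.h and its details here are assumed:

    // Simplified sketch of AtomicValue, for illustration only.
    template <typename T>
    class AtomicValue {
     public:
      T Value() {
        return static_cast<T>(base::Acquire_Load(&value_));
      }
      void SetValue(T new_value) {
        base::Release_Store(&value_, static_cast<base::AtomicWord>(new_value));
      }
     private:
      base::AtomicWord value_;
    };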
| 474 AtomicValue<ParallelCompactingState>& parallel_compaction_state() { | 468 AtomicValue<ParallelCompactingState>& parallel_compaction_state() { |
| 475 return parallel_compaction_; | 469 return parallel_compaction_; |
| 476 } | 470 } |
| 477 | 471 |
| 478 bool TryLock() { return mutex_->TryLock(); } | 472 bool TryLock() { return mutex_->TryLock(); } |
| 479 | 473 |
| 480 base::Mutex* mutex() { return mutex_; } | 474 base::Mutex* mutex() { return mutex_; } |
| 481 | 475 |
| 482 // WaitUntilSweepingCompleted only works when concurrent sweeping is in | 476 // WaitUntilSweepingCompleted only works when concurrent sweeping is in |
| 483 // progress, i.e., when we know that right before this call a sweeper | 477 // progress, i.e., when we know that right before this call a sweeper |
| 484 // thread was sweeping this page. | 478 // thread was sweeping this page. |
| 485 void WaitUntilSweepingCompleted() { | 479 void WaitUntilSweepingCompleted() { |
| 486 mutex_->Lock(); | 480 mutex_->Lock(); |
| 487 mutex_->Unlock(); | 481 mutex_->Unlock(); |
| 488 DCHECK(SweepingCompleted()); | 482 DCHECK(SweepingCompleted()); |
| 489 } | 483 } |
| 490 | 484 |
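The empty Lock()/Unlock() pair works because a sweeper task holds the page's mutex for the entire time it sweeps the page, so acquiring the mutex blocks exactly until sweeping is done. A sketch of the sweeper side this pairs with (the helper is hypothetical, not part of this patch):

    // Hypothetical sweeper-side counterpart, for illustration only.
    void SweepPage(Page* page) {
      base::LockGuard<base::Mutex> guard(page->mutex());
      // ... free dead objects on the page ...
      page->parallel_sweeping_state().SetValue(MemoryChunk::kSweepingFinalize);
    }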
| 491 bool SweepingCompleted() { return parallel_sweeping() <= SWEEPING_FINALIZE; } | 485 bool SweepingCompleted() { |
| 486 return parallel_sweeping_state().Value() <= kSweepingFinalize; |
| 487 } |
| 492 | 488 |
| 493 // Manage live byte count (count of bytes known to be live, | 489 // Manage live byte count (count of bytes known to be live, |
| 494 // because they are marked black). | 490 // because they are marked black). |
| 495 void ResetLiveBytes() { | 491 void ResetLiveBytes() { |
| 496 if (FLAG_gc_verbose) { | 492 if (FLAG_gc_verbose) { |
| 497 PrintF("ResetLiveBytes:%p:%x->0\n", static_cast<void*>(this), | 493 PrintF("ResetLiveBytes:%p:%x->0\n", static_cast<void*>(this), |
| 498 live_byte_count_); | 494 live_byte_count_); |
| 499 } | 495 } |
| 500 live_byte_count_ = 0; | 496 live_byte_count_ = 0; |
| 501 } | 497 } |
| (...skipping 234 matching lines...) |
| 736 SkipList* skip_list_; | 732 SkipList* skip_list_; |
| 737 intptr_t write_barrier_counter_; | 733 intptr_t write_barrier_counter_; |
| 738 // Used by the incremental marker to keep track of the scanning progress in | 734 // Used by the incremental marker to keep track of the scanning progress in |
| 739 // large objects that have a progress bar and are scanned in increments. | 735 // large objects that have a progress bar and are scanned in increments. |
| 740 int progress_bar_; | 736 int progress_bar_; |
| 741 // Assuming the initial allocation on a page is sequential, | 737 // Assuming the initial allocation on a page is sequential, |
| 742 // count highest number of bytes ever allocated on the page. | 738 // count highest number of bytes ever allocated on the page. |
| 743 AtomicValue<intptr_t> high_water_mark_; | 739 AtomicValue<intptr_t> high_water_mark_; |
| 744 | 740 |
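Since the high-water mark only ever grows, concurrent updates can use a standard CAS-max loop. A sketch of the idea, again assuming AtomicValue offers a TrySetValue(old, new) CAS (the helper name is hypothetical):

    // Hypothetical monotonic-max update, for illustration only.
    static void UpdateHighWaterMark(AtomicValue<intptr_t>* mark,
                                    intptr_t candidate) {
      intptr_t old_mark = mark->Value();
      while (candidate > old_mark && !mark->TrySetValue(old_mark, candidate)) {
        old_mark = mark->Value();  // Lost a race; re-read and retry.
      }
    }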
| 745 base::Mutex* mutex_; | 741 base::Mutex* mutex_; |
| 746 base::AtomicWord parallel_sweeping_; | 742 AtomicValue<ParallelSweepingState> parallel_sweeping_; |
| 747 AtomicValue<ParallelCompactingState> parallel_compaction_; | 743 AtomicValue<ParallelCompactingState> parallel_compaction_; |
| 748 | 744 |
| 749 // PagedSpace free-list statistics. | 745 // PagedSpace free-list statistics. |
| 750 AtomicNumber<intptr_t> available_in_small_free_list_; | 746 AtomicNumber<intptr_t> available_in_small_free_list_; |
| 751 AtomicNumber<intptr_t> available_in_medium_free_list_; | 747 AtomicNumber<intptr_t> available_in_medium_free_list_; |
| 752 AtomicNumber<intptr_t> available_in_large_free_list_; | 748 AtomicNumber<intptr_t> available_in_large_free_list_; |
| 753 AtomicNumber<intptr_t> available_in_huge_free_list_; | 749 AtomicNumber<intptr_t> available_in_huge_free_list_; |
| 754 AtomicNumber<intptr_t> non_available_small_blocks_; | 750 AtomicNumber<intptr_t> non_available_small_blocks_; |
| 755 | 751 |
| 756 // next_chunk_ holds a pointer of type MemoryChunk | 752 // next_chunk_ holds a pointer of type MemoryChunk |
| (...skipping 2224 matching lines...) |
| 2981 count = 0; | 2977 count = 0; |
| 2982 } | 2978 } |
| 2983 // Must be small, since an iteration is used for lookup. | 2979 // Must be small, since an iteration is used for lookup. |
| 2984 static const int kMaxComments = 64; | 2980 static const int kMaxComments = 64; |
| 2985 }; | 2981 }; |
| 2986 #endif | 2982 #endif |
| 2987 } | 2983 } |
| 2988 } // namespace v8::internal | 2984 } // namespace v8::internal |
| 2989 | 2985 |
| 2990 #endif // V8_HEAP_SPACES_H_ | 2986 #endif // V8_HEAP_SPACES_H_ |