OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #ifndef V8_HEAP_SPACES_H_ | 5 #ifndef V8_HEAP_SPACES_H_ |
6 #define V8_HEAP_SPACES_H_ | 6 #define V8_HEAP_SPACES_H_ |
7 | 7 |
8 #include "src/allocation.h" | 8 #include "src/allocation.h" |
9 #include "src/base/atomicops.h" | 9 #include "src/base/atomicops.h" |
10 #include "src/base/bits.h" | 10 #include "src/base/bits.h" |
(...skipping 434 matching lines...)
445 | 445 |
446 ParallelSweepingState parallel_sweeping() { | 446 ParallelSweepingState parallel_sweeping() { |
447 return static_cast<ParallelSweepingState>( | 447 return static_cast<ParallelSweepingState>( |
448 base::Acquire_Load(&parallel_sweeping_)); | 448 base::Acquire_Load(&parallel_sweeping_)); |
449 } | 449 } |
450 | 450 |
451 void set_parallel_sweeping(ParallelSweepingState state) { | 451 void set_parallel_sweeping(ParallelSweepingState state) { |
452 base::Release_Store(&parallel_sweeping_, state); | 452 base::Release_Store(&parallel_sweeping_, state); |
453 } | 453 } |
454 | 454 |
455 bool TryParallelSweeping() { | 455 bool TryLock() { return mutex_->TryLock(); } |
456 return base::Acquire_CompareAndSwap(&parallel_sweeping_, SWEEPING_PENDING, | 456 |
457 SWEEPING_IN_PROGRESS) == | 457 base::Mutex* mutex() { return mutex_; } |
458 SWEEPING_PENDING; | 458 |
| 459 // WaitUntilSweepingCompleted only works when concurrent sweeping is in |
| 460 // progress. In particular, when we know that right before this call a |
| 461 // sweeper thread was sweeping this page. |
| 462 void WaitUntilSweepingCompleted() { |
| 463 mutex_->Lock(); |
| 464 mutex_->Unlock(); |
| 465 DCHECK(SweepingCompleted()); |
459 } | 466 } |
460 | 467 |
461 bool SweepingCompleted() { return parallel_sweeping() <= SWEEPING_FINALIZE; } | 468 bool SweepingCompleted() { return parallel_sweeping() <= SWEEPING_FINALIZE; } |
462 | 469 |
463 // Manage live byte count (count of bytes known to be live, | 470 // Manage live byte count (count of bytes known to be live, |
464 // because they are marked black). | 471 // because they are marked black). |
465 void ResetLiveBytes() { | 472 void ResetLiveBytes() { |
466 if (FLAG_gc_verbose) { | 473 if (FLAG_gc_verbose) { |
467 PrintF("ResetLiveBytes:%p:%x->0\n", static_cast<void*>(this), | 474 PrintF("ResetLiveBytes:%p:%x->0\n", static_cast<void*>(this), |
468 live_byte_count_); | 475 live_byte_count_); |
(...skipping 61 matching lines...)
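The hunk above replaces the CAS-based TryParallelSweeping with a per-page lock: a sweeper thread holds the page's mutex_ for the whole time it sweeps the page, so WaitUntilSweepingCompleted can simply Lock()/Unlock() to block until the sweeper is done, while the parallel_sweeping_ state itself is still published with acquire/release atomics. Below is a minimal standalone sketch of that protocol, assuming std::atomic/std::mutex in place of V8's base::AtomicWord/base::Mutex; the Sweep/main driver and all names are illustrative only and not part of the patch.

// Standalone sketch, not part of the patch: std::atomic/std::mutex stand in
// for base::AtomicWord/base::Mutex from the header above.
#include <atomic>
#include <cassert>
#include <mutex>
#include <thread>

enum ParallelSweepingState {
  SWEEPING_DONE,         // sweeping is finished
  SWEEPING_FINALIZE,     // swept, the main thread still has to finalize
  SWEEPING_IN_PROGRESS,  // a sweeper currently owns the page
  SWEEPING_PENDING       // queued for sweeping
};

struct Page {
  std::atomic<int> parallel_sweeping{SWEEPING_PENDING};
  std::mutex mutex;  // held by the sweeper for the whole sweep of this page

  bool SweepingCompleted() {
    return parallel_sweeping.load(std::memory_order_acquire) <=
           SWEEPING_FINALIZE;
  }

  // Like WaitUntilSweepingCompleted(): locking and unlocking the page mutex
  // blocks until the sweeper that currently holds it has released it.
  void WaitUntilSweepingCompleted() {
    mutex.lock();
    mutex.unlock();
    assert(SweepingCompleted());
  }
};

void Sweep(Page* page) {
  std::lock_guard<std::mutex> guard(page->mutex);
  page->parallel_sweeping.store(SWEEPING_IN_PROGRESS,
                                std::memory_order_release);
  // ... actually sweep the page here ...
  page->parallel_sweeping.store(SWEEPING_FINALIZE, std::memory_order_release);
}  // mutex released here; waiters wake up and observe SWEEPING_FINALIZE

int main() {
  Page page;
  std::thread sweeper(Sweep, &page);
  // Per the comment in the header, only wait once a sweeper has actually
  // started on this page, i.e. it already holds the mutex.
  while (page.parallel_sweeping.load(std::memory_order_acquire) ==
         SWEEPING_PENDING) {
    std::this_thread::yield();
  }
  page.WaitUntilSweepingCompleted();
  sweeper.join();
  return 0;
}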
530 | 537 |
531 static const intptr_t kLiveBytesOffset = | 538 static const intptr_t kLiveBytesOffset = |
532 kSizeOffset + kPointerSize + kPointerSize + kPointerSize + kPointerSize + | 539 kSizeOffset + kPointerSize + kPointerSize + kPointerSize + kPointerSize + |
533 kPointerSize + kPointerSize + kPointerSize + kPointerSize + kIntSize; | 540 kPointerSize + kPointerSize + kPointerSize + kPointerSize + kIntSize; |
534 | 541 |
535 static const size_t kSlotsBufferOffset = kLiveBytesOffset + kIntSize; | 542 static const size_t kSlotsBufferOffset = kLiveBytesOffset + kIntSize; |
536 | 543 |
537 static const size_t kWriteBarrierCounterOffset = | 544 static const size_t kWriteBarrierCounterOffset = |
538 kSlotsBufferOffset + kPointerSize + kPointerSize; | 545 kSlotsBufferOffset + kPointerSize + kPointerSize; |
539 | 546 |
540 static const size_t kHeaderSize = | 547 static const size_t kHeaderSize = kWriteBarrierCounterOffset + |
541 kWriteBarrierCounterOffset + kPointerSize + kIntSize + kIntSize + | 548 kPointerSize + // write_barrier_counter_ |
542 kPointerSize + 5 * kPointerSize + kPointerSize + kPointerSize; | 549 kIntSize + // progress_bar_ |
| 550 kIntSize + // high_water_mark_ |
| 551 kPointerSize + // mutex_ page lock |
| 552 kPointerSize + // parallel_sweeping_ |
| 553 5 * kPointerSize + // free list statistics |
| 554 kPointerSize + // next_chunk_ |
| 555 kPointerSize; // prev_chunk_ |
543 | 556 |
544 static const int kBodyOffset = | 557 static const int kBodyOffset = |
545 CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize); | 558 CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize); |
546 | 559 |
547 // The start offset of the object area in a page. Aligned to both maps and | 560 // The start offset of the object area in a page. Aligned to both maps and |
548 // code alignment to be suitable for both. Also aligned to 32 words because | 561 // code alignment to be suitable for both. Also aligned to 32 words because |
549 // the marking bitmap is arranged in 32 bit chunks. | 562 // the marking bitmap is arranged in 32 bit chunks. |
550 static const int kObjectStartAlignment = 32 * kPointerSize; | 563 static const int kObjectStartAlignment = 32 * kPointerSize; |
551 static const int kObjectStartOffset = | 564 static const int kObjectStartOffset = |
552 kBodyOffset - 1 + | 565 kBodyOffset - 1 + |
(...skipping 115 matching lines...)
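As a sanity check on the layout arithmetic in the hunk above: the patch rewrites kHeaderSize as a field-by-field sum (now including the new mutex_ slot), and kObjectStartOffset rounds the body offset up to a 32-word boundary so it satisfies map alignment, code alignment, and the 32-bit cells of the marking bitmap. The following compile-time sketch assumes a 64-bit build (kPointerSize = 8, kIntSize = 4); the exact kObjectStartOffset expression sits in the skipped lines, so the RoundUp helper here is a generic stand-in, not the header's formula.

// Illustrative sketch only; 64-bit field sizes assumed.
constexpr int kPointerSize = 8;
constexpr int kIntSize = 4;
constexpr int kObjectStartAlignment = 32 * kPointerSize;  // 256 bytes

// Round an offset up to the next multiple of a power-of-two alignment, which
// is what aligning the object area to kObjectStartAlignment boils down to.
constexpr int RoundUp(int offset, int alignment) {
  return (offset + alignment - 1) & ~(alignment - 1);
}

static_assert(RoundUp(1, kObjectStartAlignment) == 256, "rounds up");
static_assert(RoundUp(256, kObjectStartAlignment) == 256, "already aligned");
static_assert(RoundUp(257, kObjectStartAlignment) == 512, "next boundary");

// Mirror the new per-field accounting of kHeaderSize: these are the bytes
// the patch adds up after kWriteBarrierCounterOffset.
constexpr int kHeaderTail = kPointerSize +      // write_barrier_counter_
                            kIntSize +          // progress_bar_
                            kIntSize +          // high_water_mark_
                            kPointerSize +      // mutex_ page lock
                            kPointerSize +      // parallel_sweeping_
                            5 * kPointerSize +  // free list statistics
                            kPointerSize +      // next_chunk_
                            kPointerSize;       // prev_chunk_
static_assert(kHeaderTail == 88, "8+4+4+8+8+40+8+8 on a 64-bit build");

int main() { return 0; }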
668 SlotsBuffer* slots_buffer_; | 681 SlotsBuffer* slots_buffer_; |
669 SkipList* skip_list_; | 682 SkipList* skip_list_; |
670 intptr_t write_barrier_counter_; | 683 intptr_t write_barrier_counter_; |
671 // Used by the incremental marker to keep track of the scanning progress in | 684 // Used by the incremental marker to keep track of the scanning progress in |
672 // large objects that have a progress bar and are scanned in increments. | 685 // large objects that have a progress bar and are scanned in increments. |
673 int progress_bar_; | 686 int progress_bar_; |
674 // Assuming the initial allocation on a page is sequential, | 687 // Assuming the initial allocation on a page is sequential, |
675 // count highest number of bytes ever allocated on the page. | 688 // count highest number of bytes ever allocated on the page. |
676 int high_water_mark_; | 689 int high_water_mark_; |
677 | 690 |
| 691 base::Mutex* mutex_; |
678 base::AtomicWord parallel_sweeping_; | 692 base::AtomicWord parallel_sweeping_; |
679 | 693 |
680 // PagedSpace free-list statistics. | 694 // PagedSpace free-list statistics. |
681 int available_in_small_free_list_; | 695 int available_in_small_free_list_; |
682 int available_in_medium_free_list_; | 696 int available_in_medium_free_list_; |
683 int available_in_large_free_list_; | 697 int available_in_large_free_list_; |
684 int available_in_huge_free_list_; | 698 int available_in_huge_free_list_; |
685 int non_available_small_blocks_; | 699 int non_available_small_blocks_; |
686 | 700 |
687 static MemoryChunk* Initialize(Heap* heap, Address base, size_t size, | 701 static MemoryChunk* Initialize(Heap* heap, Address base, size_t size, |
(...skipping 2197 matching lines...)
2885 count = 0; | 2899 count = 0; |
2886 } | 2900 } |
2887 // Must be small, since an iteration is used for lookup. | 2901 // Must be small, since an iteration is used for lookup. |
2888 static const int kMaxComments = 64; | 2902 static const int kMaxComments = 64; |
2889 }; | 2903 }; |
2890 #endif | 2904 #endif |
2891 } | 2905 } |
2892 } // namespace v8::internal | 2906 } // namespace v8::internal |
2893 | 2907 |
2894 #endif // V8_HEAP_SPACES_H_ | 2908 #endif // V8_HEAP_SPACES_H_ |
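The last visible hunk keeps kMaxComments at 64 because, as its comment says, lookups walk the table by iteration rather than hashing. A hypothetical sketch of that kind of small fixed-size, linear-scan table follows; none of these names are from spaces.h, they only illustrate the design choice.

// Hypothetical illustration of a table that "must be small, since an
// iteration is used for lookup". Not V8 code.
#include <cstring>

struct CommentCounter {
  const char* comment;  // nullptr marks an unused slot
  int size;             // bytes attributed to this comment
  int count;            // number of occurrences
};

static const int kMaxComments = 64;
static CommentCounter counters[kMaxComments];

// Linear scan: acceptable for 64 entries in debug-only statistics code and
// avoids maintaining a hash table.
CommentCounter* FindOrAddCounter(const char* comment) {
  for (int i = 0; i < kMaxComments; i++) {
    if (counters[i].comment == nullptr) {
      counters[i].comment = comment;  // claim the first free slot
      return &counters[i];
    }
    if (strcmp(counters[i].comment, comment) == 0) return &counters[i];
  }
  return nullptr;  // table full; callers must tolerate this
}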