OLD | NEW |
---|---|
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #ifndef V8_HEAP_SPACES_H_ | 5 #ifndef V8_HEAP_SPACES_H_ |
6 #define V8_HEAP_SPACES_H_ | 6 #define V8_HEAP_SPACES_H_ |
7 | 7 |
8 #include "src/allocation.h" | 8 #include "src/allocation.h" |
9 #include "src/base/atomicops.h" | 9 #include "src/base/atomicops.h" |
10 #include "src/base/bits.h" | 10 #include "src/base/bits.h" |
(...skipping 434 matching lines...) | |
445 | 445 |
446 ParallelSweepingState parallel_sweeping() { | 446 ParallelSweepingState parallel_sweeping() { |
447 return static_cast<ParallelSweepingState>( | 447 return static_cast<ParallelSweepingState>( |
448 base::Acquire_Load(&parallel_sweeping_)); | 448 base::Acquire_Load(&parallel_sweeping_)); |
449 } | 449 } |
450 | 450 |
451 void set_parallel_sweeping(ParallelSweepingState state) { | 451 void set_parallel_sweeping(ParallelSweepingState state) { |
452 base::Release_Store(&parallel_sweeping_, state); | 452 base::Release_Store(&parallel_sweeping_, state); |
453 } | 453 } |
454 | 454 |
455 bool TryParallelSweeping() { | 455 bool TryLock() { return mutex_->TryLock(); } |
456 return base::Acquire_CompareAndSwap(&parallel_sweeping_, SWEEPING_PENDING, | 456 |
457 SWEEPING_IN_PROGRESS) == | 457 base::Mutex* mutex() { return mutex_; } |
458 SWEEPING_PENDING; | 458 |
459 void WaitUntilSweepingCompleted() { | |
460 mutex_->Lock(); | |
461 mutex_->Unlock(); | |
462 DCHECK(SweepingCompleted()); | |
459 } | 463 } |
460 | 464 |
461 bool SweepingCompleted() { return parallel_sweeping() <= SWEEPING_FINALIZE; } | 465 bool SweepingCompleted() { return parallel_sweeping() <= SWEEPING_FINALIZE; } |
462 | 466 |
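The CAS-based TryParallelSweeping above is replaced by a per-page mutex: a concurrent sweeper claims a page with TryLock() and holds the lock for the duration of the sweep, so WaitUntilSweepingCompleted() can block simply by acquiring and releasing that same mutex. A minimal standalone sketch of the hand-off, using std::mutex instead of v8::base::Mutex and with all names hypothetical:

// Toy model of the new locking protocol: the sweeper owns the page mutex
// while it sweeps; a waiter blocks by locking and then unlocking it.
#include <atomic>
#include <cassert>
#include <mutex>
#include <thread>

struct FakePage {
  std::mutex mutex;                        // stands in for MemoryChunk::mutex_
  std::atomic<bool> sweeping_done{false};  // stands in for SweepingCompleted()
};

void WaitUntilSweepingCompleted(FakePage* page) {
  page->mutex.lock();    // blocks until the sweeper calls unlock()
  page->mutex.unlock();
  assert(page->sweeping_done.load());
}

int main() {
  FakePage page;
  std::atomic<bool> claimed{false};

  std::thread sweeper([&] {
    bool got_it = page.mutex.try_lock();   // claim the page, like TryLock()
    assert(got_it);
    claimed = true;
    // ... sweeping work would happen here ...
    page.sweeping_done = true;
    page.mutex.unlock();                   // releases any waiter
  });

  while (!claimed.load()) std::this_thread::yield();  // page is now claimed
  WaitUntilSweepingCompleted(&page);
  sweeper.join();
  return 0;
}

The assert in the waiter mirrors the DCHECK(SweepingCompleted()) in the new code: once the lock can be taken, the sweeper has necessarily finished.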
463 // Manage live byte count (count of bytes known to be live, | 467 // Manage live byte count (count of bytes known to be live, |
464 // because they are marked black). | 468 // because they are marked black). |
465 void ResetLiveBytes() { | 469 void ResetLiveBytes() { |
466 if (FLAG_gc_verbose) { | 470 if (FLAG_gc_verbose) { |
467 PrintF("ResetLiveBytes:%p:%x->0\n", static_cast<void*>(this), | 471 PrintF("ResetLiveBytes:%p:%x->0\n", static_cast<void*>(this), |
468 live_byte_count_); | 472 live_byte_count_); |
(...skipping 61 matching lines...) | |
530 | 534 |
531 static const intptr_t kLiveBytesOffset = | 535 static const intptr_t kLiveBytesOffset = |
532 kSizeOffset + kPointerSize + kPointerSize + kPointerSize + kPointerSize + | 536 kSizeOffset + kPointerSize + kPointerSize + kPointerSize + kPointerSize + |
533 kPointerSize + kPointerSize + kPointerSize + kPointerSize + kIntSize; | 537 kPointerSize + kPointerSize + kPointerSize + kPointerSize + kIntSize; |
534 | 538 |
535 static const size_t kSlotsBufferOffset = kLiveBytesOffset + kIntSize; | 539 static const size_t kSlotsBufferOffset = kLiveBytesOffset + kIntSize; |
536 | 540 |
537 static const size_t kWriteBarrierCounterOffset = | 541 static const size_t kWriteBarrierCounterOffset = |
538 kSlotsBufferOffset + kPointerSize + kPointerSize; | 542 kSlotsBufferOffset + kPointerSize + kPointerSize; |
539 | 543 |
540 static const size_t kHeaderSize = | 544 static const size_t kHeaderSize = kWriteBarrierCounterOffset + kPointerSize + |
541 kWriteBarrierCounterOffset + kPointerSize + kIntSize + kIntSize + | 545 kIntSize + kIntSize + kPointerSize + |
542 kPointerSize + 5 * kPointerSize + kPointerSize + kPointerSize; | 546 5 * kPointerSize + kPointerSize + |
547 kPointerSize + kPointerSize; | |
Michael Lippautz
2015/07/22 13:17:01
Can we make this more verbose by adding a comment?
Hannes Payer (out of office)
2015/07/22 13:29:25
Done.
| |
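Per the review exchange above, a multi-term size sum like kHeaderSize is easy to let drift out of sync when a field such as mutex_ is added. One commenting style that addresses this is to tie each term to the field it accounts for, sketched here on a hypothetical layout (the term-to-field mapping below is an assumption for illustration, not taken from this diff):

#include <cstddef>

// Hypothetical example of annotating each size term with the field it
// accounts for; the field names and their order are illustrative only.
static const size_t kPointerSize = sizeof(void*);
static const size_t kIntSize = sizeof(int);

static const size_t kExampleHeaderSize =
    kPointerSize      // slots_buffer_
    + kPointerSize    // skip_list_
    + kPointerSize    // write_barrier_counter_
    + kIntSize        // progress_bar_
    + kIntSize        // high_water_mark_
    + kPointerSize    // mutex_ (the field added in this change)
    + kPointerSize    // parallel_sweeping_
    + 5 * kIntSize;   // free-list statistics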
543 | 548 |
544 static const int kBodyOffset = | 549 static const int kBodyOffset = |
545 CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize); | 550 CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize); |
546 | 551 |
547 // The start offset of the object area in a page. Aligned to both maps and | 552 // The start offset of the object area in a page. Aligned to both maps and |
548 // code alignment to be suitable for both. Also aligned to 32 words because | 553 // code alignment to be suitable for both. Also aligned to 32 words because |
549 // the marking bitmap is arranged in 32 bit chunks. | 554 // the marking bitmap is arranged in 32 bit chunks. |
550 static const int kObjectStartAlignment = 32 * kPointerSize; | 555 static const int kObjectStartAlignment = 32 * kPointerSize; |
551 static const int kObjectStartOffset = | 556 static const int kObjectStartOffset = |
552 kBodyOffset - 1 + | 557 kBodyOffset - 1 + |
(...skipping 115 matching lines...) | |
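The kObjectStartOffset definition above (its right-hand side is elided in this diff) aligns the start of the object area to kObjectStartAlignment, i.e. to a 32-word boundary, as the preceding comment explains. A small self-contained sketch of that kind of round-up, with a made-up body offset rather than the value computed in spaces.h:

#include <cstdio>

// One common way to round an offset up to a power-of-two alignment; the
// expression actually used in spaces.h is elided in this diff, so this is
// only an illustration of the effect described in the comment above.
constexpr int RoundUp(int offset, int alignment) {
  return (offset + alignment - 1) & ~(alignment - 1);
}

int main() {
  const int kPointerSize = static_cast<int>(sizeof(void*));
  const int kObjectStartAlignment = 32 * kPointerSize;  // 256 on 64-bit
  const int kFakeBodyOffset = 200;  // hypothetical; the real value is computed
  std::printf("object area would start at offset %d\n",
              RoundUp(kFakeBodyOffset, kObjectStartAlignment));
  return 0;
}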
668 SlotsBuffer* slots_buffer_; | 673 SlotsBuffer* slots_buffer_; |
669 SkipList* skip_list_; | 674 SkipList* skip_list_; |
670 intptr_t write_barrier_counter_; | 675 intptr_t write_barrier_counter_; |
671 // Used by the incremental marker to keep track of the scanning progress in | 676 // Used by the incremental marker to keep track of the scanning progress in |
672 // large objects that have a progress bar and are scanned in increments. | 677 // large objects that have a progress bar and are scanned in increments. |
673 int progress_bar_; | 678 int progress_bar_; |
674 // Assuming the initial allocation on a page is sequential, | 679 // Assuming the initial allocation on a page is sequential, |
675 // count highest number of bytes ever allocated on the page. | 680 // count highest number of bytes ever allocated on the page. |
676 int high_water_mark_; | 681 int high_water_mark_; |
677 | 682 |
683 base::Mutex* mutex_; | |
678 base::AtomicWord parallel_sweeping_; | 684 base::AtomicWord parallel_sweeping_; |
679 | 685 |
680 // PagedSpace free-list statistics. | 686 // PagedSpace free-list statistics. |
681 int available_in_small_free_list_; | 687 int available_in_small_free_list_; |
682 int available_in_medium_free_list_; | 688 int available_in_medium_free_list_; |
683 int available_in_large_free_list_; | 689 int available_in_large_free_list_; |
684 int available_in_huge_free_list_; | 690 int available_in_huge_free_list_; |
685 int non_available_small_blocks_; | 691 int non_available_small_blocks_; |
686 | 692 |
687 static MemoryChunk* Initialize(Heap* heap, Address base, size_t size, | 693 static MemoryChunk* Initialize(Heap* heap, Address base, size_t size, |
(...skipping 2197 matching lines...) | |
2885 count = 0; | 2891 count = 0; |
2886 } | 2892 } |
2887 // Must be small, since an iteration is used for lookup. | 2893 // Must be small, since an iteration is used for lookup. |
2888 static const int kMaxComments = 64; | 2894 static const int kMaxComments = 64; |
2889 }; | 2895 }; |
2890 #endif | 2896 #endif |
2891 } | 2897 } |
2892 } // namespace v8::internal | 2898 } // namespace v8::internal |
2893 | 2899 |
2894 #endif // V8_HEAP_SPACES_H_ | 2900 #endif // V8_HEAP_SPACES_H_ |