| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #ifndef V8_HEAP_SPACES_H_ | 5 #ifndef V8_HEAP_SPACES_H_ |
| 6 #define V8_HEAP_SPACES_H_ | 6 #define V8_HEAP_SPACES_H_ |
| 7 | 7 |
| 8 #include "src/allocation.h" | 8 #include "src/allocation.h" |
| 9 #include "src/atomic-utils.h" | 9 #include "src/atomic-utils.h" |
| 10 #include "src/base/atomicops.h" | 10 #include "src/base/atomicops.h" |
| (...skipping 378 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 389 static const intptr_t kSizeOffset = 0; | 389 static const intptr_t kSizeOffset = 0; |
| 390 | 390 |
| 391 static const intptr_t kLiveBytesOffset = | 391 static const intptr_t kLiveBytesOffset = |
| 392 kSizeOffset + kPointerSize // size_t size | 392 kSizeOffset + kPointerSize // size_t size |
| 393 + kIntptrSize // intptr_t flags_ | 393 + kIntptrSize // intptr_t flags_ |
| 394 + kPointerSize // Address area_start_ | 394 + kPointerSize // Address area_start_ |
| 395 + kPointerSize // Address area_end_ | 395 + kPointerSize // Address area_end_ |
| 396 + 2 * kPointerSize // base::VirtualMemory reservation_ | 396 + 2 * kPointerSize // base::VirtualMemory reservation_ |
| 397 + kPointerSize // Address owner_ | 397 + kPointerSize // Address owner_ |
| 398 + kPointerSize // Heap* heap_ | 398 + kPointerSize // Heap* heap_ |
| 399 + kIntSize; // int store_buffer_counter_ | 399 + kIntSize; // int progress_bar_ |
| 400 | 400 |
| 401 static const size_t kSlotsBufferOffset = | 401 static const size_t kSlotsBufferOffset = |
| 402 kLiveBytesOffset + kIntSize; // int live_byte_count_ | 402 kLiveBytesOffset + kIntSize; // int live_byte_count_ |
| 403 | 403 |
| 404 static const size_t kWriteBarrierCounterOffset = | 404 static const size_t kWriteBarrierCounterOffset = |
| 405 kSlotsBufferOffset + kPointerSize // SlotsBuffer* slots_buffer_; | 405 kSlotsBufferOffset + kPointerSize // SlotsBuffer* slots_buffer_; |
| 406 + kPointerSize; // SkipList* skip_list_; | 406 + kPointerSize; // SkipList* skip_list_; |
| 407 | 407 |
| 408 static const size_t kMinHeaderSize = | 408 static const size_t kMinHeaderSize = |
| 409 kWriteBarrierCounterOffset + | 409 kWriteBarrierCounterOffset + |
| 410 kIntptrSize // intptr_t write_barrier_counter_ | 410 kIntptrSize // intptr_t write_barrier_counter_ |
| 411 + kIntSize // int progress_bar_ | |
| 412 + kPointerSize // AtomicValue high_water_mark_ | 411 + kPointerSize // AtomicValue high_water_mark_ |
| 413 + kPointerSize // base::Mutex* mutex_ | 412 + kPointerSize // base::Mutex* mutex_ |
| 414 + kPointerSize // base::AtomicWord parallel_sweeping_ | 413 + kPointerSize // base::AtomicWord parallel_sweeping_ |
| 415 + kPointerSize // AtomicValue parallel_compaction_ | 414 + kPointerSize // AtomicValue parallel_compaction_ |
| 416 + 5 * kPointerSize // AtomicNumber free-list statistics | 415 + 5 * kPointerSize // AtomicNumber free-list statistics |
| 417 + kPointerSize // AtomicValue next_chunk_ | 416 + kPointerSize // AtomicValue next_chunk_ |
| 418 + kPointerSize; // AtomicValue prev_chunk_ | 417 + kPointerSize; // AtomicValue prev_chunk_ |
| 419 | 418 |
| 420 // We add some more space to the computed header size to account for missing | 419 // We add some more space to the computed header size to account for missing |
| 421 // alignment requirements in our computation. | 420 // alignment requirements in our computation. |
| 422 // Try to get kHeaderSize properly aligned on 32-bit and 64-bit machines. | 421 // Try to get kHeaderSize properly aligned on 32-bit and 64-bit machines. |
| 423 static const size_t kHeaderSize = kMinHeaderSize + kIntSize; | 422 static const size_t kHeaderSize = kMinHeaderSize; |
| 424 | 423 |
| 425 static const int kBodyOffset = | 424 static const int kBodyOffset = |
| 426 CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize); | 425 CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize); |
| 427 | 426 |
| 428 // The start offset of the object area in a page. Aligned to both maps and | 427 // The start offset of the object area in a page. Aligned to both maps and |
| 429 // code alignment to be suitable for both. Also aligned to 32 words because | 428 // code alignment to be suitable for both. Also aligned to 32 words because |
| 430 // the marking bitmap is arranged in 32 bit chunks. | 429 // the marking bitmap is arranged in 32 bit chunks. |
| 431 static const int kObjectStartAlignment = 32 * kPointerSize; | 430 static const int kObjectStartAlignment = 32 * kPointerSize; |
| 432 static const int kObjectStartOffset = | 431 static const int kObjectStartOffset = |
| 433 kBodyOffset - 1 + | 432 kBodyOffset - 1 + |
| (...skipping 78 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 512 bool scan_on_scavenge() { return IsFlagSet(SCAN_ON_SCAVENGE); } | 511 bool scan_on_scavenge() { return IsFlagSet(SCAN_ON_SCAVENGE); } |
| 513 void initialize_scan_on_scavenge(bool scan) { | 512 void initialize_scan_on_scavenge(bool scan) { |
| 514 if (scan) { | 513 if (scan) { |
| 515 SetFlag(SCAN_ON_SCAVENGE); | 514 SetFlag(SCAN_ON_SCAVENGE); |
| 516 } else { | 515 } else { |
| 517 ClearFlag(SCAN_ON_SCAVENGE); | 516 ClearFlag(SCAN_ON_SCAVENGE); |
| 518 } | 517 } |
| 519 } | 518 } |
| 520 inline void set_scan_on_scavenge(bool scan); | 519 inline void set_scan_on_scavenge(bool scan); |
| 521 | 520 |
| 522 int store_buffer_counter() { return store_buffer_counter_; } | |
| 523 void set_store_buffer_counter(int counter) { | |
| 524 store_buffer_counter_ = counter; | |
| 525 } | |
| 526 | |
| 527 bool Contains(Address addr) { | 521 bool Contains(Address addr) { |
| 528 return addr >= area_start() && addr < area_end(); | 522 return addr >= area_start() && addr < area_end(); |
| 529 } | 523 } |
| 530 | 524 |
| 531 // Checks whether addr can be a limit of addresses in this page. | 525 // Checks whether addr can be a limit of addresses in this page. |
| 532 // It's a limit if it's in the page, or if it's just after the | 526 // It's a limit if it's in the page, or if it's just after the |
| 533 // last byte of the page. | 527 // last byte of the page. |
| 534 bool ContainsLimit(Address addr) { | 528 bool ContainsLimit(Address addr) { |
| 535 return addr >= area_start() && addr <= area_end(); | 529 return addr >= area_start() && addr <= area_end(); |
| 536 } | 530 } |
| (...skipping 207 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 744 Address area_start_; | 738 Address area_start_; |
| 745 Address area_end_; | 739 Address area_end_; |
| 746 | 740 |
| 747 // If the chunk needs to remember its memory reservation, it is stored here. | 741 // If the chunk needs to remember its memory reservation, it is stored here. |
| 748 base::VirtualMemory reservation_; | 742 base::VirtualMemory reservation_; |
| 749 // The identity of the owning space. This is tagged as a failure pointer, but | 743 // The identity of the owning space. This is tagged as a failure pointer, but |
| 750 // no failure can be in an object, so this can be distinguished from any entry | 744 // no failure can be in an object, so this can be distinguished from any entry |
| 751 // in a fixed array. | 745 // in a fixed array. |
| 752 Address owner_; | 746 Address owner_; |
| 753 Heap* heap_; | 747 Heap* heap_; |
| 754 // Used by the store buffer to keep track of which pages to mark scan-on- | 748 // Used by the incremental marker to keep track of the scanning progress in |
| 755 // scavenge. | 749 // large objects that have a progress bar and are scanned in increments. |
| 756 int store_buffer_counter_; | 750 int progress_bar_; |
| 757 // Count of bytes marked black on page. | 751 // Count of bytes marked black on page. |
| 758 int live_byte_count_; | 752 int live_byte_count_; |
| 759 SlotsBuffer* slots_buffer_; | 753 SlotsBuffer* slots_buffer_; |
| 760 SkipList* skip_list_; | 754 SkipList* skip_list_; |
| 761 intptr_t write_barrier_counter_; | 755 intptr_t write_barrier_counter_; |
| 762 // Used by the incremental marker to keep track of the scanning progress in | |
| 763 // large objects that have a progress bar and are scanned in increments. | |
| 764 int progress_bar_; | |
| 765 // Assuming the initial allocation on a page is sequential, | 756 // Assuming the initial allocation on a page is sequential, |
| 766 // count highest number of bytes ever allocated on the page. | 757 // count highest number of bytes ever allocated on the page. |
| 767 AtomicValue<intptr_t> high_water_mark_; | 758 AtomicValue<intptr_t> high_water_mark_; |
| 768 | 759 |
| 769 base::Mutex* mutex_; | 760 base::Mutex* mutex_; |
| 770 AtomicValue<ParallelSweepingState> parallel_sweeping_; | 761 AtomicValue<ParallelSweepingState> parallel_sweeping_; |
| 771 AtomicValue<ParallelCompactingState> parallel_compaction_; | 762 AtomicValue<ParallelCompactingState> parallel_compaction_; |
| 772 | 763 |
| 773 // PagedSpace free-list statistics. | 764 // PagedSpace free-list statistics. |
| 774 AtomicNumber<intptr_t> available_in_small_free_list_; | 765 AtomicNumber<intptr_t> available_in_small_free_list_; |
| (...skipping 2454 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 3229 count = 0; | 3220 count = 0; |
| 3230 } | 3221 } |
| 3231 // Must be small, since an iteration is used for lookup. | 3222 // Must be small, since an iteration is used for lookup. |
| 3232 static const int kMaxComments = 64; | 3223 static const int kMaxComments = 64; |
| 3233 }; | 3224 }; |
| 3234 #endif | 3225 #endif |
| 3235 } // namespace internal | 3226 } // namespace internal |
| 3236 } // namespace v8 | 3227 } // namespace v8 |
| 3237 | 3228 |
| 3238 #endif // V8_HEAP_SPACES_H_ | 3229 #endif // V8_HEAP_SPACES_H_ |
| OLD | NEW |