| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #ifndef V8_HEAP_SPACES_H_ | 5 #ifndef V8_HEAP_SPACES_H_ |
| 6 #define V8_HEAP_SPACES_H_ | 6 #define V8_HEAP_SPACES_H_ |
| 7 | 7 |
| 8 #include "src/allocation.h" | 8 #include "src/allocation.h" |
| 9 #include "src/atomic-utils.h" | 9 #include "src/atomic-utils.h" |
| 10 #include "src/base/atomicops.h" | 10 #include "src/base/atomicops.h" |
| (...skipping 284 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 295 enum MemoryChunkFlags { | 295 enum MemoryChunkFlags { |
| 296 IS_EXECUTABLE, | 296 IS_EXECUTABLE, |
| 297 POINTERS_TO_HERE_ARE_INTERESTING, | 297 POINTERS_TO_HERE_ARE_INTERESTING, |
| 298 POINTERS_FROM_HERE_ARE_INTERESTING, | 298 POINTERS_FROM_HERE_ARE_INTERESTING, |
| 299 IN_FROM_SPACE, // Mutually exclusive with IN_TO_SPACE. | 299 IN_FROM_SPACE, // Mutually exclusive with IN_TO_SPACE. |
| 300 IN_TO_SPACE, // All pages in new space have one of these two set. | 300 IN_TO_SPACE, // All pages in new space have one of these two set. |
| 301 NEW_SPACE_BELOW_AGE_MARK, | 301 NEW_SPACE_BELOW_AGE_MARK, |
| 302 EVACUATION_CANDIDATE, | 302 EVACUATION_CANDIDATE, |
| 303 RESCAN_ON_EVACUATION, | 303 RESCAN_ON_EVACUATION, |
| 304 NEVER_EVACUATE, // May contain immortal immutables. | 304 NEVER_EVACUATE, // May contain immortal immutables. |
| 305 POPULAR_PAGE, // Slots buffer of this page overflowed on the previous GC. | |
| 306 | 305 |
| 307 // Large objects can have a progress bar in their page header. These objects | 306 // Large objects can have a progress bar in their page header. These objects |
| 308 // are scanned in increments and will be kept black while being scanned. | 307 // are scanned in increments and will be kept black while being scanned. |
| 309 // Even if the mutator writes to them they will be kept black and a white | 308 // Even if the mutator writes to them they will be kept black and a white |
| 310 // to grey transition is performed in the value. | 309 // to grey transition is performed in the value. |
| 311 HAS_PROGRESS_BAR, | 310 HAS_PROGRESS_BAR, |
| 312 | 311 |
| 313 // This flag is intended to be used for testing. Works only when both | 312 // This flag is intended to be used for testing. Works only when both |
| 314 // FLAG_stress_compaction and FLAG_manual_evacuation_candidates_selection | 313 // FLAG_stress_compaction and FLAG_manual_evacuation_candidates_selection |
| 315 // are set. It forces the page to become an evacuation candidate at next | 314 // are set. It forces the page to become an evacuation candidate at next |
| 316 // candidates selection cycle. | 315 // candidates selection cycle. |
| 317 FORCE_EVACUATION_CANDIDATE_FOR_TESTING, | 316 FORCE_EVACUATION_CANDIDATE_FOR_TESTING, |
| 318 | 317 |
| 319 // This flag is intended to be used for testing. | 318 // This flag is intended to be used for testing. |
| 320 NEVER_ALLOCATE_ON_PAGE, | 319 NEVER_ALLOCATE_ON_PAGE, |
| 321 | 320 |
| 322 // The memory chunk is already logically freed, however the actual freeing | 321 // The memory chunk is already logically freed, however the actual freeing |
| 323 // still has to be performed. | 322 // still has to be performed. |
| 324 PRE_FREED, | 323 PRE_FREED, |
| 325 | 324 |
| 326 // |COMPACTION_WAS_ABORTED|: Indicates that the compaction in this page | 325 // |COMPACTION_WAS_ABORTED|: Indicates that the compaction in this page |
| 327 // has been aborted and needs special handling by the sweeper. | 326 // has been aborted and needs special handling by the sweeper. |
| 328 COMPACTION_WAS_ABORTED, | 327 COMPACTION_WAS_ABORTED, |
| 329 | 328 |
| 330 // Last flag, keep at bottom. | 329 // Last flag, keep at bottom. |
| 331 NUM_MEMORY_CHUNK_FLAGS | 330 NUM_MEMORY_CHUNK_FLAGS |
| 332 }; | 331 }; |
| 333 | 332 |
| 334 // |kCompactionDone|: Initial compaction state of a |MemoryChunk|. | |
| 335 // |kCompactingInProgress|: Parallel compaction is currently in progress. | |
| 336 // |kCompactingFinalize|: Parallel compaction is done but the chunk needs to | |
| 337 // be finalized. | |
| 338 // |kCompactingAborted|: Parallel compaction has been aborted, which should | |
| 339 // for now only happen in OOM scenarios. | |
| 340 enum ParallelCompactingState { | |
| 341 kCompactingDone, | |
| 342 kCompactingInProgress, | |
| 343 kCompactingFinalize, | |
| 344 kCompactingAborted, | |
| 345 }; | |
| 346 | |
| 347 // |kSweepingDone|: The page state when sweeping is complete or sweeping must | 333 // |kSweepingDone|: The page state when sweeping is complete or sweeping must |
| 348 // not be performed on that page. Sweeper threads that are done with their | 334 // not be performed on that page. Sweeper threads that are done with their |
| 349 // work will set this value and not touch the page anymore. | 335 // work will set this value and not touch the page anymore. |
| 350 // |kSweepingPending|: This page is ready for parallel sweeping. | 336 // |kSweepingPending|: This page is ready for parallel sweeping. |
| 351 // |kSweepingInProgress|: This page is currently swept by a sweeper thread. | 337 // |kSweepingInProgress|: This page is currently swept by a sweeper thread. |
| 352 enum ConcurrentSweepingState { | 338 enum ConcurrentSweepingState { |
| 353 kSweepingDone, | 339 kSweepingDone, |
| 354 kSweepingPending, | 340 kSweepingPending, |
| 355 kSweepingInProgress, | 341 kSweepingInProgress, |
| 356 }; | 342 }; |
| (...skipping 39 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 396 kOldToNewSlotsOffset + kPointerSize // SlotSet* old_to_new_slots_; | 382 kOldToNewSlotsOffset + kPointerSize // SlotSet* old_to_new_slots_; |
| 397 + kPointerSize // SlotSet* old_to_old_slots_; | 383 + kPointerSize // SlotSet* old_to_old_slots_; |
| 398 + kPointerSize // TypedSlotSet* typed_old_to_old_slots_; | 384 + kPointerSize // TypedSlotSet* typed_old_to_old_slots_; |
| 399 + kPointerSize; // SkipList* skip_list_; | 385 + kPointerSize; // SkipList* skip_list_; |
| 400 | 386 |
| 401 static const size_t kMinHeaderSize = | 387 static const size_t kMinHeaderSize = |
| 402 kWriteBarrierCounterOffset + | 388 kWriteBarrierCounterOffset + |
| 403 kIntptrSize // intptr_t write_barrier_counter_ | 389 kIntptrSize // intptr_t write_barrier_counter_ |
| 404 + kPointerSize // AtomicValue high_water_mark_ | 390 + kPointerSize // AtomicValue high_water_mark_ |
| 405 + kPointerSize // base::Mutex* mutex_ | 391 + kPointerSize // base::Mutex* mutex_ |
| 406 + kPointerSize // base::AtomicWord parallel_sweeping_ | 392 + kPointerSize // base::AtomicWord concurrent_sweeping_ |
| 407 + kPointerSize // AtomicValue parallel_compaction_ | |
| 408 + 2 * kPointerSize // AtomicNumber free-list statistics | 393 + 2 * kPointerSize // AtomicNumber free-list statistics |
| 409 + kPointerSize // AtomicValue next_chunk_ | 394 + kPointerSize // AtomicValue next_chunk_ |
| 410 + kPointerSize; // AtomicValue prev_chunk_ | 395 + kPointerSize; // AtomicValue prev_chunk_ |
| 411 | 396 |
| 412 // We add some more space to the computed header size to account for missing | 397 // We add some more space to the computed header size to account for missing |
| 413 // alignment requirements in our computation. | 398 // alignment requirements in our computation. |
| 414 // Try to get kHeaderSize properly aligned on 32-bit and 64-bit machines. | 399 // Try to get kHeaderSize properly aligned on 32-bit and 64-bit machines. |
| 415 static const size_t kHeaderSize = kMinHeaderSize; | 400 static const size_t kHeaderSize = kMinHeaderSize; |
| 416 | 401 |
| 417 static const int kBodyOffset = | 402 static const int kBodyOffset = |
| (...skipping 46 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 464 // Checks whether |addr| can be a limit of addresses in this page. It's a | 449 // Checks whether |addr| can be a limit of addresses in this page. It's a |
| 465 // limit if it's in the page, or if it's just after the last byte of the page. | 450 // limit if it's in the page, or if it's just after the last byte of the page. |
| 466 bool ContainsLimit(Address addr) { | 451 bool ContainsLimit(Address addr) { |
| 467 return addr >= area_start() && addr <= area_end(); | 452 return addr >= area_start() && addr <= area_end(); |
| 468 } | 453 } |
| 469 | 454 |
| 470 AtomicValue<ConcurrentSweepingState>& concurrent_sweeping_state() { | 455 AtomicValue<ConcurrentSweepingState>& concurrent_sweeping_state() { |
| 471 return concurrent_sweeping_; | 456 return concurrent_sweeping_; |
| 472 } | 457 } |
| 473 | 458 |
| 474 AtomicValue<ParallelCompactingState>& parallel_compaction_state() { | |
| 475 return parallel_compaction_; | |
| 476 } | |
| 477 | |
| 478 // Manage live byte count, i.e., count of bytes in black objects. | 459 // Manage live byte count, i.e., count of bytes in black objects. |
| 479 inline void ResetLiveBytes(); | 460 inline void ResetLiveBytes(); |
| 480 inline void IncrementLiveBytes(int by); | 461 inline void IncrementLiveBytes(int by); |
| 481 | 462 |
| 482 int LiveBytes() { | 463 int LiveBytes() { |
| 483 DCHECK_LE(static_cast<size_t>(live_byte_count_), size_); | 464 DCHECK_LE(static_cast<size_t>(live_byte_count_), size_); |
| 484 return live_byte_count_; | 465 return live_byte_count_; |
| 485 } | 466 } |
| 486 | 467 |
| 487 void SetLiveBytes(int live_bytes) { | 468 void SetLiveBytes(int live_bytes) { |
| (...skipping 206 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 694 | 675 |
| 695 intptr_t write_barrier_counter_; | 676 intptr_t write_barrier_counter_; |
| 696 | 677 |
| 697 // Assuming the initial allocation on a page is sequential, | 678 // Assuming the initial allocation on a page is sequential, |
| 698 // count highest number of bytes ever allocated on the page. | 679 // count highest number of bytes ever allocated on the page. |
| 699 AtomicValue<intptr_t> high_water_mark_; | 680 AtomicValue<intptr_t> high_water_mark_; |
| 700 | 681 |
| 701 base::Mutex* mutex_; | 682 base::Mutex* mutex_; |
| 702 | 683 |
| 703 AtomicValue<ConcurrentSweepingState> concurrent_sweeping_; | 684 AtomicValue<ConcurrentSweepingState> concurrent_sweeping_; |
| 704 AtomicValue<ParallelCompactingState> parallel_compaction_; | |
| 705 | 685 |
| 706 // PagedSpace free-list statistics. | 686 // PagedSpace free-list statistics. |
| 707 AtomicNumber<intptr_t> available_in_free_list_; | 687 AtomicNumber<intptr_t> available_in_free_list_; |
| 708 AtomicNumber<intptr_t> wasted_memory_; | 688 AtomicNumber<intptr_t> wasted_memory_; |
| 709 | 689 |
| 710 // next_chunk_ holds a pointer of type MemoryChunk | 690 // next_chunk_ holds a pointer of type MemoryChunk |
| 711 AtomicValue<MemoryChunk*> next_chunk_; | 691 AtomicValue<MemoryChunk*> next_chunk_; |
| 712 // prev_chunk_ holds a pointer of type MemoryChunk | 692 // prev_chunk_ holds a pointer of type MemoryChunk |
| 713 AtomicValue<MemoryChunk*> prev_chunk_; | 693 AtomicValue<MemoryChunk*> prev_chunk_; |
| 714 | 694 |
| (...skipping 2324 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 3039 count = 0; | 3019 count = 0; |
| 3040 } | 3020 } |
| 3041 // Must be small, since an iteration is used for lookup. | 3021 // Must be small, since an iteration is used for lookup. |
| 3042 static const int kMaxComments = 64; | 3022 static const int kMaxComments = 64; |
| 3043 }; | 3023 }; |
| 3044 #endif | 3024 #endif |
| 3045 } // namespace internal | 3025 } // namespace internal |
| 3046 } // namespace v8 | 3026 } // namespace v8 |
| 3047 | 3027 |
| 3048 #endif // V8_HEAP_SPACES_H_ | 3028 #endif // V8_HEAP_SPACES_H_ |
| OLD | NEW |