OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #ifndef V8_HEAP_SPACES_H_ | 5 #ifndef V8_HEAP_SPACES_H_ |
6 #define V8_HEAP_SPACES_H_ | 6 #define V8_HEAP_SPACES_H_ |
7 | 7 |
8 #include "src/allocation.h" | 8 #include "src/allocation.h" |
9 #include "src/atomic-utils.h" | 9 #include "src/atomic-utils.h" |
10 #include "src/base/atomicops.h" | 10 #include "src/base/atomicops.h" |
(...skipping 301 matching lines...)
312 // Even if the mutator writes to them they will be kept black and a white | 312 // Even if the mutator writes to them they will be kept black and a white |
313 // to grey transition is performed in the value. | 313 // to grey transition is performed in the value. |
314 HAS_PROGRESS_BAR, | 314 HAS_PROGRESS_BAR, |
315 | 315 |
316 // This flag is intended to be used for testing. Works only when both | 316 // This flag is intended to be used for testing. Works only when both |
317 // FLAG_stress_compaction and FLAG_manual_evacuation_candidates_selection | 317 // FLAG_stress_compaction and FLAG_manual_evacuation_candidates_selection |
318 // are set. It forces the page to become an evacuation candidate at next | 318 // are set. It forces the page to become an evacuation candidate at next |
319 // candidates selection cycle. | 319 // candidates selection cycle. |
320 FORCE_EVACUATION_CANDIDATE_FOR_TESTING, | 320 FORCE_EVACUATION_CANDIDATE_FOR_TESTING, |
321 | 321 |
322 // This flag is inteded to be used for testing. | 322 // This flag is intended to be used for testing. |
323 NEVER_ALLOCATE_ON_PAGE, | 323 NEVER_ALLOCATE_ON_PAGE, |
324 | 324 |
325 // The memory chunk is already logically freed, however the actual freeing | 325 // The memory chunk is already logically freed, however the actual freeing |
326 // still has to be performed. | 326 // still has to be performed. |
327 PRE_FREED, | 327 PRE_FREED, |
328 | 328 |
329 // |COMPACTION_WAS_ABORTED|: Indicates that the compaction in this page | 329 // |COMPACTION_WAS_ABORTED|: Indicates that the compaction in this page |
330 // has been aborted and needs special handling by the sweeper. | 330 // has been aborted and needs special handling by the sweeper. |
331 COMPACTION_WAS_ABORTED, | 331 COMPACTION_WAS_ABORTED, |
332 | 332 |
(...skipping 89 matching lines...)
422 // The start offset of the object area in a page. Aligned to both maps and | 422 // The start offset of the object area in a page. Aligned to both maps and |
423 // code alignment to be suitable for both. Also aligned to 32 words because | 423 // code alignment to be suitable for both. Also aligned to 32 words because |
424 // the marking bitmap is arranged in 32 bit chunks. | 424 // the marking bitmap is arranged in 32 bit chunks. |
425 static const int kObjectStartAlignment = 32 * kPointerSize; | 425 static const int kObjectStartAlignment = 32 * kPointerSize; |
426 static const int kObjectStartOffset = | 426 static const int kObjectStartOffset = |
427 kBodyOffset - 1 + | 427 kBodyOffset - 1 + |
428 (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment); | 428 (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment); |
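Note on the arithmetic above: on a 64-bit build kPointerSize is 8, so kObjectStartAlignment is 256 bytes, and the expression rounds kBodyOffset up to the next 256-byte boundary (staying put when already aligned). A self-contained sketch of that identity; the 168-byte header value is made up for illustration:

    #include <cassert>

    // Round |offset| up to the next multiple of |alignment| -- the same
    // identity kObjectStartOffset uses, valid for any positive offset.
    int RoundUpToAlignment(int offset, int alignment) {
      return (offset - 1) + (alignment - (offset - 1) % alignment);
    }

    int main() {
      assert(RoundUpToAlignment(168, 256) == 256);  // hypothetical header size
      assert(RoundUpToAlignment(256, 256) == 256);  // already aligned: unchanged
      assert(RoundUpToAlignment(257, 256) == 512);  // always rounds up
      return 0;
    }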
429 | 429 |
430 static const int kFlagsOffset = kPointerSize; | 430 static const int kFlagsOffset = kPointerSize; |
431 | 431 |
432 static void IncrementLiveBytesFromMutator(HeapObject* object, int by); | 432 static inline void IncrementLiveBytesFromMutator(HeapObject* object, int by); |
| 433 static inline void IncrementLiveBytesFromGC(HeapObject* object, int by); |
433 | 434 |
434 // Only works if the pointer is in the first kPageSize of the MemoryChunk. | 435 // Only works if the pointer is in the first kPageSize of the MemoryChunk. |
435 static MemoryChunk* FromAddress(Address a) { | 436 static MemoryChunk* FromAddress(Address a) { |
436 return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask); | 437 return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask); |
437 } | 438 } |
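Because chunks live at a power-of-two alignment, masking the low bits off any interior address recovers the chunk header, which is all FromAddress does. A minimal sketch, assuming a 1 MB chunk alignment (the real kAlignmentMask is derived from the actual chunk size):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uintptr_t kAlignment = uintptr_t{1} << 20;  // assumed 1 MB
      const uintptr_t kAlignmentMask = kAlignment - 1;
      uintptr_t chunk_base = 0x40300000;         // any 1 MB-aligned address
      uintptr_t interior = chunk_base + 0x1234;  // pointer into the chunk
      assert((interior & ~kAlignmentMask) == chunk_base);
      return 0;
    }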
438 | 439 |
439 static const MemoryChunk* FromAddress(const byte* a) { | |
440 return reinterpret_cast<const MemoryChunk*>(OffsetFrom(a) & | |
441 ~kAlignmentMask); | |
442 } | |
443 | |
444 static void IncrementLiveBytesFromGC(HeapObject* object, int by) { | |
445 MemoryChunk::FromAddress(object->address())->IncrementLiveBytes(by); | |
446 } | |
447 | |
448 // Only works for addresses in pointer spaces, not data or code spaces. | 440 // Only works for addresses in pointer spaces, not data or code spaces. |
449 static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr); | 441 static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr); |
450 | 442 |
451 static inline uint32_t FastAddressToMarkbitIndex(Address addr) { | |
452 const intptr_t offset = reinterpret_cast<intptr_t>(addr) & kAlignmentMask; | |
453 return static_cast<uint32_t>(offset) >> kPointerSizeLog2; | |
454 } | |
455 | |
456 static inline void UpdateHighWaterMark(Address mark) { | 443 static inline void UpdateHighWaterMark(Address mark) { |
457 if (mark == nullptr) return; | 444 if (mark == nullptr) return; |
458 // Need to subtract one from the mark because when a chunk is full the | 445 // Need to subtract one from the mark because when a chunk is full the |
459 // top points to the next address after the chunk, which effectively belongs | 446 // top points to the next address after the chunk, which effectively belongs |
460 // to another chunk. See the comment to Page::FromAllocationTop. | 447 // to another chunk. See the comment to Page::FromAllocationTop. |
461 MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1); | 448 MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1); |
462 intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address()); | 449 intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address()); |
463 intptr_t old_mark = 0; | 450 intptr_t old_mark = 0; |
464 do { | 451 do { |
465 old_mark = chunk->high_water_mark_.Value(); | 452 old_mark = chunk->high_water_mark_.Value(); |
466 } while ((new_mark > old_mark) && | 453 } while ((new_mark > old_mark) && |
467 !chunk->high_water_mark_.TrySetValue(old_mark, new_mark)); | 454 !chunk->high_water_mark_.TrySetValue(old_mark, new_mark)); |
468 } | 455 } |
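The loop above is a lock-free "monotonic max": it retries only while the candidate is still larger and another thread won the race to swap. A minimal sketch of the same pattern with std::atomic standing in for V8's AtomicValue wrapper:

    #include <atomic>
    #include <cstdint>

    void UpdateMax(std::atomic<intptr_t>& mark, intptr_t new_mark) {
      intptr_t old_mark = mark.load();
      // compare_exchange_weak reloads old_mark on failure, so each retry
      // re-checks whether our candidate is still the larger value.
      while (new_mark > old_mark &&
             !mark.compare_exchange_weak(old_mark, new_mark)) {
      }
    }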
469 | 456 |
470 Address address() { return reinterpret_cast<Address>(this); } | 457 Address address() { return reinterpret_cast<Address>(this); } |
471 | 458 |
472 bool is_valid() { return address() != NULL; } | 459 bool is_valid() { return address() != NULL; } |
473 | 460 |
474 MemoryChunk* next_chunk() { return next_chunk_.Value(); } | 461 base::Mutex* mutex() { return mutex_; } |
475 | |
476 MemoryChunk* prev_chunk() { return prev_chunk_.Value(); } | |
477 | |
478 void set_next_chunk(MemoryChunk* next) { next_chunk_.SetValue(next); } | |
479 | |
480 void set_prev_chunk(MemoryChunk* prev) { prev_chunk_.SetValue(prev); } | |
481 | |
482 Space* owner() const { | |
483 if ((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) == | |
484 kPageHeaderTag) { | |
485 return reinterpret_cast<Space*>(reinterpret_cast<intptr_t>(owner_) - | |
486 kPageHeaderTag); | |
487 } else { | |
488 return NULL; | |
489 } | |
490 } | |
491 | |
492 void set_owner(Space* space) { | |
493 DCHECK((reinterpret_cast<intptr_t>(space) & kPageHeaderTagMask) == 0); | |
494 owner_ = reinterpret_cast<Address>(space) + kPageHeaderTag; | |
495 DCHECK((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) == | |
496 kPageHeaderTag); | |
497 } | |
498 | |
499 base::VirtualMemory* reserved_memory() { return &reservation_; } | |
500 | |
501 void set_reserved_memory(base::VirtualMemory* reservation) { | |
502 DCHECK_NOT_NULL(reservation); | |
503 reservation_.TakeControl(reservation); | |
504 } | |
505 | 462 |
506 bool Contains(Address addr) { | 463 bool Contains(Address addr) { |
507 return addr >= area_start() && addr < area_end(); | 464 return addr >= area_start() && addr < area_end(); |
508 } | 465 } |
509 | 466 |
510 // Checks whether addr can be a limit of addresses in this page. | 467 // Checks whether |addr| can be a limit of addresses in this page. It's a |
511 // It's a limit if it's in the page, or if it's just after the | 468 // limit if it's in the page, or if it's just after the last byte of the page. |
512 // last byte of the page. | |
513 bool ContainsLimit(Address addr) { | 469 bool ContainsLimit(Address addr) { |
514 return addr >= area_start() && addr <= area_end(); | 470 return addr >= area_start() && addr <= area_end(); |
515 } | 471 } |
516 | 472 |
517 void SetFlag(int flag) { flags_ |= static_cast<uintptr_t>(1) << flag; } | |
518 | |
519 void ClearFlag(int flag) { flags_ &= ~(static_cast<uintptr_t>(1) << flag); } | |
520 | |
521 void SetFlagTo(int flag, bool value) { | |
522 if (value) { | |
523 SetFlag(flag); | |
524 } else { | |
525 ClearFlag(flag); | |
526 } | |
527 } | |
528 | |
529 bool IsFlagSet(int flag) { | |
530 return (flags_ & (static_cast<uintptr_t>(1) << flag)) != 0; | |
531 } | |
532 | |
533 // Set or clear multiple flags at a time. The flags in the mask | |
534 // are set to the value in "flags", the rest retain the current value | |
535 // in flags_. | |
536 void SetFlags(intptr_t flags, intptr_t mask) { | |
537 flags_ = (flags_ & ~mask) | (flags & mask); | |
538 } | |
539 | |
540 // Return all current flags. | |
541 intptr_t GetFlags() { return flags_; } | |
542 | |
543 AtomicValue<ConcurrentSweepingState>& concurrent_sweeping_state() { | 473 AtomicValue<ConcurrentSweepingState>& concurrent_sweeping_state() { |
544 return concurrent_sweeping_; | 474 return concurrent_sweeping_; |
545 } | 475 } |
546 | 476 |
547 AtomicValue<ParallelCompactingState>& parallel_compaction_state() { | 477 AtomicValue<ParallelCompactingState>& parallel_compaction_state() { |
548 return parallel_compaction_; | 478 return parallel_compaction_; |
549 } | 479 } |
550 | 480 |
551 bool TryLock() { return mutex_->TryLock(); } | 481 // Manage live byte count, i.e., count of bytes in black objects. |
552 | 482 inline void ResetLiveBytes(); |
553 base::Mutex* mutex() { return mutex_; } | 483 inline void IncrementLiveBytes(int by); |
554 | |
555 // Manage live byte count (count of bytes known to be live, | |
556 // because they are marked black). | |
557 void ResetLiveBytes() { | |
558 if (FLAG_gc_verbose) { | |
559 PrintF("ResetLiveBytes:%p:%x->0\n", static_cast<void*>(this), | |
560 live_byte_count_); | |
561 } | |
562 live_byte_count_ = 0; | |
563 } | |
564 | |
565 void IncrementLiveBytes(int by) { | |
566 if (FLAG_gc_verbose) { | |
567 printf("UpdateLiveBytes:%p:%x%c=%x->%x\n", static_cast<void*>(this), | |
568 live_byte_count_, ((by < 0) ? '-' : '+'), ((by < 0) ? -by : by), | |
569 live_byte_count_ + by); | |
570 } | |
571 live_byte_count_ += by; | |
572 DCHECK_GE(live_byte_count_, 0); | |
573 DCHECK_LE(static_cast<unsigned>(live_byte_count_), size_); | |
574 } | |
575 | 484 |
576 int LiveBytes() { | 485 int LiveBytes() { |
577 DCHECK_LE(static_cast<unsigned>(live_byte_count_), size_); | 486 DCHECK_LE(static_cast<size_t>(live_byte_count_), size_); |
578 return live_byte_count_; | 487 return live_byte_count_; |
579 } | 488 } |
580 | 489 |
581 void SetLiveBytes(int live_bytes) { | 490 void SetLiveBytes(int live_bytes) { |
582 DCHECK_GE(live_bytes, 0); | 491 DCHECK_GE(live_bytes, 0); |
583 DCHECK_LE(static_cast<unsigned>(live_bytes), size_); | 492 DCHECK_LE(static_cast<size_t>(live_bytes), size_); |
584 live_byte_count_ = live_bytes; | 493 live_byte_count_ = live_bytes; |
585 } | 494 } |
586 | 495 |
587 int write_barrier_counter() { | 496 int write_barrier_counter() { |
588 return static_cast<int>(write_barrier_counter_); | 497 return static_cast<int>(write_barrier_counter_); |
589 } | 498 } |
590 | 499 |
591 void set_write_barrier_counter(int counter) { | 500 void set_write_barrier_counter(int counter) { |
592 write_barrier_counter_ = counter; | 501 write_barrier_counter_ = counter; |
593 } | 502 } |
594 | 503 |
| 504 size_t size() const { return size_; } |
| 505 |
| 506 inline Heap* heap() const { return heap_; } |
| 507 |
| 508 inline SkipList* skip_list() { return skip_list_; } |
| 509 |
| 510 inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; } |
| 511 |
| 512 inline SlotsBuffer* slots_buffer() { return slots_buffer_; } |
| 513 |
| 514 inline SlotsBuffer** slots_buffer_address() { return &slots_buffer_; } |
| 515 |
| 516 inline SlotSet* old_to_new_slots() { return old_to_new_slots_; } |
| 517 |
| 518 void AllocateOldToNewSlots(); |
| 519 void ReleaseOldToNewSlots(); |
| 520 |
| 521 Address area_start() { return area_start_; } |
| 522 Address area_end() { return area_end_; } |
| 523 int area_size() { return static_cast<int>(area_end() - area_start()); } |
| 524 |
| 525 bool CommitArea(size_t requested); |
| 526 |
| 527 // Approximate amount of physical memory committed for this chunk. |
| 528 size_t CommittedPhysicalMemory() { return high_water_mark_.Value(); } |
| 529 |
595 int progress_bar() { | 530 int progress_bar() { |
596 DCHECK(IsFlagSet(HAS_PROGRESS_BAR)); | 531 DCHECK(IsFlagSet(HAS_PROGRESS_BAR)); |
597 return progress_bar_; | 532 return progress_bar_; |
598 } | 533 } |
599 | 534 |
600 void set_progress_bar(int progress_bar) { | 535 void set_progress_bar(int progress_bar) { |
601 DCHECK(IsFlagSet(HAS_PROGRESS_BAR)); | 536 DCHECK(IsFlagSet(HAS_PROGRESS_BAR)); |
602 progress_bar_ = progress_bar; | 537 progress_bar_ = progress_bar; |
603 } | 538 } |
604 | 539 |
605 void ResetProgressBar() { | 540 void ResetProgressBar() { |
606 if (IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) { | 541 if (IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) { |
607 set_progress_bar(0); | 542 set_progress_bar(0); |
608 ClearFlag(MemoryChunk::HAS_PROGRESS_BAR); | 543 ClearFlag(MemoryChunk::HAS_PROGRESS_BAR); |
609 } | 544 } |
610 } | 545 } |
611 | 546 |
612 size_t size() const { return size_; } | |
613 | |
614 void set_size(size_t size) { size_ = size; } | |
615 | |
616 void SetArea(Address area_start, Address area_end) { | |
617 area_start_ = area_start; | |
618 area_end_ = area_end; | |
619 } | |
620 | |
621 Executability executable() { | |
622 return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE; | |
623 } | |
624 | |
625 bool InNewSpace() { | |
626 return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0; | |
627 } | |
628 | |
629 bool InToSpace() { return IsFlagSet(IN_TO_SPACE); } | |
630 | |
631 bool InFromSpace() { return IsFlagSet(IN_FROM_SPACE); } | |
632 | |
633 // Markbits support | |
634 | |
635 inline Bitmap* markbits() { | 547 inline Bitmap* markbits() { |
636 return Bitmap::FromAddress(address() + kHeaderSize); | 548 return Bitmap::FromAddress(address() + kHeaderSize); |
637 } | 549 } |
638 | 550 |
639 void PrintMarkbits() { markbits()->Print(); } | |
640 | |
641 inline uint32_t AddressToMarkbitIndex(Address addr) { | 551 inline uint32_t AddressToMarkbitIndex(Address addr) { |
642 return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2; | 552 return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2; |
643 } | 553 } |
644 | 554 |
645 inline Address MarkbitIndexToAddress(uint32_t index) { | 555 inline Address MarkbitIndexToAddress(uint32_t index) { |
646 return this->address() + (index << kPointerSizeLog2); | 556 return this->address() + (index << kPointerSizeLog2); |
647 } | 557 } |
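With 64-bit pointers kPointerSizeLog2 is 3, so a markbit index is simply the word offset from the chunk base, and the two conversions round-trip exactly for word-aligned addresses. A standalone sketch of the arithmetic (the base address is hypothetical):

    #include <cassert>
    #include <cstdint>

    int main() {
      const int kPointerSizeLog2 = 3;   // 64-bit build assumption
      uintptr_t base = 0x40300000;      // hypothetical chunk address
      uintptr_t addr = base + 24;       // third word of the chunk
      uint32_t index =
          static_cast<uint32_t>(addr - base) >> kPointerSizeLog2;  // == 3
      assert(base + (uintptr_t{index} << kPointerSizeLog2) == addr);
      return 0;
    }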
648 | 558 |
649 void InsertAfter(MemoryChunk* other); | 559 void PrintMarkbits() { markbits()->Print(); } |
650 void Unlink(); | |
651 | 560 |
652 inline Heap* heap() const { return heap_; } | 561 void SetFlag(int flag) { flags_ |= static_cast<uintptr_t>(1) << flag; } |
| 562 |
| 563 void ClearFlag(int flag) { flags_ &= ~(static_cast<uintptr_t>(1) << flag); } |
| 564 |
| 565 bool IsFlagSet(int flag) { |
| 566 return (flags_ & (static_cast<uintptr_t>(1) << flag)) != 0; |
| 567 } |
| 568 |
| 569 // Set or clear multiple flags at a time. The flags in the mask are set to |
| 570 // the value in "flags", the rest retain the current value in |flags_|. |
| 571 void SetFlags(intptr_t flags, intptr_t mask) { |
| 572 flags_ = (flags_ & ~mask) | (flags & mask); |
| 573 } |
| 574 |
| 575 // Return all current flags. |
| 576 intptr_t GetFlags() { return flags_; } |
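SetFlags is a masked read-modify-write: bits inside |mask| are taken from |flags|, bits outside keep their current value, so two related flags can be switched in one assignment. A self-contained model (bit positions are illustrative, not V8's real enum values):

    #include <cassert>
    #include <cstdint>

    int main() {
      const int IN_FROM_SPACE = 0, IN_TO_SPACE = 1, IS_EXECUTABLE = 2;
      intptr_t flags_ =
          (intptr_t{1} << IN_TO_SPACE) | (intptr_t{1} << IS_EXECUTABLE);
      // Flip TO -> FROM in one step; IS_EXECUTABLE is outside the mask
      // and therefore untouched.
      intptr_t mask =
          (intptr_t{1} << IN_FROM_SPACE) | (intptr_t{1} << IN_TO_SPACE);
      intptr_t flags = intptr_t{1} << IN_FROM_SPACE;
      flags_ = (flags_ & ~mask) | (flags & mask);
      assert(flags_ ==
             ((intptr_t{1} << IN_FROM_SPACE) | (intptr_t{1} << IS_EXECUTABLE)));
      return 0;
    }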
653 | 577 |
654 bool NeverEvacuate() { return IsFlagSet(NEVER_EVACUATE); } | 578 bool NeverEvacuate() { return IsFlagSet(NEVER_EVACUATE); } |
655 | 579 |
656 void MarkNeverEvacuate() { SetFlag(NEVER_EVACUATE); } | 580 void MarkNeverEvacuate() { SetFlag(NEVER_EVACUATE); } |
657 | 581 |
658 bool IsEvacuationCandidate() { | 582 bool IsEvacuationCandidate() { |
659 DCHECK(!(IsFlagSet(NEVER_EVACUATE) && IsFlagSet(EVACUATION_CANDIDATE))); | 583 DCHECK(!(IsFlagSet(NEVER_EVACUATE) && IsFlagSet(EVACUATION_CANDIDATE))); |
660 return IsFlagSet(EVACUATION_CANDIDATE); | 584 return IsFlagSet(EVACUATION_CANDIDATE); |
661 } | 585 } |
662 | 586 |
663 bool CanAllocate() { | 587 bool CanAllocate() { |
664 return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE); | 588 return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE); |
665 } | 589 } |
666 | 590 |
667 bool ShouldSkipEvacuationSlotRecording() { | |
668 return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0; | |
669 } | |
670 | |
671 inline SkipList* skip_list() { return skip_list_; } | |
672 | |
673 inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; } | |
674 | |
675 inline SlotsBuffer* slots_buffer() { return slots_buffer_; } | |
676 | |
677 inline SlotsBuffer** slots_buffer_address() { return &slots_buffer_; } | |
678 | |
679 inline SlotSet* old_to_new_slots() { return old_to_new_slots_; } | |
680 | |
681 void AllocateOldToNewSlots(); | |
682 void ReleaseOldToNewSlots(); | |
683 | |
684 void MarkEvacuationCandidate() { | 591 void MarkEvacuationCandidate() { |
685 DCHECK(!IsFlagSet(NEVER_EVACUATE)); | 592 DCHECK(!IsFlagSet(NEVER_EVACUATE)); |
686 DCHECK(slots_buffer_ == NULL); | 593 DCHECK_NULL(slots_buffer_); |
687 SetFlag(EVACUATION_CANDIDATE); | 594 SetFlag(EVACUATION_CANDIDATE); |
688 } | 595 } |
689 | 596 |
690 void ClearEvacuationCandidate() { | 597 void ClearEvacuationCandidate() { |
691 DCHECK(slots_buffer_ == NULL); | 598 DCHECK_NULL(slots_buffer_);
692 ClearFlag(EVACUATION_CANDIDATE); | 599 ClearFlag(EVACUATION_CANDIDATE); |
693 } | 600 } |
694 | 601 |
695 Address area_start() { return area_start_; } | 602 bool ShouldSkipEvacuationSlotRecording() { |
696 Address area_end() { return area_end_; } | 603 return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0; |
697 int area_size() { return static_cast<int>(area_end() - area_start()); } | 604 } |
698 bool CommitArea(size_t requested); | |
699 | 605 |
700 // Approximate amount of physical memory committed for this chunk. | 606 Executability executable() { |
701 size_t CommittedPhysicalMemory() { return high_water_mark_.Value(); } | 607 return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE; |
| 608 } |
| 609 |
| 610 bool InNewSpace() { |
| 611 return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0; |
| 612 } |
| 613 |
| 614 bool InToSpace() { return IsFlagSet(IN_TO_SPACE); } |
| 615 |
| 616 bool InFromSpace() { return IsFlagSet(IN_FROM_SPACE); } |
| 617 |
| 618 MemoryChunk* next_chunk() { return next_chunk_.Value(); } |
| 619 |
| 620 MemoryChunk* prev_chunk() { return prev_chunk_.Value(); } |
| 621 |
| 622 void set_next_chunk(MemoryChunk* next) { next_chunk_.SetValue(next); } |
| 623 |
| 624 void set_prev_chunk(MemoryChunk* prev) { prev_chunk_.SetValue(prev); } |
| 625 |
| 626 Space* owner() const { |
| 627 if ((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) == |
| 628 kPageHeaderTag) { |
| 629 return reinterpret_cast<Space*>(reinterpret_cast<intptr_t>(owner_) - |
| 630 kPageHeaderTag); |
| 631 } else { |
| 632 return nullptr; |
| 633 } |
| 634 } |
| 635 |
| 636 void set_owner(Space* space) { |
| 637 DCHECK((reinterpret_cast<intptr_t>(space) & kPageHeaderTagMask) == 0); |
| 638 owner_ = reinterpret_cast<Address>(space) + kPageHeaderTag; |
| 639 DCHECK((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) == |
| 640 kPageHeaderTag); |
| 641 } |
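The owner accessors rely on low-bit pointer tagging: since Space objects are at least 4-byte aligned, the two low bits of the pointer are free to hold a tag that distinguishes a chunk-header word from heap values. A sketch of the scheme; the tag value 3 mirrors kPageHeaderTag but is an assumption here, as is the bare Space type:

    #include <cassert>
    #include <cstdint>

    struct Space {};
    const intptr_t kPageHeaderTag = 3;      // assumed tag value
    const intptr_t kPageHeaderTagMask = 3;  // low two bits

    intptr_t TagOwner(Space* space) {
      assert((reinterpret_cast<intptr_t>(space) & kPageHeaderTagMask) == 0);
      return reinterpret_cast<intptr_t>(space) + kPageHeaderTag;
    }

    Space* UntagOwner(intptr_t owner) {
      if ((owner & kPageHeaderTagMask) != kPageHeaderTag) return nullptr;
      return reinterpret_cast<Space*>(owner - kPageHeaderTag);
    }

    int main() {
      alignas(4) static Space space;  // guarantees the low bits are clear
      assert(UntagOwner(TagOwner(&space)) == &space);
      return 0;
    }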
| 642 |
| 643 void InsertAfter(MemoryChunk* other); |
| 644 void Unlink(); |
| 645 |
| 646 protected: |
| 647 static MemoryChunk* Initialize(Heap* heap, Address base, size_t size, |
| 648 Address area_start, Address area_end, |
| 649 Executability executable, Space* owner, |
| 650 base::VirtualMemory* reservation); |
702 | 651 |
703 // Should be called when memory chunk is about to be freed. | 652 // Should be called when memory chunk is about to be freed. |
704 void ReleaseAllocatedMemory(); | 653 void ReleaseAllocatedMemory(); |
705 | 654 |
706 protected: | 655 base::VirtualMemory* reserved_memory() { return &reservation_; } |
707 static MemoryChunk* Initialize(Heap* heap, Address base, size_t size, | |
708 Address area_start, Address area_end, | |
709 Executability executable, Space* owner); | |
710 | 656 |
711 size_t size_; | 657 size_t size_; |
712 intptr_t flags_; | 658 intptr_t flags_; |
713 | 659 |
714 // Start and end of allocatable memory on this chunk. | 660 // Start and end of allocatable memory on this chunk. |
715 Address area_start_; | 661 Address area_start_; |
716 Address area_end_; | 662 Address area_end_; |
717 | 663 |
718 // If the chunk needs to remember its memory reservation, it is stored here. | 664 // If the chunk needs to remember its memory reservation, it is stored here. |
719 base::VirtualMemory reservation_; | 665 base::VirtualMemory reservation_; |
| 666 |
720 // The identity of the owning space. This is tagged as a failure pointer, but | 667 // The identity of the owning space. This is tagged as a failure pointer, but |
721 // no failure can be in an object, so this can be distinguished from any entry | 668 // no failure can be in an object, so this can be distinguished from any entry |
722 // in a fixed array. | 669 // in a fixed array. |
723 Address owner_; | 670 Address owner_; |
| 671 |
724 Heap* heap_; | 672 Heap* heap_; |
| 673 |
725 // Used by the incremental marker to keep track of the scanning progress in | 674 // Used by the incremental marker to keep track of the scanning progress in |
726 // large objects that have a progress bar and are scanned in increments. | 675 // large objects that have a progress bar and are scanned in increments. |
727 int progress_bar_; | 676 int progress_bar_; |
| 677 |
728 // Count of bytes marked black on page. | 678 // Count of bytes marked black on page. |
729 int live_byte_count_; | 679 int live_byte_count_; |
| 680 |
730 SlotsBuffer* slots_buffer_; | 681 SlotsBuffer* slots_buffer_; |
| 682 |
731 // A single slot set for small pages (of size kPageSize) or an array of slot | 683 // A single slot set for small pages (of size kPageSize) or an array of slot |
732 // sets for large pages. In the latter case the number of entries in the array | 684 // sets for large pages. In the latter case the number of entries in the array |
733 // is ceil(size() / kPageSize). | 685 // is ceil(size() / kPageSize). |
734 SlotSet* old_to_new_slots_; | 686 SlotSet* old_to_new_slots_; |
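For example, assuming kPageSize is 1 MB, a 3.5 MB large-object chunk would carry ceil(3.5 / 1) = 4 slot sets, one per page-sized region, while a regular small page carries exactly one.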
| 687 |
735 SkipList* skip_list_; | 688 SkipList* skip_list_; |
| 689 |
736 intptr_t write_barrier_counter_; | 690 intptr_t write_barrier_counter_; |
| 691 |
737 // Assuming the initial allocation on a page is sequential, | 692 // Assuming the initial allocation on a page is sequential, |
738 // count the highest number of bytes ever allocated on the page. | 693 // count the highest number of bytes ever allocated on the page. |
739 AtomicValue<intptr_t> high_water_mark_; | 694 AtomicValue<intptr_t> high_water_mark_; |
740 | 695 |
741 base::Mutex* mutex_; | 696 base::Mutex* mutex_; |
| 697 |
742 AtomicValue<ConcurrentSweepingState> concurrent_sweeping_; | 698 AtomicValue<ConcurrentSweepingState> concurrent_sweeping_; |
743 AtomicValue<ParallelCompactingState> parallel_compaction_; | 699 AtomicValue<ParallelCompactingState> parallel_compaction_; |
744 | 700 |
745 // PagedSpace free-list statistics. | 701 // PagedSpace free-list statistics. |
746 AtomicNumber<intptr_t> available_in_small_free_list_; | 702 AtomicNumber<intptr_t> available_in_small_free_list_; |
747 AtomicNumber<intptr_t> available_in_medium_free_list_; | 703 AtomicNumber<intptr_t> available_in_medium_free_list_; |
748 AtomicNumber<intptr_t> available_in_large_free_list_; | 704 AtomicNumber<intptr_t> available_in_large_free_list_; |
749 AtomicNumber<intptr_t> available_in_huge_free_list_; | 705 AtomicNumber<intptr_t> available_in_huge_free_list_; |
750 AtomicNumber<intptr_t> non_available_small_blocks_; | 706 AtomicNumber<intptr_t> non_available_small_blocks_; |
751 | 707 |
(...skipping 2310 matching lines...)
3062 count = 0; | 3018 count = 0; |
3063 } | 3019 } |
3064 // Must be small, since an iteration is used for lookup. | 3020 // Must be small, since an iteration is used for lookup. |
3065 static const int kMaxComments = 64; | 3021 static const int kMaxComments = 64; |
3066 }; | 3022 }; |
3067 #endif | 3023 #endif |
3068 } // namespace internal | 3024 } // namespace internal |
3069 } // namespace v8 | 3025 } // namespace v8 |
3070 | 3026 |
3071 #endif // V8_HEAP_SPACES_H_ | 3027 #endif // V8_HEAP_SPACES_H_ |