OLD | NEW |
---|---|
1 // Copyright 2009 the V8 project authors. All rights reserved. | 1 // Copyright 2009 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 61 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
72 int Heap::old_gen_promotion_limit_ = kMinimumPromotionLimit; | 72 int Heap::old_gen_promotion_limit_ = kMinimumPromotionLimit; |
73 int Heap::old_gen_allocation_limit_ = kMinimumAllocationLimit; | 73 int Heap::old_gen_allocation_limit_ = kMinimumAllocationLimit; |
74 | 74 |
75 int Heap::old_gen_exhausted_ = false; | 75 int Heap::old_gen_exhausted_ = false; |
76 | 76 |
77 int Heap::amount_of_external_allocated_memory_ = 0; | 77 int Heap::amount_of_external_allocated_memory_ = 0; |
78 int Heap::amount_of_external_allocated_memory_at_last_global_gc_ = 0; | 78 int Heap::amount_of_external_allocated_memory_at_last_global_gc_ = 0; |
79 | 79 |
80 // semispace_size_ should be a power of 2 and old_generation_size_ should be | 80 // semispace_size_ should be a power of 2 and old_generation_size_ should be |
81 // a multiple of Page::kPageSize. | 81 // a multiple of Page::kPageSize. |
82 int Heap::semispace_size_ = 2*MB; | 82 int Heap::semispace_size_ = 8*MB; |
83 int Heap::old_generation_size_ = 512*MB; | 83 int Heap::old_generation_size_ = 512*MB; |
84 int Heap::initial_semispace_size_ = 256*KB; | 84 int Heap::initial_semispace_size_ = 512*KB; |
85 | 85 |
86 GCCallback Heap::global_gc_prologue_callback_ = NULL; | 86 GCCallback Heap::global_gc_prologue_callback_ = NULL; |
87 GCCallback Heap::global_gc_epilogue_callback_ = NULL; | 87 GCCallback Heap::global_gc_epilogue_callback_ = NULL; |
88 | 88 |
89 // Variables set based on semispace_size_ and old_generation_size_ in | 89 // Variables set based on semispace_size_ and old_generation_size_ in |
90 // ConfigureHeap. | 90 // ConfigureHeap. |
91 int Heap::young_generation_size_ = 0; // Will be 2 * semispace_size_. | 91 int Heap::young_generation_size_ = 0; // Will be 2 * semispace_size_. |
92 | 92 |
93 // Double the new space after this many scavenge collections. | 93 int Heap::survived_since_last_expansion_ = 0; |
94 int Heap::new_space_growth_limit_ = 8; | 94 |
95 int Heap::scavenge_count_ = 0; | |
96 Heap::HeapState Heap::gc_state_ = NOT_IN_GC; | 95 Heap::HeapState Heap::gc_state_ = NOT_IN_GC; |
97 | 96 |
98 int Heap::mc_count_ = 0; | 97 int Heap::mc_count_ = 0; |
99 int Heap::gc_count_ = 0; | 98 int Heap::gc_count_ = 0; |
100 | 99 |
101 int Heap::always_allocate_scope_depth_ = 0; | 100 int Heap::always_allocate_scope_depth_ = 0; |
102 bool Heap::context_disposed_pending_ = false; | 101 bool Heap::context_disposed_pending_ = false; |
103 | 102 |
104 #ifdef DEBUG | 103 #ifdef DEBUG |
105 bool Heap::allocation_allowed_ = true; | 104 bool Heap::allocation_allowed_ = true; |
(...skipping 308 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
414 global_gc_prologue_callback_(); | 413 global_gc_prologue_callback_(); |
415 } | 414 } |
416 | 415 |
417 if (collector == MARK_COMPACTOR) { | 416 if (collector == MARK_COMPACTOR) { |
418 MarkCompact(tracer); | 417 MarkCompact(tracer); |
419 | 418 |
420 int old_gen_size = PromotedSpaceSize(); | 419 int old_gen_size = PromotedSpaceSize(); |
421 old_gen_promotion_limit_ = | 420 old_gen_promotion_limit_ = |
422 old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3); | 421 old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3); |
423 old_gen_allocation_limit_ = | 422 old_gen_allocation_limit_ = |
424 old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 3); | 423 old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2); |
425 old_gen_exhausted_ = false; | 424 old_gen_exhausted_ = false; |
426 | 425 |
427 // If we have used the mark-compact collector to collect the new | 426 // If we have used the mark-compact collector to collect the new |
428 // space, and it has not compacted the new space, we force a | 427 // space, and it has not compacted the new space, we force a |
429 // separate scavenge collection. This is a hack. It covers the | 428 // separate scavenge collection. This is a hack. It covers the |
430 // case where (1) a new space collection was requested, (2) the | 429 // case where (1) a new space collection was requested, (2) the |
431 // collector selection policy selected the mark-compact collector, | 430 // collector selection policy selected the mark-compact collector, |
432 // and (3) the mark-compact collector policy selected not to | 431 // and (3) the mark-compact collector policy selected not to |
433 // compact the new space. In that case, there is no more (usable) | 432 // compact the new space. In that case, there is no more (usable) |
434 // free space in the new space after the collection compared to | 433 // free space in the new space after the collection compared to |
(...skipping 182 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
617 void Heap::Scavenge() { | 616 void Heap::Scavenge() { |
618 #ifdef DEBUG | 617 #ifdef DEBUG |
619 if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers(); | 618 if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers(); |
620 #endif | 619 #endif |
621 | 620 |
622 gc_state_ = SCAVENGE; | 621 gc_state_ = SCAVENGE; |
623 | 622 |
624 // Implements Cheney's copying algorithm | 623 // Implements Cheney's copying algorithm |
625 LOG(ResourceEvent("scavenge", "begin")); | 624 LOG(ResourceEvent("scavenge", "begin")); |
626 | 625 |
627 scavenge_count_++; | 626 // Used for updating survived_since_last_expansion_ at function end. |
627 int survived_watermark = PromotedSpaceSize(); | |
628 | |
628 if (new_space_.Capacity() < new_space_.MaximumCapacity() && | 629 if (new_space_.Capacity() < new_space_.MaximumCapacity() && |
629 scavenge_count_ > new_space_growth_limit_) { | 630 survived_since_last_expansion_ > new_space_.Capacity()) { |
630 // Double the size of the new space, and double the limit. The next | 631 // Double the size of new space if there is room to grow and enough |
631 // doubling attempt will occur after the current new_space_growth_limit_ | 632 // data has survived scavenge since the last expansion. |
632 // more collections. | |
633 // TODO(1240712): NewSpace::Double has a return value which is | 633 // TODO(1240712): NewSpace::Double has a return value which is |
634 // ignored here. | 634 // ignored here. |
635 new_space_.Double(); | 635 new_space_.Double(); |
636 new_space_growth_limit_ *= 2; | 636 survived_since_last_expansion_ = 0; |
637 } | 637 } |
638 | 638 |
639 // Flip the semispaces. After flipping, to space is empty, from space has | 639 // Flip the semispaces. After flipping, to space is empty, from space has |
640 // live objects. | 640 // live objects. |
641 new_space_.Flip(); | 641 new_space_.Flip(); |
642 new_space_.ResetAllocationInfo(); | 642 new_space_.ResetAllocationInfo(); |
643 | 643 |
644 // We need to sweep newly copied objects which can be either in the | 644 // We need to sweep newly copied objects which can be either in the |
645 // to space or promoted to the old generation. For to-space | 645 // to space or promoted to the old generation. For to-space |
646 // objects, we treat the bottom of the to space as a queue. Newly | 646 // objects, we treat the bottom of the to space as a queue. Newly |
(...skipping 83 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
730 UpdateRSet(target); | 730 UpdateRSet(target); |
731 } | 731 } |
732 | 732 |
733 // Take another spin if there are now unswept objects in new space | 733 // Take another spin if there are now unswept objects in new space |
734 // (there are currently no more unswept promoted objects). | 734 // (there are currently no more unswept promoted objects). |
735 } while (new_space_front < new_space_.top()); | 735 } while (new_space_front < new_space_.top()); |
736 | 736 |
737 // Set age mark. | 737 // Set age mark. |
738 new_space_.set_age_mark(new_space_.top()); | 738 new_space_.set_age_mark(new_space_.top()); |
739 | 739 |
740 // Update how much has survived scavenge. | |
Kasper Lund
2009/06/12 11:04:45
survivived -> survived
| |
741 survived_since_last_expansion_ += | |
742 PromotedSpaceSize() + new_space_.Size() - survived_watermark; | |
Kasper Lund
2009/06/12 11:04:45
Maybe reorder these to make it (crystal) clear that... [comment truncated in this capture]
743 | |
740 LOG(ResourceEvent("scavenge", "end")); | 744 LOG(ResourceEvent("scavenge", "end")); |
741 | 745 |
742 gc_state_ = NOT_IN_GC; | 746 gc_state_ = NOT_IN_GC; |
743 } | 747 } |
744 | 748 |
745 | 749 |
746 void Heap::ClearRSetRange(Address start, int size_in_bytes) { | 750 void Heap::ClearRSetRange(Address start, int size_in_bytes) { |
747 uint32_t start_bit; | 751 uint32_t start_bit; |
748 Address start_word_address = | 752 Address start_word_address = |
749 Page::ComputeRSetBitPosition(start, 0, &start_bit); | 753 Page::ComputeRSetBitPosition(start, 0, &start_bit); |
(...skipping 2721 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
3471 #ifdef DEBUG | 3475 #ifdef DEBUG |
3472 bool Heap::GarbageCollectionGreedyCheck() { | 3476 bool Heap::GarbageCollectionGreedyCheck() { |
3473 ASSERT(FLAG_gc_greedy); | 3477 ASSERT(FLAG_gc_greedy); |
3474 if (Bootstrapper::IsActive()) return true; | 3478 if (Bootstrapper::IsActive()) return true; |
3475 if (disallow_allocation_failure()) return true; | 3479 if (disallow_allocation_failure()) return true; |
3476 return CollectGarbage(0, NEW_SPACE); | 3480 return CollectGarbage(0, NEW_SPACE); |
3477 } | 3481 } |
3478 #endif | 3482 #endif |
3479 | 3483 |
3480 } } // namespace v8::internal | 3484 } } // namespace v8::internal |
OLD | NEW |