OLD | NEW |
1 // Copyright 2010 the V8 project authors. All rights reserved. | 1 // Copyright 2010 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 38 matching lines...) |
49 | 49 |
50 | 50 |
51 namespace v8 { | 51 namespace v8 { |
52 namespace internal { | 52 namespace internal { |
53 | 53 |
54 | 54 |
55 String* Heap::hidden_symbol_; | 55 String* Heap::hidden_symbol_; |
56 Object* Heap::roots_[Heap::kRootListLength]; | 56 Object* Heap::roots_[Heap::kRootListLength]; |
57 Object* Heap::global_contexts_list_; | 57 Object* Heap::global_contexts_list_; |
58 | 58 |
| 59 |
59 NewSpace Heap::new_space_; | 60 NewSpace Heap::new_space_; |
60 OldSpace* Heap::old_pointer_space_ = NULL; | 61 OldSpace* Heap::old_pointer_space_ = NULL; |
61 OldSpace* Heap::old_data_space_ = NULL; | 62 OldSpace* Heap::old_data_space_ = NULL; |
62 OldSpace* Heap::code_space_ = NULL; | 63 OldSpace* Heap::code_space_ = NULL; |
63 MapSpace* Heap::map_space_ = NULL; | 64 MapSpace* Heap::map_space_ = NULL; |
64 CellSpace* Heap::cell_space_ = NULL; | 65 CellSpace* Heap::cell_space_ = NULL; |
65 LargeObjectSpace* Heap::lo_space_ = NULL; | 66 LargeObjectSpace* Heap::lo_space_ = NULL; |
66 | 67 |
| 68 static const intptr_t kMinimumPromotionLimit = 2 * MB; |
| 69 static const intptr_t kMinimumAllocationLimit = 8 * MB; |
| 70 |
67 intptr_t Heap::old_gen_promotion_limit_ = kMinimumPromotionLimit; | 71 intptr_t Heap::old_gen_promotion_limit_ = kMinimumPromotionLimit; |
68 intptr_t Heap::old_gen_allocation_limit_ = kMinimumAllocationLimit; | 72 intptr_t Heap::old_gen_allocation_limit_ = kMinimumAllocationLimit; |
69 | 73 |
70 int Heap::old_gen_exhausted_ = false; | 74 int Heap::old_gen_exhausted_ = false; |
71 | 75 |
72 int Heap::amount_of_external_allocated_memory_ = 0; | 76 int Heap::amount_of_external_allocated_memory_ = 0; |
73 int Heap::amount_of_external_allocated_memory_at_last_global_gc_ = 0; | 77 int Heap::amount_of_external_allocated_memory_at_last_global_gc_ = 0; |
74 | 78 |
75 // semispace_size_ should be a power of 2 and old_generation_size_ should be | 79 // semispace_size_ should be a power of 2 and old_generation_size_ should be |
76 // a multiple of Page::kPageSize. | 80 // a multiple of Page::kPageSize. |
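Aside (not part of the patch): the constraints named in the comment above, that the semispace size be a power of two and the old-generation size a multiple of Page::kPageSize, can be checked with simple bit and modulo arithmetic. A minimal sketch, assuming intptr_t-sized values; the helper names are illustrative, not V8 API:

static inline bool IsPowerOfTwo(intptr_t x) {
  // A power of two has exactly one bit set, so x & (x - 1) clears it to zero.
  return x > 0 && (x & (x - 1)) == 0;
}

static inline bool IsPageMultiple(intptr_t size, intptr_t page_size) {
  return page_size > 0 && size % page_size == 0;
}

// e.g. ASSERT(IsPowerOfTwo(semispace_size));
// e.g. ASSERT(IsPageMultiple(old_generation_size, Page::kPageSize));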
(...skipping 331 matching lines...) |
408 Counters::number_of_symbols.Set(symbol_table()->NumberOfElements()); | 412 Counters::number_of_symbols.Set(symbol_table()->NumberOfElements()); |
409 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) | 413 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) |
410 ReportStatisticsAfterGC(); | 414 ReportStatisticsAfterGC(); |
411 #endif | 415 #endif |
412 #ifdef ENABLE_DEBUGGER_SUPPORT | 416 #ifdef ENABLE_DEBUGGER_SUPPORT |
413 Debug::AfterGarbageCollection(); | 417 Debug::AfterGarbageCollection(); |
414 #endif | 418 #endif |
415 } | 419 } |
416 | 420 |
417 | 421 |
418 void Heap::CollectAllGarbage(bool force_compaction, | 422 void Heap::CollectAllGarbage(bool force_compaction) { |
419 CollectionPolicy collectionPolicy) { | |
420 // Since we are ignoring the return value, the exact choice of space does | 423 // Since we are ignoring the return value, the exact choice of space does |
421 // not matter, so long as we do not specify NEW_SPACE, which would not | 424 // not matter, so long as we do not specify NEW_SPACE, which would not |
422 // cause a full GC. | 425 // cause a full GC. |
423 MarkCompactCollector::SetForceCompaction(force_compaction); | 426 MarkCompactCollector::SetForceCompaction(force_compaction); |
424 CollectGarbage(OLD_POINTER_SPACE, collectionPolicy); | 427 CollectGarbage(OLD_POINTER_SPACE); |
425 MarkCompactCollector::SetForceCompaction(false); | 428 MarkCompactCollector::SetForceCompaction(false); |
426 } | 429 } |
427 | 430 |
428 | 431 |
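The new version (right column) drops the CollectionPolicy parameter from CollectAllGarbage and removes the aggressive CollectAllAvailableGarbage entry point, so full collections are always requested the same way. A minimal before/after sketch of a call site under the two signatures (the caller itself is hypothetical):

// Before this patch: callers could ask for the aggressive policy.
//   Heap::CollectAllGarbage(true, Heap::AGGRESSIVE);
// After this patch: only the force_compaction flag remains.
Heap::CollectAllGarbage(true);   // force compaction
Heap::CollectAllGarbage(false);  // full GC, compaction left to the collector's heuristics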
429 void Heap::CollectAllAvailableGarbage() { | 432 void Heap::CollectGarbage(AllocationSpace space) { |
430 CompilationCache::Clear(); | |
431 CollectAllGarbage(true, AGGRESSIVE); | |
432 } | |
433 | |
434 | |
435 void Heap::CollectGarbage(AllocationSpace space, | |
436 CollectionPolicy collectionPolicy) { | |
437 // The VM is in the GC state until exiting this function. | 433 // The VM is in the GC state until exiting this function. |
438 VMState state(GC); | 434 VMState state(GC); |
439 | 435 |
440 #ifdef DEBUG | 436 #ifdef DEBUG |
441 // Reset the allocation timeout to the GC interval, but make sure to | 437 // Reset the allocation timeout to the GC interval, but make sure to |
442 // allow at least a few allocations after a collection. The reason | 438 // allow at least a few allocations after a collection. The reason |
443 // for this is that we have a lot of allocation sequences and we | 439 // for this is that we have a lot of allocation sequences and we |
444 // assume that a garbage collection will allow the subsequent | 440 // assume that a garbage collection will allow the subsequent |
445 // allocation attempts to go through. | 441 // allocation attempts to go through. |
446 allocation_timeout_ = Max(6, FLAG_gc_interval); | 442 allocation_timeout_ = Max(6, FLAG_gc_interval); |
447 #endif | 443 #endif |
448 | 444 |
449 { GCTracer tracer; | 445 { GCTracer tracer; |
450 GarbageCollectionPrologue(); | 446 GarbageCollectionPrologue(); |
451 // The GC count was incremented in the prologue. Tell the tracer about | 447 // The GC count was incremented in the prologue. Tell the tracer about |
452 // it. | 448 // it. |
453 tracer.set_gc_count(gc_count_); | 449 tracer.set_gc_count(gc_count_); |
454 | 450 |
455 GarbageCollector collector = SelectGarbageCollector(space); | 451 GarbageCollector collector = SelectGarbageCollector(space); |
456 // Tell the tracer which collector we've selected. | 452 // Tell the tracer which collector we've selected. |
457 tracer.set_collector(collector); | 453 tracer.set_collector(collector); |
458 | 454 |
459 HistogramTimer* rate = (collector == SCAVENGER) | 455 HistogramTimer* rate = (collector == SCAVENGER) |
460 ? &Counters::gc_scavenger | 456 ? &Counters::gc_scavenger |
461 : &Counters::gc_compactor; | 457 : &Counters::gc_compactor; |
462 rate->Start(); | 458 rate->Start(); |
463 PerformGarbageCollection(collector, &tracer, collectionPolicy); | 459 PerformGarbageCollection(collector, &tracer); |
464 rate->Stop(); | 460 rate->Stop(); |
465 | 461 |
466 GarbageCollectionEpilogue(); | 462 GarbageCollectionEpilogue(); |
467 } | 463 } |
468 | 464 |
469 | 465 |
470 #ifdef ENABLE_LOGGING_AND_PROFILING | 466 #ifdef ENABLE_LOGGING_AND_PROFILING |
471 if (FLAG_log_gc) HeapProfiler::WriteSample(); | 467 if (FLAG_log_gc) HeapProfiler::WriteSample(); |
472 if (CpuProfiler::is_profiling()) CpuProfiler::ProcessMovedFunctions(); | 468 if (CpuProfiler::is_profiling()) CpuProfiler::ProcessMovedFunctions(); |
473 #endif | 469 #endif |
474 } | 470 } |
475 | 471 |
476 | 472 |
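For context, the space argument to CollectGarbage only steers SelectGarbageCollector: NEW_SPACE normally yields a scavenge, while any old-generation space selects the mark-compact collector, which is why CollectAllGarbage above passes OLD_POINTER_SPACE. A hedged sketch of the two typical cases:

Heap::CollectGarbage(NEW_SPACE);          // usually just a scavenge of the young generation
Heap::CollectGarbage(OLD_POINTER_SPACE);  // full mark-compact collection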
477 void Heap::PerformScavenge() { | 473 void Heap::PerformScavenge() { |
478 GCTracer tracer; | 474 GCTracer tracer; |
479 PerformGarbageCollection(SCAVENGER, &tracer, NORMAL); | 475 PerformGarbageCollection(SCAVENGER, &tracer); |
480 } | 476 } |
481 | 477 |
482 | 478 |
483 #ifdef DEBUG | 479 #ifdef DEBUG |
484 // Helper class for verifying the symbol table. | 480 // Helper class for verifying the symbol table. |
485 class SymbolTableVerifier : public ObjectVisitor { | 481 class SymbolTableVerifier : public ObjectVisitor { |
486 public: | 482 public: |
487 SymbolTableVerifier() { } | 483 SymbolTableVerifier() { } |
488 void VisitPointers(Object** start, Object** end) { | 484 void VisitPointers(Object** start, Object** end) { |
489 // Visit all HeapObject pointers in [start, end). | 485 // Visit all HeapObject pointers in [start, end). |
(...skipping 164 matching lines...) |
654 } else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) { | 650 } else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) { |
655 set_survival_rate_trend(INCREASING); | 651 set_survival_rate_trend(INCREASING); |
656 } else { | 652 } else { |
657 set_survival_rate_trend(STABLE); | 653 set_survival_rate_trend(STABLE); |
658 } | 654 } |
659 | 655 |
660 survival_rate_ = survival_rate; | 656 survival_rate_ = survival_rate; |
661 } | 657 } |
662 | 658 |
663 void Heap::PerformGarbageCollection(GarbageCollector collector, | 659 void Heap::PerformGarbageCollection(GarbageCollector collector, |
664 GCTracer* tracer, | 660 GCTracer* tracer) { |
665 CollectionPolicy collectionPolicy) { | |
666 if (collector != SCAVENGER) { | 661 if (collector != SCAVENGER) { |
667 PROFILE(CodeMovingGCEvent()); | 662 PROFILE(CodeMovingGCEvent()); |
668 } | 663 } |
669 | 664 |
670 VerifySymbolTable(); | 665 VerifySymbolTable(); |
671 if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) { | 666 if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) { |
672 ASSERT(!allocation_allowed_); | 667 ASSERT(!allocation_allowed_); |
673 GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL); | 668 GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL); |
674 global_gc_prologue_callback_(); | 669 global_gc_prologue_callback_(); |
675 } | 670 } |
(...skipping 13 matching lines...) |
689 | 684 |
690 if (collector == MARK_COMPACTOR) { | 685 if (collector == MARK_COMPACTOR) { |
691 // Perform mark-sweep with optional compaction. | 686 // Perform mark-sweep with optional compaction. |
692 MarkCompact(tracer); | 687 MarkCompact(tracer); |
693 | 688 |
694 bool high_survival_rate_during_scavenges = IsHighSurvivalRate() && | 689 bool high_survival_rate_during_scavenges = IsHighSurvivalRate() && |
695 IsStableOrIncreasingSurvivalTrend(); | 690 IsStableOrIncreasingSurvivalTrend(); |
696 | 691 |
697 UpdateSurvivalRateTrend(start_new_space_size); | 692 UpdateSurvivalRateTrend(start_new_space_size); |
698 | 693 |
699 UpdateOldSpaceLimits(); | 694 intptr_t old_gen_size = PromotedSpaceSize(); |
| 695 old_gen_promotion_limit_ = |
| 696 old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3); |
| 697 old_gen_allocation_limit_ = |
| 698 old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2); |
700 | 699 |
701 // Major GC would invoke weak handle callbacks on weakly reachable | 700 if (high_survival_rate_during_scavenges && |
702 // handles, but won't collect weakly reachable objects until next | 701 IsStableOrIncreasingSurvivalTrend()) { |
703 // major GC. Therefore if we collect aggressively and weak handle callback | 702 // Stable high survival rates of young objects both during partial and |
704 // has been invoked, we rerun major GC to release objects which become | 703 // full collection indicate that mutator is either building or modifying |
705 // garbage. | 704 // a structure with a long lifetime. |
706 if (collectionPolicy == AGGRESSIVE) { | 705 // In this case we aggressively raise old generation memory limits to |
707 // Note: as weak callbacks can execute arbitrary code, we cannot | 706 // postpone subsequent mark-sweep collection and thus trade memory |
708 // hope that eventually there will be no weak callbacks invocations. | 707 // space for the mutation speed. |
709 // Therefore stop recollecting after several attempts. | 708 old_gen_promotion_limit_ *= 2; |
710 const int kMaxNumberOfAttempts = 7; | 709 old_gen_allocation_limit_ *= 2; |
711 for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) { | |
712 { DisableAssertNoAllocation allow_allocation; | |
713 GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL); | |
714 if (!GlobalHandles::PostGarbageCollectionProcessing()) break; | |
715 } | |
716 MarkCompact(tracer); | |
717 // Weak handle callbacks can allocate data, so keep limits correct. | |
718 UpdateOldSpaceLimits(); | |
719 } | |
720 } else { | |
721 if (high_survival_rate_during_scavenges && | |
722 IsStableOrIncreasingSurvivalTrend()) { | |
723 // Stable high survival rates of young objects both during partial and | |
724 // full collection indicate that mutator is either building or modifying | |
725 // a structure with a long lifetime. | |
726 // In this case we aggressively raise old generation memory limits to | |
727 // postpone subsequent mark-sweep collection and thus trade memory | |
728 // space for the mutation speed. | |
729 old_gen_promotion_limit_ *= 2; | |
730 old_gen_allocation_limit_ *= 2; | |
731 } | |
732 } | 710 } |
733 | 711 |
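The new code (right column) recomputes the old-generation limits inline instead of calling the removed UpdateOldSpaceLimits helper, then doubles them when scavenges keep showing a stable, high survival rate. A worked example of the arithmetic, with an assumed promoted size of 30 MB after mark-compact:

// Assumed value for illustration only.
intptr_t old_gen_size = 30 * MB;
// old_gen_promotion_limit_  = 30 MB + Max(2 MB, 30 MB / 3) = 40 MB
// old_gen_allocation_limit_ = 30 MB + Max(8 MB, 30 MB / 2) = 45 MB
// With a stable, high survival rate both limits are then doubled,
// giving 80 MB and 90 MB, to postpone the next mark-sweep.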
734 { DisableAssertNoAllocation allow_allocation; | 712 old_gen_exhausted_ = false; |
735 GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL); | |
736 GlobalHandles::PostGarbageCollectionProcessing(); | |
737 } | |
738 } else { | 713 } else { |
739 tracer_ = tracer; | 714 tracer_ = tracer; |
740 Scavenge(); | 715 Scavenge(); |
741 tracer_ = NULL; | 716 tracer_ = NULL; |
742 | 717 |
743 UpdateSurvivalRateTrend(start_new_space_size); | 718 UpdateSurvivalRateTrend(start_new_space_size); |
744 } | 719 } |
745 | 720 |
746 Counters::objs_since_last_young.Set(0); | 721 Counters::objs_since_last_young.Set(0); |
747 | 722 |
| 723 if (collector == MARK_COMPACTOR) { |
| 724 DisableAssertNoAllocation allow_allocation; |
| 725 GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL); |
| 726 GlobalHandles::PostGarbageCollectionProcessing(); |
| 727 } |
| 728 |
748 // Update relocatables. | 729 // Update relocatables. |
749 Relocatable::PostGarbageCollectionProcessing(); | 730 Relocatable::PostGarbageCollectionProcessing(); |
750 | 731 |
751 if (collector == MARK_COMPACTOR) { | 732 if (collector == MARK_COMPACTOR) { |
752 // Register the amount of external allocated memory. | 733 // Register the amount of external allocated memory. |
753 amount_of_external_allocated_memory_at_last_global_gc_ = | 734 amount_of_external_allocated_memory_at_last_global_gc_ = |
754 amount_of_external_allocated_memory_; | 735 amount_of_external_allocated_memory_; |
755 } | 736 } |
756 | 737 |
757 GCCallbackFlags callback_flags = tracer->is_compacting() | 738 GCCallbackFlags callback_flags = tracer->is_compacting() |
(...skipping 4268 matching lines...) |
5026 void ExternalStringTable::TearDown() { | 5007 void ExternalStringTable::TearDown() { |
5027 new_space_strings_.Free(); | 5008 new_space_strings_.Free(); |
5028 old_space_strings_.Free(); | 5009 old_space_strings_.Free(); |
5029 } | 5010 } |
5030 | 5011 |
5031 | 5012 |
5032 List<Object*> ExternalStringTable::new_space_strings_; | 5013 List<Object*> ExternalStringTable::new_space_strings_; |
5033 List<Object*> ExternalStringTable::old_space_strings_; | 5014 List<Object*> ExternalStringTable::old_space_strings_; |
5034 | 5015 |
5035 } } // namespace v8::internal | 5016 } } // namespace v8::internal |