Chromium Code Reviews

Unified Diff: src/heap.cc

Issue 3959001: Revert r5455 from the 2.4 branch.... (Closed) Base URL: http://v8.googlecode.com/svn/branches/2.4/
Patch Set: Created 10 years, 2 months ago
 // Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 37 matching lines...)
 #endif
 
 
 namespace v8 {
 namespace internal {
 
 
 String* Heap::hidden_symbol_;
 Object* Heap::roots_[Heap::kRootListLength];
 
+
 NewSpace Heap::new_space_;
 OldSpace* Heap::old_pointer_space_ = NULL;
 OldSpace* Heap::old_data_space_ = NULL;
 OldSpace* Heap::code_space_ = NULL;
 MapSpace* Heap::map_space_ = NULL;
 CellSpace* Heap::cell_space_ = NULL;
 LargeObjectSpace* Heap::lo_space_ = NULL;
 
+static const intptr_t kMinimumPromotionLimit = 2 * MB;
+static const intptr_t kMinimumAllocationLimit = 8 * MB;
+
 intptr_t Heap::old_gen_promotion_limit_ = kMinimumPromotionLimit;
 intptr_t Heap::old_gen_allocation_limit_ = kMinimumAllocationLimit;
 
 int Heap::old_gen_exhausted_ = false;
 
 int Heap::amount_of_external_allocated_memory_ = 0;
 int Heap::amount_of_external_allocated_memory_at_last_global_gc_ = 0;
 
 // semispace_size_ should be a power of 2 and old_generation_size_ should be
 // a multiple of Page::kPageSize.
(...skipping 331 matching lines...)
   Counters::number_of_symbols.Set(symbol_table()->NumberOfElements());
 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
   ReportStatisticsAfterGC();
 #endif
 #ifdef ENABLE_DEBUGGER_SUPPORT
   Debug::AfterGarbageCollection();
 #endif
 }
 
 
-void Heap::CollectAllGarbage(bool force_compaction,
-                             CollectionPolicy collectionPolicy) {
+void Heap::CollectAllGarbage(bool force_compaction) {
   // Since we are ignoring the return value, the exact choice of space does
   // not matter, so long as we do not specify NEW_SPACE, which would not
   // cause a full GC.
   MarkCompactCollector::SetForceCompaction(force_compaction);
-  CollectGarbage(OLD_POINTER_SPACE, collectionPolicy);
+  CollectGarbage(OLD_POINTER_SPACE);
   MarkCompactCollector::SetForceCompaction(false);
 }
 
 
-void Heap::CollectAllAvailableGarbage() {
-  CompilationCache::Clear();
-  CollectAllGarbage(true, AGGRESSIVE);
-}
-
-
-void Heap::CollectGarbage(AllocationSpace space,
-                          CollectionPolicy collectionPolicy) {
+void Heap::CollectGarbage(AllocationSpace space) {
   // The VM is in the GC state until exiting this function.
   VMState state(GC);
 
 #ifdef DEBUG
   // Reset the allocation timeout to the GC interval, but make sure to
   // allow at least a few allocations after a collection. The reason
   // for this is that we have a lot of allocation sequences and we
   // assume that a garbage collection will allow the subsequent
   // allocation attempts to go through.
   allocation_timeout_ = Max(6, FLAG_gc_interval);
 #endif
 
   { GCTracer tracer;
     GarbageCollectionPrologue();
     // The GC count was incremented in the prologue. Tell the tracer about
     // it.
     tracer.set_gc_count(gc_count_);
 
     GarbageCollector collector = SelectGarbageCollector(space);
     // Tell the tracer which collector we've selected.
     tracer.set_collector(collector);
 
     HistogramTimer* rate = (collector == SCAVENGER)
         ? &Counters::gc_scavenger
         : &Counters::gc_compactor;
     rate->Start();
-    PerformGarbageCollection(collector, &tracer, collectionPolicy);
+    PerformGarbageCollection(collector, &tracer);
     rate->Stop();
 
     GarbageCollectionEpilogue();
   }
 
 
 #ifdef ENABLE_LOGGING_AND_PROFILING
   if (FLAG_log_gc) HeapProfiler::WriteSample();
   if (CpuProfiler::is_profiling()) CpuProfiler::ProcessMovedFunctions();
 #endif
 }
 
 
 void Heap::PerformScavenge() {
   GCTracer tracer;
-  PerformGarbageCollection(SCAVENGER, &tracer, NORMAL);
+  PerformGarbageCollection(SCAVENGER, &tracer);
 }
 
 
 #ifdef DEBUG
 // Helper class for verifying the symbol table.
 class SymbolTableVerifier : public ObjectVisitor {
  public:
   SymbolTableVerifier() { }
   void VisitPointers(Object** start, Object** end) {
     // Visit all HeapObject pointers in [start, end).
(...skipping 169 matching lines...)
   } else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) {
     set_survival_rate_trend(INCREASING);
   } else {
     set_survival_rate_trend(STABLE);
   }
 
   survival_rate_ = survival_rate;
 }
 
 void Heap::PerformGarbageCollection(GarbageCollector collector,
-                                    GCTracer* tracer,
-                                    CollectionPolicy collectionPolicy) {
+                                    GCTracer* tracer) {
   VerifySymbolTable();
   if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) {
     ASSERT(!allocation_allowed_);
     GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
     global_gc_prologue_callback_();
   }
 
   GCType gc_type =
       collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
 
   for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
     if (gc_type & gc_prologue_callbacks_[i].gc_type) {
       gc_prologue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags);
     }
   }
 
   EnsureFromSpaceIsCommitted();
 
   int start_new_space_size = Heap::new_space()->SizeAsInt();
 
   if (collector == MARK_COMPACTOR) {
     // Perform mark-sweep with optional compaction.
     MarkCompact(tracer);
 
     bool high_survival_rate_during_scavenges = IsHighSurvivalRate() &&
         IsStableOrIncreasingSurvivalTrend();
 
     UpdateSurvivalRateTrend(start_new_space_size);
 
-    UpdateOldSpaceLimits();
+    int old_gen_size = PromotedSpaceSize();
+    old_gen_promotion_limit_ =
+        old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3);
+    old_gen_allocation_limit_ =
+        old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2);
 
-    // Major GC would invoke weak handle callbacks on weakly reachable
-    // handles, but won't collect weakly reachable objects until next
-    // major GC. Therefore if we collect aggressively and weak handle callback
-    // has been invoked, we rerun major GC to release objects which become
-    // garbage.
-    if (collectionPolicy == AGGRESSIVE) {
-      // Note: as weak callbacks can execute arbitrary code, we cannot
-      // hope that eventually there will be no weak callbacks invocations.
-      // Therefore stop recollecting after several attempts.
-      const int kMaxNumberOfAttempts = 7;
-      for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
-        { DisableAssertNoAllocation allow_allocation;
-          GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
-          if (!GlobalHandles::PostGarbageCollectionProcessing()) break;
-        }
-        MarkCompact(tracer);
-        // Weak handle callbacks can allocate data, so keep limits correct.
-        UpdateOldSpaceLimits();
-      }
-    } else {
-      if (high_survival_rate_during_scavenges &&
-          IsStableOrIncreasingSurvivalTrend()) {
-        // Stable high survival rates of young objects both during partial and
-        // full collection indicate that mutator is either building or modifying
-        // a structure with a long lifetime.
-        // In this case we aggressively raise old generation memory limits to
-        // postpone subsequent mark-sweep collection and thus trade memory
-        // space for the mutation speed.
-        old_gen_promotion_limit_ *= 2;
-        old_gen_allocation_limit_ *= 2;
-      }
+    if (high_survival_rate_during_scavenges &&
+        IsStableOrIncreasingSurvivalTrend()) {
+      // Stable high survival rates of young objects both during partial and
+      // full collection indicate that mutator is either building or modifying
+      // a structure with a long lifetime.
+      // In this case we aggressively raise old generation memory limits to
+      // postpone subsequent mark-sweep collection and thus trade memory
+      // space for the mutation speed.
+      old_gen_promotion_limit_ *= 2;
+      old_gen_allocation_limit_ *= 2;
     }
 
-    { DisableAssertNoAllocation allow_allocation;
-      GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
-      GlobalHandles::PostGarbageCollectionProcessing();
-    }
+    old_gen_exhausted_ = false;
   } else {
     tracer_ = tracer;
     Scavenge();
     tracer_ = NULL;
 
     UpdateSurvivalRateTrend(start_new_space_size);
   }
 
   Counters::objs_since_last_young.Set(0);
 
+  if (collector == MARK_COMPACTOR) {
+    DisableAssertNoAllocation allow_allocation;
+    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
+    GlobalHandles::PostGarbageCollectionProcessing();
+  }
+
   // Update relocatables.
   Relocatable::PostGarbageCollectionProcessing();
 
   if (collector == MARK_COMPACTOR) {
     // Register the amount of external allocated memory.
     amount_of_external_allocated_memory_at_last_global_gc_ =
         amount_of_external_allocated_memory_;
   }
 
   GCCallbackFlags callback_flags = tracer->is_compacting()
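
For reference, a minimal standalone sketch of the old-generation limit arithmetic restored by the '+' lines above; the 2 MB and 8 MB minimums come from the earlier hunk, while the 30 MB promoted-space size is an illustrative value, not something taken from this change:

// Illustrates how old_gen_promotion_limit_ and old_gen_allocation_limit_
// are derived after a mark-compact in the reverted-to code. Example values.
#include <algorithm>
#include <cstdio>

int main() {
  const long long MB = 1024 * 1024;
  const long long kMinimumPromotionLimit = 2 * MB;
  const long long kMinimumAllocationLimit = 8 * MB;

  long long old_gen_size = 30 * MB;  // pretend PromotedSpaceSize() returned 30 MB

  // The next mark-compact is deferred until promotions grow the old
  // generation by roughly a third (never less than 2 MB)...
  long long promotion_limit =
      old_gen_size + std::max(kMinimumPromotionLimit, old_gen_size / 3);
  // ...and old-space allocation may grow it by roughly half (never less than 8 MB).
  long long allocation_limit =
      old_gen_size + std::max(kMinimumAllocationLimit, old_gen_size / 2);

  std::printf("promotion limit:  %lld MB\n", promotion_limit / MB);   // 40
  std::printf("allocation limit: %lld MB\n", allocation_limit / MB);  // 45
  return 0;
}

With a 30 MB old generation this yields a 40 MB promotion limit and a 45 MB allocation limit; the survival-rate heuristic in the same hunk may then double both values.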
(...skipping 4220 matching lines...)
 void ExternalStringTable::TearDown() {
   new_space_strings_.Free();
   old_space_strings_.Free();
 }
 
 
 List<Object*> ExternalStringTable::new_space_strings_;
 List<Object*> ExternalStringTable::old_space_strings_;
 
 } }  // namespace v8::internal
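
The AGGRESSIVE path removed by this revert (the '-' lines in Heap::PerformGarbageCollection) re-ran the major collector because, per the removed comment, weak-handle callbacks run after the mark-compact that found the dead handles, so whatever those callbacks release is not reclaimed until the next major GC. A rough, self-contained sketch of that control flow; the Collector type and its pending-work counter are stand-ins for illustration, not V8 APIs:

#include <cstdio>

// Stand-in for the heap: MarkCompact() models a major GC, and
// PostGarbageCollectionProcessing() models the weak-handle callbacks,
// returning true while those callbacks still produced more work.
struct Collector {
  int pending_weak_callback_rounds = 3;  // pretend three rounds remain

  void MarkCompact() { std::puts("mark-compact"); }

  bool PostGarbageCollectionProcessing() {
    if (pending_weak_callback_rounds == 0) return false;
    --pending_weak_callback_rounds;
    return true;  // callbacks ran and may have created new garbage
  }
};

int main() {
  Collector heap;
  heap.MarkCompact();  // the initial major collection

  // Weak callbacks can execute arbitrary code, so the work may never drain;
  // cap the number of re-collections, as the removed code did with 7 attempts.
  const int kMaxNumberOfAttempts = 7;
  for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
    if (!heap.PostGarbageCollectionProcessing()) break;
    heap.MarkCompact();  // reclaim objects the callbacks turned into garbage
  }
  return 0;
}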
