Chromium Code Reviews

Unified Diff: src/heap/heap.cc

Issue 1180263006: Only shrink new space when we are not in the process of obtaining pretenuring feedback. (Closed)
Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Created 5 years, 6 months ago
--- a/src/heap/heap.cc
+++ b/src/heap/heap.cc
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "src/v8.h"
 
 #include "src/accessors.h"
 #include "src/api.h"
 #include "src/base/bits.h"
 #include "src/base/once.h"
(...skipping 512 matching lines...)
 
 void Heap::RepairFreeListsAfterDeserialization() {
   PagedSpaces spaces(this);
   for (PagedSpace* space = spaces.next(); space != NULL;
        space = spaces.next()) {
     space->RepairFreeListsAfterDeserialization();
   }
 }
 
 
-void Heap::ProcessPretenuringFeedback() {
+bool Heap::ProcessPretenuringFeedback() {
+  bool trigger_deoptimization = false;
   if (FLAG_allocation_site_pretenuring) {
     int tenure_decisions = 0;
     int dont_tenure_decisions = 0;
     int allocation_mementos_found = 0;
     int allocation_sites = 0;
     int active_allocation_sites = 0;
 
     // If the scratchpad overflowed, we have to iterate over the allocation
     // sites list.
     // TODO(hpayer): We iterate over the whole list of allocation sites when
     // we grew to the maximum semi-space size to deopt maybe tenured
     // allocation sites. We could hold the maybe tenured allocation sites
     // in a seperate data structure if this is a performance problem.
     bool deopt_maybe_tenured = DeoptMaybeTenuredAllocationSites();
     bool use_scratchpad =
         allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize &&
         !deopt_maybe_tenured;
 
     int i = 0;
     Object* list_element = allocation_sites_list();
-    bool trigger_deoptimization = false;
     bool maximum_size_scavenge = MaximumSizeScavenge();
     while (use_scratchpad ? i < allocation_sites_scratchpad_length_
                           : list_element->IsAllocationSite()) {
       AllocationSite* site =
           use_scratchpad
               ? AllocationSite::cast(allocation_sites_scratchpad()->get(i))
               : AllocationSite::cast(list_element);
       allocation_mementos_found += site->memento_found_count();
       if (site->memento_found_count() > 0) {
         active_allocation_sites++;
(...skipping 31 matching lines...)
          dont_tenure_decisions > 0)) {
       PrintF(
           "GC: (mode, #visited allocation sites, #active allocation sites, "
           "#mementos, #tenure decisions, #donttenure decisions) "
           "(%s, %d, %d, %d, %d, %d)\n",
           use_scratchpad ? "use scratchpad" : "use list", allocation_sites,
           active_allocation_sites, allocation_mementos_found, tenure_decisions,
           dont_tenure_decisions);
     }
   }
+  return trigger_deoptimization;
 }
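
Note: the hunk above hoists trigger_deoptimization from a local inside the site walk to function scope and returns it, so ProcessPretenuringFeedback() now reports whether any tenuring decision changed (and therefore whether dependent code was marked for deoptimization). A minimal sketch of that contract, with the heap internals faked; SiteSketch and its fields are illustrative stand-ins, not V8 types:

  #include <vector>

  // Hypothetical stand-in for an allocation site entry; not the real V8 type.
  struct SiteSketch {
    int memento_found_count;  // mementos observed for this site during this GC
    bool decision_changed;    // pretend result of digesting the feedback
  };

  // Sketch of the new contract: fold per-site outcomes into a single flag and
  // return it instead of discarding it, as the old void version did.
  bool ProcessPretenuringFeedbackSketch(const std::vector<SiteSketch>& sites) {
    bool trigger_deoptimization = false;  // function-scoped, as in the patch
    for (const SiteSketch& site : sites) {
      if (site.memento_found_count > 0 && site.decision_changed) {
        trigger_deoptimization = true;  // dependent code will need to deopt
      }
    }
    return trigger_deoptimization;  // the caller decides whether to resize
  }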
 
 
 void Heap::DeoptMarkedAllocationSites() {
   // TODO(hpayer): If iterating over the allocation sites list becomes a
   // performance issue, use a cache heap data structure instead (similar to the
   // allocation sites scratchpad).
   Object* list_element = allocation_sites_list();
   while (list_element->IsAllocationSite()) {
     AllocationSite* site = AllocationSite::cast(list_element);
     if (site->deopt_dependent_code()) {
       site->dependent_code()->MarkCodeForDeoptimization(
           isolate_, DependentCode::kAllocationSiteTenuringChangedGroup);
       site->set_deopt_dependent_code(false);
     }
     list_element = site->weak_next();
   }
   Deoptimizer::DeoptimizeMarkedCode(isolate_);
 }
 
 
 void Heap::GarbageCollectionEpilogue() {
   store_buffer()->GCEpilogue();
 
   // In release mode, we only zap the from space under heap verification.
   if (Heap::ShouldZapGarbage()) {
     ZapFromSpace();
   }
 
-  // Process pretenuring feedback and update allocation sites.
-  ProcessPretenuringFeedback();
-
 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap) {
     Verify();
   }
 #endif
 
   AllowHeapAllocation for_the_rest_of_the_epilogue;
 
 #ifdef DEBUG
   if (FLAG_print_global_handles) isolate_->global_handles()->Print();
(...skipping 595 matching lines...)
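
Note: this hunk only relocates the feedback pass. ProcessPretenuringFeedback() no longer runs in the epilogue; the next hunk moves the call into the main collection path, where its boolean result is available early enough to gate new-space resizing.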
     old_generation_size_configured_ = true;
     // This should be updated before PostGarbageCollectionProcessing, which can
     // cause another GC. Take into account the objects promoted during GC.
     old_generation_allocation_counter_ +=
         static_cast<size_t>(promoted_objects_size_);
     old_generation_size_at_last_gc_ = PromotedSpaceSizeOfObjects();
   } else {
     Scavenge();
   }
 
+  bool deopted = ProcessPretenuringFeedback();
+  UpdateSurvivalStatistics(start_new_space_size);
 
-  UpdateSurvivalStatistics(start_new_space_size);
-  ConfigureNewGenerationSize();
+  // When pretenuring is collecting new feedback, we do not shrink the new space
+  // right away.
+  if (!deopted) {
+    ConfigureNewGenerationSize();
+  }
   ConfigureInitialOldGenerationSize();
 
   isolate_->counters()->objs_since_last_young()->Set(0);
 
   if (collector != SCAVENGER) {
     // Callbacks that fire after this point might trigger nested GCs and
     // restart incremental marking, the assertion can't be moved down.
     DCHECK(incremental_marking()->IsStopped());
 
     // We finished a marking cycle. We can uncommit the marking deque until
(...skipping 5495 matching lines...)
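
Note on why the shrink is gated: feedback gathered during a maximum-size scavenge (see MaximumSizeScavenge() in the first hunk) is what flips tenuring decisions, and if decisions just changed and dependent code was deoptimized, shrinking the semi-spaces immediately would alter the very conditions under which the next round of feedback is collected. A condensed, hypothetical rendering of the new flow; only ProcessPretenuringFeedback and ConfigureNewGenerationSize are names from the patch, the rest is scaffolding:

  #include <cstdio>

  // Fake heap driver; the bodies only print so the control flow is visible.
  struct HeapFlowSketch {
    bool feedback_triggered_deopt;  // pretend outcome of the feedback pass

    bool ProcessPretenuringFeedback() {
      std::puts("digest pretenuring feedback");
      return feedback_triggered_deopt;
    }

    void ConfigureNewGenerationSize() { std::puts("shrink new space"); }

    void AfterCollection() {
      bool deopted = ProcessPretenuringFeedback();
      // Keep the new space at its current size while pretenuring is still
      // acting on fresh feedback; shrink only on quiet GCs.
      if (!deopted) ConfigureNewGenerationSize();
    }
  };

  int main() {
    HeapFlowSketch busy{true};    // feedback changed decisions: no shrink
    busy.AfterCollection();
    HeapFlowSketch quiet{false};  // no decision changes: shrink as before
    quiet.AfterCollection();
  }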
     *object_type = "CODE_TYPE";           \
     *object_sub_type = "CODE_AGE/" #name; \
     return true;
     CODE_AGE_LIST_COMPLETE(COMPARE_AND_RETURN_NAME)
 #undef COMPARE_AND_RETURN_NAME
   }
   return false;
 }
 }  // namespace internal
 }  // namespace v8
