| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 488 matching lines...) |
| 499 PagedSpaces spaces(this); | 499 PagedSpaces spaces(this); |
| 500 for (PagedSpace* space = spaces.next(); | 500 for (PagedSpace* space = spaces.next(); |
| 501 space != NULL; | 501 space != NULL; |
| 502 space = spaces.next()) { | 502 space = spaces.next()) { |
| 503 space->RepairFreeListsAfterBoot(); | 503 space->RepairFreeListsAfterBoot(); |
| 504 } | 504 } |
| 505 } | 505 } |
| 506 | 506 |
| 507 | 507 |
| 508 void Heap::ProcessPretenuringFeedback() { | 508 void Heap::ProcessPretenuringFeedback() { |
| 509 if (FLAG_allocation_site_pretenuring) { | 509 if (FLAG_allocation_site_pretenuring && |
| 510 new_space_high_promotion_mode_active_) { |
| 510 int tenure_decisions = 0; | 511 int tenure_decisions = 0; |
| 511 int dont_tenure_decisions = 0; | 512 int dont_tenure_decisions = 0; |
| 512 int allocation_mementos_found = 0; | 513 int allocation_mementos_found = 0; |
| 513 int allocation_sites = 0; | 514 int allocation_sites = 0; |
| 514 int active_allocation_sites = 0; | 515 int active_allocation_sites = 0; |
| 515 | 516 |
| 516 // If the scratchpad overflowed, we have to iterate over the allocation | 517 // If the scratchpad overflowed, we have to iterate over the allocation |
| 517 // stites list. | 518 // sites list. |
| 518 bool use_scratchpad = | 519 bool use_scratchpad = |
| 519 allocation_sites_scratchpad_length < kAllocationSiteScratchpadSize; | 520 allocation_sites_scratchpad_length < kAllocationSiteScratchpadSize; |
| 520 | 521 |
| 521 int i = 0; | 522 int i = 0; |
| 522 Object* list_element = allocation_sites_list(); | 523 Object* list_element = allocation_sites_list(); |
| 523 while (use_scratchpad ? | 524 while (use_scratchpad ? |
| 524 i < allocation_sites_scratchpad_length : | 525 i < allocation_sites_scratchpad_length : |
| 525 list_element->IsAllocationSite()) { | 526 list_element->IsAllocationSite()) { |
| 526 AllocationSite* site = use_scratchpad ? | 527 AllocationSite* site = use_scratchpad ? |
| 527 allocation_sites_scratchpad[i] : AllocationSite::cast(list_element); | 528 allocation_sites_scratchpad[i] : AllocationSite::cast(list_element); |
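Note: the fold below hides the remainder of this loop body. Given the two-source loop condition above, the body has to end by advancing whichever source is in use; a minimal sketch of that advance step, reusing the CL's own names (an illustration of the implied logic, not the folded code itself):

    // Advance the iteration: bump the scratchpad index, or follow the
    // weak list to the next AllocationSite, depending on the source.
    if (use_scratchpad) {
      i++;
    } else {
      list_element = site->weak_next();
    }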
| (...skipping 565 matching lines...) |
| 1093 IsHighSurvivalRate()) { | 1094 IsHighSurvivalRate()) { |
| 1094 // Stable high survival rates even though the young generation is at | 1095 // Stable high survival rates even though the young generation is at |
| 1095 // maximum capacity indicate that most objects will be promoted. | 1096 // maximum capacity indicate that most objects will be promoted. |
| 1096 // To decrease scavenger pauses and final mark-sweep pauses, we | 1097 // To decrease scavenger pauses and final mark-sweep pauses, we |
| 1097 // have to limit the maximal capacity of the young generation. | 1098 // have to limit the maximal capacity of the young generation. |
| 1098 SetNewSpaceHighPromotionModeActive(true); | 1099 SetNewSpaceHighPromotionModeActive(true); |
| 1099 if (FLAG_trace_gc) { | 1100 if (FLAG_trace_gc) { |
| 1100 PrintPID("Limited new space size due to high promotion rate: %d MB\n", | 1101 PrintPID("Limited new space size due to high promotion rate: %d MB\n", |
| 1101 new_space_.InitialCapacity() / MB); | 1102 new_space_.InitialCapacity() / MB); |
| 1102 } | 1103 } |
| 1103 // Support for global pre-tenuring uses the high promotion mode as a | 1104 // The high promotion mode is our indicator to turn on pretenuring. We have |
| 1104 // heuristic indicator of whether to pretenure or not, we trigger | 1105 // to deoptimize all optimized code in global pretenuring mode and all |
| 1105 // deoptimization here to take advantage of pre-tenuring as soon as | 1106 // code that should be tenured in local pretenuring mode. |
| 1106 // possible. | |
| 1107 if (FLAG_pretenuring) { | 1107 if (FLAG_pretenuring) { |
| 1108 isolate_->stack_guard()->FullDeopt(); | 1108 if (FLAG_allocation_site_pretenuring) { |
| 1109 ResetAllAllocationSitesDependentCode(NOT_TENURED); |
| 1110 } else { |
| 1111 isolate_->stack_guard()->FullDeopt(); |
| 1112 } |
| 1109 } | 1113 } |
| 1110 } else if (new_space_high_promotion_mode_active_ && | 1114 } else if (new_space_high_promotion_mode_active_ && |
| 1111 IsStableOrDecreasingSurvivalTrend() && | 1115 IsStableOrDecreasingSurvivalTrend() && |
| 1112 IsLowSurvivalRate()) { | 1116 IsLowSurvivalRate()) { |
| 1113 // Decreasing low survival rates might indicate that the above high | 1117 // Decreasing low survival rates might indicate that the above high |
| 1114 // promotion mode is over and we should allow the young generation | 1118 // promotion mode is over and we should allow the young generation |
| 1115 // to grow again. | 1119 // to grow again. |
| 1116 SetNewSpaceHighPromotionModeActive(false); | 1120 SetNewSpaceHighPromotionModeActive(false); |
| 1117 if (FLAG_trace_gc) { | 1121 if (FLAG_trace_gc) { |
| 1118 PrintPID("Unlimited new space size due to low promotion rate: %d MB\n", | 1122 PrintPID("Unlimited new space size due to low promotion rate: %d MB\n", |
| 1119 new_space_.MaximumCapacity() / MB); | 1123 new_space_.MaximumCapacity() / MB); |
| 1120 } | 1124 } |
| 1121 // Trigger deoptimization here to turn off pre-tenuring as soon as | 1125 // Trigger deoptimization here to turn off global pretenuring as soon as |
| 1122 // possible. | 1126 // possible. |
| 1123 if (FLAG_pretenuring) { | 1127 if (FLAG_pretenuring && !FLAG_allocation_site_pretenuring) { |
| 1124 isolate_->stack_guard()->FullDeopt(); | 1128 isolate_->stack_guard()->FullDeopt(); |
| 1125 } | 1129 } |
| 1126 } | 1130 } |
| 1127 | 1131 |
| 1128 if (new_space_high_promotion_mode_active_ && | 1132 if (new_space_high_promotion_mode_active_ && |
| 1129 new_space_.Capacity() > new_space_.InitialCapacity()) { | 1133 new_space_.Capacity() > new_space_.InitialCapacity()) { |
| 1130 new_space_.Shrink(); | 1134 new_space_.Shrink(); |
| 1131 } | 1135 } |
| 1132 | 1136 |
| 1133 isolate_->counters()->objs_since_last_young()->Set(0); | 1137 isolate_->counters()->objs_since_last_young()->Set(0); |
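The deoptimization policy split across the two transitions above is easy to misread, so here is a hedged standalone summary. The enum, function, and puts() calls are illustrative stand-ins only; the real logic is inline in the hunks above, keyed off FLAG_pretenuring and FLAG_allocation_site_pretenuring:

    #include <cstdio>

    enum PretenuringKind { kGlobal, kAllocationSite };

    // What this CL does when new-space high promotion mode toggles.
    void OnHighPromotionModeToggle(bool entering, PretenuringKind kind) {
      if (kind == kAllocationSite) {
        if (entering) {
          // Only code depending on sites currently decided NOT_TENURED is
          // stale; reset those decisions rather than deopting everything.
          std::puts("ResetAllAllocationSitesDependentCode(NOT_TENURED)");
        }
        // On leaving the mode, per-site decisions remain valid: no deopt.
      } else {
        // Global pretenuring bakes a single tenure flag into optimized
        // code, so entering and leaving both require a full deopt.
        std::puts("isolate->stack_guard()->FullDeopt()");
      }
    }

    int main() {
      OnHighPromotionModeToggle(true, kAllocationSite);  // enter, local mode
      OnHighPromotionModeToggle(false, kGlobal);         // leave, global mode
      return 0;
    }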
| (...skipping 71 matching lines...) |
| 1205 } | 1209 } |
| 1206 } | 1210 } |
| 1207 } | 1211 } |
| 1208 } | 1212 } |
| 1209 | 1213 |
| 1210 | 1214 |
| 1211 void Heap::MarkCompact(GCTracer* tracer) { | 1215 void Heap::MarkCompact(GCTracer* tracer) { |
| 1212 gc_state_ = MARK_COMPACT; | 1216 gc_state_ = MARK_COMPACT; |
| 1213 LOG(isolate_, ResourceEvent("markcompact", "begin")); | 1217 LOG(isolate_, ResourceEvent("markcompact", "begin")); |
| 1214 | 1218 |
| 1219 uint64_t size_of_objects_before_gc = SizeOfObjects(); |
| 1220 |
| 1215 mark_compact_collector_.Prepare(tracer); | 1221 mark_compact_collector_.Prepare(tracer); |
| 1216 | 1222 |
| 1217 ms_count_++; | 1223 ms_count_++; |
| 1218 tracer->set_full_gc_count(ms_count_); | 1224 tracer->set_full_gc_count(ms_count_); |
| 1219 | 1225 |
| 1220 MarkCompactPrologue(); | 1226 MarkCompactPrologue(); |
| 1221 | 1227 |
| 1222 mark_compact_collector_.CollectGarbage(); | 1228 mark_compact_collector_.CollectGarbage(); |
| 1223 | 1229 |
| 1224 LOG(isolate_, ResourceEvent("markcompact", "end")); | 1230 LOG(isolate_, ResourceEvent("markcompact", "end")); |
| 1225 | 1231 |
| 1226 gc_state_ = NOT_IN_GC; | 1232 gc_state_ = NOT_IN_GC; |
| 1227 | 1233 |
| 1228 isolate_->counters()->objs_since_last_full()->Set(0); | 1234 isolate_->counters()->objs_since_last_full()->Set(0); |
| 1229 | 1235 |
| 1230 flush_monomorphic_ics_ = false; | 1236 flush_monomorphic_ics_ = false; |
| 1237 |
| 1238 if (FLAG_allocation_site_pretenuring) { |
| 1239 EvaluateOldSpaceLocalPretenuring(size_of_objects_before_gc); |
| 1240 } |
| 1231 } | 1241 } |
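One ordering detail in this hunk: SizeOfObjects() must be sampled before mark_compact_collector_.Prepare() and CollectGarbage(), because the collection frees exactly the objects the survival computation wants in its denominator; EvaluateOldSpaceLocalPretenuring then compares that snapshot against the post-GC size. Schematically (a restatement of the control flow above, not new code):

    uint64_t before = SizeOfObjects();         // sampled while garbage is still counted
    // ... the full mark-compact runs, reclaiming dead objects ...
    EvaluateOldSpaceLocalPretenuring(before);  // calls SizeOfObjects() again internally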
| 1232 | 1242 |
| 1233 | 1243 |
| 1234 void Heap::MarkCompactPrologue() { | 1244 void Heap::MarkCompactPrologue() { |
| 1235 // At any old GC clear the keyed lookup cache to enable collection of unused | 1245 // At any old GC clear the keyed lookup cache to enable collection of unused |
| 1236 // maps. | 1246 // maps. |
| 1237 isolate_->keyed_lookup_cache()->Clear(); | 1247 isolate_->keyed_lookup_cache()->Clear(); |
| 1238 isolate_->context_slot_cache()->Clear(); | 1248 isolate_->context_slot_cache()->Clear(); |
| 1239 isolate_->descriptor_lookup_cache()->Clear(); | 1249 isolate_->descriptor_lookup_cache()->Clear(); |
| 1240 RegExpResultsCache::Clear(string_split_cache()); | 1250 RegExpResultsCache::Clear(string_split_cache()); |
| (...skipping 718 matching lines...) |
| 1959 void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer, | 1969 void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer, |
| 1960 bool record_slots) { | 1970 bool record_slots) { |
| 1961 Object* allocation_site_obj = | 1971 Object* allocation_site_obj = |
| 1962 VisitWeakList<AllocationSite>(this, | 1972 VisitWeakList<AllocationSite>(this, |
| 1963 allocation_sites_list(), | 1973 allocation_sites_list(), |
| 1964 retainer, record_slots); | 1974 retainer, record_slots); |
| 1965 set_allocation_sites_list(allocation_site_obj); | 1975 set_allocation_sites_list(allocation_site_obj); |
| 1966 } | 1976 } |
| 1967 | 1977 |
| 1968 | 1978 |
| 1979 void Heap::ResetAllAllocationSitesDependentCode(PretenureFlag flag) { |
| 1980 Object* cur = allocation_sites_list(); |
| 1981 while (cur->IsAllocationSite()) { |
| 1982 AllocationSite* casted = AllocationSite::cast(cur); |
| 1983 if (casted->GetPretenureMode() == flag) { |
| 1984 casted->ResetPretenureDecision(); |
| 1985 } |
| 1986 cur = casted->weak_next(); |
| 1987 } |
| 1988 } |
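For the record, the two call sites of this helper in the CL pair with the two pretenuring policy paths; both calls appear verbatim in hunks above:

    // Entering high promotion mode (first hunk above): sites that decided
    // NOT_TENURED should re-evaluate, since most young objects are now
    // expected to be promoted.
    ResetAllAllocationSitesDependentCode(NOT_TENURED);

    // After a mark-compact with low old-generation survival
    // (EvaluateOldSpaceLocalPretenuring): TENURED decisions are suspect.
    ResetAllAllocationSitesDependentCode(TENURED);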
| 1989 |
| 1990 |
| 1991 void Heap::EvaluateOldSpaceLocalPretenuring( |
| 1992 uint64_t size_of_objects_before_gc) { |
| 1993 uint64_t size_of_objects_after_gc = SizeOfObjects(); |
| 1994 double old_generation_survival_rate = |
| 1995 (static_cast<double>(size_of_objects_after_gc) * 100) / |
| 1996 static_cast<double>(size_of_objects_before_gc); |
| 1997 |
| 1998 if (old_generation_survival_rate < kOldSurvivalRateLowThreshold) { |
| 1999 // Too many objects died in the old generation; pretenuring of the |
| 2000 // wrong allocation sites may be the cause. We have to deopt all |
| 2001 // dependent code registered in the allocation sites to re-evaluate |
| 2002 // our pretenuring decisions. |
| 2003 ResetAllAllocationSitesDependentCode(TENURED); |
| 2004 if (FLAG_trace_pretenuring) { |
| 2005 PrintF("Deopt all allocation sites dependent code due to low survival " |
| 2006 "rate in the old generation %f\n", old_generation_survival_rate); |
| 2007 } |
| 2008 } |
| 2009 } |
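To make the threshold check concrete, a standalone worked example of the computation above. The value of kOldSurvivalRateLowThreshold is defined elsewhere (heap.h) and is not visible in this CL, so the 10 below is an assumed illustrative value:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint64_t MB = 1024 * 1024;
      const double kOldSurvivalRateLowThreshold = 10;  // assumed value

      uint64_t size_before_gc = 100 * MB;  // SizeOfObjects() at the top of MarkCompact
      uint64_t size_after_gc = 8 * MB;     // SizeOfObjects() after the collection

      // Same arithmetic as EvaluateOldSpaceLocalPretenuring: survivors as a
      // percentage of the pre-GC heap size.
      double survival_rate = (static_cast<double>(size_after_gc) * 100) /
                             static_cast<double>(size_before_gc);

      if (survival_rate < kOldSurvivalRateLowThreshold) {
        // Here the heap calls ResetAllAllocationSitesDependentCode(TENURED)
        // so that stale pretenuring decisions get re-evaluated.
        std::printf("survival rate %.1f%%: reset TENURED allocation sites\n",
                    survival_rate);
      }
      return 0;
    }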
| 2010 |
| 2011 |
| 1969 void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) { | 2012 void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) { |
| 1970 DisallowHeapAllocation no_allocation; | 2013 DisallowHeapAllocation no_allocation; |
| 1971 // All external strings are listed in the external string table. | 2014 // All external strings are listed in the external string table. |
| 1972 | 2015 |
| 1973 class ExternalStringTableVisitorAdapter : public ObjectVisitor { | 2016 class ExternalStringTableVisitorAdapter : public ObjectVisitor { |
| 1974 public: | 2017 public: |
| 1975 explicit ExternalStringTableVisitorAdapter( | 2018 explicit ExternalStringTableVisitorAdapter( |
| 1976 v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {} | 2019 v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {} |
| 1977 virtual void VisitPointers(Object** start, Object** end) { | 2020 virtual void VisitPointers(Object** start, Object** end) { |
| 1978 for (Object** p = start; p < end; p++) { | 2021 for (Object** p = start; p < end; p++) { |
| (...skipping 5756 matching lines...) |
| 7735 static_cast<int>(object_sizes_last_time_[index])); | 7778 static_cast<int>(object_sizes_last_time_[index])); |
| 7736 CODE_AGE_LIST_COMPLETE(ADJUST_LAST_TIME_OBJECT_COUNT) | 7779 CODE_AGE_LIST_COMPLETE(ADJUST_LAST_TIME_OBJECT_COUNT) |
| 7737 #undef ADJUST_LAST_TIME_OBJECT_COUNT | 7780 #undef ADJUST_LAST_TIME_OBJECT_COUNT |
| 7738 | 7781 |
| 7739 OS::MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_)); | 7782 OS::MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_)); |
| 7740 OS::MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_)); | 7783 OS::MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_)); |
| 7741 ClearObjectStats(); | 7784 ClearObjectStats(); |
| 7742 } | 7785 } |
| 7743 | 7786 |
| 7744 } } // namespace v8::internal | 7787 } } // namespace v8::internal |