Chromium Code Reviews

Unified Diff: src/heap.cc

Issue 96783002: Allocation site pretenuring. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 7 years ago
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 499 matching lines...)
   int dont_tenure_decisions = 0;
   int allocation_mementos_found = 0;

   Object* cur = allocation_sites_list();
   while (cur->IsAllocationSite()) {
     AllocationSite* casted = AllocationSite::cast(cur);
     allocation_mementos_found += casted->memento_found_count()->value();
     if (casted->DigestPretenuringFeedback()) {
       if (casted->GetPretenureMode() == TENURED) {
         tenure_decisions++;
+        casted->dependent_code()->DeoptimizeDependentCodeGroup(
+            isolate_,
+            DependentCode::kAllocationSiteTenuringChangedGroup);
       } else {
         dont_tenure_decisions++;
       }
     }
     cur = casted->weak_next();
   }

   // TODO(mvstanton): Pretenure decisions are only made once for an allocation
   // site. Find a sane way to decide about revisiting the decision later.

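The lines added in the chunk above tie a pretenuring decision change to code invalidation. As a standalone illustration (not V8 code: Site, DigestFeedback, code_valid, and the 85% cutoff are all invented), the sketch below mirrors that control flow: each site digests its memento feedback, and a site that flips to tenured marks the optimized code that depends on the old decision as invalid.

// Standalone sketch (hypothetical types, not V8 internals) of the
// per-site pretenuring pass shown in the chunk above.
#include <cstdio>
#include <vector>

struct Site {
  int mementos_found = 0;    // allocations observed behind this site
  int mementos_tenured = 0;  // of those, how many survived into old space
  bool tenured = false;      // current pretenuring decision
  bool code_valid = true;    // optimized code compiled against the decision

  // Re-evaluate the decision; returns true if there was feedback to digest.
  bool DigestFeedback() {
    if (mementos_found == 0) return false;
    tenured = mementos_tenured * 100 / mementos_found > 85;  // arbitrary cutoff
    return true;
  }
};

void DigestAllSites(std::vector<Site>& sites) {
  int tenure_decisions = 0;
  int dont_tenure_decisions = 0;
  for (Site& site : sites) {
    if (!site.DigestFeedback()) continue;
    if (site.tenured) {
      tenure_decisions++;
      // Mirrors the added lines: code compiled assuming "don't tenure"
      // is no longer valid once the site switches to tenured.
      site.code_valid = false;
    } else {
      dont_tenure_decisions++;
    }
  }
  std::printf("tenure: %d, don't tenure: %d\n",
              tenure_decisions, dont_tenure_decisions);
}

int main() {
  std::vector<Site> sites(3);
  sites[0] = {100, 95};   // almost everything survives -> tenure
  sites[1] = {100, 10};   // mostly short-lived -> don't tenure
  DigestAllSites(sites);  // prints "tenure: 1, don't tenure: 1"
}
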
(...skipping 535 matching lines...)
       IsHighSurvivalRate()) {
     // Stable high survival rates even though young generation is at
     // maximum capacity indicates that most objects will be promoted.
     // To decrease scavenger pauses and final mark-sweep pauses, we
     // have to limit maximal capacity of the young generation.
     SetNewSpaceHighPromotionModeActive(true);
     if (FLAG_trace_gc) {
       PrintPID("Limited new space size due to high promotion rate: %d MB\n",
                new_space_.InitialCapacity() / MB);
     }
-    // Support for global pre-tenuring uses the high promotion mode as a
-    // heuristic indicator of whether to pretenure or not, we trigger
-    // deoptimization here to take advantage of pre-tenuring as soon as
-    // possible.
-    if (FLAG_pretenuring) {
-      isolate_->stack_guard()->FullDeopt();
-    }
   } else if (new_space_high_promotion_mode_active_ &&
              IsStableOrDecreasingSurvivalTrend() &&
              IsLowSurvivalRate()) {
     // Decreasing low survival rates might indicate that the above high
     // promotion mode is over and we should allow the young generation
     // to grow again.
     SetNewSpaceHighPromotionModeActive(false);
     if (FLAG_trace_gc) {
       PrintPID("Unlimited new space size due to low promotion rate: %d MB\n",
                new_space_.MaximumCapacity() / MB);
     }
-    // Trigger deoptimization here to turn off pre-tenuring as soon as
-    // possible.
-    if (FLAG_pretenuring) {
-      isolate_->stack_guard()->FullDeopt();
-    }
   }

   if (new_space_high_promotion_mode_active_ &&
       new_space_.Capacity() > new_space_.InitialCapacity()) {
     new_space_.Shrink();
   }

   isolate_->counters()->objs_since_last_young()->Set(0);

   // Callbacks that fire after this point might trigger nested GCs and
(...skipping 69 matching lines...)
       }
     }
   }
 }

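As a rough illustration of the two deleted blocks above: previously, entering or leaving new-space high-promotion mode also forced a full deoptimization through the stack guard whenever FLAG_pretenuring was on. The sketch below (not V8 code; the struct, method, and thresholds are invented for illustration) models that mode toggle; the patch keeps the toggle but drops the global deopt from both branches, leaving deoptimization to the individual allocation sites.

// Hypothetical sketch of the high-promotion-mode toggle above.
// Names and thresholds are invented for illustration only.
struct NewSpacePolicy {
  bool high_promotion_mode = false;

  void Update(double survival_rate, bool at_max_capacity) {
    if (!high_promotion_mode && at_max_capacity && survival_rate > 0.90) {
      // Almost everything gets promoted anyway: keep new space small so
      // scavenges and the final mark-sweep stay short.
      high_promotion_mode = true;
    } else if (high_promotion_mode && survival_rate < 0.10) {
      // Promotion pressure is gone: allow new space to grow again.
      high_promotion_mode = false;
    }
    // The removed lines additionally forced a full deopt at this point when
    // FLAG_pretenuring was on; the patch moves that responsibility to the
    // individual allocation sites.
  }
};
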
 void Heap::MarkCompact(GCTracer* tracer) {
   gc_state_ = MARK_COMPACT;
   LOG(isolate_, ResourceEvent("markcompact", "begin"));

+  int64_t objects_before_gc = SizeOfObjects();
+
   mark_compact_collector_.Prepare(tracer);

   ms_count_++;
   tracer->set_full_gc_count(ms_count_);

   MarkCompactPrologue();

   mark_compact_collector_.CollectGarbage();

   LOG(isolate_, ResourceEvent("markcompact", "end"));

   gc_state_ = NOT_IN_GC;

   isolate_->counters()->objs_since_last_full()->Set(0);

   flush_monomorphic_ics_ = false;
+
+  EvaluateLocalPretenuring(objects_before_gc);
 }


 void Heap::MarkCompactPrologue() {
   // At any old GC clear the keyed lookup cache to enable collection of unused
   // maps.
   isolate_->keyed_lookup_cache()->Clear();
   isolate_->context_slot_cache()->Clear();
   isolate_->descriptor_lookup_cache()->Clear();
   RegExpResultsCache::Clear(string_split_cache());
(...skipping 719 matching lines...)
 void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer,
                                   bool record_slots) {
   Object* allocation_site_obj =
       VisitWeakList<AllocationSite>(this,
                                     allocation_sites_list(),
                                     retainer, record_slots);
   set_allocation_sites_list(allocation_site_obj);
 }


+void Heap::DeoptAllAllocationSitesDependentCode() {
+  Object* cur = allocation_sites_list();
+  while (cur->IsAllocationSite()) {
+    AllocationSite* casted = AllocationSite::cast(cur);
+    casted->dependent_code()->DeoptimizeDependentCodeGroup(

    mvstanton 2013/11/30 12:20:41 This is a good thing, but don't you also need to r
    Hannes Payer (out of office) 2013/12/02 11:27:15 Done.

+        isolate_,
+        DependentCode::kAllocationSiteTenuringChangedGroup);
+    cur = casted->weak_next();
+  }
+}
+
+
+void Heap::EvaluateLocalPretenuring(int64_t objects_before_gc) {

    mvstanton 2013/11/30 12:20:41 objects_before_gc: Can you get "sizeof" in the nam
    Hannes Payer (out of office) 2013/12/02 11:27:15 Done.

+  int64_t objects_after_gc = SizeOfObjects();
+  int64_t old_generation_survival_rate =
+      (objects_after_gc * 100) / objects_before_gc;
+
+  if (old_generation_survival_rate < kOldSurvivalRateLowThreshold) {
+    // Too many objects died in the old generation, pretenuring of wrong
+    // allocation sites may be the cause for that. We have to deopt all
+    // dependent code registered in the allocation sites to re-evaluate
+    // our pretenuring decisions.
+    DeoptAllAllocationSitesDependentCode();
+    if (FLAG_trace_pretenuring) {
+      PrintF("Deopt all allocation sites dependent code due to low survival "
+             "rate in the old generation %d\n",
+             static_cast<int>(old_generation_survival_rate));
+    }
+  }
+}
+
+
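To make the new heuristic concrete, here is the arithmetic EvaluateLocalPretenuring performs, redone as a standalone worked example. This is not V8 code: ShouldDeoptAllSites and kLowThreshold are invented names, and the 10% cutoff is an assumed stand-in for kOldSurvivalRateLowThreshold, whose actual value is defined in heap.h and does not appear in this diff.

// Worked example (not V8 code) of the survival-rate check added above.
// kLowThreshold stands in for kOldSurvivalRateLowThreshold; the value 10
// is assumed here for illustration.
#include <cstdint>
#include <cstdio>

constexpr int64_t kLowThreshold = 10;  // assumed percentage

bool ShouldDeoptAllSites(int64_t size_before_gc, int64_t size_after_gc) {
  // Integer percentage of old-generation bytes that survived the full GC.
  int64_t survival_rate = size_after_gc * 100 / size_before_gc;
  return survival_rate < kLowThreshold;
}

int main() {
  const int64_t MB = 1024 * 1024;
  // 200 MB live before the mark-compact, 30 MB after: 15% survived,
  // above the threshold, so the pretenuring decisions are kept.
  std::printf("%d\n", ShouldDeoptAllSites(200 * MB, 30 * MB));  // 0
  // Only 12 MB survives: 6% is below the threshold, so all code depending
  // on allocation-site tenuring decisions would be deoptimized and the
  // decisions re-evaluated from fresh feedback.
  std::printf("%d\n", ShouldDeoptAllSites(200 * MB, 12 * MB));  // 1
}

The underlying design choice, per the patch's own comment, is that a full mark-compact which reclaims most of the old generation is taken as evidence that earlier tenuring decisions were wrong, so the dependent optimized code is thrown away and the sites start collecting feedback again.
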
 void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
   DisallowHeapAllocation no_allocation;

   // Both the external string table and the string table may contain
   // external strings, but neither lists them exhaustively, nor is the
   // intersection set empty. Therefore we iterate over the external string
   // table first, ignoring internalized strings, and then over the
   // internalized string table.

   class ExternalStringTableVisitorAdapter : public ObjectVisitor {
(...skipping 6036 matching lines...)
                     static_cast<int>(object_sizes_last_time_[index]));
   CODE_AGE_LIST_COMPLETE(ADJUST_LAST_TIME_OBJECT_COUNT)
 #undef ADJUST_LAST_TIME_OBJECT_COUNT

   OS::MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
   OS::MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
   ClearObjectStats();
 }

 } }  // namespace v8::internal
