OLD | NEW |
---|---|
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/runtime-profiler.h" | 5 #include "src/runtime-profiler.h" |
6 | 6 |
7 #include "src/assembler.h" | 7 #include "src/assembler.h" |
8 #include "src/ast/scopeinfo.h" | 8 #include "src/ast/scopeinfo.h" |
9 #include "src/base/platform/platform.h" | 9 #include "src/base/platform/platform.h" |
10 #include "src/bootstrapper.h" | 10 #include "src/bootstrapper.h" |
(...skipping 40 matching lines...)
51 RuntimeProfiler::RuntimeProfiler(Isolate* isolate) | 51 RuntimeProfiler::RuntimeProfiler(Isolate* isolate) |
52 : isolate_(isolate), | 52 : isolate_(isolate), |
53 any_ic_changed_(false) { | 53 any_ic_changed_(false) { |
54 } | 54 } |
55 | 55 |
56 | 56 |
57 static void GetICCounts(SharedFunctionInfo* shared, | 57 static void GetICCounts(SharedFunctionInfo* shared, |
58 int* ic_with_type_info_count, int* ic_generic_count, | 58 int* ic_with_type_info_count, int* ic_generic_count, |
59 int* ic_total_count, int* type_info_percentage, | 59 int* ic_total_count, int* type_info_percentage, |
60 int* generic_percentage) { | 60 int* generic_percentage) { |
61 Code* shared_code = shared->code(); | |
62 *ic_total_count = 0; | 61 *ic_total_count = 0; |
63 *ic_generic_count = 0; | 62 *ic_generic_count = 0; |
64 *ic_with_type_info_count = 0; | 63 *ic_with_type_info_count = 0; |
65 Object* raw_info = shared_code->type_feedback_info(); | 64 if (shared->code()->kind() == Code::FUNCTION) { |
66 if (raw_info->IsTypeFeedbackInfo()) { | 65 Code* shared_code = shared->code(); |
67 TypeFeedbackInfo* info = TypeFeedbackInfo::cast(raw_info); | 66 Object* raw_info = shared_code->type_feedback_info(); |
68 *ic_with_type_info_count = info->ic_with_type_info_count(); | 67 if (raw_info->IsTypeFeedbackInfo()) { |
69 *ic_generic_count = info->ic_generic_count(); | 68 TypeFeedbackInfo* info = TypeFeedbackInfo::cast(raw_info); |
70 *ic_total_count = info->ic_total_count(); | 69 *ic_with_type_info_count = info->ic_with_type_info_count(); |
70 *ic_generic_count = info->ic_generic_count(); | |
71 *ic_total_count = info->ic_total_count(); | |
72 } | |
71 } | 73 } |
72 | 74 |
73 // Harvest vector-ics as well | 75 // Harvest vector-ics as well |
74 TypeFeedbackVector* vector = shared->feedback_vector(); | 76 TypeFeedbackVector* vector = shared->feedback_vector(); |
75 int with = 0, gen = 0; | 77 int with = 0, gen = 0; |
76 vector->ComputeCounts(&with, &gen); | 78 vector->ComputeCounts(&with, &gen); |
77 *ic_with_type_info_count += with; | 79 *ic_with_type_info_count += with; |
78 *ic_generic_count += gen; | 80 *ic_generic_count += gen; |
79 | 81 |
80 if (*ic_total_count > 0) { | 82 if (*ic_total_count > 0) { |
(...skipping 48 matching lines...)
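Note: the percentage computation itself is inside the skipped span above. For orientation, the thresholds used later in this file (FLAG_type_info_threshold, FLAG_generic_ic_threshold) compare against integer percentages derived from these counts. A minimal self-contained sketch of that convention, offered as an assumption about the elided code rather than a verbatim copy:

    // Hypothetical standalone illustration; names mirror the out-params above.
    #include <cstdio>

    static void ComputePercentages(int with_type_info, int generic, int total,
                                   int* type_info_percentage,
                                   int* generic_percentage) {
      if (total > 0) {
        *type_info_percentage = 100 * with_type_info / total;
        *generic_percentage = 100 * generic / total;
      } else {
        // No ICs recorded: report fully-typed so the "hot and stable"
        // threshold check is not blocked by an empty sample.
        *type_info_percentage = 100;
        *generic_percentage = 0;
      }
    }

    int main() {
      int t = 0, g = 0;
      ComputePercentages(18, 1, 20, &t, &g);
      std::printf("%d%% type info, %d%% generic\n", t, g);  // 90% / 5%
      return 0;
    }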
129 PrintF("[OSR - patching back edges in "); | 131 PrintF("[OSR - patching back edges in "); |
130 function->PrintName(); | 132 function->PrintName(); |
131 PrintF("]\n"); | 133 PrintF("]\n"); |
132 } | 134 } |
133 | 135 |
134 for (int i = 0; i < loop_nesting_levels; i++) { | 136 for (int i = 0; i < loop_nesting_levels; i++) { |
135 BackEdgeTable::Patch(isolate_, shared->code()); | 137 BackEdgeTable::Patch(isolate_, shared->code()); |
136 } | 138 } |
137 } | 139 } |
138 | 140 |
141 void RuntimeProfiler::MaybeOptimizeFullCodegen(JSFunction* function, | |
142 int frame_count, | |
143 bool frame_optimized) { | |
144 SharedFunctionInfo* shared = function->shared(); | |
145 Code* shared_code = shared->code(); | |
146 if (shared_code->kind() != Code::FUNCTION) return; | |
147 if (function->IsInOptimizationQueue()) return; | |
139 | 148 |
140 void RuntimeProfiler::OptimizeNow() { | 149 if (FLAG_always_osr) { |
150 AttemptOnStackReplacement(function, Code::kMaxLoopNestingMarker); | |
151 // Fall through and do a normal optimized compile as well. | |
152 } else if (!frame_optimized && | |
153 (function->IsMarkedForOptimization() || | |
154 function->IsMarkedForConcurrentOptimization() || | |
155 function->IsOptimized())) { | |
156 // Attempt OSR if we are still running unoptimized code even though | |
157 // the function has long been marked or even already been optimized. | |
158 int ticks = shared_code->profiler_ticks(); | |
159 int64_t allowance = | |
160 kOSRCodeSizeAllowanceBase + | |
161 static_cast<int64_t>(ticks) * kOSRCodeSizeAllowancePerTick; | |
162 if (shared_code->CodeSize() > allowance && | |
163 ticks < Code::ProfilerTicksField::kMax) { | |
164 shared_code->set_profiler_ticks(ticks + 1); | |
165 } else { | |
166 AttemptOnStackReplacement(function); | |
167 } | |
168 return; | |
169 } | |
170 | |
171 // Only record top-level code on top of the execution stack and | |
172 // avoid optimizing excessively large scripts since top-level code | |
173 // will be executed only once. | |
174 const int kMaxToplevelSourceSize = 10 * 1024; | |
175 if (shared->is_toplevel() && | |
176 (frame_count > 1 || shared->SourceSize() > kMaxToplevelSourceSize)) { | |
177 return; | |
178 } | |
179 | |
180 // Do not record non-optimizable functions. | |
181 if (shared->optimization_disabled()) { | |
182 if (shared->deopt_count() >= FLAG_max_opt_count) { | |
183 // If optimization was disabled due to many deoptimizations, | |
184 // then check if the function is hot and try to reenable optimization. | |
185 int ticks = shared_code->profiler_ticks(); | |
186 if (ticks >= kProfilerTicksBeforeReenablingOptimization) { | |
187 shared_code->set_profiler_ticks(0); | |
188 shared->TryReenableOptimization(); | |
189 } else { | |
190 shared_code->set_profiler_ticks(ticks + 1); | |
191 } | |
192 } | |
193 return; | |
194 } | |
195 if (function->IsOptimized()) return; | |
196 | |
197 int ticks = shared_code->profiler_ticks(); | |
198 | |
199 if (ticks >= kProfilerTicksBeforeOptimization) { | |
200 int typeinfo, generic, total, type_percentage, generic_percentage; | |
201 GetICCounts(shared, &typeinfo, &generic, &total, &type_percentage, | |
202 &generic_percentage); | |
203 if (type_percentage >= FLAG_type_info_threshold && | |
204 generic_percentage <= FLAG_generic_ic_threshold) { | |
205 // If this particular function hasn't had any ICs patched for enough | |
206 // ticks, optimize it now. | |
207 Optimize(function, "hot and stable"); | |
208 } else if (ticks >= kTicksWhenNotEnoughTypeInfo) { | |
209 Optimize(function, "not much type info but very hot"); | |
210 } else { | |
211 shared_code->set_profiler_ticks(ticks + 1); | |
212 if (FLAG_trace_opt_verbose) { | |
213 PrintF("[not yet optimizing "); | |
214 function->PrintName(); | |
215 PrintF(", not enough type info: %d/%d (%d%%)]\n", typeinfo, total, | |
216 type_percentage); | |
217 } | |
218 } | |
219 } else if (!any_ic_changed_ && | |
220 shared_code->instruction_size() < kMaxSizeEarlyOpt) { | |
221 // If no IC was patched since the last tick and this function is very | |
222 // small, optimistically optimize it now. | |
223 int typeinfo, generic, total, type_percentage, generic_percentage; | |
224 GetICCounts(shared, &typeinfo, &generic, &total, &type_percentage, | |
225 &generic_percentage); | |
226 if (type_percentage >= FLAG_type_info_threshold && | |
227 generic_percentage <= FLAG_generic_ic_threshold) { | |
228 Optimize(function, "small function"); | |
229 } else { | |
230 shared_code->set_profiler_ticks(ticks + 1); | |
231 } | |
232 } else { | |
233 shared_code->set_profiler_ticks(ticks + 1); | |
234 } | |
235 } | |
236 | |
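A side note on the OSR gate in MaybeOptimizeFullCodegen above: the code-size allowance grows with every profiler tick, so a large unoptimized function has to stay hot for longer before its back edges are patched. Below is a self-contained numeric sketch of the formula; the two constants are hypothetical stand-ins, not the real kOSRCodeSizeAllowanceBase and kOSRCodeSizeAllowancePerTick values defined elsewhere in this file:

    #include <cstdint>
    #include <cstdio>

    // Hypothetical stand-in values for illustration only.
    const int64_t kBase = 10 * 1024;     // ~kOSRCodeSizeAllowanceBase
    const int64_t kPerTick = 2 * 1024;   // ~kOSRCodeSizeAllowancePerTick

    // Simplified: ignores the profiler-tick saturation check in the real code.
    static bool OSRAllowed(int64_t code_size, int ticks) {
      int64_t allowance = kBase + static_cast<int64_t>(ticks) * kPerTick;
      return code_size <= allowance;
    }

    int main() {
      // A 30 KB function: not yet eligible at 5 ticks, eligible by 10 ticks.
      std::printf("%d %d\n", OSRAllowed(30 * 1024, 5),
                  OSRAllowed(30 * 1024, 10));  // prints "0 1"
      return 0;
    }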
237 void RuntimeProfiler::MaybeOptimizeIgnition(JSFunction* function) { | |
238 if (function->IsInOptimizationQueue()) return; | |
239 | |
240 SharedFunctionInfo* shared = function->shared(); | |
241 int ticks = shared->profiler_ticks(); | |
242 | |
243 // TODO(rmcilroy): Deal with OSR cases. | |
244 // TODO(rmcilroy): Also ensure we only OSR top-level code if it is smaller | |
245 // than kMaxToplevelSourceSize. | |
246 // TODO(rmcilroy): Consider whether we should optimize small functions when | |
Jakob Kummerow 2016/02/17 17:45:36: Try to avoid that; it's a SunSpider hack.
rmcilroy 2016/02/18 16:05:56: Acknowledged.
247 // they are first seen on the stack (e.g., kMaxSizeEarlyOpt). | |
248 | |
249 // Do not optimize non-optimizable functions. | |
250 if (shared->optimization_disabled()) { | |
251 if (shared->deopt_count() >= FLAG_max_opt_count) { | |
252 // If optimization was disabled due to many deoptimizations, | |
253 // then check if the function is hot and try to reenable optimization. | |
254 if (ticks >= kProfilerTicksBeforeReenablingOptimization) { | |
255 shared->set_profiler_ticks(0); | |
256 shared->TryReenableOptimization(); | |
257 } | |
258 } | |
259 return; | |
260 } | |
261 | |
262 if (function->IsOptimized()) return; | |
263 | |
264 if (ticks >= kProfilerTicksBeforeOptimization) { | |
265 int typeinfo, generic, total, type_percentage, generic_percentage; | |
266 GetICCounts(shared, &typeinfo, &generic, &total, &type_percentage, | |
267 &generic_percentage); | |
268 if (type_percentage >= FLAG_type_info_threshold && | |
269 generic_percentage <= FLAG_generic_ic_threshold) { | |
270 // If this particular function hasn't had any ICs patched for enough | |
271 // ticks, optimize it now. | |
272 Optimize(function, "hot and stable"); | |
273 } else if (ticks >= kTicksWhenNotEnoughTypeInfo) { | |
274 Optimize(function, "not much type info but very hot"); | |
275 } else { | |
276 if (FLAG_trace_opt_verbose) { | |
277 PrintF("[not yet optimizing "); | |
278 function->PrintName(); | |
279 PrintF(", not enough type info: %d/%d (%d%%)]\n", typeinfo, total, | |
280 type_percentage); | |
281 } | |
282 } | |
283 } | |
284 } | |
285 | |
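One detail both helpers share: a function whose optimization was disabled after too many deoptimizations gets another chance once it ticks past kProfilerTicksBeforeReenablingOptimization. A self-contained sketch of that retry heuristic, modeled on the full-codegen variant (the Ignition variant omits the else-branch tick increment); the threshold value here is a hypothetical stand-in:

    #include <cstdio>

    const int kReenableThreshold = 250;  // hypothetical stand-in

    // Returns the new tick count; sets *reenabled when the retry fires.
    static int TickDisabledFunction(int ticks, bool* reenabled) {
      if (ticks >= kReenableThreshold) {
        *reenabled = true;  // corresponds to shared->TryReenableOptimization()
        return 0;           // tick counter is reset on retry
      }
      *reenabled = false;
      return ticks + 1;
    }

    int main() {
      bool r = false;
      int t = TickDisabledFunction(249, &r);  // not yet: t=250, r=false
      t = TickDisabledFunction(t, &r);        // fires:   t=0,   r=true
      std::printf("%d %d\n", t, r);           // prints "0 1"
      return 0;
    }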
286 void RuntimeProfiler::MarkCandidatesForOptimization() { | |
141 HandleScope scope(isolate_); | 287 HandleScope scope(isolate_); |
142 | 288 |
143 if (!isolate_->use_crankshaft()) return; | 289 if (!isolate_->use_crankshaft()) return; |
144 | 290 |
145 DisallowHeapAllocation no_gc; | 291 DisallowHeapAllocation no_gc; |
146 | 292 |
147 // Run through the JavaScript frames and collect them. If we already | 293 // Run through the JavaScript frames and collect them. If we already |
148 // have a sample of the function, we mark it for optimizations | 294 // have a sample of the function, we mark it for optimizations |
149 // (eagerly or lazily). | 295 // (eagerly or lazily). |
150 int frame_count = 0; | 296 int frame_count = 0; |
151 int frame_count_limit = FLAG_frame_count; | 297 int frame_count_limit = FLAG_frame_count; |
152 for (JavaScriptFrameIterator it(isolate_); | 298 for (JavaScriptFrameIterator it(isolate_); |
153 frame_count++ < frame_count_limit && !it.done(); | 299 frame_count++ < frame_count_limit && !it.done(); |
154 it.Advance()) { | 300 it.Advance()) { |
155 JavaScriptFrame* frame = it.frame(); | 301 JavaScriptFrame* frame = it.frame(); |
156 JSFunction* function = frame->function(); | 302 JSFunction* function = frame->function(); |
157 | 303 |
158 SharedFunctionInfo* shared = function->shared(); | |
159 Code* shared_code = shared->code(); | |
160 | |
161 List<JSFunction*> functions(4); | 304 List<JSFunction*> functions(4); |
162 frame->GetFunctions(&functions); | 305 frame->GetFunctions(&functions); |
163 for (int i = functions.length(); --i >= 0; ) { | 306 for (int i = functions.length(); --i >= 0; ) { |
164 SharedFunctionInfo* shared_function_info = functions[i]->shared(); | 307 SharedFunctionInfo* shared_function_info = functions[i]->shared(); |
165 int ticks = shared_function_info->profiler_ticks(); | 308 int ticks = shared_function_info->profiler_ticks(); |
166 if (ticks < Smi::kMaxValue) { | 309 if (ticks < Smi::kMaxValue) { |
167 shared_function_info->set_profiler_ticks(ticks + 1); | 310 shared_function_info->set_profiler_ticks(ticks + 1); |
168 } | 311 } |
169 } | 312 } |
170 | 313 |
171 if (shared_code->kind() != Code::FUNCTION) continue; | 314 if (FLAG_ignition) { |
172 if (function->IsInOptimizationQueue()) continue; | 315 MaybeOptimizeIgnition(function); |
173 | |
174 if (FLAG_always_osr) { | |
175 AttemptOnStackReplacement(function, Code::kMaxLoopNestingMarker); | |
176 // Fall through and do a normal optimized compile as well. | |
177 } else if (!frame->is_optimized() && | |
178 (function->IsMarkedForOptimization() || | |
179 function->IsMarkedForConcurrentOptimization() || | |
180 function->IsOptimized())) { | |
181 // Attempt OSR if we are still running unoptimized code even though the | |
182 // the function has long been marked or even already been optimized. | |
183 int ticks = shared_code->profiler_ticks(); | |
184 int64_t allowance = | |
185 kOSRCodeSizeAllowanceBase + | |
186 static_cast<int64_t>(ticks) * kOSRCodeSizeAllowancePerTick; | |
187 if (shared_code->CodeSize() > allowance && | |
188 ticks < Code::ProfilerTicksField::kMax) { | |
189 shared_code->set_profiler_ticks(ticks + 1); | |
190 } else { | |
191 AttemptOnStackReplacement(function); | |
192 } | |
193 continue; | |
194 } | |
195 | |
196 // Only record top-level code on top of the execution stack and | |
197 // avoid optimizing excessively large scripts since top-level code | |
198 // will be executed only once. | |
199 const int kMaxToplevelSourceSize = 10 * 1024; | |
200 if (shared->is_toplevel() && | |
201 (frame_count > 1 || shared->SourceSize() > kMaxToplevelSourceSize)) { | |
202 continue; | |
203 } | |
204 | |
205 // Do not record non-optimizable functions. | |
206 if (shared->optimization_disabled()) { | |
207 if (shared->deopt_count() >= FLAG_max_opt_count) { | |
208 // If optimization was disabled due to many deoptimizations, | |
209 // then check if the function is hot and try to reenable optimization. | |
210 int ticks = shared_code->profiler_ticks(); | |
211 if (ticks >= kProfilerTicksBeforeReenablingOptimization) { | |
212 shared_code->set_profiler_ticks(0); | |
213 shared->TryReenableOptimization(); | |
214 } else { | |
215 shared_code->set_profiler_ticks(ticks + 1); | |
216 } | |
217 } | |
218 continue; | |
219 } | |
220 if (function->IsOptimized()) continue; | |
221 | |
222 int ticks = shared_code->profiler_ticks(); | |
223 | |
224 if (ticks >= kProfilerTicksBeforeOptimization) { | |
225 int typeinfo, generic, total, type_percentage, generic_percentage; | |
226 GetICCounts(shared, &typeinfo, &generic, &total, &type_percentage, | |
227 &generic_percentage); | |
228 if (type_percentage >= FLAG_type_info_threshold && | |
229 generic_percentage <= FLAG_generic_ic_threshold) { | |
230 // If this particular function hasn't had any ICs patched for enough | |
231 // ticks, optimize it now. | |
232 Optimize(function, "hot and stable"); | |
233 } else if (ticks >= kTicksWhenNotEnoughTypeInfo) { | |
234 Optimize(function, "not much type info but very hot"); | |
235 } else { | |
236 shared_code->set_profiler_ticks(ticks + 1); | |
237 if (FLAG_trace_opt_verbose) { | |
238 PrintF("[not yet optimizing "); | |
239 function->PrintName(); | |
240 PrintF(", not enough type info: %d/%d (%d%%)]\n", typeinfo, total, | |
241 type_percentage); | |
242 } | |
243 } | |
244 } else if (!any_ic_changed_ && | |
245 shared_code->instruction_size() < kMaxSizeEarlyOpt) { | |
246 // If no IC was patched since the last tick and this function is very | |
247 // small, optimistically optimize it now. | |
248 int typeinfo, generic, total, type_percentage, generic_percentage; | |
249 GetICCounts(shared, &typeinfo, &generic, &total, &type_percentage, | |
250 &generic_percentage); | |
251 if (type_percentage >= FLAG_type_info_threshold && | |
252 generic_percentage <= FLAG_generic_ic_threshold) { | |
253 Optimize(function, "small function"); | |
254 } else { | |
255 shared_code->set_profiler_ticks(ticks + 1); | |
256 } | |
257 } else { | 316 } else { |
258 shared_code->set_profiler_ticks(ticks + 1); | 317 MaybeOptimizeFullCodegen(function, frame_count, frame->is_optimized()); |
259 } | 318 } |
260 } | 319 } |
261 any_ic_changed_ = false; | 320 any_ic_changed_ = false; |
262 } | 321 } |
263 | 322 |
264 | 323 |
265 } // namespace internal | 324 } // namespace internal |
266 } // namespace v8 | 325 } // namespace v8 |
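Summary sketch: the net shape of this CL is that the body of the old OptimizeNow becomes two tier-specific helpers, and the renamed MarkCandidatesForOptimization is reduced to frame walking, tick bookkeeping, and dispatch. A condensed, non-authoritative model with stub types, not V8's actual API:

    // Stubbed-out model of the refactored control flow.
    struct JSFunction;
    static bool FLAG_ignition = false;  // stand-in for the real flag

    static void MaybeOptimizeFullCodegen(JSFunction*, int, bool) {
      // Full-codegen ladder: OSR attempt, top-level guard, tick thresholds,
      // small-function early optimization (ticks live on the Code object).
    }
    static void MaybeOptimizeIgnition(JSFunction*) {
      // Bytecode ladder: tick thresholds only; OSR and small-function
      // paths are still TODO (ticks live on the SharedFunctionInfo).
    }

    static void Dispatch(JSFunction* function, int frame_count,
                         bool frame_optimized) {
      if (FLAG_ignition) {
        MaybeOptimizeIgnition(function);
      } else {
        MaybeOptimizeFullCodegen(function, frame_count, frame_optimized);
      }
    }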