OLD | NEW |
1 /* | 1 /* |
2 * Copyright 2015 Google Inc. | 2 * Copyright 2015 Google Inc. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
6 * | 6 * |
7 */ | 7 */ |
8 | 8 |
9 #include "VisualBench.h" | 9 #include "VisualBench.h" |
10 | 10 |
11 #include "ProcStats.h" | 11 #include "ProcStats.h" |
12 #include "SkApplication.h" | 12 #include "SkApplication.h" |
13 #include "SkCanvas.h" | 13 #include "SkCanvas.h" |
14 #include "SkCommandLineFlags.h" | 14 #include "SkCommandLineFlags.h" |
15 #include "SkForceLinking.h" | |
16 #include "SkGraphics.h" | 15 #include "SkGraphics.h" |
17 #include "SkGr.h" | 16 #include "SkGr.h" |
18 #include "SkImageDecoder.h" | |
19 #include "SkOSFile.h" | 17 #include "SkOSFile.h" |
20 #include "SkStream.h" | 18 #include "SkStream.h" |
21 #include "Stats.h" | 19 #include "Stats.h" |
| 20 #include "VisualLightweightBenchModule.h" |
22 #include "gl/GrGLInterface.h" | 21 #include "gl/GrGLInterface.h" |
23 | 22 |
24 __SK_FORCE_IMAGE_DECODER_LINKING; | |
25 | |
26 // Between samples we reset context | |
27 // Between frames we swap buffers | |
28 // Between flushes we call flush on GrContext | |
29 | |
30 DEFINE_int32(gpuFrameLag, 5, "Overestimate of maximum number of frames GPU allows to lag."); | |
31 DEFINE_int32(samples, 10, "Number of times to time each skp."); | |
32 DEFINE_int32(frames, 5, "Number of frames of each skp to render per sample."); | |
33 DEFINE_double(loopMs, 5, "Target loop time in millseconds."); | |
34 DEFINE_int32(msaa, 0, "Number of msaa samples."); | |
35 DEFINE_bool2(fullscreen, f, true, "Run fullscreen."); | 23 DEFINE_bool2(fullscreen, f, true, "Run fullscreen."); |
36 DEFINE_bool2(verbose, v, false, "enable verbose output from the test driver."); | |
37 DEFINE_string(key, "", ""); // dummy to enable gm tests that have platform-specific names | |
38 DEFINE_string(outResultsFile, "", "If given, write results here as JSON."); | |
39 DEFINE_string(properties, "", | |
40 "Space-separated key/value pairs to add to JSON identifying this r
un."); | |
41 | |
42 static SkString humanize(double ms) { | |
43 if (FLAGS_verbose) { | |
44 return SkStringPrintf("%llu", (uint64_t)(ms*1e6)); | |
45 } | |
46 return HumanizeMs(ms); | |
47 } | |
48 | |
49 #define HUMANIZE(time) humanize(time).c_str() | |
50 | 24 |
51 VisualBench::VisualBench(void* hwnd, int argc, char** argv) | 25 VisualBench::VisualBench(void* hwnd, int argc, char** argv) |
52 : INHERITED(hwnd) | 26 : INHERITED(hwnd) |
53 , fCurrentSample(0) | 27 , fModule(new VisualLightweightBenchModule(this)) { |
54 , fCurrentFrame(0) | |
55 , fLoops(1) | |
56 , fState(kPreWarmLoops_State) | |
57 , fBenchmark(nullptr) | |
58 , fResults(new ResultsWriter) { | |
59 SkCommandLineFlags::Parse(argc, argv); | 28 SkCommandLineFlags::Parse(argc, argv); |
60 | 29 |
61 this->setTitle(); | 30 this->setTitle(); |
62 this->setupBackend(); | 31 this->setupBackend(); |
63 | |
64 fBenchmarkStream.reset(new VisualBenchmarkStream); | |
65 | |
66 // Print header | |
67 SkDebugf("curr/maxrss\tloops\tflushes\tmin\tmedian\tmean\tmax\tstddev\tbench
\n"); | |
68 | |
69 // setup json logging if required | |
70 if (!FLAGS_outResultsFile.isEmpty()) { | |
71 fResults.reset(new NanoJSONResultsWriter(FLAGS_outResultsFile[0])); | |
72 } | |
73 | |
74 if (1 == FLAGS_properties.count() % 2) { | |
75 SkDebugf("ERROR: --properties must be passed with an even number of argu
ments.\n"); | |
76 } else { | |
77 for (int i = 1; i < FLAGS_properties.count(); i += 2) { | |
78 fResults->property(FLAGS_properties[i - 1], FLAGS_properties[i]); | |
79 } | |
80 } | |
81 } | 32 } |
82 | 33 |
83 VisualBench::~VisualBench() { | 34 VisualBench::~VisualBench() { |
84 INHERITED::detach(); | 35 INHERITED::detach(); |
85 } | 36 } |
86 | 37 |
87 void VisualBench::setTitle() { | 38 void VisualBench::setTitle() { |
88 SkString title("VisualBench"); | 39 SkString title("VisualBench"); |
89 INHERITED::setTitle(title.c_str()); | 40 INHERITED::setTitle(title.c_str()); |
90 } | 41 } |
(...skipping 42 matching lines...)
133 // setup rendertargets | 84 // setup rendertargets |
134 this->setupRenderTarget(); | 85 this->setupRenderTarget(); |
135 } | 86 } |
136 | 87 |
137 void VisualBench::setupRenderTarget() { | 88 void VisualBench::setupRenderTarget() { |
138 if (fContext) { | 89 if (fContext) { |
139 fRenderTarget.reset(this->renderTarget(fAttachmentInfo, fInterface, fContext)); | 90 fRenderTarget.reset(this->renderTarget(fAttachmentInfo, fInterface, fContext)); |
140 } | 91 } |
141 } | 92 } |
142 | 93 |
143 inline void VisualBench::renderFrame(SkCanvas* canvas) { | |
144 fBenchmark->draw(fLoops, canvas); | |
145 canvas->flush(); | |
146 INHERITED::present(); | |
147 } | |
148 | |
149 void VisualBench::printStats() { | |
150 const SkTArray<double>& measurements = fRecords.back().fMeasurements; | |
151 const char* shortName = fBenchmark->getUniqueName(); | |
152 | |
153 // update log | |
154 // Note: We currently log only the minimum. It would be interesting to log more information | |
155 SkString configName; | |
156 if (FLAGS_msaa > 0) { | |
157 configName.appendf("msaa_%d", FLAGS_msaa); | |
158 } else { | |
159 configName.appendf("gpu"); | |
160 } | |
161 fResults->config(configName.c_str()); | |
162 fResults->configOption("name", fBenchmark->getUniqueName()); | |
163 SkASSERT(measurements.count()); | |
164 Stats stats(measurements); | |
165 fResults->metric("min_ms", stats.min); | |
166 | |
167 // Print output | |
168 if (FLAGS_verbose) { | |
169 for (int i = 0; i < measurements.count(); i++) { | |
170 SkDebugf("%s ", HUMANIZE(measurements[i])); | |
171 } | |
172 SkDebugf("%s\n", shortName); | |
173 } else { | |
174 const double stdDevPercent = 100 * sqrt(stats.var) / stats.mean; | |
175 SkDebugf("%4d/%-4dMB\t%d\t%s\t%s\t%s\t%s\t%.0f%%\t%s\n", | |
176 sk_tools::getCurrResidentSetSizeMB(), | |
177 sk_tools::getMaxResidentSetSizeMB(), | |
178 fLoops, | |
179 HUMANIZE(stats.min), | |
180 HUMANIZE(stats.median), | |
181 HUMANIZE(stats.mean), | |
182 HUMANIZE(stats.max), | |
183 stdDevPercent, | |
184 shortName); | |
185 } | |
186 } | |
187 | |
188 bool VisualBench::advanceRecordIfNecessary(SkCanvas* canvas) { | |
189 if (fBenchmark) { | |
190 return true; | |
191 } | |
192 | |
193 fBenchmark.reset(fBenchmarkStream->next()); | |
194 if (!fBenchmark) { | |
195 return false; | |
196 } | |
197 | |
198 canvas->clear(0xffffffff); | |
199 fBenchmark->preDraw(); | |
200 fRecords.push_back(); | |
201 | |
202 // Log bench name | |
203 fResults->bench(fBenchmark->getUniqueName(), fBenchmark->getSize().fX, | |
204 fBenchmark->getSize().fY); | |
205 return true; | |
206 } | |
207 | |
208 inline void VisualBench::nextState(State nextState) { | |
209 fState = nextState; | |
210 } | |
211 | |
212 void VisualBench::perCanvasPreDraw(SkCanvas* canvas, State nextState) { | |
213 fBenchmark->perCanvasPreDraw(canvas); | |
214 fCurrentFrame = 0; | |
215 this->nextState(nextState); | |
216 } | |
217 | |
218 void VisualBench::preWarm(State nextState) { | |
219 if (fCurrentFrame >= FLAGS_gpuFrameLag) { | |
220 // we currently time across all frames to make sure we capture all GPU work | |
221 this->nextState(nextState); | |
222 fCurrentFrame = 0; | |
223 fTimer.start(); | |
224 } else { | |
225 fCurrentFrame++; | |
226 } | |
227 } | |
228 | |
229 void VisualBench::draw(SkCanvas* canvas) { | 94 void VisualBench::draw(SkCanvas* canvas) { |
230 if (!this->advanceRecordIfNecessary(canvas)) { | 95 fModule->draw(canvas); |
231 SkDebugf("Exiting VisualBench successfully\n"); | |
232 this->closeWindow(); | |
233 return; | |
234 } | |
235 this->renderFrame(canvas); | |
236 switch (fState) { | |
237 case kPreWarmLoopsPerCanvasPreDraw_State: { | |
238 this->perCanvasPreDraw(canvas, kPreWarmLoops_State); | |
239 break; | |
240 } | |
241 case kPreWarmLoops_State: { | |
242 this->preWarm(kTuneLoops_State); | |
243 break; | |
244 } | |
245 case kTuneLoops_State: { | |
246 this->tuneLoops(); | |
247 break; | |
248 } | |
249 case kPreWarmTimingPerCanvasPreDraw_State: { | |
250 this->perCanvasPreDraw(canvas, kPreWarmTiming_State); | |
251 break; | |
252 } | |
253 case kPreWarmTiming_State: { | |
254 this->preWarm(kTiming_State); | |
255 break; | |
256 } | |
257 case kTiming_State: { | |
258 this->timing(canvas); | |
259 break; | |
260 } | |
261 } | |
262 | 96 |
263 // Invalidate the window to force a redraw. Poor man's animation mechanism. | 97 // Invalidate the window to force a redraw. Poor man's animation mechanism. |
264 this->inval(nullptr); | 98 this->inval(nullptr); |
265 } | 99 } |
266 | 100 |
267 inline double VisualBench::elapsed() { | |
268 fTimer.end(); | |
269 return fTimer.fWall; | |
270 } | |
271 | |
272 void VisualBench::resetTimingState() { | |
273 fCurrentFrame = 0; | |
274 fTimer = WallTimer(); | |
275 this->resetContext(); | |
276 } | |
277 | |
278 void VisualBench::scaleLoops(double elapsedMs) { | |
279 // Scale back the number of loops | |
280 fLoops = (int)ceil(fLoops * FLAGS_loopMs / elapsedMs); | |
281 } | |
282 | |
283 inline void VisualBench::tuneLoops() { | |
284 if (1 << 30 == fLoops) { | |
285 // We're about to wrap. Something's wrong with the bench. | |
286 SkDebugf("InnerLoops wrapped\n"); | |
287 fLoops = 0; | |
288 } else { | |
289 double elapsedMs = this->elapsed(); | |
290 if (elapsedMs > FLAGS_loopMs) { | |
291 this->scaleLoops(elapsedMs); | |
292 this->nextState(kPreWarmTimingPerCanvasPreDraw_State); | |
293 } else { | |
294 fLoops *= 2; | |
295 this->nextState(kPreWarmLoops_State); | |
296 } | |
297 this->resetTimingState(); | |
298 } | |
299 } | |
300 | |
301 void VisualBench::recordMeasurement() { | |
302 double measurement = this->elapsed() / (FLAGS_frames * fLoops); | |
303 fRecords.back().fMeasurements.push_back(measurement); | |
304 } | |
305 | |
306 void VisualBench::postDraw(SkCanvas* canvas) { | |
307 fBenchmark->perCanvasPostDraw(canvas); | |
308 fBenchmark.reset(nullptr); | |
309 fCurrentSample = 0; | |
310 fLoops = 1; | |
311 } | |
312 | |
313 inline void VisualBench::timing(SkCanvas* canvas) { | |
314 if (fCurrentFrame >= FLAGS_frames) { | |
315 this->recordMeasurement(); | |
316 if (fCurrentSample++ >= FLAGS_samples) { | |
317 this->printStats(); | |
318 this->postDraw(canvas); | |
319 this->nextState(kPreWarmLoopsPerCanvasPreDraw_State); | |
320 } else { | |
321 this->nextState(kPreWarmTimingPerCanvasPreDraw_State); | |
322 } | |
323 this->resetTimingState(); | |
324 } else { | |
325 fCurrentFrame++; | |
326 } | |
327 } | |
328 | |
329 void VisualBench::onSizeChange() { | 101 void VisualBench::onSizeChange() { |
330 this->setupRenderTarget(); | 102 this->setupRenderTarget(); |
331 } | 103 } |
332 | 104 |
333 bool VisualBench::onHandleChar(SkUnichar unichar) { | 105 bool VisualBench::onHandleChar(SkUnichar unichar) { |
334 return true; | 106 return true; |
335 } | 107 } |
336 | 108 |
337 // Externally declared entry points | 109 // Externally declared entry points |
338 void application_init() { | 110 void application_init() { |
339 SkGraphics::Init(); | 111 SkGraphics::Init(); |
340 SkEvent::Init(); | 112 SkEvent::Init(); |
341 } | 113 } |
342 | 114 |
343 void application_term() { | 115 void application_term() { |
344 SkEvent::Term(); | 116 SkEvent::Term(); |
345 SkGraphics::Term(); | 117 SkGraphics::Term(); |
346 } | 118 } |
347 | 119 |
348 SkOSWindow* create_sk_window(void* hwnd, int argc, char** argv) { | 120 SkOSWindow* create_sk_window(void* hwnd, int argc, char** argv) { |
349 return new VisualBench(hwnd, argc, argv); | 121 return new VisualBench(hwnd, argc, argv); |
350 } | 122 } |
351 | 123 |