| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright 2015 Google Inc. | 2 * Copyright 2015 Google Inc. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
| 6 * | 6 * |
| 7 */ | 7 */ |
| 8 | 8 |
| 9 #include "VisualBench.h" | 9 #include "VisualBench.h" |
| 10 | 10 |
| (...skipping 12 matching lines...) |
| 23 | 23 |
| 24 __SK_FORCE_IMAGE_DECODER_LINKING; | 24 __SK_FORCE_IMAGE_DECODER_LINKING; |
| 25 | 25 |
| 26 // Between samples we reset context | 26 // Between samples we reset context |
| 27 // Between frames we swap buffers | 27 // Between frames we swap buffers |
| 28 // Between flushes we call flush on GrContext | 28 // Between flushes we call flush on GrContext |
| 29 | 29 |
| 30 DEFINE_int32(gpuFrameLag, 5, "Overestimate of maximum number of frames GPU allows to lag."); | 30 DEFINE_int32(gpuFrameLag, 5, "Overestimate of maximum number of frames GPU allows to lag."); |
| 31 DEFINE_int32(samples, 10, "Number of times to time each skp."); | 31 DEFINE_int32(samples, 10, "Number of times to time each skp."); |
| 32 DEFINE_int32(frames, 5, "Number of frames of each skp to render per sample."); | 32 DEFINE_int32(frames, 5, "Number of frames of each skp to render per sample."); |
| 33 DEFINE_double(flushMs, 20, "Target flush time in milliseconds."); | |
| 34 DEFINE_double(loopMs, 5, "Target loop time in milliseconds."); | 33 DEFINE_double(loopMs, 5, "Target loop time in milliseconds."); |
| 35 DEFINE_int32(msaa, 0, "Number of msaa samples."); | 34 DEFINE_int32(msaa, 0, "Number of msaa samples."); |
| 36 DEFINE_bool2(fullscreen, f, true, "Run fullscreen."); | 35 DEFINE_bool2(fullscreen, f, true, "Run fullscreen."); |
| 37 DEFINE_bool2(verbose, v, false, "enable verbose output from the test driver."); | 36 DEFINE_bool2(verbose, v, false, "enable verbose output from the test driver."); |
| 38 DEFINE_string(key, "", ""); // dummy to enable gm tests that have platform-spec
ific names | 37 DEFINE_string(key, "", ""); // dummy to enable gm tests that have platform-spec
ific names |
| 39 DEFINE_string(outResultsFile, "", "If given, write results here as JSON."); | 38 DEFINE_string(outResultsFile, "", "If given, write results here as JSON."); |
| 40 DEFINE_string(properties, "", | 39 DEFINE_string(properties, "", |
| 41 "Space-separated key/value pairs to add to JSON identifying this r
un."); | 40 "Space-separated key/value pairs to add to JSON identifying this r
un."); |
| 42 | 41 |
| 43 static SkString humanize(double ms) { | 42 static SkString humanize(double ms) { |
| 44 if (FLAGS_verbose) { | 43 if (FLAGS_verbose) { |
| 45 return SkStringPrintf("%llu", (uint64_t)(ms*1e6)); | 44 return SkStringPrintf("%llu", (uint64_t)(ms*1e6)); |
| 46 } | 45 } |
| 47 return HumanizeMs(ms); | 46 return HumanizeMs(ms); |
| 48 } | 47 } |
| 49 | 48 |
| 50 #define HUMANIZE(time) humanize(time).c_str() | 49 #define HUMANIZE(time) humanize(time).c_str() |
| 51 | 50 |
| 52 VisualBench::VisualBench(void* hwnd, int argc, char** argv) | 51 VisualBench::VisualBench(void* hwnd, int argc, char** argv) |
| 53 : INHERITED(hwnd) | 52 : INHERITED(hwnd) |
| 54 , fCurrentSample(0) | 53 , fCurrentSample(0) |
| 55 , fCurrentFrame(0) | 54 , fCurrentFrame(0) |
| 56 , fFlushes(1) | |
| 57 , fLoops(1) | 55 , fLoops(1) |
| 58 , fState(kPreWarmLoops_State) | 56 , fState(kPreWarmLoops_State) |
| 59 , fBenchmark(nullptr) | 57 , fBenchmark(nullptr) |
| 60 , fResults(new ResultsWriter) { | 58 , fResults(new ResultsWriter) { |
| 61 SkCommandLineFlags::Parse(argc, argv); | 59 SkCommandLineFlags::Parse(argc, argv); |
| 62 | 60 |
| 63 this->setTitle(); | 61 this->setTitle(); |
| 64 this->setupBackend(); | 62 this->setupBackend(); |
| 65 | 63 |
| 66 fBenchmarkStream.reset(new VisualBenchmarkStream); | 64 fBenchmarkStream.reset(new VisualBenchmarkStream); |
| (...skipping 69 matching lines...) |
| 136 this->setupRenderTarget(); | 134 this->setupRenderTarget(); |
| 137 } | 135 } |
| 138 | 136 |
| 139 void VisualBench::setupRenderTarget() { | 137 void VisualBench::setupRenderTarget() { |
| 140 if (fContext) { | 138 if (fContext) { |
| 141 fRenderTarget.reset(this->renderTarget(fAttachmentInfo, fInterface, fContext)); | 139 fRenderTarget.reset(this->renderTarget(fAttachmentInfo, fInterface, fContext)); |
| 142 } | 140 } |
| 143 } | 141 } |
| 144 | 142 |
| 145 inline void VisualBench::renderFrame(SkCanvas* canvas) { | 143 inline void VisualBench::renderFrame(SkCanvas* canvas) { |
| 146 for (int flush = 0; flush < fFlushes; flush++) { | 144 fBenchmark->draw(fLoops, canvas); |
| 147 fBenchmark->draw(fLoops, canvas); | 145 canvas->flush(); |
| 148 canvas->flush(); | |
| 149 } | |
| 150 INHERITED::present(); | 146 INHERITED::present(); |
| 151 } | 147 } |
| 152 | 148 |
| 153 void VisualBench::printStats() { | 149 void VisualBench::printStats() { |
| 154 const SkTArray<double>& measurements = fRecords.back().fMeasurements; | 150 const SkTArray<double>& measurements = fRecords.back().fMeasurements; |
| 155 const char* shortName = fBenchmark->getUniqueName(); | 151 const char* shortName = fBenchmark->getUniqueName(); |
| 156 | 152 |
| 157 // update log | 153 // update log |
| 158 // Note: We currently log only the minimum. It would be interesting to log more information | 154 // Note: We currently log only the minimum. It would be interesting to log more information |
| 159 SkString configName; | 155 SkString configName; |
| 160 if (FLAGS_msaa > 0) { | 156 if (FLAGS_msaa > 0) { |
| 161 configName.appendf("msaa_%d", FLAGS_msaa); | 157 configName.appendf("msaa_%d", FLAGS_msaa); |
| 162 } else { | 158 } else { |
| 163 configName.appendf("gpu"); | 159 configName.appendf("gpu"); |
| 164 } | 160 } |
| 165 fResults->config(configName.c_str()); | 161 fResults->config(configName.c_str()); |
| 166 fResults->configOption("name", fBenchmark->getUniqueName()); | 162 fResults->configOption("name", fBenchmark->getUniqueName()); |
| 167 SkASSERT(measurements.count()); | 163 SkASSERT(measurements.count()); |
| 168 Stats stats(measurements); | 164 Stats stats(measurements); |
| 169 fResults->metric("min_ms", stats.min); | 165 fResults->metric("min_ms", stats.min); |
| 170 | 166 |
| 171 // Print output | 167 // Print output |
| 172 if (FLAGS_verbose) { | 168 if (FLAGS_verbose) { |
| 173 for (int i = 0; i < measurements.count(); i++) { | 169 for (int i = 0; i < measurements.count(); i++) { |
| 174 SkDebugf("%s ", HUMANIZE(measurements[i])); | 170 SkDebugf("%s ", HUMANIZE(measurements[i])); |
| 175 } | 171 } |
| 176 SkDebugf("%s\n", shortName); | 172 SkDebugf("%s\n", shortName); |
| 177 } else { | 173 } else { |
| 178 const double stdDevPercent = 100 * sqrt(stats.var) / stats.mean; | 174 const double stdDevPercent = 100 * sqrt(stats.var) / stats.mean; |
| 179 SkDebugf("%4d/%-4dMB\t%d\t%d\t%s\t%s\t%s\t%s\t%.0f%%\t%s\n", | 175 SkDebugf("%4d/%-4dMB\t%d\t%s\t%s\t%s\t%s\t%.0f%%\t%s\n", |
| 180 sk_tools::getCurrResidentSetSizeMB(), | 176 sk_tools::getCurrResidentSetSizeMB(), |
| 181 sk_tools::getMaxResidentSetSizeMB(), | 177 sk_tools::getMaxResidentSetSizeMB(), |
| 182 fLoops, | 178 fLoops, |
| 183 fFlushes, | |
| 184 HUMANIZE(stats.min), | 179 HUMANIZE(stats.min), |
| 185 HUMANIZE(stats.median), | 180 HUMANIZE(stats.median), |
| 186 HUMANIZE(stats.mean), | 181 HUMANIZE(stats.mean), |
| 187 HUMANIZE(stats.max), | 182 HUMANIZE(stats.max), |
| 188 stdDevPercent, | 183 stdDevPercent, |
| 189 shortName); | 184 shortName); |
| 190 } | 185 } |
| 191 } | 186 } |
| 192 | 187 |
| 193 bool VisualBench::advanceRecordIfNecessary(SkCanvas* canvas) { | 188 bool VisualBench::advanceRecordIfNecessary(SkCanvas* canvas) { |
| (...skipping 82 matching lines...) |
| 276 | 271 |
| 277 void VisualBench::resetTimingState() { | 272 void VisualBench::resetTimingState() { |
| 278 fCurrentFrame = 0; | 273 fCurrentFrame = 0; |
| 279 fTimer = WallTimer(); | 274 fTimer = WallTimer(); |
| 280 this->resetContext(); | 275 this->resetContext(); |
| 281 } | 276 } |
| 282 | 277 |
| 283 void VisualBench::scaleLoops(double elapsedMs) { | 278 void VisualBench::scaleLoops(double elapsedMs) { |
| 284 // Scale back the number of loops | 279 // Scale back the number of loops |
| 285 fLoops = (int)ceil(fLoops * FLAGS_loopMs / elapsedMs); | 280 fLoops = (int)ceil(fLoops * FLAGS_loopMs / elapsedMs); |
| 286 fFlushes = (int)ceil(FLAGS_flushMs / elapsedMs); | |
| 287 } | 281 } |
| 288 | 282 |
| 289 inline void VisualBench::tuneLoops() { | 283 inline void VisualBench::tuneLoops() { |
| 290 if (1 << 30 == fLoops) { | 284 if (1 << 30 == fLoops) { |
| 291 // We're about to wrap. Something's wrong with the bench. | 285 // We're about to wrap. Something's wrong with the bench. |
| 292 SkDebugf("InnerLoops wrapped\n"); | 286 SkDebugf("InnerLoops wrapped\n"); |
| 293 fLoops = 0; | 287 fLoops = 0; |
| 294 } else { | 288 } else { |
| 295 double elapsedMs = this->elapsed(); | 289 double elapsedMs = this->elapsed(); |
| 296 if (elapsedMs > FLAGS_loopMs) { | 290 if (elapsedMs > FLAGS_loopMs) { |
| 297 this->scaleLoops(elapsedMs); | 291 this->scaleLoops(elapsedMs); |
| 298 this->nextState(kPreWarmTimingPerCanvasPreDraw_State); | 292 this->nextState(kPreWarmTimingPerCanvasPreDraw_State); |
| 299 } else { | 293 } else { |
| 300 fLoops *= 2; | 294 fLoops *= 2; |
| 301 this->nextState(kPreWarmLoops_State); | 295 this->nextState(kPreWarmLoops_State); |
| 302 } | 296 } |
| 303 this->resetTimingState(); | 297 this->resetTimingState(); |
| 304 } | 298 } |
| 305 } | 299 } |
| 306 | 300 |
| 307 void VisualBench::recordMeasurement() { | 301 void VisualBench::recordMeasurement() { |
| 308 double measurement = this->elapsed() / (FLAGS_frames * fLoops * fFlushes); | 302 double measurement = this->elapsed() / (FLAGS_frames * fLoops); |
| 309 fRecords.back().fMeasurements.push_back(measurement); | 303 fRecords.back().fMeasurements.push_back(measurement); |
| 310 } | 304 } |
| 311 | 305 |
| 312 void VisualBench::postDraw(SkCanvas* canvas) { | 306 void VisualBench::postDraw(SkCanvas* canvas) { |
| 313 fBenchmark->perCanvasPostDraw(canvas); | 307 fBenchmark->perCanvasPostDraw(canvas); |
| 314 fBenchmark.reset(nullptr); | 308 fBenchmark.reset(nullptr); |
| 315 fCurrentSample = 0; | 309 fCurrentSample = 0; |
| 316 fFlushes = 1; | |
| 317 fLoops = 1; | 310 fLoops = 1; |
| 318 } | 311 } |
| 319 | 312 |
| 320 inline void VisualBench::timing(SkCanvas* canvas) { | 313 inline void VisualBench::timing(SkCanvas* canvas) { |
| 321 if (fCurrentFrame >= FLAGS_frames) { | 314 if (fCurrentFrame >= FLAGS_frames) { |
| 322 this->recordMeasurement(); | 315 this->recordMeasurement(); |
| 323 if (fCurrentSample++ >= FLAGS_samples) { | 316 if (fCurrentSample++ >= FLAGS_samples) { |
| 324 this->printStats(); | 317 this->printStats(); |
| 325 this->postDraw(canvas); | 318 this->postDraw(canvas); |
| 326 this->nextState(kPreWarmLoopsPerCanvasPreDraw_State); | 319 this->nextState(kPreWarmLoopsPerCanvasPreDraw_State); |
| (...skipping 22 matching lines...) |
| 349 | 342 |
| 350 void application_term() { | 343 void application_term() { |
| 351 SkEvent::Term(); | 344 SkEvent::Term(); |
| 352 SkGraphics::Term(); | 345 SkGraphics::Term(); |
| 353 } | 346 } |
| 354 | 347 |
| 355 SkOSWindow* create_sk_window(void* hwnd, int argc, char** argv) { | 348 SkOSWindow* create_sk_window(void* hwnd, int argc, char** argv) { |
| 356 return new VisualBench(hwnd, argc, argv); | 349 return new VisualBench(hwnd, argc, argv); |
| 357 } | 350 } |
| 358 | 351 |
| OLD | NEW |
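
For context on the patch: with fFlushes removed, the timing loop is tuned on a single axis. tuneLoops() keeps doubling fLoops until one sample takes longer than FLAGS_loopMs, scaleLoops() then shrinks it back proportionally, and recordMeasurement() divides the elapsed time by FLAGS_frames * fLoops only. The following is a minimal, self-contained sketch of that arithmetic, not part of the patch; the helper names and the msPerDraw constant are hypothetical, and FLAGS_loopMs / FLAGS_frames are modeled as plain parameters.

    // Standalone sketch of the post-patch tuning arithmetic (hypothetical names).
    #include <cmath>
    #include <cstdio>

    // Mirrors scaleLoops(): shrink loops so one sample lands near the target time.
    static int scaleLoops(int loops, double elapsedMs, double targetLoopMs) {
        return (int)std::ceil(loops * targetLoopMs / elapsedMs);
    }

    // Mirrors recordMeasurement() after fFlushes is gone: normalize by frames * loops.
    static double perDrawMs(double elapsedMs, int frames, int loops) {
        return elapsedMs / (frames * loops);
    }

    int main() {
        const double targetLoopMs = 5;  // stands in for the FLAGS_loopMs default
        const int frames = 5;           // stands in for the FLAGS_frames default
        const double msPerDraw = 0.8;   // pretend cost of one benchmark draw

        // Pre-warm phase: double loops until a single pass exceeds the target.
        int loops = 1;
        while (loops * msPerDraw <= targetLoopMs) {
            loops *= 2;                 // tuneLoops() else-branch
        }
        loops = scaleLoops(loops, loops * msPerDraw, targetLoopMs);

        // Timing phase: one sample is `frames` frames of `loops` draws each.
        double sampleMs = frames * loops * msPerDraw;
        std::printf("tuned loops: %d, measured per-draw: %.3f ms\n",
                    loops, perDrawMs(sampleMs, frames, loops));
        return 0;
    }

With the example numbers above, loops doubles 1 -> 2 -> 4 -> 8, is scaled back to 7, and the recovered per-draw time comes out at 0.800 ms, matching the assumed draw cost.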