/*
 * Copyright 2011 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "BenchLogger.h"
#include "Benchmark.h"
#include "CrashHandler.h"
#include "GMBench.h"
#include "ResultsWriter.h"
#include "SkCanvas.h"
#include "SkColorPriv.h"
#include "SkCommandLineFlags.h"
#include "SkData.h"
#include "SkDeferredCanvas.h"
#include "SkGraphics.h"
#include "SkImageEncoder.h"
#include "SkOSFile.h"
#include "SkPicture.h"
#include "SkPictureRecorder.h"
#include "SkString.h"
#include "SkSurface.h"
#include "Timer.h"

#if SK_SUPPORT_GPU
#include "GrContext.h"
#include "GrContextFactory.h"
#include "GrRenderTarget.h"
#include "gl/GrGLDefines.h"
#else
class GrContext;
#endif // SK_SUPPORT_GPU

#include <limits>

enum BenchMode {
    kNormal_BenchMode,
    kDeferred_BenchMode,
    kDeferredSilent_BenchMode,
    kRecord_BenchMode,
    kPictureRecord_BenchMode
};
const char* BenchMode_Name[] = {
    "normal", "deferred", "deferredSilent", "record", "picturerecord"
};

static const char kDefaultsConfigStr[] = "defaults";

#if SK_SUPPORT_GPU
static const char kGpuAPINameGL[] = "gl";
static const char kGpuAPINameGLES[] = "gles";
#endif

///////////////////////////////////////////////////////////////////////////////

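// Iterates over every registered benchmark, then over registered GMs that are
// flagged to also run as benches (each wrapped in a GMBench).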
class Iter {
public:
    Iter() : fBenches(BenchRegistry::Head()), fGMs(skiagm::GMRegistry::Head()) {}

    Benchmark* next() {
        if (fBenches) {
            BenchRegistry::Factory f = fBenches->factory();
            fBenches = fBenches->next();
            return (*f)(NULL);
        }

        while (fGMs) {
            SkAutoTDelete<skiagm::GM> gm(fGMs->factory()(NULL));
            fGMs = fGMs->next();
            if (gm->getFlags() & skiagm::GM::kAsBench_Flag) {
                return SkNEW_ARGS(GMBench, (gm.detach()));
            }
        }

        return NULL;
    }

private:
    const BenchRegistry* fBenches;
    const skiagm::GMRegistry* fGMs;
};

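// Build a filesystem-friendly name by replacing path separators and other
// awkward characters with '-'.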
static void make_filename(const char name[], SkString* path) {
    path->set(name);
    for (int i = 0; name[i]; i++) {
        switch (name[i]) {
            case '/':
            case '\\':
            case ' ':
            case ':':
                path->writable_str()[i] = '-';
                break;
            default:
                break;
        }
    }
}

static void saveFile(const char name[], const char config[], const char dir[],
                     const SkImage* image) {
    SkAutoTUnref<SkData> data(image->encode(SkImageEncoder::kPNG_Type, 100));
    if (NULL == data.get()) {
        return;
    }

    SkString filename;
    make_filename(name, &filename);
    filename.appendf("_%s.png", config);
    SkString path = SkOSPath::Join(dir, filename.c_str());
    ::remove(path.c_str());

    SkFILEWStream stream(path.c_str());
    stream.write(data->data(), data->size());
}

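// The perform_* helpers apply an optional clip, rotate, or scale to the canvas
// before a bench runs, controlled by --clip, --rotate, and --scale.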
static void perform_clip(SkCanvas* canvas, int w, int h) {
    SkRect r;

    r.set(SkIntToScalar(10), SkIntToScalar(10),
          SkIntToScalar(w*2/3), SkIntToScalar(h*2/3));
    canvas->clipRect(r, SkRegion::kIntersect_Op);

    r.set(SkIntToScalar(w/3), SkIntToScalar(h/3),
          SkIntToScalar(w-10), SkIntToScalar(h-10));
    canvas->clipRect(r, SkRegion::kXOR_Op);
}

static void perform_rotate(SkCanvas* canvas, int w, int h) {
    const SkScalar x = SkIntToScalar(w) / 2;
    const SkScalar y = SkIntToScalar(h) / 2;

    canvas->translate(x, y);
    canvas->rotate(SkIntToScalar(35));
    canvas->translate(-x, -y);
}

static void perform_scale(SkCanvas* canvas, int w, int h) {
    const SkScalar x = SkIntToScalar(w) / 2;
    const SkScalar y = SkIntToScalar(h) / 2;

    canvas->translate(x, y);
    // just enough so we can't take the sprite case
    canvas->scale(SK_Scalar1 * 99/100, SK_Scalar1 * 99/100);
    canvas->translate(-x, -y);
}

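// Create the backing surface for a config: a raster surface (cleared to white)
// or a GPU render target with the requested sample count.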
static SkSurface* make_surface(SkColorType colorType, const SkIPoint& size,
                               Benchmark::Backend backend, int sampleCount,
                               GrContext* context) {
    SkSurface* surface = NULL;
    SkImageInfo info = SkImageInfo::Make(size.fX, size.fY, colorType,
                                         kPremul_SkAlphaType);

    switch (backend) {
        case Benchmark::kRaster_Backend:
            surface = SkSurface::NewRaster(info);
            surface->getCanvas()->clear(SK_ColorWHITE);
            break;
#if SK_SUPPORT_GPU
        case Benchmark::kGPU_Backend: {
            surface = SkSurface::NewRenderTarget(context, info, sampleCount);
            break;
        }
#endif
        case Benchmark::kPDF_Backend:
        default:
            SkDEBUGFAIL("unsupported");
    }
    return surface;
}

#if SK_SUPPORT_GPU
GrContextFactory gContextFactory;
typedef GrContextFactory::GLContextType GLContextType;
static const GLContextType kNative = GrContextFactory::kNative_GLContextType;
static const GLContextType kNVPR = GrContextFactory::kNVPR_GLContextType;
#if SK_ANGLE
static const GLContextType kANGLE = GrContextFactory::kANGLE_GLContextType;
#endif
static const GLContextType kDebug = GrContextFactory::kDebug_GLContextType;
static const GLContextType kNull = GrContextFactory::kNull_GLContextType;
#else
typedef int GLContextType;
static const GLContextType kNative = 0, kANGLE = 0, kDebug = 0, kNull = 0;
#endif

#ifdef SK_DEBUG
static const bool kIsDebug = true;
#else
static const bool kIsDebug = false;
#endif

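// All configs this tool knows about; runByDefault marks the ones that run when
// --config is left at "defaults".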
static const struct Config {
    SkColorType fColorType;
    const char* name;
    int sampleCount;
    Benchmark::Backend backend;
    GLContextType contextType;
    bool runByDefault;
} gConfigs[] = {
    { kN32_SkColorType,     "NONRENDERING", 0,  Benchmark::kNonRendering_Backend, kNative, true},
    { kN32_SkColorType,     "8888",         0,  Benchmark::kRaster_Backend,       kNative, true},
    { kRGB_565_SkColorType, "565",          0,  Benchmark::kRaster_Backend,       kNative, true},
#if SK_SUPPORT_GPU
    { kN32_SkColorType,     "GPU",          0,  Benchmark::kGPU_Backend,          kNative, true},
    { kN32_SkColorType,     "MSAA4",        4,  Benchmark::kGPU_Backend,          kNative, false},
    { kN32_SkColorType,     "MSAA16",       16, Benchmark::kGPU_Backend,          kNative, false},
    { kN32_SkColorType,     "NVPRMSAA4",    4,  Benchmark::kGPU_Backend,          kNVPR,   true},
    { kN32_SkColorType,     "NVPRMSAA16",   16, Benchmark::kGPU_Backend,          kNVPR,   false},
#if SK_ANGLE
    { kN32_SkColorType,     "ANGLE",        0,  Benchmark::kGPU_Backend,          kANGLE,  true},
#endif // SK_ANGLE
    { kN32_SkColorType,     "Debug",        0,  Benchmark::kGPU_Backend,          kDebug,  kIsDebug},
    { kN32_SkColorType,     "NULLGPU",      0,  Benchmark::kGPU_Backend,          kNull,   true},
#endif // SK_SUPPORT_GPU
};

DEFINE_string(outDir, "", "If given, image of each bench will be put in outDir.");
DEFINE_string(timers, "cg", "Timers to display. "
              "Options: w(all) W(all, truncated) c(pu) C(pu, truncated) g(pu)");

DEFINE_bool(rotate, false, "Rotate canvas before bench run?");
DEFINE_bool(scale, false, "Scale canvas before bench run?");
DEFINE_bool(clip, false, "Clip canvas before bench run?");

DEFINE_string(forceDither, "default", "Force dithering: true, false, or default?");
DEFINE_bool(forceBlend, false, "Force alpha blending?");

DEFINE_string(gpuAPI, "", "Force use of specific gpu API. Using \"gl\" "
              "forces OpenGL API. Using \"gles\" forces OpenGL ES API. "
              "Defaults to empty string, which selects the API native to the "
              "system.");
DEFINE_int32(gpuCacheBytes, -1, "GPU cache size limit in bytes. 0 to disable cache.");
DEFINE_int32(gpuCacheCount, -1, "GPU cache size limit in object count. 0 to disable cache.");

DEFINE_bool(gpu, true, "Allows GPU configs to be run. Applied after --config.");
DEFINE_bool(cpu, true, "Allows non-GPU configs to be run. Applied after --config.");

DEFINE_bool2(leaks, l, false, "show leaked ref cnt'd objects.");
DEFINE_string(match, "", "[~][^]substring[$] [...] of test name to run.\n"
                         "Multiple matches may be separated by spaces.\n"
                         "~ causes a matching test to always be skipped\n"
                         "^ requires the start of the test to match\n"
                         "$ requires the end of the test to match\n"
                         "^ and $ together require an exact match\n"
                         "If a test does not match any list entry,\n"
                         "it is skipped unless some list entry starts with ~\n");
DEFINE_string(mode, "normal",
              "normal: draw to a normal canvas;\n"
              "deferred: draw to a deferred canvas;\n"
              "deferredSilent: deferred with silent playback;\n"
              "record: draw to an SkPicture;\n"
              "picturerecord: draw from an SkPicture to an SkPicture.\n");
DEFINE_string(config, kDefaultsConfigStr,
              "Run configs given. By default, runs the configs marked \"runByDefault\" in gConfigs.");
DEFINE_string(logFile, "", "Also write stdout here.");
DEFINE_int32(minMs, 20, "Shortest time we'll allow a benchmark to run.");
DEFINE_int32(maxMs, 1000, "Longest time we'll allow a benchmark to run.");
DEFINE_bool(runOnce, kIsDebug, "Run each bench exactly once and don't report timings.");
DEFINE_double(error, 0.01,
              "Ratio of subsequent bench measurements must fall within 1±error to converge.");
DEFINE_string(timeFormat, "%9.2f", "Format to print results, in milliseconds per 1000 loops.");
DEFINE_bool2(verbose, v, false, "Print more.");
DEFINE_string(outResultsFile, "", "If given, the results will be written to the file in JSON format.");
DEFINE_bool(dryRun, false, "Don't actually run the tests, just print what would have been done.");

// Has this bench converged?  First arguments are milliseconds / loop iteration,
// last is overall runtime in milliseconds.
static bool HasConverged(double prevPerLoop, double currPerLoop, double currRaw) {
    if (currRaw < FLAGS_minMs) {
        return false;
    }
    const double low = 1 - FLAGS_error, high = 1 + FLAGS_error;
    const double ratio = currPerLoop / prevPerLoop;
    return low < ratio && ratio < high;
}

int tool_main(int argc, char** argv);
int tool_main(int argc, char** argv) {
    SetupCrashHandler();
    SkCommandLineFlags::Parse(argc, argv);
#if SK_ENABLE_INST_COUNT
    if (FLAGS_leaks) {
        gPrintInstCount = true;
    }
#endif
    SkAutoGraphics ag;

    // First, parse some flags.
    BenchLogger logger;
    if (FLAGS_logFile.count()) {
        logger.SetLogFile(FLAGS_logFile[0]);
    }

    LoggerResultsWriter logWriter(logger, FLAGS_timeFormat[0]);
    MultiResultsWriter writer;
    writer.add(&logWriter);

    SkAutoTDelete<JSONResultsWriter> jsonWriter;
    if (FLAGS_outResultsFile.count()) {
        jsonWriter.reset(SkNEW(JSONResultsWriter(FLAGS_outResultsFile[0])));
        writer.add(jsonWriter.get());
    }

    // Instantiate after all the writers have been added to writer so that we
    // call close() before their destructors are called on the way out.
    CallEnd<MultiResultsWriter> ender(writer);

    const uint8_t alpha = FLAGS_forceBlend ? 0x80 : 0xFF;
    SkTriState::State dither = SkTriState::kDefault;
    for (size_t i = 0; i < 3; i++) {
        if (strcmp(SkTriState::Name[i], FLAGS_forceDither[0]) == 0) {
            dither = static_cast<SkTriState::State>(i);
        }
    }

    BenchMode benchMode = kNormal_BenchMode;
    for (size_t i = 0; i < SK_ARRAY_COUNT(BenchMode_Name); i++) {
        if (strcmp(FLAGS_mode[0], BenchMode_Name[i]) == 0) {
            benchMode = static_cast<BenchMode>(i);
        }
    }

    SkTDArray<int> configs;
    bool runDefaultConfigs = false;
    // Try user-given configs first.
    for (int i = 0; i < FLAGS_config.count(); i++) {
        for (int j = 0; j < static_cast<int>(SK_ARRAY_COUNT(gConfigs)); ++j) {
            if (0 == strcmp(FLAGS_config[i], gConfigs[j].name)) {
                *configs.append() = j;
            } else if (0 == strcmp(FLAGS_config[i], kDefaultsConfigStr)) {
                runDefaultConfigs = true;
            }
        }
    }
    // If "defaults" was requested (the flag's default value), fill in the default configs.
    if (runDefaultConfigs) {
        for (int i = 0; i < static_cast<int>(SK_ARRAY_COUNT(gConfigs)); ++i) {
            if (gConfigs[i].runByDefault) {
                *configs.append() = i;
            }
        }
    }
    // Filter out things we can't run.
    if (kNormal_BenchMode != benchMode) {
        // Non-rendering configs only run in normal mode.
        for (int i = 0; i < configs.count(); ++i) {
            const Config& config = gConfigs[configs[i]];
            if (Benchmark::kNonRendering_Backend == config.backend) {
                configs.remove(i, 1);
                --i;
            }
        }
    }
    // Apply the gpu/cpu only flags.
    for (int i = 0; i < configs.count(); ++i) {
        const Config& config = gConfigs[configs[i]];
        if (config.backend == Benchmark::kGPU_Backend) {
            if (!FLAGS_gpu) {
                configs.remove(i, 1);
                --i;
            }
        } else if (!FLAGS_cpu) {
            configs.remove(i, 1);
            --i;
        }
    }

#if SK_SUPPORT_GPU
    GrGLStandard gpuAPI = kNone_GrGLStandard;
    if (1 == FLAGS_gpuAPI.count()) {
        if (FLAGS_gpuAPI.contains(kGpuAPINameGL)) {
            gpuAPI = kGL_GrGLStandard;
        } else if (FLAGS_gpuAPI.contains(kGpuAPINameGLES)) {
            gpuAPI = kGLES_GrGLStandard;
        } else {
            SkDebugf("Selected gpu API could not be used. Using the default.\n");
        }
    } else if (FLAGS_gpuAPI.count() > 1) {
        SkDebugf("Selected gpu API could not be used. Using the default.\n");
    }

    for (int i = 0; i < configs.count(); ++i) {
        const Config& config = gConfigs[configs[i]];

        if (Benchmark::kGPU_Backend == config.backend) {
            GrContext* context = gContextFactory.get(config.contextType, gpuAPI);
            if (NULL == context) {
                SkDebugf("GrContext could not be created for config %s. Config will be skipped.\n",
                         config.name);
                configs.remove(i);
                --i;
                continue;
            }
            if (config.sampleCount > context->getMaxSampleCount()) {
                SkDebugf(
                    "Sample count (%d) for config %s is not supported. Config will be skipped.\n",
                    config.sampleCount, config.name);
                configs.remove(i);
                --i;
                continue;
            }
        }
    }
#endif

    // All flags should be parsed now. Report our settings.
    if (FLAGS_runOnce) {
        logger.logError("bench was run with --runOnce, so we're going to hide the times."
                        " It's for your own good!\n");
    }
    writer.option("mode", FLAGS_mode[0]);
    writer.option("alpha", SkStringPrintf("0x%02X", alpha).c_str());
    writer.option("dither", SkTriState::Name[dither]);

    writer.option("rotate", SkStringPrintf("%d", FLAGS_rotate).c_str());
    writer.option("scale", SkStringPrintf("%d", FLAGS_scale).c_str());
    writer.option("clip", SkStringPrintf("%d", FLAGS_clip).c_str());

#if defined(SK_BUILD_FOR_WIN32)
    writer.option("system", "WIN32");
#elif defined(SK_BUILD_FOR_MAC)
    writer.option("system", "MAC");
#elif defined(SK_BUILD_FOR_ANDROID)
    writer.option("system", "ANDROID");
#elif defined(SK_BUILD_FOR_UNIX)
    writer.option("system", "UNIX");
#else
    writer.option("system", "other");
#endif

#if defined(SK_DEBUG)
    writer.option("build", "DEBUG");
#else
    writer.option("build", "RELEASE");
#endif

    // Set texture cache limits if non-default.
    for (size_t i = 0; i < SK_ARRAY_COUNT(gConfigs); ++i) {
#if SK_SUPPORT_GPU
        const Config& config = gConfigs[i];
        if (Benchmark::kGPU_Backend != config.backend) {
            continue;
        }
        GrContext* context = gContextFactory.get(config.contextType, gpuAPI);
        if (NULL == context) {
            continue;
        }

        size_t bytes;
        int count;
        context->getResourceCacheLimits(&count, &bytes);
        if (-1 != FLAGS_gpuCacheBytes) {
            bytes = static_cast<size_t>(FLAGS_gpuCacheBytes);
        }
        if (-1 != FLAGS_gpuCacheCount) {
            count = FLAGS_gpuCacheCount;
        }
        context->setResourceCacheLimits(count, bytes);
#endif
    }

    // Run each bench in each configuration it supports and we asked for.
    Iter iter;
    Benchmark* bench;
    while ((bench = iter.next()) != NULL) {
        SkAutoTUnref<Benchmark> benchUnref(bench);
        if (SkCommandLineFlags::ShouldSkip(FLAGS_match, bench->getName())) {
            continue;
        }

        bench->setForceAlpha(alpha);
        bench->setDither(dither);
        bench->preDraw();

        bool loggedBenchName = false;
        for (int i = 0; i < configs.count(); ++i) {
            const int configIndex = configs[i];
            const Config& config = gConfigs[configIndex];

            if (!bench->isSuitableFor(config.backend)) {
                continue;
            }

            GrContext* context = NULL;
#if SK_SUPPORT_GPU
            SkGLContextHelper* glContext = NULL;
            if (Benchmark::kGPU_Backend == config.backend) {
                context = gContextFactory.get(config.contextType, gpuAPI);
                if (NULL == context) {
                    continue;
                }
                glContext = gContextFactory.getGLContext(config.contextType);
            }
#endif

            SkAutoTUnref<SkCanvas> canvas;
            SkAutoTUnref<SkPicture> recordFrom;
            SkPictureRecorder recorderTo;
            const SkIPoint dim = bench->getSize();

            SkAutoTUnref<SkSurface> surface;
            if (Benchmark::kNonRendering_Backend != config.backend) {
                surface.reset(make_surface(config.fColorType,
                                           dim,
                                           config.backend,
                                           config.sampleCount,
                                           context));
                if (!surface.get()) {
                    logger.logError(SkStringPrintf(
                        "Device creation failure for config %s. Will skip.\n", config.name));
                    continue;
                }

                switch (benchMode) {
                    case kDeferredSilent_BenchMode:
                    case kDeferred_BenchMode:
                        canvas.reset(SkDeferredCanvas::Create(surface.get()));
                        break;
                    case kRecord_BenchMode:
                        canvas.reset(SkRef(recorderTo.beginRecording(dim.fX, dim.fY)));
                        break;
                    case kPictureRecord_BenchMode: {
                        SkPictureRecorder recorderFrom;
                        bench->draw(1, recorderFrom.beginRecording(dim.fX, dim.fY));
                        recordFrom.reset(recorderFrom.endRecording());
                        canvas.reset(SkRef(recorderTo.beginRecording(dim.fX, dim.fY)));
                        break;
                    }
                    case kNormal_BenchMode:
                        canvas.reset(SkRef(surface->getCanvas()));
                        break;
                    default:
                        SkASSERT(false);
                }
            }

            if (NULL != canvas) {
                canvas->clear(SK_ColorWHITE);
                if (FLAGS_clip) {
                    perform_clip(canvas, dim.fX, dim.fY);
                }
                if (FLAGS_scale) {
                    perform_scale(canvas, dim.fX, dim.fY);
                }
                if (FLAGS_rotate) {
                    perform_rotate(canvas, dim.fX, dim.fY);
                }
            }

            if (!loggedBenchName) {
                loggedBenchName = true;
                writer.bench(bench->getName(), dim.fX, dim.fY);
            }

#if SK_SUPPORT_GPU
            SkGLContextHelper* contextHelper = NULL;
            if (Benchmark::kGPU_Backend == config.backend) {
                contextHelper = gContextFactory.getGLContext(config.contextType);
            }
            Timer timer(contextHelper);
#else
            Timer timer;
#endif

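            // Keep doubling loopsPerIter until the per-loop wall time converges
            // (see HasConverged above) or we give up at FLAGS_maxMs.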
            double previous = std::numeric_limits<double>::infinity();
            bool converged = false;

            // variables used to compute loopsPerFrame
            double frameIntervalTime = 0.0f;
            int frameIntervalTotalLoops = 0;

            bool frameIntervalComputed = false;
            int loopsPerFrame = 0;
            int loopsPerIter = 0;
            if (FLAGS_verbose) { SkDebugf("%s %s: ", bench->getName(), config.name); }
            if (!FLAGS_dryRun) {
                do {
                    // Ramp up 1 -> 2 -> 4 -> 8 -> 16 -> ... -> ~1 billion.
                    loopsPerIter = (loopsPerIter == 0) ? 1 : loopsPerIter * 2;
                    if (loopsPerIter >= (1<<30) || timer.fWall > FLAGS_maxMs) {
                        // If you find it takes more than a billion loops to get up to 20ms of runtime,
                        // you've got a computer clocked at several THz or have a broken benchmark.  ;)
                        // "1B ought to be enough for anybody."
                        logger.logError(SkStringPrintf(
                            "\nCan't get %s %s to converge in %dms (%d loops)",
                            bench->getName(), config.name, FLAGS_maxMs, loopsPerIter));
                        break;
                    }

                    if ((benchMode == kRecord_BenchMode || benchMode == kPictureRecord_BenchMode)) {
                        // Clear the recorded commands so that they do not accumulate.
                        canvas.reset(SkRef(recorderTo.beginRecording(dim.fX, dim.fY)));
                    }

                    timer.start();
                    // Inner loop that allows us to break the run into smaller
                    // chunks (e.g. frames). This is especially useful for the GPU
                    // as we can flush and/or swap buffers to keep the GPU from
                    // queuing up too much work.
                    for (int loopCount = loopsPerIter; loopCount > 0; ) {
                        // Save and restore around each call to draw() to guarantee a pristine canvas.
                        SkAutoCanvasRestore saveRestore(canvas, true/*also save*/);

                        int loops;
                        if (frameIntervalComputed && loopCount > loopsPerFrame) {
                            loops = loopsPerFrame;
                            loopCount -= loopsPerFrame;
                        } else {
                            loops = loopCount;
                            loopCount = 0;
                        }

                        if (benchMode == kPictureRecord_BenchMode) {
                            recordFrom->draw(canvas);
                        } else {
                            bench->draw(loops, canvas);
                        }

                        if (kDeferredSilent_BenchMode == benchMode) {
                            static_cast<SkDeferredCanvas*>(canvas.get())->silentFlush();
                        } else if (NULL != canvas) {
                            canvas->flush();
                        }

#if SK_SUPPORT_GPU
                        // swap drawing buffers on each frame to prevent the GPU
                        // from queuing up too much work
                        if (NULL != glContext) {
                            glContext->swapBuffers();
                        }
#endif
                    }

                    // Stop truncated timers before GL calls complete, and stop the full timers after.
                    timer.truncatedEnd();
#if SK_SUPPORT_GPU
                    if (NULL != glContext) {
                        context->flush();
                        SK_GL(*glContext, Finish());
                    }
#endif
                    timer.end();

                    // setup the frame interval for subsequent iterations
                    if (!frameIntervalComputed) {
                        frameIntervalTime += timer.fWall;
                        frameIntervalTotalLoops += loopsPerIter;
                        if (frameIntervalTime >= FLAGS_minMs) {
                            frameIntervalComputed = true;
                            loopsPerFrame =
                                (int)(((double)frameIntervalTotalLoops / frameIntervalTime) * FLAGS_minMs);
                            if (loopsPerFrame < 1) {
                                loopsPerFrame = 1;
                            }
                            // SkDebugf("  %s has %d loops in %f ms (normalized to %d)\n",
                            //          bench->getName(), frameIntervalTotalLoops,
                            //          timer.fWall, loopsPerFrame);
                        }
                    }

                    const double current = timer.fWall / loopsPerIter;
                    if (FLAGS_verbose && current > previous) { SkDebugf("↑"); }
                    if (FLAGS_verbose) { SkDebugf("%.3g ", current); }
                    converged = HasConverged(previous, current, timer.fWall);
                    previous = current;
                } while (!FLAGS_runOnce && !converged);
            }
            if (FLAGS_verbose) { SkDebugf("\n"); }

            if (!FLAGS_dryRun && FLAGS_outDir.count() && Benchmark::kNonRendering_Backend != config.backend) {
                SkAutoTUnref<SkImage> image(surface->newImageSnapshot());
                if (image.get()) {
                    saveFile(bench->getName(), config.name, FLAGS_outDir[0],
                             image);
                }
            }

            if (FLAGS_runOnce) {
                // Let's not mislead ourselves by looking at Debug build or single iteration bench times!
                continue;
            }

            // Normalize to ms per 1000 iterations.
            const double normalize = 1000.0 / loopsPerIter;
            const struct { char shortName; const char* longName; double ms; } times[] = {
                {'w', "msecs",  normalize * timer.fWall},
                {'W', "Wmsecs", normalize * timer.fTruncatedWall},
                {'c', "cmsecs", normalize * timer.fCpu},
                {'C', "Cmsecs", normalize * timer.fTruncatedCpu},
                {'g', "gmsecs", normalize * timer.fGpu},
            };

            writer.config(config.name);
            for (size_t i = 0; i < SK_ARRAY_COUNT(times); i++) {
                if (strchr(FLAGS_timers[0], times[i].shortName) && times[i].ms > 0) {
                    writer.timer(times[i].longName, times[i].ms);
                }
            }
        }
    }
#if SK_SUPPORT_GPU
    gContextFactory.destroyContexts();
#endif
    return 0;
}

#if !defined(SK_BUILD_FOR_IOS) && !defined(SK_BUILD_FOR_NACL)
int main(int argc, char * const argv[]) {
    return tool_main(argc, (char**) argv);
}
#endif