Index: bench/nanobench.cpp
diff --git a/bench/nanobench.cpp b/bench/nanobench.cpp
index 65cef8989e2f3a5cc4fa471a72665d37dfd7c492..d06d75358d0a56831065f4ca1f22099549929b40 100644
--- a/bench/nanobench.cpp
+++ b/bench/nanobench.cpp
@@ -35,7 +35,7 @@
 
 __SK_FORCE_IMAGE_DECODER_LINKING;
 
-static const int kAutoTuneLoops = -1;
+static const int kAutoTuneLoops = 0;
 
 static const int kDefaultLoops =
 #ifdef SK_DEBUG
@@ -114,6 +114,14 @@ static double estimate_timer_overhead() {
     return overhead / FLAGS_overheadLoops;
 }
 
+static int detect_forever_loops(int loops) {
+    // look for a magic run-forever value
+    if (loops < 0) {
+        loops = SK_MaxS32;
+    }
+    return loops;
+}
+
 static int clamp_loops(int loops) {
     if (loops < 1) {
         SkDebugf("ERROR: clamping loops from %d to 1.\n", loops);
@@ -193,8 +201,10 @@ static int cpu_bench(const double overhead, Benchmark* bench, SkCanvas* canvas,
         const double numer = overhead / FLAGS_overheadGoal - overhead;
         const double denom = bench_plus_overhead - overhead;
         loops = (int)ceil(numer / denom);
+        loops = clamp_loops(loops);
+    } else {
+        loops = detect_forever_loops(loops);
     }
-    loops = clamp_loops(loops);
 
     for (int i = 0; i < FLAGS_samples; i++) {
         samples[i] = time(loops, bench, canvas, NULL) / loops;
@@ -228,11 +238,13 @@ static int gpu_bench(SkGLContext* gl,
 
         // We've overshot at least a little. Scale back linearly.
         loops = (int)ceil(loops * FLAGS_gpuMs / elapsed);
+        loops = clamp_loops(loops);
 
         // Might as well make sure we're not still timing our calibration.
         SK_GL(*gl, Finish());
+    } else {
+        loops = detect_forever_loops(loops);
     }
-    loops = clamp_loops(loops);
 
     // Pretty much the same deal as the calibration: do some warmup to make
     // sure we're timing steady-state pipelined frames.
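
For illustration, a minimal standalone C++ sketch of the loop-count policy this patch establishes: a requested count equal to kAutoTuneLoops (now 0) means auto-tune and clamp, any negative value is the magic run-forever sentinel mapped to SK_MaxS32, and a positive value is used as given. The choose_loops helper and its calibrated parameter are hypothetical stand-ins for the shared shape of cpu_bench and gpu_bench, INT32_MAX stands in for Skia's SK_MaxS32, and the enclosing kAutoTuneLoops == loops test is assumed from context rather than shown in the hunks.

    // Standalone sketch (not Skia code): models the loop-count policy above.
    #include <cstdint>
    #include <cstdio>

    static const int kAutoTuneLoops = 0;  // was -1 before this patch

    static int clamp_loops(int loops) {
        if (loops < 1) {
            std::printf("ERROR: clamping loops from %d to 1.\n", loops);
            return 1;
        }
        return loops;
    }

    static int detect_forever_loops(int loops) {
        // look for a magic run-forever value
        if (loops < 0) {
            loops = INT32_MAX;  // stands in for SK_MaxS32
        }
        return loops;
    }

    // Hypothetical helper mirroring the control flow the patch gives both
    // cpu_bench and gpu_bench: only auto-tuned counts are clamped, while a
    // user-supplied count passes through detect_forever_loops so that a
    // negative --loops now means "run forever" instead of being clamped to 1.
    static int choose_loops(int requested, int calibrated) {
        int loops = requested;
        if (kAutoTuneLoops == loops) {
            loops = calibrated;          // e.g. (int)ceil(numer / denom)
            loops = clamp_loops(loops);  // calibration can round down to 0
        } else {
            loops = detect_forever_loops(loops);
        }
        return loops;
    }

    int main() {
        std::printf("%d\n", choose_loops(0, 37));   // auto-tune: 37
        std::printf("%d\n", choose_loops(0, -5));   // bad calibration: clamped to 1
        std::printf("%d\n", choose_loops(12, 37));  // explicit count: 12
        std::printf("%d\n", choose_loops(-1, 37));  // run forever: 2147483647
        return 0;
    }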