| OLD | NEW |
| (Empty) |
| 1 // Copyright 2014 the V8 project authors. All rights reserved. | |
| 2 // Use of this source code is governed by a BSD-style license that can be | |
| 3 // found in the LICENSE file. | |
| 4 | |
| 5 | |
// Performance.now is used in latency benchmarks; fall back to Date.now
// when no high-resolution timer is available.
var performance = performance || {};
performance.now = (function() {
  // Pick whichever high-resolution timer this environment provides.
  var highRes = performance.now ||
                performance.mozNow ||
                performance.msNow ||
                performance.oNow ||
                performance.webkitNow;
  // Native Performance methods throw ("Illegal invocation") if called
  // without their receiver, so bind the method back to the performance
  // object; this also makes detached use (callbacks) safe.  Date.now
  // is a plain static function and needs no binding.
  return highRes ? highRes.bind(performance) : Date.now;
})();
| 16 | |
| 17 // Simple framework for running the benchmark suites and | |
| 18 // computing a score based on the timing measurements. | |
| 19 | |
| 20 | |
// A Benchmark couples a name with the function that performs the
// measured work.  The optional setup/tearDown hooks run before and
// after the measurement but do not count toward the score.  rmsResult,
// when provided, yields a latency (RMS) figure for the run, and
// minIterations bounds how few iterations a measurement may take.
function Benchmark(name, doWarmup, doDeterministic, deterministicIterations,
                   run, setup, tearDown, rmsResult, minIterations) {
  this.name = name;
  this.doWarmup = doWarmup;                  // discard a first, untimed pass
  this.doDeterministic = doDeterministic;    // fixed iteration count mode
  this.deterministicIterations = deterministicIterations;
  this.run = run;                            // the timed workload
  this.Setup = setup || function() { };      // default to no-op hooks
  this.TearDown = tearDown || function() { };
  this.rmsResult = rmsResult || null;        // optional latency callback
  this.minIterations = minIterations || 32;  // floor on measured iterations
}
| 38 | |
| 39 | |
// A BenchmarkResult records the benchmark that produced it together
// with the measured time and an optional latency figure.  A latency of
// 0 means this benchmark contributes no latency score.  Scores are
// computed later, once a whole suite has run to completion.
function BenchmarkResult(benchmark, time, latency) {
  this.benchmark = benchmark;  // the Benchmark that was run
  this.time = time;            // average microseconds per iteration
  this.latency = latency;      // RMS latency, or 0 if not applicable
}
| 49 | |
| 50 | |
// Coerce a result to its time measurement so results can participate
// directly in arithmetic (used by the geometric-mean computations).
BenchmarkResult.prototype.valueOf = function() {
  return this.time;
};
| 56 | |
| 57 | |
// A BenchmarkSuite bundles a named set of benchmarks with the
// reference timing that the final score is computed against, making
// all scores relative to a reference run (higher is better).
// Constructing a suite registers it in BenchmarkSuite.suites.
function BenchmarkSuite(name, reference, benchmarks) {
  this.name = name;
  this.reference = reference;    // [time] or [time, latency] reference
  this.benchmarks = benchmarks;  // array of Benchmark objects
  BenchmarkSuite.suites.push(this);  // auto-register for RunSuites
}
| 68 | |
| 69 | |
// Keep track of all declared benchmark suites; the BenchmarkSuite
// constructor appends each new suite here, and RunSuites iterates it.
BenchmarkSuite.suites = [];

// Scores are not comparable across versions. Bump the version if
// you're making changes that will affect that scores, e.g. if you add
// a new benchmark or change an existing one.
BenchmarkSuite.version = '1';


// Defines global benchsuite running mode that overrides benchmark suite
// behavior. Intended to be set by the benchmark driver. Undefined
// values here allow a benchmark to define behaviour itself (see the
// fallback logic in RunSingleBenchmark).
BenchmarkSuite.config = {
  doWarmup: undefined,
  doDeterministic: undefined
};
| 86 | |
| 87 | |
// Override the alert function to throw an exception instead, so a
// stray alert() in benchmark code fails fast rather than blocking on a
// modal dialog.  NOTE(review): this deliberately throws a string, not
// an Error; code that catches and displays the thrown value relies on
// this exact text.
alert = function(s) {
  throw "Alert called with argument: " + s;
};
| 92 | |
| 93 | |
// To make the benchmark results predictable, we replace Math.random
// with a 100% deterministic alternative.  Every call re-seeds the
// generator, so each suite sees the same pseudo-random sequence.
BenchmarkSuite.ResetRNG = function() {
  var state = 49734321;
  // Robert Jenkins' 32 bit integer hash function; yields a value in
  // [0, 1) like Math.random.
  function deterministicRandom() {
    state = ((state + 0x7ed55d16) + (state << 12)) & 0xffffffff;
    state = ((state ^ 0xc761c23c) ^ (state >>> 19)) & 0xffffffff;
    state = ((state + 0x165667b1) + (state << 5)) & 0xffffffff;
    state = ((state + 0xd3a2646c) ^ (state << 9)) & 0xffffffff;
    state = ((state + 0xfd7046c5) + (state << 3)) & 0xffffffff;
    state = ((state ^ 0xb55a4f09) ^ (state >>> 16)) & 0xffffffff;
    return (state & 0xfffffff) / 0x10000000;
  }
  Math.random = deterministicRandom;
};
| 111 | |
| 112 | |
// Runs all registered benchmark suites and optionally yields between
// each individual benchmark to avoid running for too long in the
// context of browsers. Once done, the final score is reported to the
// runner.  `runner` is an object of optional callbacks (NotifyStart,
// NotifyScore, ...); `skipBenchmarks` is an optional array of suite
// names to skip (skipped suites contribute a neutral score).
BenchmarkSuite.RunSuites = function(runner, skipBenchmarks) {
  skipBenchmarks = typeof skipBenchmarks === 'undefined' ? [] : skipBenchmarks;
  var continuation = null;
  var suites = BenchmarkSuite.suites;
  var length = suites.length;
  BenchmarkSuite.scores = [];
  var index = 0;
  // One step of the driver loop.  In a browser we reschedule ourselves
  // with setTimeout after each chunk of work so the page stays
  // responsive; otherwise we loop to completion synchronously.
  function RunStep() {
    while (continuation || index < length) {
      if (continuation) {
        // Resume the suite that yielded; RunStep on the suite returns
        // the next continuation, or null when the suite is finished.
        continuation = continuation();
      } else {
        // Start the next suite.
        var suite = suites[index++];
        if (runner.NotifyStart) runner.NotifyStart(suite.name);
        if (skipBenchmarks.indexOf(suite.name) > -1) {
          suite.NotifySkipped(runner);
        } else {
          continuation = suite.RunStep(runner);
        }
      }
      // Yield to the browser event loop between benchmark steps.
      if (continuation && typeof window != 'undefined' && window.setTimeout) {
        window.setTimeout(RunStep, 25);
        return;
      }
    }

    // show final result: geometric mean of all suite scores, scaled.
    if (runner.NotifyScore) {
      var score = BenchmarkSuite.GeometricMean(BenchmarkSuite.scores);
      var formatted = BenchmarkSuite.FormatScore(100 * score);
      runner.NotifyScore(formatted);
    }
  }
  RunStep();
}
| 152 | |
| 153 | |
// Counts the total number of registered benchmarks across all suites.
// Useful for showing progress as a percentage.
BenchmarkSuite.CountBenchmarks = function() {
  return BenchmarkSuite.suites.reduce(function(total, suite) {
    return total + suite.benchmarks.length;
  }, 0);
};
| 164 | |
| 165 | |
// Computes the geometric mean of a set of numbers, via the sum of
// their logarithms (avoids overflow from multiplying many values).
BenchmarkSuite.GeometricMean = function(numbers) {
  var sumOfLogs = numbers.reduce(function(acc, n) {
    return acc + Math.log(n);
  }, 0);
  return Math.pow(Math.E, sumOfLogs / numbers.length);
};
| 174 | |
| 175 | |
// Computes the geometric mean of the `time` field of a set of
// throughput measurements (BenchmarkResult objects).
BenchmarkSuite.GeometricMeanTime = function(measurements) {
  var sumOfLogs = 0;
  measurements.forEach(function(measurement) {
    sumOfLogs += Math.log(measurement.time);
  });
  return Math.pow(Math.E, sumOfLogs / measurements.length);
};
| 184 | |
| 185 | |
// Computes the geometric mean of the rms (latency) measurements.
// Measurements with latency 0 carry no latency score and are excluded
// from the log sum; if none have latency, 0 is returned.
BenchmarkSuite.GeometricMeanLatency = function(measurements) {
  var sumOfLogs = 0;
  var sawLatency = false;
  measurements.forEach(function(measurement) {
    if (measurement.latency != 0) {
      sumOfLogs += Math.log(measurement.latency);
      sawLatency = true;
    }
  });
  if (!sawLatency) return 0;
  // The divisor is the total measurement count, not just the
  // latency-bearing ones — this mirrors the original scoring formula.
  return Math.pow(Math.E, sumOfLogs / measurements.length);
};
| 202 | |
| 203 | |
// Converts a score value to a string with at least three significant
// digits: whole numbers above 100, three significant digits otherwise.
BenchmarkSuite.FormatScore = function(value) {
  return value > 100 ? value.toFixed(0) : value.toPrecision(3);
};
| 213 | |
// Records a finished benchmark's result and notifies the runner that a
// single benchmark in the suite is done (useful to report progress).
BenchmarkSuite.prototype.NotifyStep = function(result) {
  this.results.push(result);
  var runner = this.runner;
  if (runner.NotifyStep) runner.NotifyStep(result.benchmark.name);
};
| 220 | |
| 221 | |
// Notifies the runner that the suite has finished and records its
// score(s).  The throughput score is the reference time divided by the
// geometric-mean measured time, so faster runs score higher.  When the
// reference has a second entry, a latency score is computed the same
// way from the rms measurements.
BenchmarkSuite.prototype.NotifyResult = function() {
  var meanTime = BenchmarkSuite.GeometricMeanTime(this.results);
  var throughputScore = this.reference[0] / meanTime;
  BenchmarkSuite.scores.push(throughputScore);
  if (this.runner.NotifyResult) {
    this.runner.NotifyResult(
        this.name, BenchmarkSuite.FormatScore(100 * throughputScore));
  }
  // A two-element reference means this suite also has a latency target.
  if (this.reference.length == 2) {
    var meanLatency = BenchmarkSuite.GeometricMeanLatency(this.results);
    if (meanLatency != 0) {
      var latencyScore = this.reference[1] / meanLatency;
      BenchmarkSuite.scores.push(latencyScore);
      if (this.runner.NotifyResult) {
        this.runner.NotifyResult(
            this.name + "Latency",
            BenchmarkSuite.FormatScore(100 * latencyScore));
      }
    }
  }
};
| 244 | |
| 245 | |
// Records a skipped suite: it contributes a neutral reference score of
// 1 so the overall geometric mean is unaffected, and the runner is told
// the suite was skipped.
BenchmarkSuite.prototype.NotifySkipped = function(runner) {
  BenchmarkSuite.scores.push(1);
  if (runner.NotifyResult) runner.NotifyResult(this.name, "Skipped");
};
| 252 | |
| 253 | |
// Notifies the runner that running a benchmark resulted in an error.
// A step notification is still sent so progress accounting based on
// NotifyStep stays consistent.
BenchmarkSuite.prototype.NotifyError = function(error) {
  var runner = this.runner;
  if (runner.NotifyError) runner.NotifyError(this.name, error);
  if (runner.NotifyStep) runner.NotifyStep(this.name);
};
| 263 | |
| 264 | |
// Runs a single benchmark for at least a second and computes the
// average time it takes to run a single iteration.
//
// `data` threads accumulated { runs, elapsed } counters across calls:
//  - data == null and warmup enabled: run one untimed warmup pass and
//    return fresh zeroed counters (caller invokes us again to measure);
//  - counters returned non-null: too few iterations so far, caller
//    should invoke us again to keep measuring;
//  - null returned: the result has been reported via NotifyStep.
BenchmarkSuite.prototype.RunSingleBenchmark = function(benchmark, data) {
  var config = BenchmarkSuite.config;
  // Driver-level config overrides the benchmark's own flags when set.
  var doWarmup = config.doWarmup !== undefined
      ? config.doWarmup
      : benchmark.doWarmup;
  var doDeterministic = config.doDeterministic !== undefined
      ? config.doDeterministic
      : benchmark.doDeterministic;

  // Runs the benchmark body repeatedly, accumulating into `data` (or
  // discarding the timing entirely when `data` is null, i.e. warmup).
  function Measure(data) {
    var elapsed = 0;
    var start = new Date();

    // Run either for 1 second or for the number of iterations specified
    // by minIterations, depending on the config flag doDeterministic.
    for (var i = 0; (doDeterministic ?
                     i<benchmark.deterministicIterations : elapsed < 1000); i++) {
      benchmark.run();
      elapsed = new Date() - start;
    }
    // `i` is still in scope here because `var` is function-scoped; it
    // holds the number of iterations the loop above completed.
    if (data != null) {
      data.runs += i;
      data.elapsed += elapsed;
    }
  }

  // Sets up data in order to skip or not the warmup phase.
  if (!doWarmup && data == null) {
    data = { runs: 0, elapsed: 0 };
  }

  if (data == null) {
    // Warmup pass: run once and throw the timing away.
    Measure(null);
    return { runs: 0, elapsed: 0 };
  } else {
    Measure(data);
    // If we've run too few iterations, we continue for another second.
    if (data.runs < benchmark.minIterations) return data;
    // Average microseconds per iteration (elapsed is in milliseconds),
    // plus an optional RMS latency from the benchmark's callback.
    var usec = (data.elapsed * 1000) / data.runs;
    var rms = (benchmark.rmsResult != null) ? benchmark.rmsResult() : 0;
    this.NotifyStep(new BenchmarkResult(benchmark, usec, rms));
    return null;
  }
}
| 311 | |
| 312 | |
// This function starts running a suite, but stops between each
// individual benchmark in the suite and returns a continuation
// function which can be invoked to run the next benchmark. Once the
// last benchmark has been executed, null is returned.
BenchmarkSuite.prototype.RunStep = function(runner) {
  BenchmarkSuite.ResetRNG();
  this.results = [];
  this.runner = runner;
  var length = this.benchmarks.length;
  var index = 0;
  var suite = this;
  // Accumulated measurement state, threaded through RunSingleBenchmark.
  var data;

  // Run the setup, the actual benchmark, and the tear down in three
  // separate steps to allow the framework to yield between any of the
  // steps.

  // Runs the next benchmark's Setup hook.  Returns the benchmark step
  // as the continuation, or null (after reporting the suite result)
  // when every benchmark has been processed.
  function RunNextSetup() {
    if (index < length) {
      try {
        suite.benchmarks[index].Setup();
      } catch (e) {
        suite.NotifyError(e);
        return null;
      }
      return RunNextBenchmark;
    }
    suite.NotifyResult();
    return null;
  }

  // Runs (a slice of) the current benchmark.  Note the direct call
  // `RunNextBenchmark()` when more measurement is needed: measuring
  // continues immediately without yielding back to the driver.
  function RunNextBenchmark() {
    try {
      data = suite.RunSingleBenchmark(suite.benchmarks[index], data);
    } catch (e) {
      suite.NotifyError(e);
      return null;
    }
    // If data is null, we're done with this benchmark.
    return (data == null) ? RunNextTearDown : RunNextBenchmark();
  }

  // Runs the TearDown hook and advances to the next benchmark's Setup.
  function RunNextTearDown() {
    try {
      suite.benchmarks[index++].TearDown();
    } catch (e) {
      suite.NotifyError(e);
      return null;
    }
    return RunNextSetup;
  }

  // Start out running the setup.
  return RunNextSetup();
}
| OLD | NEW |