OLD | NEW |
1 // There are tests for computeStatistics() located in LayoutTests/fast/harness/perftests | 1 // There are tests for computeStatistics() located in LayoutTests/fast/harness/perftests |
2 | 2 |
3 if (window.testRunner) { | 3 if (window.testRunner) { |
4 testRunner.waitUntilDone(); | 4 testRunner.waitUntilDone(); |
5 testRunner.dumpAsText(); | 5 testRunner.dumpAsText(); |
6 } | 6 } |
7 | 7 |
8 (function () { | 8 (function () { |
9 var logLines = null; | 9 var logLines = null; |
10 var completedIterations = -1; | 10 var completedIterations = -1; |
(...skipping 127 matching lines...)
138 | 138 |
139 PerfTestRunner.forceLayout = function(doc) { | 139 PerfTestRunner.forceLayout = function(doc) { |
140 doc = doc || document; | 140 doc = doc || document; |
141 if (doc.body) | 141 if (doc.body) |
142 doc.body.offsetHeight; | 142 doc.body.offsetHeight; |
143 else if (doc.documentElement) | 143 else if (doc.documentElement) |
144 doc.documentElement.offsetHeight; | 144 doc.documentElement.offsetHeight; |
145 }; | 145 }; |
146 | 146 |
147 function start(test, scheduler, runner) { | 147 function start(test, scheduler, runner) { |
148 if (!test) { | 148 if (!test || !runner) { |
149 PerfTestRunner.logFatalError("Got a bad test object."); | 149 PerfTestRunner.logFatalError("Got a bad test object."); |
150 return; | 150 return; |
151 } | 151 } |
152 currentTest = test; | 152 currentTest = test; |
153 | 153 |
154 if (test.tracingCategories && !test.traceEventsToMeasure) { | 154 if (test.tracingCategories && !test.traceEventsToMeasure) { |
155 PerfTestRunner.logFatalError("test's tracingCategories is " + | 155 PerfTestRunner.logFatalError("test's tracingCategories is " + |
156 "specified but test's traceEventsToMeasure is empty"); | 156 "specified but test's traceEventsToMeasure is empty"); |
157 return; | 157 return; |
158 } | 158 } |
159 | 159 |
160 if (test.traceEventsToMeasure && !test.tracingCategories) { | 160 if (test.traceEventsToMeasure && !test.tracingCategories) { |
161 PerfTestRunner.logFatalError("test's traceEventsToMeasure is " + | 161 PerfTestRunner.logFatalError("test's traceEventsToMeasure is " + |
162 "specified but test's tracingCategories is empty"); | 162 "specified but test's tracingCategories is empty"); |
163 return; | 163 return; |
164 } | 164 } |
165 iterationCount = test.iterationCount || (window.testRunner ? 5 : 20); | 165 iterationCount = test.iterationCount || (window.testRunner ? 5 : 20); |
166 if (test.warmUpCount && test.warmUpCount > 0) | 166 if (test.warmUpCount && test.warmUpCount > 0) |
167 completedIterations = -test.warmUpCount; | 167 completedIterations = -test.warmUpCount; |
168 logLines = PerfTestRunner.bufferedLog || window.testRunner ? [] : null; | 168 logLines = PerfTestRunner.bufferedLog || window.testRunner ? [] : null; |
169 PerfTestRunner.log("Running " + iterationCount + " times"); | 169 PerfTestRunner.log("Running " + iterationCount + " times"); |
170 if (test.doNotIgnoreInitialRun) | 170 if (test.doNotIgnoreInitialRun) |
171 completedIterations++; | 171 completedIterations++; |
172 | 172 |
173 if (runner && test.tracingCategories && window.testRunner && | 173 if (!test.tracingCategories) { |
174 window.testRunner.supportTracing) { | 174 scheduleNextRun(scheduler, runner); |
175 window.testRunner.startTracing(test.tracingCategories, function() { | 175 return; |
| 176 } |
| 177 |
| 178 if (window.testRunner && window.testRunner.supportTracing) { |
| 179 testRunner.startTracing(test.tracingCategories, function() { |
176 scheduleNextRun(scheduler, runner); | 180 scheduleNextRun(scheduler, runner); |
177 }); | 181 }); |
178 } else if (runner) { | 182 return; |
179 if (test.tracingCategories && !(window.testRuner && | |
180 window.testRunner.supportTracing)) { | |
181 PerfTestRunner.log("Tracing based metrics are specified but " + | |
182 "tracing is not supported on this platform. To get those " + | |
183 "metrics from this test, you can run the test using " + | |
184 "tools/perf/run_benchmarks script."); | |
185 } | |
186 scheduleNextRun(scheduler, runner); | |
187 } | 183 } |
| 184 |
| 185 PerfTestRunner.log("Tracing based metrics are specified but " + |
| 186 "tracing is not supported on this platform. To get those " + |
| 187 "metrics from this test, you can run the test using " + |
| 188 "tools/perf/run_benchmarks script."); |
| 189 scheduleNextRun(scheduler, runner); |
188 } | 190 } |
189 | 191 |
190 function scheduleNextRun(scheduler, runner) { | 192 function scheduleNextRun(scheduler, runner) { |
| 193 if (!scheduler) { |
| 194 // This is an async measurement test which has its own scheduler. |
| 195 try { |
| 196 runner(); |
| 197 } catch (exception) { |
| 198 PerfTestRunner.logFatalError("Got an exception while running test.run with name=" + exception.name + ", message=" + exception.message); |
| 199 } |
| 200 return; |
| 201 } |
| 202 |
191 scheduler(function () { | 203 scheduler(function () { |
192 // This will be used by tools/perf/benchmarks/blink_perf.py to find | 204 // This will be used by tools/perf/benchmarks/blink_perf.py to find |
193 // traces during the measured runs. | 205 // traces during the measured runs. |
194 if (completedIterations >= 0) | 206 if (completedIterations >= 0) |
195 console.time("blink_perf"); | 207 console.time("blink_perf"); |
196 | 208 |
197 try { | 209 try { |
198 if (currentTest.setup) | 210 if (currentTest.setup) |
199 currentTest.setup(); | 211 currentTest.setup(); |
200 | 212 |
(...skipping 56 matching lines...)
257 testRunner.stopTracingAndMeasure( | 269 testRunner.stopTracingAndMeasure( |
258 currentTest.traceEventsToMeasure, function() { | 270 currentTest.traceEventsToMeasure, function() { |
259 testRunner.notifyDone(); | 271 testRunner.notifyDone(); |
260 }); | 272 }); |
261 } else { | 273 } else { |
262 testRunner.notifyDone(); | 274 testRunner.notifyDone(); |
263 } | 275 } |
264 } | 276 } |
265 } | 277 } |
266 | 278 |
267 PerfTestRunner.prepareToMeasureValuesAsync = function (test) { | 279 PerfTestRunner.startMeasureValuesAsync = function (test) { |
268 PerfTestRunner.unit = test.unit; | 280 PerfTestRunner.unit = test.unit; |
269 start(test); | 281 start(test, undefined, function() { test.run() }); |
270 } | 282 } |
271 | 283 |
272 PerfTestRunner.measureValueAsync = function (measuredValue) { | 284 PerfTestRunner.measureValueAsync = function (measuredValue) { |
273 completedIterations++; | 285 completedIterations++; |
274 | 286 |
275 try { | 287 try { |
276 ignoreWarmUpAndLog(measuredValue); | 288 ignoreWarmUpAndLog(measuredValue); |
277 } catch (exception) { | 289 } catch (exception) { |
278 PerfTestRunner.logFatalError("Got an exception while logging the result with name=" + exception.name + ", message=" + exception.message); | 290 PerfTestRunner.logFatalError("Got an exception while logging the result with name=" + exception.name + ", message=" + exception.message); |
279 return; | 291 return; |
(...skipping 146 matching lines...)
426 | 438 |
427 iframe.contentDocument.close(); | 439 iframe.contentDocument.close(); |
428 document.body.removeChild(iframe); | 440 document.body.removeChild(iframe); |
429 }; | 441 }; |
430 | 442 |
431 PerfTestRunner.measureTime(test); | 443 PerfTestRunner.measureTime(test); |
432 } | 444 } |
433 | 445 |
434 window.PerfTestRunner = PerfTestRunner; | 446 window.PerfTestRunner = PerfTestRunner; |
435 })(); | 447 })(); |
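
A minimal sketch of how a perf test page might drive the renamed startMeasureValuesAsync() / measureValueAsync() pair after this change. The doMeasuredWork() helper and the field values are purely illustrative, and the sketch assumes measureValueAsync() indicates via its return value whether more samples are wanted (that part of runner.js is outside the visible diff):

    PerfTestRunner.startMeasureValuesAsync({
        unit: 'ms',
        iterationCount: 10,
        run: function () {
            function measureOnce() {
                var startTime = performance.now();
                doMeasuredWork();  // hypothetical per-iteration work for this sketch
                // Report one measured value; keep scheduling only while the
                // runner still wants more samples (assumed return contract).
                if (PerfTestRunner.measureValueAsync(performance.now() - startTime))
                    requestAnimationFrame(measureOnce);
            }
            requestAnimationFrame(measureOnce);
        }
    });

Because start() is called with an undefined scheduler on this path, scheduleNextRun() invokes test.run() directly inside a try/catch, and the test itself is responsible for scheduling its subsequent measurements.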