OLD | NEW |
---|---|
1 // There are tests for computeStatistics() located in LayoutTests/fast/harness/perftests | 1 // There are tests for computeStatistics() located in LayoutTests/fast/harness/perftests |
2 | 2 |
3 if (window.testRunner) { | 3 if (window.testRunner) { |
4 testRunner.waitUntilDone(); | 4 testRunner.waitUntilDone(); |
5 testRunner.dumpAsText(); | 5 testRunner.dumpAsText(); |
6 } | 6 } |
7 | 7 |
8 (function () { | 8 (function () { |
9 var logLines = null; | 9 var logLines = null; |
10 var completedIterations = -1; | 10 var completedIterations = -1; |
(...skipping 127 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
138 | 138 |
139 PerfTestRunner.forceLayout = function(doc) { | 139 PerfTestRunner.forceLayout = function(doc) { |
140 doc = doc || document; | 140 doc = doc || document; |
141 if (doc.body) | 141 if (doc.body) |
142 doc.body.offsetHeight; | 142 doc.body.offsetHeight; |
143 else if (doc.documentElement) | 143 else if (doc.documentElement) |
144 doc.documentElement.offsetHeight; | 144 doc.documentElement.offsetHeight; |
145 }; | 145 }; |
146 | 146 |
147 function start(test, scheduler, runner) { | 147 function start(test, scheduler, runner) { |
148 if (!test) { | 148 if (!test || !runner) { |
149 PerfTestRunner.logFatalError("Got a bad test object."); | 149 PerfTestRunner.logFatalError("Got a bad test object."); |
150 return; | 150 return; |
151 } | 151 } |
152 currentTest = test; | 152 currentTest = test; |
153 | 153 |
154 if (test.tracingCategories && !test.traceEventsToMeasure) { | 154 if (test.tracingCategories && !test.traceEventsToMeasure) { |
155 PerfTestRunner.logFatalError("test's tracingCategories is " + | 155 PerfTestRunner.logFatalError("test's tracingCategories is " + |
156 "specified but test's traceEventsToMeasure is empty"); | 156 "specified but test's traceEventsToMeasure is empty"); |
157 return; | 157 return; |
158 } | 158 } |
159 | 159 |
160 if (test.traceEventsToMeasure && !test.tracingCategories) { | 160 if (test.traceEventsToMeasure && !test.tracingCategories) { |
161 PerfTestRunner.logFatalError("test's traceEventsToMeasure is " + | 161 PerfTestRunner.logFatalError("test's traceEventsToMeasure is " + |
162 "specified but test's tracingCategories is empty"); | 162 "specified but test's tracingCategories is empty"); |
163 return; | 163 return; |
164 } | 164 } |
165 iterationCount = test.iterationCount || (window.testRunner ? 5 : 20); | 165 iterationCount = test.iterationCount || (window.testRunner ? 5 : 20); |
166 if (test.warmUpCount && test.warmUpCount > 0) | 166 if (test.warmUpCount && test.warmUpCount > 0) |
167 completedIterations = -test.warmUpCount; | 167 completedIterations = -test.warmUpCount; |
168 logLines = PerfTestRunner.bufferedLog || window.testRunner ? [] : null; | 168 logLines = PerfTestRunner.bufferedLog || window.testRunner ? [] : null; |
169 PerfTestRunner.log("Running " + iterationCount + " times"); | 169 PerfTestRunner.log("Running " + iterationCount + " times"); |
170 if (test.doNotIgnoreInitialRun) | 170 if (test.doNotIgnoreInitialRun) |
171 completedIterations++; | 171 completedIterations++; |
172 | 172 |
173 if (runner && test.tracingCategories && window.testRunner && | 173 if (!test.tracingCategories) { |
174 window.testRunner.supportTracing) { | 174 scheduleNextRun(scheduler, runner); |
175 window.testRunner.startTracing(test.tracingCategories, function() { | 175 return; |
176 } | |
177 | |
178 if (window.testRunner && window.testRunner.supportTracing) { | |
179 testRunner.startTracing(test.tracingCategories, function() { | |
176 scheduleNextRun(scheduler, runner); | 180 scheduleNextRun(scheduler, runner); |
177 }); | 181 }); |
178 } else if (runner) { | 182 return; |
179 if (test.tracingCategories && !(window.testRuner && | |
180 window.testRunner.supportTracing)) { | |
181 PerfTestRunner.log("Tracing based metrics are specified but " + | |
182 "tracing is not supported on this platform. To get those " + | |
183 "metrics from this test, you can run the test using " + | |
184 "tools/perf/run_benchmarks script."); | |
185 } | |
186 scheduleNextRun(scheduler, runner); | |
187 } | 183 } |
184 | |
185 PerfTestRunner.log("Tracing based metrics are specified but " + | |
186 "tracing is not supported on this platform. To get those " + | |
187 "metrics from this test, you can run the test using " + | |
188 "tools/perf/run_benchmarks script."); | |
189 scheduleNextRun(scheduler, runner); | |
188 } | 190 } |
189 | 191 |
190 function scheduleNextRun(scheduler, runner) { | 192 function scheduleNextRun(scheduler, runner) { |
193 if (!scheduler) { | |
194 // This is an async measurement test which has its own scheduler. | |
195 try { | |
Xianzhu
2017/05/03 22:06:26
Nit: Use indentation same as the rest of this file
nednguyen
2017/05/04 00:10:32
Done.
| |
196 runner(); | |
197 } catch (exception) { | |
198 PerfTestRunner.logFatalError("Got an exception while running test.run with name=" + exception.name + ", message=" + exception.message); | |
199 } | |
200 return; | |
201 } | |
Xianzhu
2017/05/03 22:06:26
Nit: insert a blank line after this line.
nednguyen
2017/05/04 00:10:33
Done.
| |
191 scheduler(function () { | 202 scheduler(function () { |
192 // This will be used by tools/perf/benchmarks/blink_perf.py to find | 203 // This will be used by tools/perf/benchmarks/blink_perf.py to find |
193 // traces during the measured runs. | 204 // traces during the measured runs. |
194 if (completedIterations >= 0) | 205 if (completedIterations >= 0) |
195 console.time("blink_perf"); | 206 console.time("blink_perf"); |
196 | 207 |
197 try { | 208 try { |
198 if (currentTest.setup) | 209 if (currentTest.setup) |
199 currentTest.setup(); | 210 currentTest.setup(); |
200 | 211 |
(...skipping 56 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
257 testRunner.stopTracingAndMeasure( | 268 testRunner.stopTracingAndMeasure( |
258 currentTest.traceEventsToMeasure, function() { | 269 currentTest.traceEventsToMeasure, function() { |
259 testRunner.notifyDone(); | 270 testRunner.notifyDone(); |
260 }); | 271 }); |
261 } else { | 272 } else { |
262 testRunner.notifyDone(); | 273 testRunner.notifyDone(); |
263 } | 274 } |
264 } | 275 } |
265 } | 276 } |
266 | 277 |
267 PerfTestRunner.prepareToMeasureValuesAsync = function (test) { | 278 PerfTestRunner.prepareToMeasureValuesAsync = function (test) { |
Xianzhu
2017/05/03 22:06:26
As test.run is called inside of this function, it
nednguyen
2017/05/04 00:10:50
Done.
| |
268 PerfTestRunner.unit = test.unit; | 279 PerfTestRunner.unit = test.unit; |
269 start(test); | 280 start(test, undefined, function() { test.run() }); |
270 } | 281 } |
271 | 282 |
272 PerfTestRunner.measureValueAsync = function (measuredValue) { | 283 PerfTestRunner.measureValueAsync = function (measuredValue) { |
273 completedIterations++; | 284 completedIterations++; |
274 | 285 |
275 try { | 286 try { |
276 ignoreWarmUpAndLog(measuredValue); | 287 ignoreWarmUpAndLog(measuredValue); |
277 } catch (exception) { | 288 } catch (exception) { |
278 PerfTestRunner.logFatalError("Got an exception while logging the result with name=" + exception.name + ", message=" + exception.message); | 289 PerfTestRunner.logFatalError("Got an exception while logging the result with name=" + exception.name + ", message=" + exception.message); |
279 return; | 290 return; |
(...skipping 146 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
426 | 437 |
427 iframe.contentDocument.close(); | 438 iframe.contentDocument.close(); |
428 document.body.removeChild(iframe); | 439 document.body.removeChild(iframe); |
429 }; | 440 }; |
430 | 441 |
431 PerfTestRunner.measureTime(test); | 442 PerfTestRunner.measureTime(test); |
432 } | 443 } |
433 | 444 |
434 window.PerfTestRunner = PerfTestRunner; | 445 window.PerfTestRunner = PerfTestRunner; |
435 })(); | 446 })(); |
OLD | NEW |