Index: tools/perf/metrics/startup_metric.py
diff --git a/tools/perf/metrics/startup_metric.py b/tools/perf/metrics/startup_metric.py
index 0a532affb1f4788beae79109679333d19af337b3..8c973ff18605018753f8ecf2aba49a8bd7818346 100644
--- a/tools/perf/metrics/startup_metric.py
+++ b/tools/perf/metrics/startup_metric.py
@@ -47,33 +47,31 @@ class StartupMetric(Metric):
     tab_load_times = []
     TabLoadTime = collections.namedtuple(
         'TabLoadTime',
-        ['load_start_ms', 'load_duration_ms', 'request_start_ms'])
+        ['request_start_ms', 'load_end_ms'])
     def RecordTabLoadTime(t):
+      def EvaluateInt(exp):
+        val = t.EvaluateJavaScript(exp)
+        if not val:
+          logging.warn('%s undefined' % exp)
+          return 0
+        return int(val)
+
       try:
-        t.WaitForDocumentReadyStateToBeComplete()
-
-        result = t.EvaluateJavaScript(
-            'statsCollectionController.tabLoadTiming()')
-        result = json.loads(result)
-
-        if 'load_start_ms' not in result or 'load_duration_ms' not in result:
-          raise Exception("Outdated Chrome version, "
-              "statsCollectionController.tabLoadTiming() not present")
-        if result['load_duration_ms'] is None:
-          tab_title = t.EvaluateJavaScript('document.title')
-          print "Page: ", tab_title, " didn't finish loading."
-          return
-
-        perf_timing = t.EvaluateJavaScript('window.performance.timing')
-        if 'requestStart' not in perf_timing:
-          perf_timing['requestStart'] = 0  # Exclude from benchmark results
-          print 'requestStart is not supported by this browser'
-
-        tab_load_times.append(TabLoadTime(
-            int(result['load_start_ms']),
-            int(result['load_duration_ms']),
-            int(perf_timing['requestStart'])))
+        t.WaitForJavaScriptExpression(
+            'window.performance.timing["loadEventEnd"] > 0', 10)
+
+        # EvaluateJavaScript(window.performance.timing) is not guaranteed to
+        # return the desired JavaScript object (crbug/472603); it may return an
+        # empty object. However, reading individual fields works. The behavior
+        # depends on the WebKit implementation on different platforms.
+        load_event_end = EvaluateInt(
+            'window.performance.timing["loadEventEnd"]')
+        request_start = EvaluateInt(
+            'window.performance.timing["requestStart"]')
+
+        tab_load_times.append(TabLoadTime(request_start, load_event_end))

nednguyen  2015/04/03 21:24:59
Why do we need to use a list here? Seems like this ...

cylee1  2015/04/03 21:26:57
I don't know. It's how it was originally. Do you s...

nednguyen  2015/04/03 21:28:36
I prefer refactoring work to be done in a different ...
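
The new comment about crbug/472603 is the key subtlety in this hunk: evaluating window.performance.timing as a whole object can come back empty, while reading one field at a time is reliable. A minimal sketch of that per-field pattern, assuming only a Telemetry-style tab object t whose EvaluateJavaScript returns the evaluated value; the helper name GetTimingField is illustrative and not part of the patch:

  def GetTimingField(t, field, default=0):
    # Read a single window.performance.timing field. A missing or zero value
    # falls back to `default` instead of aborting the metric.
    val = t.EvaluateJavaScript(
        'window.performance.timing["%s"]' % field)
    return int(val) if val else default

  # Usage (inside RecordTabLoadTime):
  #   request_start = GetTimingField(t, 'requestStart')
  #   load_event_end = GetTimingField(t, 'loadEventEnd')
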
+
+      except exceptions.TimeoutException:
         # Low memory Android devices may not be able to load more than
         # one tab at a time, so may timeout when the test attempts to
@@ -89,8 +87,8 @@ class StartupMetric(Metric):
     RecordTabLoadTime(tab.browser.foreground_tab)
     foreground_tab_stats = tab_load_times[0]

nednguyen  2015/04/03 22:07:08
If the TimeoutException is thrown above, wouldn't ...

cylee1  2015/04/07 09:37:50
True, it was a legacy issue.
Did a tiny refactoring ...
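
The concern in this thread is that the except clause above swallows the TimeoutException, so a foreground-tab timeout would leave tab_load_times empty and tab_load_times[0] would raise an IndexError. The follow-up refactoring is not visible in this hunk; one possible guard, shown only to illustrate the issue and not necessarily the fix that landed, would be:

    RecordTabLoadTime(tab.browser.foreground_tab)
    if not tab_load_times:
      # The foreground tab never reported a load end (e.g. the wait timed
      # out), so skip the foreground metrics for this run.
      logging.warn('No load time recorded for the foreground tab')
      return
    foreground_tab_stats = tab_load_times[0]
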
-    foreground_tab_load_complete = ((foreground_tab_stats.load_start_ms +
-        foreground_tab_stats.load_duration_ms) - browser_main_entry_time_ms)
+    foreground_tab_load_complete = (
+        foreground_tab_stats.load_end_ms - browser_main_entry_time_ms)
     results.AddValue(scalar.ScalarValue(
         results.current_page, 'foreground_tab_load_complete', 'ms',
         foreground_tab_load_complete))
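
For reference, the old and new expressions target the same quantity: the time from browser main entry until the foreground tab finishes loading. The sketch below restates the new computation outside the diff; the assumption that load_end_ms (window.performance.timing['loadEventEnd']) and browser_main_entry_time_ms are on the same epoch-millisecond clock is inferred from the subtraction above, not stated in the patch.

  import collections

  TabLoadTime = collections.namedtuple(
      'TabLoadTime', ['request_start_ms', 'load_end_ms'])

  def ForegroundTabLoadComplete(tab_load_times, browser_main_entry_time_ms):
    # The old code added load_start_ms + load_duration_ms to find the load end;
    # loadEventEnd already marks that point, so a single subtraction suffices.
    foreground = tab_load_times[0]
    return foreground.load_end_ms - browser_main_entry_time_ms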