#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Basic pyauto performance tests.

For tests that need to be run for multiple iterations (e.g., so that average
and standard deviation values can be reported), the default number of
iterations run for each of these tests is specified by
|_DEFAULT_NUM_ITERATIONS|. That value can optionally be tweaked by setting an
environment variable 'NUM_ITERATIONS' to a positive integer, representing the
number of iterations to run. An additional, initial iteration will also be
run to "warm up" the environment, and the result from that initial iteration
will be ignored.

Some tests rely on repeatedly appending tabs to Chrome. Occasionally, these
automation calls time out, thereby affecting the timing measurements (see
issue crosbug.com/20503). To work around this, the tests discard timing
measurements that involve automation timeouts. The value
|_DEFAULT_MAX_TIMEOUT_COUNT| specifies the threshold number of timeouts that
can be tolerated before the test fails. To tweak this value, set environment
variable 'MAX_TIMEOUT_COUNT' to the desired threshold value.
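
Example invocation (illustrative; the exact command depends on the local
checkout and harness setup):
  NUM_ITERATIONS=20 MAX_TIMEOUT_COUNT=5 \
      python perf.py TabPerfTest.testNewTab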
23 """ | |
24 | |
25 import BaseHTTPServer | |
26 import commands | |
27 import errno | |
28 import itertools | |
29 import logging | |
30 import math | |
31 import os | |
32 import posixpath | |
33 import re | |
34 import SimpleHTTPServer | |
35 import SocketServer | |
36 import signal | |
37 import subprocess | |
38 import sys | |
39 import tempfile | |
40 import threading | |
41 import time | |
42 import timeit | |
43 import urllib | |
44 import urllib2 | |
45 import urlparse | |
46 | |
47 import pyauto_functional # Must be imported before pyauto. | |
48 import pyauto | |
49 import simplejson # Must be imported after pyauto; located in third_party. | |
50 | |
51 from netflix import NetflixTestHelper | |
52 import pyauto_utils | |
53 import test_utils | |
54 from youtube import YoutubeTestHelper | |
55 | |
56 | |
_CHROME_BASE_DIR = os.path.abspath(os.path.join(
    os.path.dirname(__file__), os.pardir, os.pardir, os.pardir, os.pardir))


def FormatChromePath(posix_path, **kwargs):
  """Converts a path relative to the Chromium root into an OS-specific path.

  Args:
    posix_path: A path string that may be a format().
        Example: 'src/third_party/{module_name}/__init__.py'
    kwargs: Args for the format replacement.
        Example: {'module_name': 'pylib'}

  Returns:
    An absolute path in the current Chromium tree with formatting applied.
  """
  formatted_path = posix_path.format(**kwargs)
  path_parts = formatted_path.split('/')
  return os.path.join(_CHROME_BASE_DIR, *path_parts)

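# Example (illustrative): on Linux,
#   FormatChromePath('src/third_party/{module_name}/__init__.py',
#                    module_name='pylib')
# returns _CHROME_BASE_DIR + '/src/third_party/pylib/__init__.py'.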

def StandardDeviation(values):
  """Returns the sample standard deviation of |values|."""
  avg = Mean(values)
  if avg is None or len(values) < 2:
    return 0.0
  temp_vals = [math.pow(x - avg, 2) for x in values]
  return math.sqrt(sum(temp_vals) / (len(temp_vals) - 1))


def Mean(values):
  """Returns the arithmetic mean of |values|."""
  if not values or None in values:
    return None
  return sum(values) / float(len(values))


def GeometricMean(values):
  """Returns the geometric mean of |values|."""
  if not values or None in values or any(x < 0.0 for x in values):
    return None
  if 0.0 in values:
    return 0.0
  return math.exp(Mean([math.log(x) for x in values]))

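# Worked examples (illustrative):
#   Mean([1, 2, 3]) == 2.0
#   StandardDeviation([1, 2, 3]) == 1.0  # sample std dev: sqrt(2 / (3 - 1))
#   GeometricMean([2.0, 8.0]) ~= 4.0     # exp(mean([ln 2, ln 8]))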

class BasePerfTest(pyauto.PyUITest):
  """Base class for performance tests."""

  _DEFAULT_NUM_ITERATIONS = 10  # Keep synced with desktopui_PyAutoPerfTests.py.
  _DEFAULT_MAX_TIMEOUT_COUNT = 10
  _PERF_OUTPUT_MARKER_PRE = '_PERF_PRE_'
  _PERF_OUTPUT_MARKER_POST = '_PERF_POST_'

  def setUp(self):
    """Performs necessary setup work before running each test."""
    self._num_iterations = self._DEFAULT_NUM_ITERATIONS
    if 'NUM_ITERATIONS' in os.environ:
      self._num_iterations = int(os.environ['NUM_ITERATIONS'])
    self._max_timeout_count = self._DEFAULT_MAX_TIMEOUT_COUNT
    if 'MAX_TIMEOUT_COUNT' in os.environ:
      self._max_timeout_count = int(os.environ['MAX_TIMEOUT_COUNT'])
    self._timeout_count = 0

    # For users who want to see local perf graphs for Chrome when running the
    # tests on their own machines.
    self._local_perf_dir = None
    if 'LOCAL_PERF_DIR' in os.environ:
      self._local_perf_dir = os.environ['LOCAL_PERF_DIR']
      if not os.path.exists(self._local_perf_dir):
        self.fail('LOCAL_PERF_DIR environment variable specified as %s, '
                  'but this directory does not exist.' % self._local_perf_dir)
    # When outputting perf graph information on-the-fly for Chrome, this
    # variable lets us know whether a perf measurement is for a new test
    # execution, or the current test execution.
    self._seen_graph_lines = {}

    pyauto.PyUITest.setUp(self)

    # Flush all buffers to disk and wait until the system calms down. Must be
    # done *after* calling pyauto.PyUITest.setUp, since that is where Chrome
    # is killed and re-initialized for a new test.
    # TODO(dennisjeffrey): Implement wait for idle CPU on Windows/Mac.
    if self.IsLinux():  # IsLinux() is also True on ChromeOS.
      os.system('sync')
      self._WaitForIdleCPU(60.0, 0.05)

  def _IsPIDRunning(self, pid):
    """Checks if a given process id is running.

    Args:
      pid: The process id of the process to check.

    Returns:
      True if the process is running. False if not.
    """
    try:
      # Note that this sends the signal 0, which should not interfere with
      # the process.
      os.kill(pid, 0)
    except OSError, err:
      if err.errno == errno.ESRCH:
        return False

    try:
      with open('/proc/%s/status' % pid) as proc_file:
        if 'zombie' in proc_file.read():
          return False
    except IOError:
      return False
    return True

  def _GetAllDescendentProcesses(self, pid):
    """Returns the PIDs of |pid| and all of its descendants, per pstree."""
    pstree_out = subprocess.check_output(['pstree', '-p', '%s' % pid])
    children = re.findall(r'\((\d+)\)', pstree_out)
    return [int(child_pid) for child_pid in children]

  def _WaitForChromeExit(self, browser_info, timeout):
    """Waits up to |timeout| seconds for all Chrome processes to exit."""
    pid = browser_info['browser_pid']
    chrome_pids = self._GetAllDescendentProcesses(pid)
    initial_time = time.time()
    while time.time() - initial_time < timeout:
      if any(self._IsPIDRunning(pid) for pid in chrome_pids):
        time.sleep(1)
      else:
        logging.info('_WaitForChromeExit() took: %s seconds',
                     time.time() - initial_time)
        return
    self.fail('_WaitForChromeExit() did not finish within %s seconds' %
              timeout)

  def tearDown(self):
    if self._IsPGOMode():
      browser_info = self.GetBrowserInfo()
      pid = browser_info['browser_pid']
      # session_manager kills chrome without waiting for it to cleanly exit.
      # Until that behavior is changed, we stop it and wait for Chrome to
      # exit cleanly before restarting it. See:
      # crbug.com/264717
      subprocess.call(['sudo', 'pkill', '-STOP', 'session_manager'])
      os.kill(pid, signal.SIGINT)
      self._WaitForChromeExit(browser_info, 120)
      subprocess.call(['sudo', 'pkill', '-CONT', 'session_manager'])

    pyauto.PyUITest.tearDown(self)

  def _IsPGOMode(self):
    return 'USE_PGO' in os.environ

  def _WaitForIdleCPU(self, timeout, utilization):
    """Waits for the CPU to become idle (< utilization).

    Args:
      timeout: The longest time in seconds to wait before throwing an error.
      utilization: The CPU usage below which the system should be considered
          idle (between 0 and 1.0 independent of cores/hyperthreads).
    """
    time_passed = 0.0
    fraction_non_idle_time = 1.0
    logging.info('Starting to wait up to %fs for idle CPU...', timeout)
    while fraction_non_idle_time >= utilization:
      cpu_usage_start = self._GetCPUUsage()
      time.sleep(2)
      time_passed += 2.0
      cpu_usage_end = self._GetCPUUsage()
      fraction_non_idle_time = self._GetFractionNonIdleCPUTime(
          cpu_usage_start, cpu_usage_end)
      logging.info('Current CPU utilization = %f.', fraction_non_idle_time)
      if time_passed > timeout:
        self._LogProcessActivity()
        message = ('CPU did not idle after %fs wait (utilization = %f).' % (
            time_passed, fraction_non_idle_time))

        # crosbug.com/37389
        if self._IsPGOMode():
          logging.info(message)
          logging.info('Still continuing because we are in PGO mode.')
          return

        self.fail(message)
    logging.info('Wait for idle CPU took %fs (utilization = %f).',
                 time_passed, fraction_non_idle_time)

  def _LogProcessActivity(self):
    """Logs the output of top on Linux/Mac/CrOS.

    TODO: use taskmgr or similar on Windows.
    """
    if self.IsLinux() or self.IsMac():  # IsLinux() is also True on ChromeOS.
      logging.info('Logging current process activity using top.')
      cmd = 'top -b -d1 -n1'
      if self.IsMac():
        cmd = 'top -l1'
      p = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,
                           stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                           close_fds=True)
      output = p.communicate()[0]  # Also reaps the child process.
      logging.info(output)
    else:
      logging.info('Process activity logging not implemented on this OS.')

  def _AppendTab(self, url):
    """Appends a tab and increments a counter if the automation call times out.

    Args:
      url: The string url to which the appended tab should be navigated.
    """
    if not self.AppendTab(pyauto.GURL(url)):
      self._timeout_count += 1

  def _MeasureElapsedTime(self, python_command, num_invocations=1):
    """Measures time (in msec) to execute a python command one or more times.

    Args:
      python_command: A callable.
      num_invocations: An integer number of times to invoke the given command.

    Returns:
      The time required to execute the python command the specified number of
      times, in milliseconds as a float.
    """
    assert callable(python_command)
    def RunCommand():
      for _ in range(num_invocations):
        python_command()
    timer = timeit.Timer(stmt=RunCommand)
    return timer.timeit(number=1) * 1000  # Convert seconds to milliseconds.
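
  # Example (illustrative): timing ten no-op invocations.
  #   elapsed_ms = self._MeasureElapsedTime(lambda: None, num_invocations=10)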

  def _OutputPerfForStandaloneGraphing(self, graph_name, description, value,
                                       units, units_x, is_stacked):
    """Outputs perf measurement data to a local folder to be graphed.

    This function only applies to Chrome desktop, and assumes that environment
    variable 'LOCAL_PERF_DIR' has been specified and refers to a valid
    directory on the local machine.

    Args:
      graph_name: A string name for the graph associated with this
          performance value.
      description: A string description of the performance value. Should not
          include spaces.
      value: Either a single numeric value representing a performance
          measurement, or else a list of (x, y) tuples representing one or
          more long-running performance measurements, where 'x' is an x-axis
          value (such as an iteration number) and 'y' is the corresponding
          performance measurement. If a list of tuples is given, then the
          |units_x| argument must also be specified.
      units: A string representing the units of the performance
          measurement(s). Should not include spaces.
      units_x: A string representing the units of the x-axis values
          associated with the performance measurements, such as 'iteration'
          if the x values are iteration numbers. If this argument is
          specified, then the |value| argument must be a list of (x, y)
          tuples.
      is_stacked: True to draw a "stacked" graph. First-come values are
          stacked at bottom by default.
    """
    revision_num_file = os.path.join(self._local_perf_dir, 'last_revision.dat')
    if os.path.exists(revision_num_file):
      with open(revision_num_file) as f:
        revision = int(f.read())
    else:
      revision = 0

    if not self._seen_graph_lines:
      # We're about to output data for a new test run.
      revision += 1

    # Update graphs.dat.
    existing_graphs = []
    graphs_file = os.path.join(self._local_perf_dir, 'graphs.dat')
    if os.path.exists(graphs_file):
      with open(graphs_file) as f:
        existing_graphs = simplejson.loads(f.read())
    is_new_graph = True
    for graph in existing_graphs:
      if graph['name'] == graph_name:
        is_new_graph = False
        break
    if is_new_graph:
      new_graph = {
        'name': graph_name,
        'units': units,
        'important': False,
      }
      if units_x:
        new_graph['units_x'] = units_x
      existing_graphs.append(new_graph)
      with open(graphs_file, 'w') as f:
        f.write(simplejson.dumps(existing_graphs))
      os.chmod(graphs_file, 0755)

    # Update data file for this particular graph.
    existing_lines = []
    data_file = os.path.join(self._local_perf_dir, graph_name + '-summary.dat')
    if os.path.exists(data_file):
      with open(data_file) as f:
        existing_lines = f.readlines()
    existing_lines = [simplejson.loads(line.strip())
                      for line in existing_lines]

    seen_key = graph_name
    # We assume that the first line |existing_lines[0]| is the latest.
    if units_x:
      new_line = {
        'rev': revision,
        'traces': { description: [] }
      }
      if seen_key in self._seen_graph_lines:
        # We've added points previously for this graph line in the current
        # test execution, so retrieve the original set of points specified in
        # the most recent revision in the data file.
        new_line = existing_lines[0]
        if description not in new_line['traces']:
          new_line['traces'][description] = []
      for x_value, y_value in value:
        new_line['traces'][description].append([str(x_value), str(y_value)])
    else:
      new_line = {
        'rev': revision,
        'traces': { description: [str(value), str(0.0)] }
      }

    if is_stacked:
      new_line['stack'] = True
      if 'stack_order' not in new_line:
        new_line['stack_order'] = []
      if description not in new_line['stack_order']:
        new_line['stack_order'].append(description)

    if seen_key in self._seen_graph_lines:
      # Update results for the most recent revision.
      existing_lines[0] = new_line
    else:
      # New results for a new revision.
      existing_lines.insert(0, new_line)
    self._seen_graph_lines[seen_key] = True

    existing_lines = map(simplejson.dumps, existing_lines)
    with open(data_file, 'w') as f:
      f.write('\n'.join(existing_lines))
    os.chmod(data_file, 0755)

    with open(revision_num_file, 'w') as f:
      f.write(str(revision))

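  # Illustrative line in <graph_name>-summary.dat for a single-value
  # measurement (one JSON object per revision, newest first):
  #   {"rev": 3, "traces": {"NewTabPage": ["123.4", "0.0"]}}
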
  def _OutputPerfGraphValue(self, description, value, units,
                            graph_name, units_x=None, is_stacked=False):
    """Outputs a performance value to have it graphed on the performance bots.

    The output format differs, depending on whether the current platform is
    Chrome desktop or ChromeOS.

    For ChromeOS, the performance bots have a 30-character limit on the
    length of the key associated with a performance value. A key on ChromeOS
    is considered to be of the form "units_description" (for example,
    "milliseconds_NewTabPage"), and is created from the |units| and
    |description| passed as input to this function. Any characters beyond the
    length 30 limit are truncated before results are stored in the autotest
    database.

    Args:
      description: A string description of the performance value. Should not
          include spaces.
      value: Either a numeric value representing a performance measurement,
          or a list of values to be averaged. Lists may also contain (x, y)
          tuples representing one or more performance measurements, where 'x'
          is an x-axis value (such as an iteration number) and 'y' is the
          corresponding performance measurement. If a list of tuples is
          given, the |units_x| argument must also be specified.
      units: A string representing the units of the performance
          measurement(s). Should not include spaces.
      graph_name: A string name for the graph associated with this
          performance value. Only used on Chrome desktop.
      units_x: A string representing the units of the x-axis values
          associated with the performance measurements, such as 'iteration'
          if the x values are iteration numbers. If this argument is
          specified, then the |value| argument must be a list of (x, y)
          tuples.
      is_stacked: True to draw a "stacked" graph. First-come values are
          stacked at bottom by default.
    """
    if (isinstance(value, list) and value[0] is not None and
        isinstance(value[0], tuple)):
      assert units_x
    if units_x:
      assert isinstance(value, list)

    if self.IsChromeOS():
      # Autotest doesn't support result lists.
      autotest_value = value
      if (isinstance(value, list) and value[0] is not None and
          not isinstance(value[0], tuple)):
        autotest_value = Mean(value)

      if units_x:
        # TODO(dennisjeffrey): Support long-running performance measurements
        # on ChromeOS in a way that can be graphed: crosbug.com/21881.
        pyauto_utils.PrintPerfResult(graph_name, description, autotest_value,
                                     units + ' ' + units_x)
      else:
        # Output short-running performance results in a format understood by
        # autotest.
        perf_key = '%s_%s' % (units, description)
        if len(perf_key) > 30:
          logging.warning('The description "%s" will be truncated to "%s" '
                          '(length 30) when added to the autotest database.',
                          perf_key, perf_key[:30])
        print '\n%s(\'%s\', %f)%s' % (self._PERF_OUTPUT_MARKER_PRE,
                                      perf_key, autotest_value,
                                      self._PERF_OUTPUT_MARKER_POST)

        # Also output results in the format recognized by buildbot, for cases
        # in which these tests are run on ChromeOS through buildbot. Since
        # buildbot supports result lists, it's ok for |value| to be a list
        # here.
        pyauto_utils.PrintPerfResult(graph_name, description, value, units)

      sys.stdout.flush()
    else:
      # TODO(dmikurube): Support stacked graphs in PrintPerfResult.
      # See http://crbug.com/122119.
      if units_x:
        pyauto_utils.PrintPerfResult(graph_name, description, value,
                                     units + ' ' + units_x)
      else:
        pyauto_utils.PrintPerfResult(graph_name, description, value, units)

    if self._local_perf_dir:
      self._OutputPerfForStandaloneGraphing(
          graph_name, description, value, units, units_x, is_stacked)
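
  # Illustrative autotest output line emitted above for the key
  # 'milliseconds_NewTabPage' with value 123.4:
  #   _PERF_PRE_('milliseconds_NewTabPage', 123.400000)_PERF_POST_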

  def _OutputEventForStandaloneGraphing(self, description, event_list):
    """Outputs event information to a local folder to be graphed.

    See function _OutputEventGraphValue below for a description of an event.

    This function only applies to Chrome Endure tests running on Chrome
    desktop, and assumes that environment variable 'LOCAL_PERF_DIR' has been
    specified and refers to a valid directory on the local machine.

    Args:
      description: A string description of the event. Should not include
          spaces.
      event_list: A list of (x, y) tuples representing one or more events
          occurring during an endurance test, where 'x' is the time of the
          event (in seconds since the start of the test), and 'y' is a
          dictionary representing relevant data associated with that event
          (as key/value pairs).
    """
    revision_num_file = os.path.join(self._local_perf_dir, 'last_revision.dat')
    if os.path.exists(revision_num_file):
      with open(revision_num_file) as f:
        revision = int(f.read())
    else:
      revision = 0

    if not self._seen_graph_lines:
      # We're about to output data for a new test run.
      revision += 1

    existing_lines = []
    data_file = os.path.join(self._local_perf_dir, '_EVENT_-summary.dat')
    if os.path.exists(data_file):
      with open(data_file) as f:
        existing_lines = f.readlines()
    # The lines in this file are written with str(), not JSON, so they must
    # be parsed with eval().
    existing_lines = [eval(line.strip()) for line in existing_lines]

    seen_event_type = description
    value_list = []
    if seen_event_type in self._seen_graph_lines:
      # We've added events previously for this event type in the current
      # test execution, so retrieve the original set of values specified in
      # the most recent revision in the data file.
      value_list = existing_lines[0]['events'][description]
    for event_time, event_data in event_list:
      value_list.append([str(event_time), event_data])
    new_events = {
      description: value_list
    }

    new_line = {
      'rev': revision,
      'events': new_events
    }

    if seen_event_type in self._seen_graph_lines:
      # Update results for the most recent revision.
      existing_lines[0] = new_line
    else:
      # New results for a new revision.
      existing_lines.insert(0, new_line)
    self._seen_graph_lines[seen_event_type] = True

    existing_lines = map(str, existing_lines)
    with open(data_file, 'w') as f:
      f.write('\n'.join(existing_lines))
    os.chmod(data_file, 0755)

    with open(revision_num_file, 'w') as f:
      f.write(str(revision))
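
  # Illustrative line in _EVENT_-summary.dat (Python dict literals, newest
  # revision first; the event name and payload below are hypothetical):
  #   {'rev': 3, 'events': {'V8GC': [['12.5', {'collected_bytes': 1024}]]}}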

  def _OutputEventGraphValue(self, description, event_list):
    """Outputs a set of events to have them graphed on the Chrome Endure bots.

    An "event" can be anything recorded by a performance test that occurs at
    particular times during a test execution. For example, a garbage
    collection in the v8 heap can be considered an event. An event is
    distinguished from a regular perf measurement in two ways: (1) an event
    is depicted differently in the performance graphs than performance
    measurements; (2) an event can be associated with zero or more data
    fields describing relevant information associated with the event. For
    example, a garbage collection event will occur at a particular time, and
    it may be associated with data such as the number of collected bytes
    and/or the length of time it took to perform the garbage collection.

    This function only applies to Chrome Endure tests running on Chrome
    desktop.

    Args:
      description: A string description of the event. Should not include
          spaces.
      event_list: A list of (x, y) tuples representing one or more events
          occurring during an endurance test, where 'x' is the time of the
          event (in seconds since the start of the test), and 'y' is a
          dictionary representing relevant data associated with that event
          (as key/value pairs).
    """
    pyauto_utils.PrintPerfResult('_EVENT_', description, event_list, '')
    if self._local_perf_dir:
      self._OutputEventForStandaloneGraphing(description, event_list)

  def _PrintSummaryResults(self, description, values, units, graph_name):
    """Logs summary measurement information.

    This function computes and outputs the average and standard deviation of
    the specified list of value measurements. It also invokes
    _OutputPerfGraphValue() with the list of values, so that they can be
    plotted in a performance graph.

    Args:
      description: A string description for the specified results.
      values: A list of numeric value measurements.
      units: A string specifying the units for the specified measurements.
      graph_name: A string name for the graph associated with this
          performance value. Only used on Chrome desktop.
    """
    logging.info('Overall results for: %s', description)
    if values:
      logging.info('  Average: %f %s', Mean(values), units)
      logging.info('  Std dev: %f %s', StandardDeviation(values), units)
      self._OutputPerfGraphValue(description, values, units, graph_name)
    else:
      logging.info('No results to report.')

  def _RunNewTabTest(self, description, open_tab_command, graph_name,
                     num_tabs=1):
    """Runs a perf test that involves opening new tab(s).

    This helper function can be called from different tests to do perf
    testing with different types of tabs. It is assumed that the
    |open_tab_command| will open up a single tab.

    Args:
      description: A string description of the associated tab test.
      open_tab_command: A callable that will open a single tab.
      graph_name: A string name for the performance graph associated with
          this test. Only used on Chrome desktop.
      num_tabs: The number of tabs to open, i.e., the number of times to
          invoke the |open_tab_command|.
    """
    assert callable(open_tab_command)

    timings = []
    for iteration in range(self._num_iterations + 1):
      orig_timeout_count = self._timeout_count
      elapsed_time = self._MeasureElapsedTime(open_tab_command,
                                              num_invocations=num_tabs)
      # Only count the timing measurement if no automation call timed out.
      if self._timeout_count == orig_timeout_count:
        # Ignore the first iteration.
        if iteration:
          timings.append(elapsed_time)
          logging.info('Iteration %d of %d: %f milliseconds', iteration,
                       self._num_iterations, elapsed_time)
      self.assertTrue(self._timeout_count <= self._max_timeout_count,
                      msg='Test exceeded automation timeout threshold.')
      self.assertEqual(1 + num_tabs, self.GetTabCount(),
                       msg='Did not open %d new tab(s).' % num_tabs)
      for _ in range(num_tabs):
        self.CloseTab(tab_index=1)

    self._PrintSummaryResults(description, timings, 'milliseconds', graph_name)

  def _GetConfig(self):
    """Loads the perf test configuration file.

    Returns:
      A dictionary that represents the config information.
    """
    config_file = os.path.join(os.path.dirname(__file__), 'perf.cfg')
    config = {'username': None,
              'password': None,
              'google_account_url': 'https://accounts.google.com/',
              'gmail_url': 'https://www.gmail.com',
              'plus_url': 'https://plus.google.com',
              'docs_url': 'https://docs.google.com'}
    if os.path.exists(config_file):
      try:
        new_config = pyauto.PyUITest.EvalDataFrom(config_file)
        for key, value in new_config.items():
          if value is not None:
            config[key] = value
      except SyntaxError, e:
        logging.info('Could not read %s: %s', config_file, str(e))
    return config
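
  # Illustrative perf.cfg contents (a Python dict literal read by
  # EvalDataFrom; the credentials shown are placeholders):
  #   {'username': 'user@example.com',
  #    'password': 'secret'}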

  def _LoginToGoogleAccount(self, account_key='test_google_account'):
    """Logs in to a test Google account.

    Logs in with user-defined credentials if they exist; otherwise logs in
    with private test credentials if they exist; otherwise fails.

    Args:
      account_key: The string key in private_tests_info.txt which is
          associated with the test account login credentials to use. It is
          only used when user-defined credentials cannot be loaded.

    Raises:
      RuntimeError: If credential information could not be obtained.
    """
    private_file = os.path.join(pyauto.PyUITest.DataDir(), 'pyauto_private',
                                'private_tests_info.txt')
    config_file = os.path.join(os.path.dirname(__file__), 'perf.cfg')
    config = self._GetConfig()
    google_account_url = config.get('google_account_url')
    username = config.get('username')
    password = config.get('password')
    if username and password:
      logging.info('Using Google account credentials from %s', config_file)
    elif os.path.exists(private_file):
      creds = self.GetPrivateInfo()[account_key]
      username = creds['username']
      password = creds['password']
      logging.info('User-defined credentials not found, using private test '
                   'credentials instead.')
    else:
      raise RuntimeError('No user-defined or private test credentials could '
                         'be found. Please specify credential information '
                         'in %s.' % config_file)
    test_utils.GoogleAccountsLogin(
        self, username, password, url=google_account_url)
    self.NavigateToURL('about:blank')  # Clear the existing tab.

  def _GetCPUUsage(self):
    """Returns the machine's CPU usage.

    This function uses /proc/stat to identify CPU usage, and therefore works
    only on Linux/ChromeOS.

    Returns:
      A dictionary with 'user', 'nice', 'system' and 'idle' values.
      Sample dictionary:
      {
        'user': 254544,
        'nice': 9,
        'system': 254768,
        'idle': 2859878,
      }
    """
    try:
      with open('/proc/stat') as f:
        cpu_usage_str = f.readline().split()
    except IOError, e:
      self.fail('Could not retrieve CPU usage: ' + str(e))
    return {
      'user': int(cpu_usage_str[1]),
      'nice': int(cpu_usage_str[2]),
      'system': int(cpu_usage_str[3]),
      'idle': int(cpu_usage_str[4])
    }
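
  # The first line of /proc/stat looks like the following (values are
  # cumulative jiffies since boot):
  #   cpu  254544 9 254768 2859878 ...
  # i.e. user, nice, system, idle, followed by iowait/irq/softirq fields
  # that this helper ignores.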

  def _GetFractionNonIdleCPUTime(self, cpu_usage_start, cpu_usage_end):
    """Computes the fraction of CPU time spent non-idling.

    This function should be invoked using before/after values from calls to
    _GetCPUUsage().
    """
    time_non_idling_end = (cpu_usage_end['user'] + cpu_usage_end['nice'] +
                           cpu_usage_end['system'])
    time_non_idling_start = (cpu_usage_start['user'] +
                             cpu_usage_start['nice'] +
                             cpu_usage_start['system'])
    total_time_end = (cpu_usage_end['user'] + cpu_usage_end['nice'] +
                      cpu_usage_end['system'] + cpu_usage_end['idle'])
    total_time_start = (cpu_usage_start['user'] + cpu_usage_start['nice'] +
                        cpu_usage_start['system'] + cpu_usage_start['idle'])
    return ((float(time_non_idling_end) - time_non_idling_start) /
            (total_time_end - total_time_start))
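
  # Worked example (illustrative): if non-idle jiffies grew by 50 and idle
  # jiffies grew by 150 between the two samples, the fraction of non-idle
  # CPU time is 50 / (50 + 150) = 0.25.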

  def ExtraChromeFlags(self):
    """Ensures Chrome is launched with custom flags.

    Returns:
      A list of extra flags to pass to Chrome when it is launched.
    """
    flags = super(BasePerfTest, self).ExtraChromeFlags()
    # Window size impacts a variety of perf tests, so ensure consistency.
    flags.append('--window-size=1024,768')
    if self._IsPGOMode():
      flags.append('--no-sandbox')
    return flags


class TabPerfTest(BasePerfTest):
  """Tests that involve opening tabs."""

  def testNewTab(self):
    """Measures time to open a new tab."""
    self._RunNewTabTest('NewTabPage',
                        lambda: self._AppendTab('chrome://newtab'), 'open_tab')

  def testNewTabFlash(self):
    """Measures time to open a new tab navigated to a Flash page."""
    self.assertTrue(
        os.path.exists(os.path.join(self.ContentDataDir(), 'plugin',
                                    'flash.swf')),
        msg='Missing required Flash data file.')
    url = self.GetFileURLForContentDataPath('plugin', 'flash.swf')
    self._RunNewTabTest('NewTabFlashPage', lambda: self._AppendTab(url),
                        'open_tab')

  def test20Tabs(self):
    """Measures time to open 20 tabs."""
    self._RunNewTabTest('20TabsNewTabPage',
                        lambda: self._AppendTab('chrome://newtab'),
                        'open_20_tabs', num_tabs=20)


class BenchmarkPerfTest(BasePerfTest):
  """Benchmark performance tests."""

  def testV8BenchmarkSuite(self):
    """Measures score from the v8 benchmark suite."""
    url = self.GetFileURLForDataPath('v8_benchmark_v6', 'run.html')

    def _RunBenchmarkOnce(url):
      """Runs the v8 benchmark suite once and returns the results in a dict."""
      self.assertTrue(self.AppendTab(pyauto.GURL(url)),
                      msg='Failed to append tab for v8 benchmark suite.')
      js_done = """
          var val = document.getElementById("status").innerHTML;
          window.domAutomationController.send(val);
      """
      self.assertTrue(
          self.WaitUntil(
              lambda: 'Score:' in self.ExecuteJavascript(js_done, tab_index=1),
              timeout=300, expect_retval=True, retry_sleep=1),
          msg='Timed out when waiting for v8 benchmark score.')

      js_get_results = """
          var result = {};
          result['final_score'] = document.getElementById("status").innerHTML;
          result['all_results'] = document.getElementById("results").innerHTML;
          window.domAutomationController.send(JSON.stringify(result));
      """
      results = simplejson.loads(
          self.ExecuteJavascript(js_get_results, tab_index=1))
      score_pattern = r'(\w+): (\d+)'
      final_score = re.search(score_pattern, results['final_score']).group(2)
      result_dict = {'final_score': int(final_score)}
      for match in re.finditer(score_pattern, results['all_results']):
        benchmark_name = match.group(1)
        benchmark_score = match.group(2)
        result_dict[benchmark_name] = int(benchmark_score)
      self.CloseTab(tab_index=1)
      return result_dict

    timings = {}
    for iteration in xrange(self._num_iterations + 1):
      result_dict = _RunBenchmarkOnce(url)
      # Ignore the first iteration.
      if iteration:
        for key, val in result_dict.items():
          timings.setdefault(key, []).append(val)
      logging.info('Iteration %d of %d:\n%s', iteration,
                   self._num_iterations, self.pformat(result_dict))

    for key, val in timings.items():
      if key == 'final_score':
        self._PrintSummaryResults('V8Benchmark', val, 'score',
                                  'v8_benchmark_final')
      else:
        self._PrintSummaryResults('V8Benchmark-%s' % key, val, 'score',
                                  'v8_benchmark_individual')

  def testSunSpider(self):
    """Runs the SunSpider javascript benchmark suite."""
    url = self.GetFileURLForDataPath('sunspider', 'sunspider-driver.html')
    self.assertTrue(self.AppendTab(pyauto.GURL(url)),
                    msg='Failed to append tab for SunSpider benchmark suite.')

    js_is_done = """
        var done = false;
        if (document.getElementById("console"))
          done = true;
        window.domAutomationController.send(JSON.stringify(done));
    """
    self.assertTrue(
        self.WaitUntil(
            lambda: self.ExecuteJavascript(js_is_done, tab_index=1),
            timeout=300, expect_retval='true', retry_sleep=1),
        msg='Timed out when waiting for SunSpider benchmark score.')

    js_get_results = """
        window.domAutomationController.send(
            document.getElementById("console").innerHTML);
    """
    # Append '<br>' to the result to simplify regular expression matching.
    results = self.ExecuteJavascript(js_get_results, tab_index=1) + '<br>'
    total = re.search(r'Total:\s*([\d.]+)ms', results).group(1)
    logging.info('Total: %f ms', float(total))
    self._OutputPerfGraphValue('SunSpider-total', float(total), 'ms',
                               'sunspider_total')

    for match_category in re.finditer(r'\s\s(\w+):\s*([\d.]+)ms.+?<br><br>',
                                      results):
      category_name = match_category.group(1)
      category_result = match_category.group(2)
      logging.info('Benchmark "%s": %f ms', category_name,
                   float(category_result))
      self._OutputPerfGraphValue('SunSpider-' + category_name,
                                 float(category_result), 'ms',
                                 'sunspider_individual')

      for match_result in re.finditer(r'<br>\s\s\s\s([\w-]+):\s*([\d.]+)ms',
                                      match_category.group(0)):
        result_name = match_result.group(1)
        result_value = match_result.group(2)
        logging.info('  Result "%s-%s": %f ms', category_name, result_name,
                     float(result_value))
        self._OutputPerfGraphValue(
            'SunSpider-%s-%s' % (category_name, result_name),
            float(result_value), 'ms', 'sunspider_individual')

  def testDromaeoSuite(self):
    """Measures results from the Dromaeo benchmark suite."""
    url = self.GetFileURLForDataPath('dromaeo', 'index.html')
    self.assertTrue(self.AppendTab(pyauto.GURL(url + '?dromaeo')),
                    msg='Failed to append tab for Dromaeo benchmark suite.')

    js_is_ready = """
        var val = document.getElementById('pause').value;
        window.domAutomationController.send(val);
    """
    self.assertTrue(
        self.WaitUntil(
            lambda: self.ExecuteJavascript(js_is_ready, tab_index=1),
            timeout=30, expect_retval='Run', retry_sleep=1),
        msg='Timed out when waiting for Dromaeo benchmark to load.')

    js_run = """
        $('#pause').val('Run').click();
        window.domAutomationController.send('done');
    """
    self.ExecuteJavascript(js_run, tab_index=1)

    js_is_done = """
        var val = document.getElementById('timebar').innerHTML;
        window.domAutomationController.send(val);
    """
    self.assertTrue(
        self.WaitUntil(
            lambda: 'Total' in self.ExecuteJavascript(js_is_done, tab_index=1),
            timeout=900, expect_retval=True, retry_sleep=2),
        msg='Timed out when waiting for Dromaeo benchmark to complete.')

    js_get_results = """
        var result = {};
        result['total_result'] = $('#timebar strong').html();
        result['all_results'] = {};
        $('.result-item.done').each(function (i) {
          var group_name = $(this).find('.test b').html().replace(':', '');
          var group_results = {};
          group_results['result'] =
              $(this).find('span').html().replace('runs/s', '');

          group_results['sub_groups'] = {};
          $(this).find('li').each(function (i) {
            var sub_name = $(this).find('b').html().replace(':', '');
            group_results['sub_groups'][sub_name] =
                $(this).text().match(/: ([\d.]+)/)[1];
          });
          result['all_results'][group_name] = group_results;
        });
        window.domAutomationController.send(JSON.stringify(result));
    """
    results = simplejson.loads(
        self.ExecuteJavascript(js_get_results, tab_index=1))
    total_result = results['total_result']
    logging.info('Total result: %s', total_result)
    self._OutputPerfGraphValue('Dromaeo-total', float(total_result),
                               'runsPerSec', 'dromaeo_total')

    for group_name, group in results['all_results'].iteritems():
      logging.info('Benchmark "%s": %s', group_name, group['result'])
      self._OutputPerfGraphValue('Dromaeo-' + group_name.replace(' ', ''),
                                 float(group['result']), 'runsPerSec',
                                 'dromaeo_individual')
      for benchmark_name, benchmark_score in group['sub_groups'].iteritems():
        logging.info('  Result "%s": %s', benchmark_name, benchmark_score)

  def testSpaceport(self):
    """Measures results from the Spaceport benchmark suite."""
    # TODO(tonyg): Test is failing on bots. Diagnose and re-enable.
    pass

    # url = self.GetFileURLForDataPath('third_party', 'spaceport',
    #                                  'index.html')
    # self.assertTrue(self.AppendTab(pyauto.GURL(url + '?auto')),
    #                 msg='Failed to append tab for Spaceport benchmark '
    #                     'suite.')
    #
    # # The test reports results to console.log in the format "name: value".
    # # Inject a bit of JS to intercept those.
    # js_collect_console_log = """
    #     window.__pyautoresult = {};
    #     window.console.log = function(str) {
    #       if (!str) return;
    #       var key_val = str.split(': ');
    #       if (!key_val.length == 2) return;
    #       __pyautoresult[key_val[0]] = key_val[1];
    #     };
    #     window.domAutomationController.send('done');
    # """
    # self.ExecuteJavascript(js_collect_console_log, tab_index=1)
    #
    # def _IsDone():
    #   expected_num_results = 30  # The number of tests in the benchmark.
    #   results = eval(self.ExecuteJavascript(js_get_results, tab_index=1))
    #   return expected_num_results == len(results)
    #
    # js_get_results = """
    #     window.domAutomationController.send(
    #         JSON.stringify(window.__pyautoresult));
    # """
    # self.assertTrue(
    #     self.WaitUntil(_IsDone, timeout=1200, expect_retval=True,
    #                    retry_sleep=5),
    #     msg='Timed out when waiting for Spaceport benchmark to complete.')
    # results = eval(self.ExecuteJavascript(js_get_results, tab_index=1))
    #
    # for key in results:
    #   suite, test = key.split('.')
    #   value = float(results[key])
    #   self._OutputPerfGraphValue(test, value, 'ObjectsAt30FPS', suite)
    # self._PrintSummaryResults('Overall',
    #                           [float(x) for x in results.values()],
    #                           'ObjectsAt30FPS', 'Overall')


class LiveWebappLoadTest(BasePerfTest):
  """Tests that involve performance measurements of live webapps.

  These tests connect to live webpages (e.g., Gmail, Calendar, Docs) and are
  therefore subject to network conditions. These tests are meant to generate
  "ball-park" numbers only (to see roughly how long things take to occur from
  a user's perspective), and are not expected to be precise.
  """

  def testNewTabGmail(self):
    """Measures time to open a tab to a logged-in Gmail account.

    Timing starts right before the new tab is opened, and stops as soon as
    the webpage displays the substring 'Last account activity:'.
    """
    EXPECTED_SUBSTRING = 'Last account activity:'

    def _SubstringExistsOnPage():
      js = """
          var frame = document.getElementById("canvas_frame");
          var divs = frame.contentDocument.getElementsByTagName("div");
          for (var i = 0; i < divs.length; ++i) {
            if (divs[i].innerHTML.indexOf("%s") >= 0)
              window.domAutomationController.send("true");
          }
          window.domAutomationController.send("false");
      """ % EXPECTED_SUBSTRING
      return self.ExecuteJavascript(js, tab_index=1)

    def _RunSingleGmailTabOpen():
      self._AppendTab('http://www.gmail.com')
      self.assertTrue(self.WaitUntil(_SubstringExistsOnPage, timeout=120,
                                     expect_retval='true', retry_sleep=0.10),
                      msg='Timed out waiting for expected Gmail string.')

    self._LoginToGoogleAccount()
    self._RunNewTabTest('NewTabGmail', _RunSingleGmailTabOpen,
                        'open_tab_live_webapp')

  def testNewTabCalendar(self):
    """Measures time to open a tab to a logged-in Calendar account.

    Timing starts right before the new tab is opened, and stops as soon as
    the webpage displays a div containing the text 'Month'.
    """
    EXPECTED_SUBSTRING = 'Month'

    def _DivTitleStartsWith():
      js = """
          var divs = document.getElementsByTagName("div");
          for (var i = 0; i < divs.length; ++i) {
            if (divs[i].innerHTML == "%s")
              window.domAutomationController.send("true");
          }
          window.domAutomationController.send("false");
      """ % EXPECTED_SUBSTRING
      return self.ExecuteJavascript(js, tab_index=1)

    def _RunSingleCalendarTabOpen():
      self._AppendTab('http://calendar.google.com')
      self.assertTrue(self.WaitUntil(_DivTitleStartsWith, timeout=120,
                                     expect_retval='true', retry_sleep=0.10),
                      msg='Timed out waiting for expected Calendar string.')

    self._LoginToGoogleAccount()
    self._RunNewTabTest('NewTabCalendar', _RunSingleCalendarTabOpen,
                        'open_tab_live_webapp')

  def testNewTabDocs(self):
    """Measures time to open a tab to a logged-in Docs account.

    Timing starts right before the new tab is opened, and stops as soon as
    the webpage displays the expected substring 'sort' (case insensitive).
    """
    EXPECTED_SUBSTRING = 'sort'

    def _SubstringExistsOnPage():
      js = """
          var divs = document.getElementsByTagName("div");
          for (var i = 0; i < divs.length; ++i) {
            if (divs[i].innerHTML.toLowerCase().indexOf("%s") >= 0)
              window.domAutomationController.send("true");
          }
          window.domAutomationController.send("false");
      """ % EXPECTED_SUBSTRING
      return self.ExecuteJavascript(js, tab_index=1)

    def _RunSingleDocsTabOpen():
      self._AppendTab('http://docs.google.com')
      self.assertTrue(self.WaitUntil(_SubstringExistsOnPage, timeout=120,
                                     expect_retval='true', retry_sleep=0.10),
                      msg='Timed out waiting for expected Docs string.')

    self._LoginToGoogleAccount()
    self._RunNewTabTest('NewTabDocs', _RunSingleDocsTabOpen,
                        'open_tab_live_webapp')


class NetflixPerfTest(BasePerfTest, NetflixTestHelper):
  """Tests Netflix video performance."""

  def __init__(self, methodName='runTest', **kwargs):
    pyauto.PyUITest.__init__(self, methodName, **kwargs)
    NetflixTestHelper.__init__(self, self)

  def tearDown(self):
    self.SignOut()
    pyauto.PyUITest.tearDown(self)

  def testNetflixDroppedFrames(self):
    """Measures the Netflix video dropped frames/second. Runs for 60 secs."""
    self.LoginAndStartPlaying()
    self.CheckNetflixPlaying(self.IS_PLAYING,
                             'Player did not start playing the title.')
    # Ignore the first 10 seconds of video playing so we get smooth video
    # playback.
    time.sleep(10)
    init_dropped_frames = self._GetVideoDroppedFrames()
    dropped_frames = []
    prev_dropped_frames = 0
    for iteration in xrange(60):
      # Exclude the dropped frames from the first 10 seconds.
      total_dropped_frames = (self._GetVideoDroppedFrames() -
                              init_dropped_frames)
      dropped_frames_last_sec = total_dropped_frames - prev_dropped_frames
      dropped_frames.append(dropped_frames_last_sec)
      logging.info('Iteration %d of %d: %f dropped frames in the last second',
                   iteration + 1, 60, dropped_frames_last_sec)
      prev_dropped_frames = total_dropped_frames
      # Play the video for some time.
      time.sleep(1)
    self._PrintSummaryResults('NetflixDroppedFrames', dropped_frames,
                              'frames', 'netflix_dropped_frames')

  def testNetflixCPU(self):
    """Measures the Netflix video CPU usage. Runs for 60 seconds."""
    self.LoginAndStartPlaying()
    self.CheckNetflixPlaying(self.IS_PLAYING,
                             'Player did not start playing the title.')
    # Ignore the first 10 seconds of video playing so we get smooth video
    # playback.
    time.sleep(10)
    init_dropped_frames = self._GetVideoDroppedFrames()
    init_video_frames = self._GetVideoFrames()
    cpu_usage_start = self._GetCPUUsage()
    # Play the video for some time.
    time.sleep(60)
    total_video_frames = self._GetVideoFrames() - init_video_frames
    total_dropped_frames = (self._GetVideoDroppedFrames() -
                            init_dropped_frames)
    cpu_usage_end = self._GetCPUUsage()
    fraction_non_idle_time = self._GetFractionNonIdleCPUTime(
        cpu_usage_start, cpu_usage_end)
    # Extrapolate the CPU utilization to include the dropped frames.
    extrapolation_value = (fraction_non_idle_time *
                           (float(total_video_frames) + total_dropped_frames) /
                           total_video_frames)
    logging.info('Netflix CPU extrapolation: %f', extrapolation_value)
    self._OutputPerfGraphValue('NetflixCPUExtrapolation', extrapolation_value,
                               'extrapolation', 'netflix_cpu_extrapolation')
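
  # Worked example (illustrative): with 30% non-idle CPU time, 1800 shown
  # frames and 200 dropped frames, the extrapolation is
  # 0.30 * 2000 / 1800 ~= 0.33, an estimate of the CPU fraction that would
  # have been needed to show every frame.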


class YoutubePerfTest(BasePerfTest, YoutubeTestHelper):
  """Tests Youtube video performance."""

  def __init__(self, methodName='runTest', **kwargs):
    pyauto.PyUITest.__init__(self, methodName, **kwargs)
    YoutubeTestHelper.__init__(self, self)

  def _VerifyVideoTotalBytes(self):
    """Returns True if video total bytes information is available."""
    return self.GetVideoTotalBytes() > 0

  def _VerifyVideoLoadedBytes(self):
    """Returns True if video loaded bytes information is available."""
    return self.GetVideoLoadedBytes() > 0

  def StartVideoForPerformance(self, video_id='zuzaxlddWbk'):
    """Starts the test video with all required buffering."""
    self.PlayVideoAndAssert(video_id)
    self.ExecuteJavascript("""
        ytplayer.setPlaybackQuality('hd720');
        window.domAutomationController.send('');
    """)
    self.AssertPlayerState(state=self.is_playing,
                           msg='Player did not enter the playing state.')
    self.assertTrue(
        self.WaitUntil(self._VerifyVideoTotalBytes, expect_retval=True),
        msg='Failed to get video total bytes information.')
    self.assertTrue(
        self.WaitUntil(self._VerifyVideoLoadedBytes, expect_retval=True),
        msg='Failed to get video loaded bytes information.')
    loaded_video_bytes = self.GetVideoLoadedBytes()
    total_video_bytes = self.GetVideoTotalBytes()
    self.PauseVideo()
    logging.info('total_video_bytes: %f', total_video_bytes)
    # Wait for the video to finish loading.
    while total_video_bytes > loaded_video_bytes:
      loaded_video_bytes = self.GetVideoLoadedBytes()
      logging.info('loaded_video_bytes: %f', loaded_video_bytes)
      time.sleep(1)
    self.PlayVideo()
    # Ignore the first 10 seconds of video playing so we get smooth video
    # playback.
    time.sleep(10)

  def testYoutubeDroppedFrames(self):
    """Measures the Youtube video dropped frames/second. Runs for 60 secs.

    This test measures Youtube video dropped frames for three types of video:
    slow, normal, and fast motion.
    """
    youtube_video = {'Slow': 'VT1-sitWRtY',
                     'Normal': '2tqK_3mKQUw',
                     'Fast': '8ETDE0VGJY4',
                    }
    for video_type in youtube_video:
      logging.info('Running %s video.', video_type)
      self.StartVideoForPerformance(youtube_video[video_type])
      init_dropped_frames = self.GetVideoDroppedFrames()
      total_dropped_frames = 0
      dropped_fps = []
      for iteration in xrange(60):
        frames = self.GetVideoDroppedFrames() - init_dropped_frames
        current_dropped_frames = frames - total_dropped_frames
        dropped_fps.append(current_dropped_frames)
        logging.info('Iteration %d of %d: %f dropped frames in the last '
                     'second', iteration + 1, 60, current_dropped_frames)
        total_dropped_frames = frames
        # Play the video for some time.
        time.sleep(1)
      graph_description = 'YoutubeDroppedFrames' + video_type
      self._PrintSummaryResults(graph_description, dropped_fps, 'frames',
                                'youtube_dropped_frames')

  def testYoutubeCPU(self):
    """Measures the Youtube video CPU usage. Runs for 60 seconds.

    Measures the Youtube video CPU usage (between 0 and 1), extrapolated to
    the total number of frames in the video by taking dropped frames into
    account. For smooth video playback this number should stay below roughly
    0.5 to 1.0 on a hyperthreaded CPU.
    """
    self.StartVideoForPerformance()
    init_dropped_frames = self.GetVideoDroppedFrames()
    logging.info('init_dropped_frames: %f', init_dropped_frames)
    cpu_usage_start = self._GetCPUUsage()
    total_shown_frames = 0
    for sec_num in xrange(60):
      # Play the video for some time.
      time.sleep(1)
      total_shown_frames += self.GetVideoFrames()
      logging.info('total_shown_frames: %f', total_shown_frames)
    total_dropped_frames = self.GetVideoDroppedFrames() - init_dropped_frames
    logging.info('total_dropped_frames: %f', total_dropped_frames)
    cpu_usage_end = self._GetCPUUsage()
    fraction_non_idle_time = self._GetFractionNonIdleCPUTime(
        cpu_usage_start, cpu_usage_end)
    logging.info('fraction_non_idle_time: %f', fraction_non_idle_time)
    total_frames = total_shown_frames + total_dropped_frames
    # Extrapolate the CPU utilization to include the dropped frames.
    extrapolation_value = (fraction_non_idle_time *
                           (float(total_frames) / total_shown_frames))
    logging.info('Youtube CPU extrapolation: %f', extrapolation_value)
    # The video is still running, so log some more detailed data.
    self._LogProcessActivity()
    self._OutputPerfGraphValue('YoutubeCPUExtrapolation', extrapolation_value,
                               'extrapolation', 'youtube_cpu_extrapolation')


class FlashVideoPerfTest(BasePerfTest):
  """General Flash video performance tests."""

  def FlashVideo1080P(self):
    """Measures total dropped frames and average FPS for a 1080p Flash video.

    This is a temporary test to be run manually for now, needed to collect
    some performance statistics across different ChromeOS devices.
    """
    # Open up the test webpage; it's assumed the test will start
    # automatically.
    webpage_url = 'http://www/~arscott/fl/FlashVideoTests.html'
    self.assertTrue(self.AppendTab(pyauto.GURL(webpage_url)),
                    msg='Failed to append tab for webpage.')

    # Wait until the test is complete.
    js_is_done = """
        window.domAutomationController.send(JSON.stringify(tests_done));
    """
    self.assertTrue(
        self.WaitUntil(
            lambda: self.ExecuteJavascript(js_is_done, tab_index=1) == 'true',
            timeout=300, expect_retval=True, retry_sleep=1),
        msg='Timed out when waiting for test result.')

    # Retrieve and output the test results.
    js_results = """
        window.domAutomationController.send(JSON.stringify(tests_results));
    """
    test_result = eval(self.ExecuteJavascript(js_results, tab_index=1))
    test_result[0] = test_result[0].replace('true', 'True')
    test_result = eval(test_result[0])  # Webpage only does 1 test right now.

    description = 'FlashVideo1080P'
    result = test_result['averageFPS']
    logging.info('Result for %s: %f FPS (average)', description, result)
    self._OutputPerfGraphValue(description, result, 'FPS',
                               'flash_video_1080p_fps')
    result = test_result['droppedFrames']
    logging.info('Result for %s: %f dropped frames', description, result)
    self._OutputPerfGraphValue(description, result, 'DroppedFrames',
                               'flash_video_1080p_dropped_frames')


1321 class WebGLTest(BasePerfTest): | |
1322 """Tests for WebGL performance.""" | |
1323 | |
1324 def _RunWebGLTest(self, url, description, graph_name): | |
1325 """Measures FPS using a specified WebGL demo. | |
1326 | |
1327 Args: | |
1328 url: The string URL that, once loaded, will run the WebGL demo (default | |
1329 WebGL demo settings are used, since this test does not modify any | |
1330 settings in the demo). | |
1331 description: A string description for this demo, used as a performance | |
1332 value description. Should not contain any spaces. | |
1333 graph_name: A string name for the performance graph associated with this | |
1334 test. Only used on Chrome desktop. | |
1335 """ | |
1336 self.assertTrue(self.AppendTab(pyauto.GURL(url)), | |
1337 msg='Failed to append tab for %s.' % description) | |
1338 | |
1339 get_fps_js = """ | |
1340 var fps_field = document.getElementById("fps"); | |
1341 var result = -1; | |
1342 if (fps_field) | |
1343 result = fps_field.innerHTML; | |
1344 window.domAutomationController.send(JSON.stringify(result)); | |
1345 """ | |
1346 | |
1347 # Wait until we start getting FPS values. | |
1348 self.assertTrue( | |
1349 self.WaitUntil( | |
1350 lambda: self.ExecuteJavascript(get_fps_js, tab_index=1) != '-1', | |
1351 timeout=300, retry_sleep=1), | |
1352 msg='Timed out when waiting for FPS values to be available.') | |
1353 | |
1354 # Let the experiment run for 5 seconds before we start collecting perf | |
1355 # measurements. | |
1356 time.sleep(5) | |
1357 | |
1358 # Collect the current FPS value each second for the next 30 seconds. The | |
1359 # final result of this test will be the average of these FPS values. | |
1360 fps_vals = [] | |
1361 for iteration in xrange(30): | |
1362 fps = self.ExecuteJavascript(get_fps_js, tab_index=1) | |
1363 fps = float(fps.replace('"', '')) | |
1364 fps_vals.append(fps) | |
1365 logging.info('Iteration %d of %d: %f FPS', iteration + 1, 30, fps) | |
1366 time.sleep(1) | |
1367 self._PrintSummaryResults(description, fps_vals, 'fps', graph_name) | |
1368 | |
1369 def testWebGLAquarium(self): | |
1370 """Measures performance using the WebGL Aquarium demo.""" | |
1371 self._RunWebGLTest( | |
1372 self.GetFileURLForDataPath('pyauto_private', 'webgl', 'aquarium', | |
1373 'aquarium.html'), | |
1374 'WebGLAquarium', 'webgl_demo') | |
1375 | |
1376 def testWebGLField(self): | |
1377 """Measures performance using the WebGL Field demo.""" | |
1378 self._RunWebGLTest( | |
1379 self.GetFileURLForDataPath('pyauto_private', 'webgl', 'field', | |
1380 'field.html'), | |
1381 'WebGLField', 'webgl_demo') | |
1382 | |
1383 def testWebGLSpaceRocks(self): | |
1384 """Measures performance using the WebGL SpaceRocks demo.""" | |
1385 self._RunWebGLTest( | |
1386 self.GetFileURLForDataPath('pyauto_private', 'webgl', 'spacerocks', | |
1387 'spacerocks.html'), | |
1388 'WebGLSpaceRocks', 'webgl_demo') | |
1389 | |
1390 | |
1391 class GPUPerfTest(BasePerfTest): | |
1392 """Tests for GPU performance.""" | |
1393 | |
1394 def setUp(self): | |
1395 """Performs necessary setup work before running each test in this class.""" | |
1396 self._gpu_info_dict = self.EvalDataFrom(os.path.join(self.DataDir(), | |
1397 'gpu', 'gpuperf.txt')) | |
1398 self._demo_name_url_dict = self._gpu_info_dict['demo_info'] | |
1399 pyauto.PyUITest.setUp(self) | |
1400 | |
1401 def _MeasureFpsOverTime(self, tab_index=0): | |
1402 """Measures FPS using a specified demo. | |
1403 | |
1404 This function assumes that the demo is already loaded in the specified tab | |
1405 index. | |
1406 | |
1407 Args: | |
1408 tab_index: The tab index, default is 0. | |
1409 """ | |
1410 # Let the experiment run for 5 seconds before we start collecting FPS | |
1411 # values. | |
1412 time.sleep(5) | |
1413 | |
1414 # Collect the current FPS value each second for the next 10 seconds. | |
1415 # Then return the average FPS value from among those collected. | |
1416 fps_vals = [] | |
1417 for iteration in xrange(10): | |
1418 fps = self.GetFPS(tab_index=tab_index) | |
1419 fps_vals.append(fps['fps']) | |
1420 time.sleep(1) | |
1421 return Mean(fps_vals) | |
1422 | |
1423 def _GetStdAvgAndCompare(self, avg_fps, description, ref_dict): | |
    """Compares an average FPS value against platform-specific reference data.
1425 | |
1426 Args: | |
1427 avg_fps: Average fps value. | |
1428 description: A string description for this demo, used as a performance | |
1429 value description. | |
1430 ref_dict: Dictionary which contains reference data for this test case. | |
1431 | |
1432 Returns: | |
1433 True, if the actual FPS value is within 10% of the reference FPS value, | |
1434 or False, otherwise. | |
1435 """ | |
1436 std_fps = 0 | |
1437 status = True | |
1438 # Load reference data according to platform. | |
1439 platform_ref_dict = None | |
1440 if self.IsWin(): | |
1441 platform_ref_dict = ref_dict['win'] | |
1442 elif self.IsMac(): | |
1443 platform_ref_dict = ref_dict['mac'] | |
1444 elif self.IsLinux(): | |
1445 platform_ref_dict = ref_dict['linux'] | |
1446 else: | |
      self.fail(msg='This platform is unsupported.')
1448 std_fps = platform_ref_dict[description] | |
1449 # Compare reference data to average fps. | |
1450 # We allow the average FPS value to be within 10% of the reference | |
1451 # FPS value. | |
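    # For example (illustrative), with a reference of 60 fps, an average of
    # 54 fps or higher passes.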
1452 if avg_fps < (0.9 * std_fps): | |
1453 logging.info('FPS difference exceeds threshold for: %s', description) | |
1454 logging.info(' Average: %f fps', avg_fps) | |
      logging.info('  Reference Average: %f fps', std_fps)
1456 status = False | |
1457 else: | |
      logging.info('Average FPS is within 10 percent of the reference FPS '
                   '(or better) for: %s', description)
1460 logging.info(' Average: %f fps', avg_fps) | |
1461 logging.info(' Reference Average: %f fps', std_fps) | |
1462 return status | |
1463 | |
1464 def testLaunchDemosParallelInSeparateTabs(self): | |
    """Measures performance of demos in separate tabs of the same browser."""
    # Launch all the demos in parallel, each in its own tab.
    counter = 0
    all_demos_passed = True
    ref_dict = self._gpu_info_dict['separate_tab_ref_data']
    # Iterate through the dictionary and append a tab for each demo URL.
    for demo_name in self._demo_name_url_dict.iterkeys():
      self.assertTrue(
          self.AppendTab(pyauto.GURL(self._demo_name_url_dict[demo_name])),
          msg='Failed to append tab for %s.' % demo_name)
      counter += 1
    # Assert that the tab count equals the number of appended tabs plus the
    # original tab.
    self.assertEqual(self.GetTabCount(), counter + 1)
    # Measure performance for each demo and compare it against the golden
    # reference. Tabs were appended in dictionary iteration order, so tab
    # index i corresponds to the i-th demo name.
    for tab_index, demo_name in enumerate(
        self._demo_name_url_dict.iterkeys(), 1):
      avg_fps = self._MeasureFpsOverTime(tab_index=tab_index)
      # Get the reference FPS value and compare the results.
      if not self._GetStdAvgAndCompare(avg_fps, demo_name, ref_dict):
        all_demos_passed = False
1486 self.assertTrue( | |
1487 all_demos_passed, | |
1488 msg='One or more demos failed to yield an acceptable FPS value') | |
1489 | |
1490 def testLaunchDemosInSeparateBrowser(self): | |
    """Measures performance by launching each demo in a fresh browser."""
    # Launch each demo in the browser, restarting the browser in between.
    ref_dict = self._gpu_info_dict['separate_browser_ref_data']
    all_demos_passed = True
    for demo_name in self._demo_name_url_dict.iterkeys():
      self.NavigateToURL(self._demo_name_url_dict[demo_name])
      # Measure performance for this demo.
      avg_fps = self._MeasureFpsOverTime()
      self.RestartBrowser()
      # Get the reference FPS value and compare the results.
      if not self._GetStdAvgAndCompare(avg_fps, demo_name, ref_dict):
1502 all_demos_passed = False | |
1503 self.assertTrue( | |
1504 all_demos_passed, | |
1505 msg='One or more demos failed to yield an acceptable FPS value') | |
1506 | |
1507 def testLaunchDemosBrowseForwardBackward(self): | |
    """Measures performance of demos while navigating back and forward."""
1509 ref_dict = self._gpu_info_dict['browse_back_forward_ref_data'] | |
1510 url_array = [] | |
1511 desc_array = [] | |
1512 all_demos_passed = True | |
    # Split the dictionary into parallel lists of URLs and descriptions.
    for demo_name in self._demo_name_url_dict.iterkeys():
      url_array.append(self._demo_name_url_dict[demo_name])
      desc_array.append(demo_name)
1517 for index in range(len(url_array) - 1): | |
      # Launch the first demo in the browser.
1519 if index == 0: | |
1520 self.NavigateToURL(url_array[index]) | |
1521 # Measures performance using the first demo. | |
1522 avg_fps = self._MeasureFpsOverTime() | |
1523 status1 = self._GetStdAvgAndCompare(avg_fps, desc_array[index], | |
1524 ref_dict) | |
1525 # Measures performance using the second demo. | |
1526 self.NavigateToURL(url_array[index + 1]) | |
1527 avg_fps = self._MeasureFpsOverTime() | |
1528 status2 = self._GetStdAvgAndCompare(avg_fps, desc_array[index + 1], | |
1529 ref_dict) | |
      # Go back to the previous (first) demo.
1531 self.TabGoBack() | |
      # Measure performance for the first demo after navigating back.
1533 avg_fps = self._MeasureFpsOverTime() | |
1534 status3 = self._GetStdAvgAndCompare( | |
1535 avg_fps, desc_array[index] + '_backward', | |
1536 ref_dict) | |
      # Go forward to the second demo again.
1538 self.TabGoForward() | |
      # Measure performance for the second demo after navigating forward.
1540 avg_fps = self._MeasureFpsOverTime() | |
1541 status4 = self._GetStdAvgAndCompare( | |
1542 avg_fps, desc_array[index + 1] + '_forward', | |
1543 ref_dict) | |
1544 if not all([status1, status2, status3, status4]): | |
1545 all_demos_passed = False | |
1546 self.assertTrue( | |
1547 all_demos_passed, | |
1548 msg='One or more demos failed to yield an acceptable FPS value') | |
1549 | |
1550 | |
1551 class HTML5BenchmarkTest(BasePerfTest): | |
1552 """Tests for HTML5 performance.""" | |
1553 | |
1554 def testHTML5Benchmark(self): | |
1555 """Measures performance using the benchmark at html5-benchmark.com.""" | |
1556 self.NavigateToURL('http://html5-benchmark.com') | |
1557 | |
1558 start_benchmark_js = """ | |
1559 benchmark(); | |
1560 window.domAutomationController.send("done"); | |
1561 """ | |
1562 self.ExecuteJavascript(start_benchmark_js) | |
1563 | |
1564 js_final_score = """ | |
1565 var score = "-1"; | |
1566 var elem = document.getElementById("score"); | |
1567 if (elem) | |
1568 score = elem.innerHTML; | |
1569 window.domAutomationController.send(score); | |
1570 """ | |
1571 # Wait for the benchmark to complete, which is assumed to be when the value | |
1572 # of the 'score' DOM element changes to something other than '87485'. | |
1573 self.assertTrue( | |
1574 self.WaitUntil( | |
1575 lambda: self.ExecuteJavascript(js_final_score) != '87485', | |
1576 timeout=900, retry_sleep=1), | |
1577 msg='Timed out when waiting for final score to be available.') | |
1578 | |
1579 score = self.ExecuteJavascript(js_final_score) | |
1580 logging.info('HTML5 Benchmark final score: %f', float(score)) | |
1581 self._OutputPerfGraphValue('HTML5Benchmark', float(score), 'score', | |
1582 'html5_benchmark') | |
1583 | |
1584 | |
1585 class FileUploadDownloadTest(BasePerfTest): | |
1586 """Tests that involve measuring performance of upload and download.""" | |
1587 | |
1588 def setUp(self): | |
1589 """Performs necessary setup work before running each test in this class.""" | |
1590 self._temp_dir = tempfile.mkdtemp() | |
1591 self._test_server = PerfTestServer(self._temp_dir) | |
1592 self._test_server_port = self._test_server.GetPort() | |
1593 self._test_server.Run() | |
1594 self.assertTrue(self.WaitUntil(self._IsTestServerRunning), | |
1595 msg='Failed to start local performance test server.') | |
1596 BasePerfTest.setUp(self) | |
1597 | |
1598 def tearDown(self): | |
1599 """Performs necessary cleanup work after running each test in this class.""" | |
1600 BasePerfTest.tearDown(self) | |
1601 self._test_server.ShutDown() | |
1602 pyauto_utils.RemovePath(self._temp_dir) | |
1603 | |
1604 def _IsTestServerRunning(self): | |
1605 """Determines whether the local test server is ready to accept connections. | |
1606 | |
1607 Returns: | |
1608 True, if a connection can be made to the local performance test server, or | |
1609 False otherwise. | |
1610 """ | |
1611 conn = None | |
1612 try: | |
1613 conn = urllib2.urlopen('http://localhost:%d' % self._test_server_port) | |
1614 return True | |
    except IOError:
1616 return False | |
1617 finally: | |
1618 if conn: | |
1619 conn.close() | |
1620 | |
1621 def testDownload100MBFile(self): | |
1622 """Measures the time to download a 100 MB file from a local server.""" | |
1623 CREATE_100MB_URL = ( | |
1624 'http://localhost:%d/create_file_of_size?filename=data&mb=100' % | |
1625 self._test_server_port) | |
1626 DOWNLOAD_100MB_URL = 'http://localhost:%d/data' % self._test_server_port | |
1627 DELETE_100MB_URL = ('http://localhost:%d/delete_file?filename=data' % | |
1628 self._test_server_port) | |
1629 | |
1630 # Tell the local server to create a 100 MB file. | |
1631 self.NavigateToURL(CREATE_100MB_URL) | |
1632 | |
1633 # Cleaning up downloaded files is done in the same way as in downloads.py. | |
1634 # We first identify all existing downloaded files, then remove only those | |
1635 # new downloaded files that appear during the course of this test. | |
1636 download_dir = self.GetDownloadDirectory().value() | |
1637 orig_downloads = [] | |
1638 if os.path.isdir(download_dir): | |
1639 orig_downloads = os.listdir(download_dir) | |
1640 | |
1641 def _CleanupAdditionalFilesInDir(directory, orig_files): | |
1642 """Removes the additional files in the specified directory. | |
1643 | |
1644 This function will remove all files from |directory| that are not | |
1645 specified in |orig_files|. | |
1646 | |
1647 Args: | |
1648 directory: A string directory path. | |
1649 orig_files: A list of strings representing the original set of files in | |
1650 the specified directory. | |
1651 """ | |
1652 downloads_to_remove = [] | |
1653 if os.path.isdir(directory): | |
1654 downloads_to_remove = [os.path.join(directory, name) | |
1655 for name in os.listdir(directory) | |
1656 if name not in orig_files] | |
1657 for file_name in downloads_to_remove: | |
1658 pyauto_utils.RemovePath(file_name) | |
1659 | |
1660 def _DownloadFile(url): | |
1661 self.DownloadAndWaitForStart(url) | |
1662 self.WaitForAllDownloadsToComplete(timeout=2 * 60 * 1000) # 2 minutes. | |
1663 | |
1664 timings = [] | |
1665 for iteration in range(self._num_iterations + 1): | |
1666 elapsed_time = self._MeasureElapsedTime( | |
1667 lambda: _DownloadFile(DOWNLOAD_100MB_URL), num_invocations=1) | |
1668 # Ignore the first iteration. | |
1669 if iteration: | |
1670 timings.append(elapsed_time) | |
1671 logging.info('Iteration %d of %d: %f milliseconds', iteration, | |
1672 self._num_iterations, elapsed_time) | |
1673 self.SetDownloadShelfVisible(False) | |
1674 _CleanupAdditionalFilesInDir(download_dir, orig_downloads) | |
1675 | |
1676 self._PrintSummaryResults('Download100MBFile', timings, 'milliseconds', | |
1677 'download_file') | |
1678 | |
1679 # Tell the local server to delete the 100 MB file. | |
1680 self.NavigateToURL(DELETE_100MB_URL) | |
1681 | |
1682 def testUpload50MBFile(self): | |
1683 """Measures the time to upload a 50 MB file to a local server.""" | |
1684 # TODO(dennisjeffrey): Replace the use of XMLHttpRequest in this test with | |
1685 # FileManager automation to select the upload file when crosbug.com/17903 | |
1686 # is complete. | |
1687 START_UPLOAD_URL = ( | |
1688 'http://localhost:%d/start_upload?mb=50' % self._test_server_port) | |
1689 | |
1690 EXPECTED_SUBSTRING = 'Upload complete' | |
1691 | |
1692 def _IsUploadComplete(): | |
1693 js = """ | |
1694 result = ""; | |
1695 var div = document.getElementById("upload_result"); | |
1696 if (div) | |
1697 result = div.innerHTML; | |
1698 window.domAutomationController.send(result); | |
1699 """ | |
1700 return self.ExecuteJavascript(js).find(EXPECTED_SUBSTRING) >= 0 | |
1701 | |
1702 def _RunSingleUpload(): | |
1703 self.NavigateToURL(START_UPLOAD_URL) | |
1704 self.assertTrue( | |
1705 self.WaitUntil(_IsUploadComplete, timeout=120, expect_retval=True, | |
1706 retry_sleep=0.10), | |
1707 msg='Upload failed to complete before the timeout was hit.') | |
1708 | |
1709 timings = [] | |
1710 for iteration in range(self._num_iterations + 1): | |
1711 elapsed_time = self._MeasureElapsedTime(_RunSingleUpload) | |
1712 # Ignore the first iteration. | |
1713 if iteration: | |
1714 timings.append(elapsed_time) | |
1715 logging.info('Iteration %d of %d: %f milliseconds', iteration, | |
1716 self._num_iterations, elapsed_time) | |
1717 | |
1718 self._PrintSummaryResults('Upload50MBFile', timings, 'milliseconds', | |
1719 'upload_file') | |
1720 | |
1721 | |
1722 class FlashTest(BasePerfTest): | |
1723 """Tests to measure flash performance.""" | |
1724 | |
1725 def _RunFlashTestForAverageFPS(self, webpage_url, description, graph_name): | |
1726 """Runs a single flash test that measures an average FPS value. | |
1727 | |
1728 Args: | |
1729 webpage_url: The string URL to a webpage that will run the test. | |
1730 description: A string description for this test. | |
1731 graph_name: A string name for the performance graph associated with this | |
1732 test. Only used on Chrome desktop. | |
1733 """ | |
1734 # Open up the test webpage; it's assumed the test will start automatically. | |
1735 self.assertTrue(self.AppendTab(pyauto.GURL(webpage_url)), | |
1736 msg='Failed to append tab for webpage.') | |
1737 | |
1738 # Wait until the final result is computed, then retrieve and output it. | |
1739 js = """ | |
1740 window.domAutomationController.send( | |
1741 JSON.stringify(final_average_fps)); | |
1742 """ | |
1743 self.assertTrue( | |
1744 self.WaitUntil( | |
1745 lambda: self.ExecuteJavascript(js, tab_index=1) != '-1', | |
1746 timeout=300, expect_retval=True, retry_sleep=1), | |
1747 msg='Timed out when waiting for test result.') | |
1748 result = float(self.ExecuteJavascript(js, tab_index=1)) | |
1749 logging.info('Result for %s: %f FPS (average)', description, result) | |
1750 self._OutputPerfGraphValue(description, result, 'FPS', graph_name) | |
1751 | |
1752 def testFlashGaming(self): | |
1753 """Runs a simple flash gaming benchmark test.""" | |
1754 webpage_url = self.GetHttpURLForDataPath('pyauto_private', 'flash', | |
1755 'FlashGamingTest2.html') | |
1756 self._RunFlashTestForAverageFPS(webpage_url, 'FlashGaming', 'flash_fps') | |
1757 | |
1758 def testFlashText(self): | |
1759 """Runs a simple flash text benchmark test.""" | |
1760 webpage_url = self.GetHttpURLForDataPath('pyauto_private', 'flash', | |
1761 'FlashTextTest2.html') | |
1762 self._RunFlashTestForAverageFPS(webpage_url, 'FlashText', 'flash_fps') | |
1763 | |
1764 def testScimarkGui(self): | |
1765 """Runs the ScimarkGui benchmark tests.""" | |
1766 webpage_url = self.GetHttpURLForDataPath('pyauto_private', 'flash', | |
1767 'scimarkGui.html') | |
1768 self.assertTrue(self.AppendTab(pyauto.GURL(webpage_url)), | |
1769 msg='Failed to append tab for webpage.') | |
1770 | |
1771 js = 'window.domAutomationController.send(JSON.stringify(tests_done));' | |
1772 self.assertTrue( | |
1773 self.WaitUntil( | |
1774 lambda: self.ExecuteJavascript(js, tab_index=1), timeout=300, | |
1775 expect_retval='true', retry_sleep=1), | |
1776 msg='Timed out when waiting for tests to complete.') | |
1777 | |
1778 js_result = """ | |
1779 var result = {}; | |
1780 for (var i = 0; i < tests_results.length; ++i) { | |
1781 var test_name = tests_results[i][0]; | |
1782 var mflops = tests_results[i][1]; | |
1783 var mem = tests_results[i][2]; | |
1784 result[test_name] = [mflops, mem] | |
1785 } | |
1786 window.domAutomationController.send(JSON.stringify(result)); | |
1787 """ | |
1788 result = eval(self.ExecuteJavascript(js_result, tab_index=1)) | |
1789 for benchmark in result: | |
1790 mflops = float(result[benchmark][0]) | |
1791 mem = float(result[benchmark][1]) | |
1792 if benchmark.endswith('_mflops'): | |
1793 benchmark = benchmark[:benchmark.find('_mflops')] | |
1794 logging.info('Results for ScimarkGui_%s:', benchmark) | |
1795 logging.info(' %f MFLOPS', mflops) | |
1796 logging.info(' %f MB', mem) | |
1797 self._OutputPerfGraphValue('ScimarkGui-%s-MFLOPS' % benchmark, mflops, | |
1798 'MFLOPS', 'scimark_gui_mflops') | |
1799 self._OutputPerfGraphValue('ScimarkGui-%s-Mem' % benchmark, mem, 'MB', | |
1800 'scimark_gui_mem') | |
1801 | |
1802 | |
1803 class LiveGamePerfTest(BasePerfTest): | |
1804 """Tests to measure performance of live gaming webapps.""" | |
1805 | |
1806 def _RunLiveGamePerfTest(self, url, url_title_substring, | |
1807 description, graph_name): | |
1808 """Measures performance metrics for the specified live gaming webapp. | |
1809 | |
1810 This function connects to the specified URL to launch the gaming webapp, | |
1811 waits for a period of time for the webapp to run, then collects some | |
1812 performance metrics about the running webapp. | |
1813 | |
1814 Args: | |
1815 url: The string URL of the gaming webapp to analyze. | |
1816 url_title_substring: A string that is expected to be a substring of the | |
1817 webpage title for the specified gaming webapp. Used to verify that | |
1818 the webapp loads correctly. | |
1819 description: A string description for this game, used in the performance | |
1820 value description. Should not contain any spaces. | |
1821 graph_name: A string name for the performance graph associated with this | |
1822 test. Only used on Chrome desktop. | |
1823 """ | |
1824 self.NavigateToURL(url) | |
1825 loaded_tab_title = self.GetActiveTabTitle() | |
1826 self.assertTrue(url_title_substring in loaded_tab_title, | |
1827 msg='Loaded tab title missing "%s": "%s"' % | |
1828 (url_title_substring, loaded_tab_title)) | |
1829 cpu_usage_start = self._GetCPUUsage() | |
1830 | |
1831 # Let the app run for 1 minute. | |
1832 time.sleep(60) | |
1833 | |
1834 cpu_usage_end = self._GetCPUUsage() | |
1835 fraction_non_idle_time = self._GetFractionNonIdleCPUTime( | |
1836 cpu_usage_start, cpu_usage_end) | |
1837 | |
1838 logging.info('Fraction of CPU time spent non-idle: %f', | |
1839 fraction_non_idle_time) | |
1840 self._OutputPerfGraphValue(description + 'CpuBusy', fraction_non_idle_time, | |
1841 'Fraction', graph_name + '_cpu_busy') | |
1842 v8_heap_stats = self.GetV8HeapStats() | |
1843 v8_heap_size = v8_heap_stats['v8_memory_used'] / (1024.0 * 1024.0) | |
1844 logging.info('Total v8 heap size: %f MB', v8_heap_size) | |
1845 self._OutputPerfGraphValue(description + 'V8HeapSize', v8_heap_size, 'MB', | |
1846 graph_name + '_v8_heap_size') | |
1847 | |
1848 def testAngryBirds(self): | |
1849 """Measures performance for Angry Birds.""" | |
1850 self._RunLiveGamePerfTest('http://chrome.angrybirds.com', 'Angry Birds', | |
1851 'AngryBirds', 'angry_birds') | |
1852 | |
1853 | |
1854 class BasePageCyclerTest(BasePerfTest): | |
  """Base class for page cycler tests.
1856 | |
1857 Derived classes must implement StartUrl(). | |
1858 | |
1859 Environment Variables: | |
1860 PC_NO_AUTO: if set, avoids automatically loading pages. | |
1861 """ | |
1862 MAX_ITERATION_SECONDS = 60 | |
1863 TRIM_PERCENT = 20 | |
1864 DEFAULT_USE_AUTO = True | |
1865 | |
1866 # Page Cycler lives in src/data/page_cycler rather than src/chrome/test/data | |
1867 DATA_PATH = os.path.abspath( | |
1868 os.path.join(BasePerfTest.DataDir(), os.pardir, os.pardir, | |
1869 os.pardir, 'data', 'page_cycler')) | |
1870 | |
1871 def setUp(self): | |
1872 """Performs necessary setup work before running each test.""" | |
1873 super(BasePageCyclerTest, self).setUp() | |
1874 self.use_auto = 'PC_NO_AUTO' not in os.environ | |
1875 | |
1876 @classmethod | |
1877 def DataPath(cls, subdir): | |
1878 return os.path.join(cls.DATA_PATH, subdir) | |
1879 | |
1880 def ExtraChromeFlags(self): | |
1881 """Ensures Chrome is launched with custom flags. | |
1882 | |
1883 Returns: | |
1884 A list of extra flags to pass to Chrome when it is launched. | |
1885 """ | |
    # Extra flags required to run these tests. The first two are needed by
    # the test pages themselves; the plugins flag prevents bad scores caused
    # by pop-ups about running an old version of something (like Flash).
1890 return (super(BasePageCyclerTest, self).ExtraChromeFlags() + | |
1891 ['--js-flags="--expose_gc"', | |
1892 '--enable-file-cookies', | |
1893 '--allow-outdated-plugins']) | |
1894 | |
1895 def WaitUntilStarted(self, start_url): | |
1896 """Check that the test navigates away from the start_url.""" | |
1897 js_is_started = """ | |
1898 var is_started = document.location.href !== "%s"; | |
1899 window.domAutomationController.send(JSON.stringify(is_started)); | |
1900 """ % start_url | |
1901 self.assertTrue( | |
1902 self.WaitUntil(lambda: self.ExecuteJavascript(js_is_started) == 'true', | |
1903 timeout=10), | |
1904 msg='Timed out when waiting to leave start page.') | |
1905 | |
1906 def WaitUntilDone(self, url, iterations): | |
1907 """Check cookies for "__pc_done=1" to know the test is over.""" | |
1908 def IsDone(): | |
1909 cookies = self.GetCookie(pyauto.GURL(url)) # window 0, tab 0 | |
1910 return '__pc_done=1' in cookies | |
1911 self.assertTrue( | |
1912 self.WaitUntil( | |
1913 IsDone, | |
1914 timeout=(self.MAX_ITERATION_SECONDS * iterations), | |
1915 retry_sleep=1), | |
1916 msg='Timed out waiting for page cycler test to complete.') | |
1917 | |
1918 def CollectPagesAndTimes(self, url): | |
1919 """Collect the results from the cookies.""" | |
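    # The page cycler stores its results in cookies, e.g. (illustrative
    # values): __pc_pages=page1,page2;__pc_timings=102.3,87.0,99.1,90.2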
1920 pages, times = None, None | |
1921 cookies = self.GetCookie(pyauto.GURL(url)) # window 0, tab 0 | |
1922 for cookie in cookies.split(';'): | |
1923 if '__pc_pages' in cookie: | |
1924 pages_str = cookie.split('=', 1)[1] | |
1925 pages = pages_str.split(',') | |
1926 elif '__pc_timings' in cookie: | |
1927 times_str = cookie.split('=', 1)[1] | |
1928 times = [float(t) for t in times_str.split(',')] | |
1929 self.assertTrue(pages and times, | |
1930 msg='Unable to find test results in cookies: %s' % cookies) | |
1931 return pages, times | |
1932 | |
1933 def IteratePageTimes(self, pages, times, iterations): | |
1934 """Regroup the times by the page. | |
1935 | |
1936 Args: | |
1937 pages: the list of pages | |
1938 times: e.g. [page1_iter1, page2_iter1, ..., page1_iter2, page2_iter2, ...] | |
1939 iterations: the number of times for each page | |
1940 Yields: | |
1941 (pageN, [pageN_iter1, pageN_iter2, ...]) | |
1942 """ | |
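    # For example (illustrative), pages=['a', 'b'] with iterations=2 and
    # times=[a1, b1, a2, b2] yields ('a', [a1, a2]) and then ('b', [b1, b2]).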
1943 num_pages = len(pages) | |
1944 num_times = len(times) | |
1945 expected_num_times = num_pages * iterations | |
1946 self.assertEqual( | |
1947 expected_num_times, num_times, | |
1948 msg=('num_times != num_pages * iterations: %s != %s * %s, times=%s' % | |
1949 (num_times, num_pages, iterations, times))) | |
1950 for i, page in enumerate(pages): | |
1951 yield page, list(itertools.islice(times, i, None, num_pages)) | |
1952 | |
1953 def CheckPageTimes(self, pages, times, iterations): | |
1954 """Assert that all the times are greater than zero.""" | |
1955 failed_pages = [] | |
1956 for page, times in self.IteratePageTimes(pages, times, iterations): | |
1957 failed_times = [t for t in times if t <= 0.0] | |
1958 if failed_times: | |
1959 failed_pages.append((page, failed_times)) | |
1960 if failed_pages: | |
1961 self.fail('Pages with unexpected times: %s' % failed_pages) | |
1962 | |
1963 def TrimTimes(self, times, percent): | |
    """Return a new list with |percent| percent of the times trimmed away.
1965 | |
1966 Removes the largest and smallest values. | |
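
    For example (illustrative), with 10 recorded times and percent=20, two
    values are trimmed: the smallest and the largest.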
1967 """ | |
1968 iterations = len(times) | |
1969 times = sorted(times) | |
1970 num_to_trim = int(iterations * float(percent) / 100.0) | |
    logging.debug('Before trimming %d: %s', num_to_trim, times)
1972 a = num_to_trim / 2 | |
1973 b = iterations - (num_to_trim / 2 + num_to_trim % 2) | |
1974 trimmed_times = times[a:b] | |
1975 logging.debug('After trimming: %s', trimmed_times) | |
1976 return trimmed_times | |
1977 | |
1978 def ComputeFinalResult(self, pages, times, iterations): | |
    """Computes the final page cycler score from the collected times.

    The final score is the geometric mean of the arithmetic means of each
    page's load times, where the upper/lower 20% of the times for each page
    are dropped so they don't skew the mean. The geometric mean is used
    because the time range for any given site may be very different, and we
    don't want slower sites to weigh more heavily than others.
1985 """ | |
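    # For example (illustrative), two pages with trimmed means of 100 ms and
    # 400 ms yield a final score of sqrt(100 * 400) = 200 ms.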
1986 self.CheckPageTimes(pages, times, iterations) | |
1987 page_means = [ | |
1988 Mean(self.TrimTimes(times, percent=self.TRIM_PERCENT)) | |
1989 for _, times in self.IteratePageTimes(pages, times, iterations)] | |
1990 return GeometricMean(page_means) | |
1991 | |
1992 def StartUrl(self, test_name, iterations): | |
    """Returns the URL used to start the test.
1994 | |
1995 Derived classes must implement this. | |
1996 """ | |
    raise NotImplementedError
1998 | |
1999 def RunPageCyclerTest(self, name, description): | |
2000 """Runs the specified PageCycler test. | |
2001 | |
2002 Args: | |
2003 name: the page cycler test name (corresponds to a directory or test file) | |
2004 description: a string description for the test | |
2005 """ | |
2006 iterations = self._num_iterations | |
2007 start_url = self.StartUrl(name, iterations) | |
2008 self.NavigateToURL(start_url) | |
2009 if self.use_auto: | |
2010 self.WaitUntilStarted(start_url) | |
2011 self.WaitUntilDone(start_url, iterations) | |
2012 pages, times = self.CollectPagesAndTimes(start_url) | |
2013 final_result = self.ComputeFinalResult(pages, times, iterations) | |
    logging.info('%s page cycler final result: %f',
                 description, final_result)
2016 self._OutputPerfGraphValue(description + '_PageCycler', final_result, | |
2017 'milliseconds', graph_name='PageCycler') | |
2018 | |
2019 | |
2020 class PageCyclerTest(BasePageCyclerTest): | |
2021 """Tests to run various page cyclers. | |
2022 | |
2023 Environment Variables: | |
2024 PC_NO_AUTO: if set, avoids automatically loading pages. | |
2025 """ | |
2026 | |
2027 def _PreReadDataDir(self, subdir): | |
    """Recursively reads all files in a given page cycler data directory.
2029 | |
2030 The intent is to get them into memory before they are used by the benchmark. | |
2031 | |
2032 Args: | |
2033 subdir: a subdirectory of the page cycler data directory. | |
2034 """ | |
2035 def _PreReadDir(dirname, names): | |
2036 for rfile in names: | |
2037 with open(os.path.join(dirname, rfile)) as fp: | |
2038 fp.read() | |
2039 for root, dirs, files in os.walk(self.DataPath(subdir)): | |
2040 _PreReadDir(root, files) | |
2041 | |
2042 def StartUrl(self, test_name, iterations): | |
2043 # Must invoke GetFileURLForPath before appending parameters to the URL, | |
2044 # otherwise those parameters will get quoted. | |
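    # The resulting URL looks like (illustrative values):
    #   file:///.../data/page_cycler/moz/start.html?iterations=10&auto=1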
2045 start_url = self.GetFileURLForPath(self.DataPath(test_name), 'start.html') | |
2046 start_url += '?iterations=%d' % iterations | |
2047 if self.use_auto: | |
2048 start_url += '&auto=1' | |
2049 return start_url | |
2050 | |
2051 def RunPageCyclerTest(self, dirname, description): | |
2052 """Runs the specified PageCycler test. | |
2053 | |
2054 Args: | |
2055 dirname: directory containing the page cycler test | |
2056 description: a string description for the test | |
2057 """ | |
2058 self._PreReadDataDir('common') | |
2059 self._PreReadDataDir(dirname) | |
2060 super(PageCyclerTest, self).RunPageCyclerTest(dirname, description) | |
2061 | |
2062 def testMoreJSFile(self): | |
2063 self.RunPageCyclerTest('morejs', 'MoreJSFile') | |
2064 | |
2065 def testAlexaFile(self): | |
2066 self.RunPageCyclerTest('alexa_us', 'Alexa_usFile') | |
2067 | |
2068 def testBloatFile(self): | |
2069 self.RunPageCyclerTest('bloat', 'BloatFile') | |
2070 | |
2071 def testDHTMLFile(self): | |
2072 self.RunPageCyclerTest('dhtml', 'DhtmlFile') | |
2073 | |
2074 def testIntl1File(self): | |
2075 self.RunPageCyclerTest('intl1', 'Intl1File') | |
2076 | |
2077 def testIntl2File(self): | |
2078 self.RunPageCyclerTest('intl2', 'Intl2File') | |
2079 | |
2080 def testMozFile(self): | |
2081 self.RunPageCyclerTest('moz', 'MozFile') | |
2082 | |
2083 def testMoz2File(self): | |
2084 self.RunPageCyclerTest('moz2', 'Moz2File') | |
2085 | |
2086 | |
2087 class MemoryTest(BasePerfTest): | |
2088 """Tests to measure memory consumption under different usage scenarios.""" | |
2089 | |
2090 def ExtraChromeFlags(self): | |
2091 """Launches Chrome with custom flags. | |
2092 | |
2093 Returns: | |
2094 A list of extra flags to pass to Chrome when it is launched. | |
2095 """ | |
2096 # Ensure Chrome assigns one renderer process to each tab. | |
2097 return super(MemoryTest, self).ExtraChromeFlags() + ['--process-per-tab'] | |
2098 | |
2099 def _RecordMemoryStats(self, description, when, duration): | |
2100 """Outputs memory statistics to be graphed. | |
2101 | |
2102 Args: | |
2103 description: A string description for the test. Should not contain | |
2104 spaces. For example, 'MemCtrl'. | |
2105 when: A string description of when the memory stats are being recorded | |
2106 during test execution (since memory stats may be recorded multiple | |
2107 times during a test execution at certain "interesting" times). Should | |
2108 not contain spaces. | |
2109 duration: The number of seconds to sample data before outputting the | |
2110 memory statistics. | |
2111 """ | |
2112 mem = self.GetMemoryStatsChromeOS(duration) | |
2113 measurement_types = [ | |
2114 ('gem_obj', 'GemObj'), | |
2115 ('gtt', 'GTT'), | |
2116 ('mem_free', 'MemFree'), | |
2117 ('mem_available', 'MemAvail'), | |
2118 ('mem_shared', 'MemShare'), | |
2119 ('mem_cached', 'MemCache'), | |
2120 ('mem_anon', 'MemAnon'), | |
2121 ('mem_file', 'MemFile'), | |
2122 ('mem_slab', 'MemSlab'), | |
2123 ('browser_priv', 'BrowPriv'), | |
2124 ('browser_shared', 'BrowShar'), | |
2125 ('gpu_priv', 'GpuPriv'), | |
2126 ('gpu_shared', 'GpuShar'), | |
2127 ('renderer_priv', 'RendPriv'), | |
2128 ('renderer_shared', 'RendShar'), | |
2129 ] | |
2130 for type_key, type_string in measurement_types: | |
2131 if type_key not in mem: | |
2132 continue | |
2133 self._OutputPerfGraphValue( | |
2134 '%s-Min%s-%s' % (description, type_string, when), | |
2135 mem[type_key]['min'], 'KB', '%s-%s' % (description, type_string)) | |
2136 self._OutputPerfGraphValue( | |
2137 '%s-Max%s-%s' % (description, type_string, when), | |
2138 mem[type_key]['max'], 'KB', '%s-%s' % (description, type_string)) | |
2139 self._OutputPerfGraphValue( | |
2140 '%s-End%s-%s' % (description, type_string, when), | |
2141 mem[type_key]['end'], 'KB', '%s-%s' % (description, type_string)) | |
2142 | |
2143 def _RunTest(self, tabs, description, duration): | |
2144 """Runs a general memory test. | |
2145 | |
2146 Args: | |
2147 tabs: A list of strings representing the URLs of the websites to open | |
2148 during this test. | |
2149 description: A string description for the test. Should not contain | |
2150 spaces. For example, 'MemCtrl'. | |
2151 duration: The number of seconds to sample data before outputting memory | |
2152 statistics. | |
2153 """ | |
2154 self._RecordMemoryStats(description, '0Tabs0', duration) | |
2155 | |
2156 for iteration_num in xrange(2): | |
2157 for site in tabs: | |
2158 self.AppendTab(pyauto.GURL(site)) | |
2159 | |
2160 self._RecordMemoryStats(description, | |
2161 '%dTabs%d' % (len(tabs), iteration_num + 1), | |
2162 duration) | |
2163 | |
2164 for _ in xrange(len(tabs)): | |
2165 self.CloseTab(tab_index=1) | |
2166 | |
2167 self._RecordMemoryStats(description, '0Tabs%d' % (iteration_num + 1), | |
2168 duration) | |
2169 | |
2170 def testOpenCloseTabsControl(self): | |
2171 """Measures memory usage when opening/closing tabs to about:blank.""" | |
2172 tabs = ['about:blank'] * 10 | |
2173 self._RunTest(tabs, 'MemCtrl', 15) | |
2174 | |
2175 def testOpenCloseTabsLiveSites(self): | |
2176 """Measures memory usage when opening/closing tabs to live sites.""" | |
2177 tabs = [ | |
2178 'http://www.google.com/gmail', | |
2179 'http://www.google.com/calendar', | |
2180 'http://www.google.com/plus', | |
2181 'http://www.google.com/youtube', | |
2182 'http://www.nytimes.com', | |
2183 'http://www.cnn.com', | |
2184 'http://www.facebook.com/zuck', | |
2185 'http://www.techcrunch.com', | |
2186 'http://www.theverge.com', | |
2187 'http://www.yahoo.com', | |
2188 ] | |
2189 # Log in to a test Google account to make connections to the above Google | |
2190 # websites more interesting. | |
2191 self._LoginToGoogleAccount() | |
2192 self._RunTest(tabs, 'MemLive', 20) | |
2193 | |
2194 | |
2195 class PerfTestServerRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler): | |
2196 """Request handler for the local performance test server.""" | |
2197 | |
2198 def _IgnoreHandler(self, unused_args): | |
2199 """A GET request handler that simply replies with status code 200. | |
2200 | |
2201 Args: | |
2202 unused_args: A dictionary of arguments for the current GET request. | |
2203 The arguments are ignored. | |
2204 """ | |
2205 self.send_response(200) | |
2206 self.end_headers() | |
2207 | |
2208 def _CreateFileOfSizeHandler(self, args): | |
2209 """A GET handler that creates a local file with the specified size. | |
2210 | |
2211 Args: | |
2212 args: A dictionary of arguments for the current GET request. Must | |
2213 contain 'filename' and 'mb' keys that refer to the name of the file | |
2214 to create and its desired size, respectively. | |
2215 """ | |
2216 megabytes = None | |
2217 filename = None | |
2218 try: | |
2219 megabytes = int(args['mb'][0]) | |
2220 filename = args['filename'][0] | |
2221 except (ValueError, KeyError, IndexError), e: | |
2222 logging.exception('Server error creating file: %s', e) | |
2223 assert megabytes and filename | |
2224 with open(os.path.join(self.server.docroot, filename), 'wb') as f: | |
2225 f.write('X' * 1024 * 1024 * megabytes) | |
2226 self.send_response(200) | |
2227 self.end_headers() | |
2228 | |
2229 def _DeleteFileHandler(self, args): | |
2230 """A GET handler that deletes the specified local file. | |
2231 | |
2232 Args: | |
2233 args: A dictionary of arguments for the current GET request. Must | |
2234 contain a 'filename' key that refers to the name of the file to | |
2235 delete, relative to the server's document root. | |
2236 """ | |
2237 filename = None | |
2238 try: | |
2239 filename = args['filename'][0] | |
2240 except (KeyError, IndexError), e: | |
2241 logging.exception('Server error deleting file: %s', e) | |
2242 assert filename | |
2243 try: | |
2244 os.remove(os.path.join(self.server.docroot, filename)) | |
2245 except OSError, e: | |
2246 logging.warning('OS error removing file: %s', e) | |
2247 self.send_response(200) | |
2248 self.end_headers() | |
2249 | |
2250 def _StartUploadHandler(self, args): | |
2251 """A GET handler to serve a page that uploads the given amount of data. | |
2252 | |
2253 When the page loads, the specified amount of data is automatically | |
2254 uploaded to the same local server that is handling the current request. | |
2255 | |
2256 Args: | |
2257 args: A dictionary of arguments for the current GET request. Must | |
2258 contain an 'mb' key that refers to the size of the data to upload. | |
2259 """ | |
2260 megabytes = None | |
2261 try: | |
2262 megabytes = int(args['mb'][0]) | |
2263 except (ValueError, KeyError, IndexError), e: | |
2264 logging.exception('Server error starting upload: %s', e) | |
2265 assert megabytes | |
2266 script = """ | |
2267 <html> | |
2268 <head> | |
2269 <script type='text/javascript'> | |
2270 function startUpload() { | |
2271 var megabytes = %s; | |
2272 var data = Array((1024 * 1024 * megabytes) + 1).join('X'); | |
2273 var boundary = '***BOUNDARY***'; | |
2274 var xhr = new XMLHttpRequest(); | |
2275 | |
2276 xhr.open('POST', 'process_upload', true); | |
2277 xhr.setRequestHeader( | |
2278 'Content-Type', | |
2279 'multipart/form-data; boundary="' + boundary + '"'); | |
2280 xhr.setRequestHeader('Content-Length', data.length); | |
2281 xhr.onreadystatechange = function() { | |
2282 if (xhr.readyState == 4 && xhr.status == 200) { | |
2283 document.getElementById('upload_result').innerHTML = | |
2284 xhr.responseText; | |
2285 } | |
2286 }; | |
2287 var body = '--' + boundary + '\\r\\n'; | |
2288 body += 'Content-Disposition: form-data;' + | |
2289 'file_contents=' + data; | |
2290 xhr.send(body); | |
2291 } | |
2292 </script> | |
2293 </head> | |
2294 | |
2295 <body onload="startUpload();"> | |
2296 <div id='upload_result'>Uploading...</div> | |
2297 </body> | |
2298 </html> | |
2299 """ % megabytes | |
2300 self.send_response(200) | |
2301 self.end_headers() | |
2302 self.wfile.write(script) | |
2303 | |
2304 def _ProcessUploadHandler(self, form): | |
2305 """A POST handler that discards uploaded data and sends a response. | |
2306 | |
2307 Args: | |
2308 form: A dictionary containing posted form data, as returned by | |
2309 urlparse.parse_qs(). | |
2310 """ | |
2311 upload_processed = False | |
2312 file_size = 0 | |
2313 if 'file_contents' in form: | |
2314 file_size = len(form['file_contents'][0]) | |
2315 upload_processed = True | |
2316 self.send_response(200) | |
2317 self.end_headers() | |
2318 if upload_processed: | |
2319 self.wfile.write('Upload complete (%d bytes)' % file_size) | |
2320 else: | |
2321 self.wfile.write('No file contents uploaded') | |
2322 | |
2323 GET_REQUEST_HANDLERS = { | |
2324 'create_file_of_size': _CreateFileOfSizeHandler, | |
2325 'delete_file': _DeleteFileHandler, | |
2326 'start_upload': _StartUploadHandler, | |
2327 'favicon.ico': _IgnoreHandler, | |
2328 } | |
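  # A GET request's first path component selects a handler above; e.g. a
  # request for '/create_file_of_size?filename=data&mb=100' is dispatched to
  # _CreateFileOfSizeHandler with the parsed query-string arguments.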
2329 | |
2330 POST_REQUEST_HANDLERS = { | |
2331 'process_upload': _ProcessUploadHandler, | |
2332 } | |
2333 | |
2334 def translate_path(self, path): | |
2335 """Ensures files are served from the given document root. | |
2336 | |
2337 Overridden from SimpleHTTPServer.SimpleHTTPRequestHandler. | |
2338 """ | |
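    # For example, a request path of '/a/../b' maps to
    # os.path.join(docroot, 'a', 'b'); '.' and '..' components are skipped
    # rather than resolved, so requests cannot escape the document root.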
2339 path = urlparse.urlparse(path)[2] | |
2340 path = posixpath.normpath(urllib.unquote(path)) | |
2341 words = path.split('/') | |
2342 words = filter(None, words) # Remove empty strings from |words|. | |
2343 path = self.server.docroot | |
2344 for word in words: | |
2345 _, word = os.path.splitdrive(word) | |
2346 _, word = os.path.split(word) | |
2347 if word in (os.curdir, os.pardir): | |
2348 continue | |
2349 path = os.path.join(path, word) | |
2350 return path | |
2351 | |
2352 def do_GET(self): | |
2353 """Processes a GET request to the local server. | |
2354 | |
2355 Overridden from SimpleHTTPServer.SimpleHTTPRequestHandler. | |
2356 """ | |
2357 split_url = urlparse.urlsplit(self.path) | |
2358 base_path = split_url[2] | |
2359 if base_path.startswith('/'): | |
2360 base_path = base_path[1:] | |
2361 args = urlparse.parse_qs(split_url[3]) | |
2362 if base_path in self.GET_REQUEST_HANDLERS: | |
2363 self.GET_REQUEST_HANDLERS[base_path](self, args) | |
2364 else: | |
2365 SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self) | |
2366 | |
2367 def do_POST(self): | |
2368 """Processes a POST request to the local server. | |
2369 | |
2370 Overridden from SimpleHTTPServer.SimpleHTTPRequestHandler. | |
2371 """ | |
2372 form = urlparse.parse_qs( | |
2373 self.rfile.read(int(self.headers.getheader('Content-Length')))) | |
2374 path = urlparse.urlparse(self.path)[2] | |
2375 if path.startswith('/'): | |
2376 path = path[1:] | |
2377 if path in self.POST_REQUEST_HANDLERS: | |
2378 self.POST_REQUEST_HANDLERS[path](self, form) | |
2379 else: | |
2380 self.send_response(200) | |
2381 self.send_header('Content-Type', 'text/plain') | |
2382 self.end_headers() | |
2383 self.wfile.write('No handler for POST request "%s".' % path) | |
2384 | |
2385 | |
2386 class ThreadedHTTPServer(SocketServer.ThreadingMixIn, | |
2387 BaseHTTPServer.HTTPServer): | |
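  """HTTP server that handles each request in a separate thread."""
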
2388 def __init__(self, server_address, handler_class): | |
2389 BaseHTTPServer.HTTPServer.__init__(self, server_address, handler_class) | |
2390 | |
2391 | |
2392 class PerfTestServer(object): | |
2393 """Local server for use by performance tests.""" | |
2394 | |
2395 def __init__(self, docroot): | |
2396 """Initializes the performance test server. | |
2397 | |
2398 Args: | |
2399 docroot: The directory from which to serve files. | |
2400 """ | |
2401 # The use of 0 means to start the server on an arbitrary available port. | |
2402 self._server = ThreadedHTTPServer(('', 0), | |
2403 PerfTestServerRequestHandler) | |
2404 self._server.docroot = docroot | |
2405 self._server_thread = threading.Thread(target=self._server.serve_forever) | |
2406 | |
2407 def Run(self): | |
2408 """Starts the server thread.""" | |
2409 self._server_thread.start() | |
2410 | |
2411 def ShutDown(self): | |
2412 """Shuts down the server.""" | |
2413 self._server.shutdown() | |
2414 self._server_thread.join() | |
2415 | |
2416 def GetPort(self): | |
2417 """Identifies the port number to which the server is currently bound. | |
2418 | |
2419 Returns: | |
2420 The numeric port number to which the server is currently bound. | |
2421 """ | |
2422 return self._server.server_address[1] | |
2423 | |
2424 | |
2425 if __name__ == '__main__': | |
2426 pyauto_functional.Main() | |