| OLD | NEW |
| (Empty) |
| 1 #!/usr/bin/env python | |
| 2 # Copyright (c) 2012 The Chromium Authors. All rights reserved. | |
| 3 # Use of this source code is governed by a BSD-style license that can be | |
| 4 # found in the LICENSE file. | |
| 5 | |
| 6 """Performance tests for Chrome Endure (long-running perf tests on Chrome). | |
| 7 | |
| 8 This module accepts the following environment variable inputs: | |
| 9 TEST_LENGTH: The number of seconds in which to run each test. | |
| 10 PERF_STATS_INTERVAL: The number of seconds to wait in-between each sampling | |
| 11 of performance/memory statistics. | |
| 12 | |
| 13 The following variables are related to the Deep Memory Profiler. | |
| 14 DEEP_MEMORY_PROFILE: Enable the Deep Memory Profiler if it's set to 'True'. | |
| 15 DEEP_MEMORY_PROFILE_SAVE: Don't clean up dump files if it's set to 'True'. | |
| 16 DEEP_MEMORY_PROFILE_UPLOAD: Upload dumped files if the variable has a Google | |
| 17 Storage bucket like gs://chromium-endure/. The 'gsutil' script in $PATH | |
| 18 is used by default, or set a variable 'GSUTIL' to specify a path to the | |
| 19 'gsutil' script. A variable 'REVISION' (or 'BUILDBOT_GOT_REVISION') is | |
| 20 used as a subdirectory in the destination if it is set. | |
| 21 GSUTIL: A path to the 'gsutil' script. Not mandatory. | |
| 22 REVISION: A string that represents the revision or some build configuration. | |
| 23 Not mandatory. | |
| 24 BUILDBOT_GOT_REVISION: Similar to 'REVISION', but checked only if 'REVISION' | |
| 25 is not specified. Not mandatory. | |
| 26 """ | |
| 27 | |
| 28 from datetime import datetime | |
| 29 import json | |
| 30 import logging | |
| 31 import os | |
| 32 import re | |
| 33 import subprocess | |
| 34 import tempfile | |
| 35 import time | |
| 36 | |
| 37 import perf | |
| 38 import pyauto_functional # Must be imported before pyauto. | |
| 39 import pyauto | |
| 40 import pyauto_errors | |
| 41 import pyauto_utils | |
| 42 import remote_inspector_client | |
| 43 import selenium.common.exceptions | |
| 44 from selenium.webdriver.support.ui import WebDriverWait | |
| 45 | |
| 46 | |
class NotSupportedEnvironmentError(RuntimeError):
  """Raised when the current environment (OS) cannot run the profiler."""
| 50 | |
| 51 | |
class DeepMemoryProfiler(object):
  """Controls Deep Memory Profiler (dmprof) for endurance tests.

  An instance is truthy (see __nonzero__) iff profiling was enabled via the
  DEEP_MEMORY_PROFILE environment variable, so callers can write
  'if self._dmprof:' to guard profiler-only work.
  """
  # Defaults used when the corresponding environment variables are unset.
  DEEP_MEMORY_PROFILE = False        # Enable the profiler?
  DEEP_MEMORY_PROFILE_SAVE = False   # Keep dump files after the test?
  DEEP_MEMORY_PROFILE_UPLOAD = ''    # Google Storage destination, if any.

  # Matches working directories created by SetUp(), e.g.
  # 'endure.20120101.123456.AbC123' (date, time, mkdtemp suffix).
  _WORKDIR_PATTERN = re.compile('endure\.[0-9]+\.[0-9]+\.[A-Za-z0-9]+')
  # Number of most-recent working directories to keep during cleanup.
  _SAVED_WORKDIRS = 8

  # Absolute path to src/tools/deep_memory_profiler, located relative to
  # this file (three directory levels up).
  _DMPROF_DIR_PATH = os.path.abspath(os.path.join(
      os.path.dirname(__file__), os.pardir, os.pardir, os.pardir,
      'tools', 'deep_memory_profiler'))
  _DMPROF_SCRIPT_PATH = os.path.join(_DMPROF_DIR_PATH, 'dmprof')
  # dmprof policy names whose snapshots are parsed and graphed.
  _POLICIES = ['l0', 'l1', 'l2', 't0']

  def __init__(self):
    # NOTE(review): bool(s) is True for ANY non-empty string, so setting
    # DEEP_MEMORY_PROFILE to the literal 'False' still enables profiling.
    self._enabled = self.GetEnvironmentVariable(
        'DEEP_MEMORY_PROFILE', bool, self.DEEP_MEMORY_PROFILE)
    self._save = self.GetEnvironmentVariable(
        'DEEP_MEMORY_PROFILE_SAVE', bool, self.DEEP_MEMORY_PROFILE_SAVE)
    self._upload = self.GetEnvironmentVariable(
        'DEEP_MEMORY_PROFILE_UPLOAD', str, self.DEEP_MEMORY_PROFILE_UPLOAD)
    # Normalize the upload destination so it can be used as a URL prefix.
    if self._upload and not self._upload.endswith('/'):
      self._upload += '/'

    self._revision = ''
    self._gsutil = ''
    self._json_file = None         # Open file receiving dmprof JSON output.
    self._last_json_filename = ''  # Name of the last completed JSON file.
    self._proc = None              # Running dmprof subprocess, if any.
    # Newest snapshot time (seconds) already graphed, per policy; -1.0
    # means nothing has been reported yet for that policy.
    self._last_time = {}
    for policy in self._POLICIES:
      self._last_time[policy] = -1.0

  def __nonzero__(self):
    # Python 2 truth-value hook: the instance is truthy iff enabled.
    return self._enabled

  @staticmethod
  def GetEnvironmentVariable(env_name, converter, default):
    """Returns a converted environment variable for Deep Memory Profiler.

    Args:
      env_name: A string name of an environment variable.
      converter: A function taking a string to convert an environment variable.
      default: A value used if the environment variable is not specified.

    Returns:
      A value converted from the environment variable with 'converter'.
    """
    return converter(os.environ.get(env_name, default))

  def SetUp(self, is_linux, revision, gsutil):
    """Sets up Deep Memory Profiler settings for a Chrome process.

    It sets environment variables and makes a working directory.

    Args:
      is_linux: True iff the host OS is Linux (the only supported OS).
      revision: A revision string used as an upload subdirectory.
      gsutil: A path to the 'gsutil' script for uploading.

    Raises:
      NotSupportedEnvironmentError: If profiling is enabled on non-Linux.
    """
    if not self._enabled:
      return

    if not is_linux:
      raise NotSupportedEnvironmentError(
          'Deep Memory Profiler is not supported in this environment (OS).')

    self._revision = revision
    self._gsutil = gsutil

    # Remove old dumped files with keeping latest _SAVED_WORKDIRS workdirs.
    # It keeps the latest workdirs not to miss data by failure in uploading
    # and other operations. Dumped files are no longer available if they are
    # removed. Re-execution doesn't generate the same files.
    tempdir = tempfile.gettempdir()
    saved_workdirs = 0
    # reverse=True sorts newest (lexicographically largest timestamp) first,
    # so the first _SAVED_WORKDIRS matches are kept and older ones removed.
    for filename in sorted(os.listdir(tempdir), reverse=True):
      if self._WORKDIR_PATTERN.match(filename):
        saved_workdirs += 1
        if saved_workdirs > self._SAVED_WORKDIRS:
          fullpath = os.path.abspath(os.path.join(tempdir, filename))
          logging.info('Removing an old workdir: %s' % fullpath)
          pyauto_utils.RemovePath(fullpath)

    # Create a fresh timestamped workdir and point tcmalloc's heap profiler
    # at it; Chrome inherits these variables when launched later.
    dir_prefix = 'endure.%s.' % datetime.today().strftime('%Y%m%d.%H%M%S')
    self._workdir = tempfile.mkdtemp(prefix=dir_prefix, dir=tempdir)
    os.environ['HEAPPROFILE'] = os.path.join(self._workdir, 'endure')
    os.environ['HEAP_PROFILE_MMAP'] = '1'
    os.environ['DEEP_HEAP_PROFILE'] = '1'

  def TearDown(self):
    """Tear down Deep Memory Profiler settings for the Chrome process.

    It removes the environment variables and the temporary directory.
    Call it after Chrome finishes. Chrome may dump last files at the end.
    """
    if not self._enabled:
      return

    del os.environ['DEEP_HEAP_PROFILE']
    del os.environ['HEAP_PROFILE_MMAP']
    del os.environ['HEAPPROFILE']
    # Keep the dump files only when explicitly requested via
    # DEEP_MEMORY_PROFILE_SAVE.
    if not self._save and self._workdir:
      pyauto_utils.RemovePath(self._workdir)

  def LogFirstMessage(self):
    """Logs first messages."""
    if not self._enabled:
      return

    logging.info('Running with the Deep Memory Profiler.')
    if self._save:
      logging.info(' Dumped files won\'t be cleaned.')
    else:
      logging.info(' Dumped files will be cleaned up after every test.')

  def StartProfiler(self, proc_info, is_last, webapp_name, test_description):
    """Starts Deep Memory Profiler in background.

    Launches 'dmprof json' over the dump files of the tab process and
    redirects its output into a JSON file in the workdir. Optionally
    uploads the dumps to Google Storage.

    Args:
      proc_info: A dict with at least 'tab_pid' (see _GetProcessInfo).
      is_last: True for the final profiling run; waits for dmprof to finish.
      webapp_name: A string name for the webapp being tested.
      test_description: A string description of the test scenario.
    """
    if not self._enabled:
      return

    logging.info(' Profiling with the Deep Memory Profiler...')

    # Wait for a running dmprof process for last _GetPerformanceStat call to
    # cover last dump files.
    if is_last:
      logging.info(' Waiting for the last dmprof.')
      self._WaitForDeepMemoryProfiler()

    if self._proc and self._proc.poll() is None:
      # A previous dmprof run hasn't finished; skip this round entirely.
      logging.info(' Last dmprof is still running.')
    else:
      # The previous run (if any) completed: publish its JSON file name so
      # ParseResultAndOutputPerfGraphValues can pick it up.
      if self._json_file:
        self._last_json_filename = self._json_file.name
        self._json_file.close()
        self._json_file = None
      first_dump = ''
      last_dump = ''
      # NOTE(review): the '.' characters around %05d and before 'heap' are
      # unescaped regex metacharacters; they happen to also match the
      # literal dots in the file names, so this works in practice.
      for filename in sorted(os.listdir(self._workdir)):
        if re.match('^endure.%05d.\d+.heap$' % proc_info['tab_pid'],
                    filename):
          logging.info(' Profiled dump file: %s' % filename)
          last_dump = filename
          if not first_dump:
            first_dump = filename
      if first_dump:
        logging.info(' First dump file: %s' % first_dump)
        # Name the JSON output after the newest dump's sequence id.
        matched = re.match('^endure.\d+.(\d+).heap$', last_dump)
        last_sequence_id = matched.group(1)
        self._json_file = open(
            os.path.join(self._workdir,
                         'endure.%05d.%s.json' % (proc_info['tab_pid'],
                                                  last_sequence_id)), 'w+')
        # dmprof discovers sibling dumps itself; only the first dump's path
        # is passed on the command line.
        self._proc = subprocess.Popen(
            '%s json %s' % (self._DMPROF_SCRIPT_PATH,
                            os.path.join(self._workdir, first_dump)),
            shell=True, stdout=self._json_file)
        if is_last:
          # Wait only when it is the last profiling. dmprof may take long time.
          self._WaitForDeepMemoryProfiler()

        # Upload the dumped files.
        if first_dump and self._upload and self._gsutil:
          if self._revision:
            destination_path = '%s%s/' % (self._upload, self._revision)
          else:
            destination_path = self._upload
          destination_path += '%s-%s-%s.zip' % (
              webapp_name,
              test_description,
              os.path.basename(self._workdir))
          gsutil_command = '%s upload --gsutil %s %s %s' % (
              self._DMPROF_SCRIPT_PATH,
              self._gsutil,
              os.path.join(self._workdir, first_dump),
              destination_path)
          logging.info('Uploading: %s' % gsutil_command)
          try:
            returncode = subprocess.call(gsutil_command, shell=True)
            logging.info(' Return code: %d' % returncode)
          except OSError, e:
            # Best effort: a failed upload must not abort the test run.
            logging.error(' Error while uploading: %s', e)
        else:
          logging.info('Note that the dumped files are not uploaded.')
      else:
        logging.info(' No dump files.')

  def ParseResultAndOutputPerfGraphValues(
      self, webapp_name, test_description, output_perf_graph_value):
    """Parses Deep Memory Profiler result, and outputs perf graph values.

    Reads the most recent dmprof JSON file and emits one stacked graph
    series per policy, covering only snapshots newer than the last call.

    Args:
      webapp_name: A string name for the webapp being tested.
      test_description: A string description of the test scenario.
      output_perf_graph_value: A callable with the signature of
          ChromeEndureBaseTest._OutputPerfGraphValue.
    """
    if not self._enabled:
      return

    results = {}
    # NOTE(review): the JSON file is re-read once per policy, and json_data
    # intentionally leaks out of this loop — the legend lookup below relies
    # on the last parsed copy.
    for policy in self._POLICIES:
      if self._last_json_filename:
        json_data = {}
        with open(self._last_json_filename) as json_f:
          json_data = json.load(json_f)
        if json_data['version'] == 'JSON_DEEP_1':
          results[policy] = json_data['snapshots']
        elif json_data['version'] == 'JSON_DEEP_2':
          results[policy] = json_data['policies'][policy]['snapshots']
    for policy, result in results.iteritems():
      # Only report if there is at least one snapshot newer than last time.
      if result and result[-1]['second'] > self._last_time[policy]:
        started = False
        # Only legends between the FROM_HERE/UNTIL_HERE markers contribute
        # to the stacked total.
        for legend in json_data['policies'][policy]['legends']:
          if legend == 'FROM_HERE_FOR_TOTAL':
            started = True
          elif legend == 'UNTIL_HERE_FOR_TOTAL':
            break
          elif started:
            output_perf_graph_value(
                legend.encode('utf-8'), [
                    (int(round(snapshot['second'])), snapshot[legend] / 1024)
                    for snapshot in result
                    if snapshot['second'] > self._last_time[policy]],
                'KB',
                graph_name='%s%s-%s-DMP' % (
                    webapp_name, test_description, policy),
                units_x='seconds', is_stacked=True)
        self._last_time[policy] = result[-1]['second']

  def _WaitForDeepMemoryProfiler(self):
    """Waits for the Deep Memory Profiler to finish if running."""
    if not self._enabled or not self._proc:
      return

    self._proc.wait()
    self._proc = None
    # The JSON output is now complete; record its name for parsing.
    if self._json_file:
      self._last_json_filename = self._json_file.name
      self._json_file.close()
      self._json_file = None
| 282 | |
| 283 | |
class ChromeEndureBaseTest(perf.BasePerfTest):
  """Implements common functionality for all Chrome Endure tests.

  All Chrome Endure test classes should inherit from this class.
  """

  _DEFAULT_TEST_LENGTH_SEC = 60 * 60 * 6  # Tests run for 6 hours.
  _GET_PERF_STATS_INTERVAL = 60 * 5  # Measure perf stats every 5 minutes.
  # TODO(dennisjeffrey): Do we still need to tolerate errors?
  _ERROR_COUNT_THRESHOLD = 50  # Number of errors to tolerate.
  _REVISION = ''  # Default when REVISION/BUILDBOT_GOT_REVISION are unset.
  _GSUTIL = 'gsutil'  # Default gsutil command; assumed to be in $PATH.

  def setUp(self):
    """Prepares the profiler, perf base class, and remote inspector."""
    # The environment variables for the Deep Memory Profiler must be set
    # before perf.BasePerfTest.setUp() to inherit them to Chrome.
    self._dmprof = DeepMemoryProfiler()
    self._revision = str(os.environ.get('REVISION', self._REVISION))
    if not self._revision:
      # Fall back to the buildbot-provided revision if REVISION is unset.
      self._revision = str(os.environ.get('BUILDBOT_GOT_REVISION',
                                          self._REVISION))
    self._gsutil = str(os.environ.get('GSUTIL', self._GSUTIL))
    if self._dmprof:
      self._dmprof.SetUp(self.IsLinux(), self._revision, self._gsutil)

    perf.BasePerfTest.setUp(self)

    # Test length and sampling interval are overridable via environment
    # variables (see the module docstring).
    self._test_length_sec = int(
        os.environ.get('TEST_LENGTH', self._DEFAULT_TEST_LENGTH_SEC))
    self._get_perf_stats_interval = int(
        os.environ.get('PERF_STATS_INTERVAL', self._GET_PERF_STATS_INTERVAL))

    logging.info('Running test for %d seconds.', self._test_length_sec)
    logging.info('Gathering perf stats every %d seconds.',
                 self._get_perf_stats_interval)

    if self._dmprof:
      self._dmprof.LogFirstMessage()

    # Set up a remote inspector client associated with tab 0.
    logging.info('Setting up connection to remote inspector...')
    self._remote_inspector_client = (
        remote_inspector_client.RemoteInspectorClient())
    logging.info('Connection to remote inspector set up successfully.')

    self._test_start_time = 0
    self._num_errors = 0
    # Timeline events queued by _OnTimelineEvent and flushed to graphs by
    # _GetPerformanceStats.
    self._events_to_output = []

  def tearDown(self):
    """Shuts down the inspector, perf base class, then the profiler."""
    logging.info('Terminating connection to remote inspector...')
    self._remote_inspector_client.Stop()
    logging.info('Connection to remote inspector terminated.')

    # Must be done at end of this function except for post-cleaning after
    # Chrome finishes.
    perf.BasePerfTest.tearDown(self)

    # Must be done after perf.BasePerfTest.tearDown()
    if self._dmprof:
      self._dmprof.TearDown()

  def ExtraChromeFlags(self):
    """Ensures Chrome is launched with custom flags.

    Returns:
      A list of extra flags to pass to Chrome when it is launched.
    """
    # The same with setUp, but need to fetch the environment variable since
    # ExtraChromeFlags is called before setUp.
    deep_memory_profile = DeepMemoryProfiler.GetEnvironmentVariable(
        'DEEP_MEMORY_PROFILE', bool, DeepMemoryProfiler.DEEP_MEMORY_PROFILE)

    # Ensure Chrome enables remote debugging on port 9222. This is required to
    # interact with Chrome's remote inspector.
    # Also, enable the memory benchmarking V8 extension for heap dumps.
    extra_flags = ['--remote-debugging-port=9222',
                   '--enable-memory-benchmarking']
    if deep_memory_profile:
      # The sandbox would block the heap profiler's dump files.
      extra_flags.append('--no-sandbox')
    return perf.BasePerfTest.ExtraChromeFlags(self) + extra_flags

  def _OnTimelineEvent(self, event_info):
    """Invoked by the Remote Inspector Client when a timeline event occurs.

    Args:
      event_info: A dictionary containing raw information associated with a
         timeline event received from Chrome's remote inspector.  Refer to
         chrome/src/third_party/WebKit/Source/WebCore/inspector/Inspector.json
         for the format of this dictionary.
    """
    elapsed_time = int(round(time.time() - self._test_start_time))

    # Only garbage-collection events are recorded; all others are ignored.
    if event_info['type'] == 'GCEvent':
      self._events_to_output.append({
        'type': 'GarbageCollection',
        'time': elapsed_time,
        'data':
            {'collected_bytes': event_info['data']['usedHeapSizeDelta']},
      })

  def _RunEndureTest(self, webapp_name, tab_title_substring, test_description,
                     do_scenario, frame_xpath=''):
    """The main test harness function to run a general Chrome Endure test.

    After a test has performed any setup work and has navigated to the proper
    starting webpage, this function should be invoked to run the endurance
    test.

    Args:
      webapp_name: A string name for the webapp being tested.  Should not
          include spaces.  For example, 'Gmail', 'Docs', or 'Plus'.
      tab_title_substring: A unique substring contained within the title of
          the tab to use, for identifying the appropriate tab.
      test_description: A string description of what the test does, used for
          outputting results to be graphed.  Should not contain spaces.  For
          example, 'ComposeDiscard' for Gmail.
      do_scenario: A callable to be invoked that implements the scenario to
          be performed by this test.  The callable is invoked iteratively for
          the duration of the test.
      frame_xpath: The string xpath of the frame in which to inject javascript
          to clear chromedriver's cache (a temporary workaround until the
          WebDriver team changes how they handle their DOM node cache).
    """
    self._num_errors = 0
    self._test_start_time = time.time()
    last_perf_stats_time = time.time()
    if self._dmprof:
      self.HeapProfilerDump('renderer', 'Chrome Endure (first)')
    # Take a baseline measurement before the scenario loop starts.
    self._GetPerformanceStats(
        webapp_name, test_description, tab_title_substring)
    self._iteration_num = 0  # Available to |do_scenario| if needed.

    self._remote_inspector_client.StartTimelineEventMonitoring(
        self._OnTimelineEvent)

    while time.time() - self._test_start_time < self._test_length_sec:
      self._iteration_num += 1

      if self._num_errors >= self._ERROR_COUNT_THRESHOLD:
        logging.error('Error count threshold (%d) reached. Terminating test '
                      'early.' % self._ERROR_COUNT_THRESHOLD)
        break

      # Sample perf stats periodically rather than every iteration.
      if time.time() - last_perf_stats_time >= self._get_perf_stats_interval:
        last_perf_stats_time = time.time()
        if self._dmprof:
          self.HeapProfilerDump('renderer', 'Chrome Endure')
        self._GetPerformanceStats(
            webapp_name, test_description, tab_title_substring)

      if self._iteration_num % 10 == 0:
        remaining_time = self._test_length_sec - (time.time() -
                                                  self._test_start_time)
        logging.info('Chrome interaction #%d. Time remaining in test: %d sec.' %
                     (self._iteration_num, remaining_time))

      do_scenario()
      # Clear ChromeDriver's DOM node cache so its growth doesn't affect the
      # results of Chrome Endure.
      # TODO(dennisjeffrey): Once the WebDriver team implements changes to
      # handle their DOM node cache differently, we need to revisit this.  It
      # may no longer be necessary at that point to forcefully delete the
      # cache.  Additionally, the Javascript below relies on an internal
      # property of WebDriver that may change at any time.  This is only a
      # temporary workaround to stabilize the Chrome Endure test results.
      js = """
          (function() {
            delete document.$wdc_;
            window.domAutomationController.send('done');
          })();
      """
      try:
        self.ExecuteJavascript(js, frame_xpath=frame_xpath)
      except pyauto_errors.AutomationCommandTimeout:
        # Tolerated: counted toward _ERROR_COUNT_THRESHOLD above.
        self._num_errors += 1
        logging.warning('Logging an automation timeout: delete chromedriver '
                        'cache.')

    self._remote_inspector_client.StopTimelineEventMonitoring()

    if self._dmprof:
      self.HeapProfilerDump('renderer', 'Chrome Endure (last)')
    # Final measurement; is_last=True lets the profiler flush and wait.
    self._GetPerformanceStats(
        webapp_name, test_description, tab_title_substring, is_last=True)

  def _GetProcessInfo(self, tab_title_substring):
    """Gets process info associated with an open browser/tab.

    Args:
      tab_title_substring: A unique substring contained within the title of
          the tab to use; needed for locating the tab info.

    Returns:
      A dictionary containing information about the browser and specified tab
      process:
      {
        'browser_private_mem': integer,  # Private memory associated with the
                                         # browser process, in KB.
        'tab_private_mem': integer,  # Private memory associated with the tab
                                     # process, in KB.
        'tab_pid': integer,  # Process ID of the tab process.
      }
    """
    browser_process_name = (
        self.GetBrowserInfo()['properties']['BrowserProcessExecutableName'])
    info = self.GetProcessInfo()

    # Get the information associated with the browser process.
    browser_proc_info = []
    for browser_info in info['browsers']:
      if browser_info['process_name'] == browser_process_name:
        for proc_info in browser_info['processes']:
          if proc_info['child_process_type'] == 'Browser':
            browser_proc_info.append(proc_info)
    # Exactly one browser process must exist; anything else is a test error.
    self.assertEqual(len(browser_proc_info), 1,
                     msg='Expected to find 1 Chrome browser process, but found '
                         '%d instead.\nCurrent process info:\n%s.' % (
                         len(browser_proc_info), self.pformat(info)))

    # Get the process information associated with the specified tab.
    tab_proc_info = []
    for browser_info in info['browsers']:
      for proc_info in browser_info['processes']:
        # Match any tab process whose titles include the given substring.
        if (proc_info['child_process_type'] == 'Tab' and
            [x for x in proc_info['titles'] if tab_title_substring in x]):
          tab_proc_info.append(proc_info)
    self.assertEqual(len(tab_proc_info), 1,
                     msg='Expected to find 1 %s tab process, but found %d '
                         'instead.\nCurrent process info:\n%s.' % (
                         tab_title_substring, len(tab_proc_info),
                         self.pformat(info)))

    browser_proc_info = browser_proc_info[0]
    tab_proc_info = tab_proc_info[0]
    return {
      'browser_private_mem': browser_proc_info['working_set_mem']['priv'],
      'tab_private_mem': tab_proc_info['working_set_mem']['priv'],
      'tab_pid': tab_proc_info['pid'],
    }

  def _GetPerformanceStats(self, webapp_name, test_description,
                           tab_title_substring, is_last=False):
    """Gets performance statistics and outputs the results.

    Args:
      webapp_name: A string name for the webapp being tested.  Should not
          include spaces.  For example, 'Gmail', 'Docs', or 'Plus'.
      test_description: A string description of what the test does, used for
          outputting results to be graphed.  Should not contain spaces.  For
          example, 'ComposeDiscard' for Gmail.
      tab_title_substring: A unique substring contained within the title of
          the tab to use, for identifying the appropriate tab.
      is_last: A boolean value which should be True if it's the last call of
          _GetPerformanceStats.  The default is False.
    """
    logging.info('Gathering performance stats...')
    elapsed_time = int(round(time.time() - self._test_start_time))

    memory_counts = self._remote_inspector_client.GetMemoryObjectCounts()
    proc_info = self._GetProcessInfo(tab_title_substring)

    if self._dmprof:
      self._dmprof.StartProfiler(
          proc_info, is_last, webapp_name, test_description)

    # DOM node count.
    dom_node_count = memory_counts['DOMNodeCount']
    self._OutputPerfGraphValue(
        'TotalDOMNodeCount', [(elapsed_time, dom_node_count)], 'nodes',
        graph_name='%s%s-Nodes-DOM' % (webapp_name, test_description),
        units_x='seconds')

    # Event listener count.
    event_listener_count = memory_counts['EventListenerCount']
    self._OutputPerfGraphValue(
        'EventListenerCount', [(elapsed_time, event_listener_count)],
        'listeners',
        graph_name='%s%s-EventListeners' % (webapp_name, test_description),
        units_x='seconds')

    # Browser process private memory.
    self._OutputPerfGraphValue(
        'BrowserPrivateMemory',
        [(elapsed_time, proc_info['browser_private_mem'])], 'KB',
        graph_name='%s%s-BrowserMem-Private' % (webapp_name, test_description),
        units_x='seconds')

    # Tab process private memory.
    self._OutputPerfGraphValue(
        'TabPrivateMemory',
        [(elapsed_time, proc_info['tab_private_mem'])], 'KB',
        graph_name='%s%s-TabMem-Private' % (webapp_name, test_description),
        units_x='seconds')

    # V8 memory used.
    v8_info = self.GetV8HeapStats()  # First window, first tab.
    v8_mem_used = v8_info['v8_memory_used'] / 1024.0  # Convert to KB.
    self._OutputPerfGraphValue(
        'V8MemoryUsed', [(elapsed_time, v8_mem_used)], 'KB',
        graph_name='%s%s-V8MemUsed' % (webapp_name, test_description),
        units_x='seconds')

    # V8 memory allocated.
    v8_mem_allocated = v8_info['v8_memory_allocated'] / 1024.0  # Convert to KB.
    self._OutputPerfGraphValue(
        'V8MemoryAllocated', [(elapsed_time, v8_mem_allocated)], 'KB',
        graph_name='%s%s-V8MemAllocated' % (webapp_name, test_description),
        units_x='seconds')

    if self._dmprof:
      self._dmprof.ParseResultAndOutputPerfGraphValues(
          webapp_name, test_description, self._OutputPerfGraphValue)

    logging.info(' Total DOM node count: %d nodes' % dom_node_count)
    logging.info(' Event listener count: %d listeners' % event_listener_count)
    logging.info(' Browser process private memory: %d KB' %
                 proc_info['browser_private_mem'])
    logging.info(' Tab process private memory: %d KB' %
                 proc_info['tab_private_mem'])
    logging.info(' V8 memory used: %f KB' % v8_mem_used)
    logging.info(' V8 memory allocated: %f KB' % v8_mem_allocated)

    # Output any new timeline events that have occurred.
    if self._events_to_output:
      logging.info('Logging timeline events...')
      # Group the queued events by type before emitting them.
      event_type_to_value_list = {}
      for event_info in self._events_to_output:
        if not event_info['type'] in event_type_to_value_list:
          event_type_to_value_list[event_info['type']] = []
        event_type_to_value_list[event_info['type']].append(
            (event_info['time'], event_info['data']))
      for event_type, value_list in event_type_to_value_list.iteritems():
        self._OutputEventGraphValue(event_type, value_list)
      self._events_to_output = []
    else:
      logging.info('No new timeline events to log.')

  def _GetElement(self, find_by, value):
    """Gets a WebDriver element object from the webpage DOM.

    Args:
      find_by: A callable that queries WebDriver for an element from the DOM.
      value: A string value that can be passed to the |find_by| callable.

    Returns:
      The identified WebDriver element object, if found in the DOM, or
      None, otherwise.
    """
    try:
      return find_by(value)
    except selenium.common.exceptions.NoSuchElementException:
      return None

  def _ClickElementByXpath(self, driver, xpath):
    """Given the xpath for a DOM element, clicks on it using WebDriver.

    Args:
      driver: A WebDriver object, as returned by self.NewWebDriver().
      xpath: The string xpath associated with the DOM element to click.

    Returns:
      True, if the DOM element was found and clicked successfully, or
      False, otherwise.
    """
    # First wait until the node exists, then locate and click it.
    try:
      self.WaitForDomNode(xpath)
    except (pyauto_errors.JSONInterfaceError,
            pyauto_errors.JavascriptRuntimeError) as e:
      logging.exception('PyAuto exception: %s' % e)
      return False

    try:
      element = self._GetElement(driver.find_element_by_xpath, xpath)
      element.click()
    except (selenium.common.exceptions.StaleElementReferenceException,
            selenium.common.exceptions.TimeoutException) as e:
      logging.exception('WebDriver exception: %s' % e)
      return False

    return True
| 664 | |
| 665 | |
class ChromeEndureControlTest(ChromeEndureBaseTest):
  """Control tests for Chrome Endure."""

  _WEBAPP_NAME = 'Control'
  _TAB_TITLE_SUBSTRING = 'Chrome Endure Control Test'

  def testControlAttachDetachDOMTree(self):
    """Continually attach and detach a DOM tree from a basic document."""
    page_url = self.GetHttpURLForDataPath('chrome_endure',
                                          'endurance_control.html')
    self.NavigateToURL(page_url)
    tab_title = self.GetActiveTabTitle()
    self.assertTrue(self._TAB_TITLE_SUBSTRING in tab_title,
                    msg='Loaded tab title does not contain "%s": "%s"' %
                        (self._TAB_TITLE_SUBSTRING, tab_title))

    def scenario():
      # The page's own Javascript drives the attach/detach cycle, so the
      # scenario only needs to stay alive while it runs.
      time.sleep(5)

    self._RunEndureTest(self._WEBAPP_NAME, self._TAB_TITLE_SUBSTRING,
                        'AttachDetachDOMTree', scenario)

  def testControlAttachDetachDOMTreeWebDriver(self):
    """Use WebDriver to attach and detach a DOM tree from a basic document."""
    page_url = self.GetHttpURLForDataPath('chrome_endure',
                                          'endurance_control_webdriver.html')
    self.NavigateToURL(page_url)
    tab_title = self.GetActiveTabTitle()
    self.assertTrue(self._TAB_TITLE_SUBSTRING in tab_title,
                    msg='Loaded tab title does not contain "%s": "%s"' %
                        (self._TAB_TITLE_SUBSTRING, tab_title))

    driver = self.NewWebDriver()

    def scenario():
      # Attach a large DOM tree (with event listeners) to the document, wait
      # half a second, detach it again, and wait another half second.
      self._ClickElementByXpath(driver, 'id("attach")')
      time.sleep(0.5)
      self._ClickElementByXpath(driver, 'id("detach")')
      time.sleep(0.5)

    self._RunEndureTest(self._WEBAPP_NAME, self._TAB_TITLE_SUBSTRING,
                        'AttachDetachDOMTreeWebDriver', scenario)
| 713 | |
| 714 | |
class IndexedDBOfflineTest(ChromeEndureBaseTest):
  """Long-running performance tests for IndexedDB, modeling offline usage."""

  _WEBAPP_NAME = 'IndexedDBOffline'
  _TAB_TITLE_SUBSTRING = 'IndexedDB Offline'

  def setUp(self):
    """Navigates to the IndexedDB test app and prepares a WebDriver."""
    ChromeEndureBaseTest.setUp(self)

    page_url = self.GetHttpURLForDataPath('indexeddb', 'endure', 'app.html')
    self.NavigateToURL(page_url)
    tab_title = self.GetActiveTabTitle()
    self.assertTrue(self._TAB_TITLE_SUBSTRING in tab_title,
                    msg='Loaded tab title does not contain "%s": "%s"' %
                        (self._TAB_TITLE_SUBSTRING, tab_title))

    self._driver = self.NewWebDriver()

  def testOfflineOnline(self):
    """Simulates user input while offline and sync while online.

    This test alternates between a simulated "Offline" state (where user
    input events are queued) and an "Online" state (where user input events
    are dequeued, sync data is staged, and sync data is unstaged).
    """
    test_description = 'OnlineOfflineSync'

    def scenario():
      def switch_state(button_xpath, state_xpath, click_warning, wait_warning):
        # Click the given state button, wait until the page reflects the new
        # state, then let the simulated activity run for one second.  Failures
        # are tolerated but counted toward the error threshold.
        if not self._ClickElementByXpath(self._driver, button_xpath):
          self._num_errors += 1
          logging.warning(click_warning)

        try:
          self.WaitForDomNode(state_xpath)
        except (pyauto_errors.JSONInterfaceError,
                pyauto_errors.JavascriptRuntimeError):
          self._num_errors += 1
          logging.warning(wait_warning)

        time.sleep(1)

      # "Online": simulated sync runs for 1 second.
      switch_state(
          'id("online")', 'id("state")[text()="online"]',
          'Logging an automation error: click "online" button.',
          'Logging an automation error: wait for "online".')
      # "Offline": simulated user input occurs for 1 second.
      switch_state(
          'id("offline")', 'id("state")[text()="offline"]',
          'Logging an automation error: click "offline" button.',
          'Logging an automation error: wait for "offline".')

    self._RunEndureTest(self._WEBAPP_NAME, self._TAB_TITLE_SUBSTRING,
                        test_description, scenario)
| 773 | |
| 774 | |
if __name__ == '__main__':
  # Delegate to the pyauto functional-test runner, which discovers and runs
  # the test classes defined above.
  pyauto_functional.Main()
| OLD | NEW |