Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(201)

Side by Side Diff: third_party/WebKit/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py

Issue 2679393004: Fix all "dangerous-default-value" pylint warnings. (Closed)
Patch Set: Created 3 years, 10 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 # Copyright (C) 2012 Google Inc. All rights reserved. 1 # Copyright (C) 2012 Google Inc. All rights reserved.
2 # 2 #
3 # Redistribution and use in source and binary forms, with or without 3 # Redistribution and use in source and binary forms, with or without
4 # modification, are permitted provided that the following conditions are 4 # modification, are permitted provided that the following conditions are
5 # met: 5 # met:
6 # 6 #
7 # * Redistributions of source code must retain the above copyright 7 # * Redistributions of source code must retain the above copyright
8 # notice, this list of conditions and the following disclaimer. 8 # notice, this list of conditions and the following disclaimer.
9 # * Redistributions in binary form must reproduce the above 9 # * Redistributions in binary form must reproduce the above
10 # copyright notice, this list of conditions and the following disclaimer 10 # copyright notice, this list of conditions and the following disclaimer
(...skipping 29 matching lines...) Expand all
40 from webkitpy.layout_tests.port.test import TestPort 40 from webkitpy.layout_tests.port.test import TestPort
41 from webkitpy.performance_tests.perftest import ChromiumStylePerfTest 41 from webkitpy.performance_tests.perftest import ChromiumStylePerfTest
42 from webkitpy.performance_tests.perftest import DEFAULT_TEST_RUNNER_COUNT 42 from webkitpy.performance_tests.perftest import DEFAULT_TEST_RUNNER_COUNT
43 from webkitpy.performance_tests.perftest import PerfTest 43 from webkitpy.performance_tests.perftest import PerfTest
44 from webkitpy.performance_tests.perftestsrunner import PerfTestsRunner 44 from webkitpy.performance_tests.perftestsrunner import PerfTestsRunner
45 45
46 46
47 class MainTest(unittest.TestCase): 47 class MainTest(unittest.TestCase):
48 48
49 def create_runner(self, args=[]): 49 def create_runner(self, args=[]):
50 args = args or []
50 options, _ = PerfTestsRunner._parse_args(args) 51 options, _ = PerfTestsRunner._parse_args(args)
51 test_port = TestPort(host=MockHost(), options=options) 52 test_port = TestPort(host=MockHost(), options=options)
52 runner = PerfTestsRunner(args=args, port=test_port) 53 runner = PerfTestsRunner(args=args, port=test_port)
53 runner._host.filesystem.maybe_make_directory(runner._base_path, 'inspect or') 54 runner._host.filesystem.maybe_make_directory(runner._base_path, 'inspect or')
54 runner._host.filesystem.maybe_make_directory(runner._base_path, 'Binding s') 55 runner._host.filesystem.maybe_make_directory(runner._base_path, 'Binding s')
55 runner._host.filesystem.maybe_make_directory(runner._base_path, 'Parser' ) 56 runner._host.filesystem.maybe_make_directory(runner._base_path, 'Parser' )
56 return runner, test_port 57 return runner, test_port
57 58
58 def _add_file(self, runner, dirname, filename, content=True): 59 def _add_file(self, runner, dirname, filename, content=True):
59 dirname = runner._host.filesystem.join(runner._base_path, dirname) if di rname else runner._base_path 60 dirname = runner._host.filesystem.join(runner._base_path, dirname) if di rname else runner._base_path
(...skipping 325 matching lines...) Expand 10 before | Expand all | Expand 10 after
385 386
386 class IntegrationTest(unittest.TestCase): 387 class IntegrationTest(unittest.TestCase):
387 388
388 def _normalize_output(self, log): 389 def _normalize_output(self, log):
389 return re.sub(r'(stdev=\s+\d+\.\d{5})\d+', r'\1', re.sub(r'Finished: [0- 9\.]+ s', 'Finished: 0.1 s', log)) 390 return re.sub(r'(stdev=\s+\d+\.\d{5})\d+', r'\1', re.sub(r'Finished: [0- 9\.]+ s', 'Finished: 0.1 s', log))
390 391
391 def _load_output_json(self, runner): 392 def _load_output_json(self, runner):
392 json_content = runner._host.filesystem.read_text_file(runner._output_jso n_path()) 393 json_content = runner._host.filesystem.read_text_file(runner._output_jso n_path())
393 return json.loads(re.sub(r'("stdev":\s*\d+\.\d{5})\d+', r'\1', json_cont ent)) 394 return json.loads(re.sub(r'("stdev":\s*\d+\.\d{5})\d+', r'\1', json_cont ent))
394 395
395 def create_runner(self, args=[], driver_class=TestDriver): 396 def create_runner(self, args=None, driver_class=TestDriver):
397 args = args or []
396 options, _ = PerfTestsRunner._parse_args(args) 398 options, _ = PerfTestsRunner._parse_args(args)
397 test_port = TestPort(host=MockHost(), options=options) 399 test_port = TestPort(host=MockHost(), options=options)
398 test_port.create_driver = lambda worker_number=None, no_timeout=False: d river_class() 400 test_port.create_driver = lambda worker_number=None, no_timeout=False: d river_class()
399 401
400 runner = PerfTestsRunner(args=args, port=test_port) 402 runner = PerfTestsRunner(args=args, port=test_port)
401 runner._host.filesystem.maybe_make_directory(runner._base_path, 'inspect or') 403 runner._host.filesystem.maybe_make_directory(runner._base_path, 'inspect or')
402 runner._host.filesystem.maybe_make_directory(runner._base_path, 'Binding s') 404 runner._host.filesystem.maybe_make_directory(runner._base_path, 'Binding s')
403 runner._host.filesystem.maybe_make_directory(runner._base_path, 'Parser' ) 405 runner._host.filesystem.maybe_make_directory(runner._base_path, 'Parser' )
404 406
405 return runner, test_port 407 return runner, test_port
(...skipping 151 matching lines...) Expand 10 before | Expand all | Expand 10 after
    def test_run_with_description(self):
        """--description should be echoed into the uploaded results JSON."""
        runner, port = self.create_runner_and_setup_results_template(
            args=['--output-json-path=/mock-checkout/output.json',
                  '--test-results-server=some.host', '--description', 'some description'])
        self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
        # The generated JSON must carry the description verbatim alongside the
        # canned build time, test results, and revision metadata.
        self.assertEqual(self._load_output_json(runner), [{
            "buildTime": "2013-02-08T15:19:37.460000", "description": "some description",
            "tests": self._event_target_wrapper_and_inspector_results,
            "revisions": {"chromium": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}])
566 568
567 def create_runner_and_setup_results_template(self, args=[]): 569 def create_runner_and_setup_results_template(self, args=None):
570 args = args or []
568 runner, port = self.create_runner(args) 571 runner, port = self.create_runner(args)
569 filesystem = port.host.filesystem 572 filesystem = port.host.filesystem
570 filesystem.write_text_file( 573 filesystem.write_text_file(
571 runner._base_path + '/resources/results-template.html', # pylint: d isable=protected-access 574 runner._base_path + '/resources/results-template.html', # pylint: d isable=protected-access
572 ('BEGIN<script src="%AbsolutePathToWebKitTrunk%/some.js"></script>' 575 ('BEGIN<script src="%AbsolutePathToWebKitTrunk%/some.js"></script>'
573 '<script src="%AbsolutePathToWebKitTrunk%/other.js"></script><scrip t>%PeformanceTestsResultsJSON%</script>END')) 576 '<script src="%AbsolutePathToWebKitTrunk%/other.js"></script><scrip t>%PeformanceTestsResultsJSON%</script>END'))
574 filesystem.write_text_file(runner._base_path + '/Dromaeo/resources/droma eo/web/lib/jquery-1.6.4.js', 'jquery content') 577 filesystem.write_text_file(runner._base_path + '/Dromaeo/resources/droma eo/web/lib/jquery-1.6.4.js', 'jquery content')
575 return runner, port 578 return runner, port
576 579
577 def test_run_respects_no_results(self): 580 def test_run_respects_no_results(self):
(...skipping 217 matching lines...) Expand 10 before | Expand all | Expand 10 after
795 self._test_run_with_json_output(runner, port.host.filesystem, compare_lo gs=False) 798 self._test_run_with_json_output(runner, port.host.filesystem, compare_lo gs=False)
796 generated_json = json.loads(port.host.filesystem.files['/mock-checkout/o utput.json']) 799 generated_json = json.loads(port.host.filesystem.files['/mock-checkout/o utput.json'])
797 self.assertTrue(isinstance(generated_json, list)) 800 self.assertTrue(isinstance(generated_json, list))
798 self.assertEqual(len(generated_json), 1) 801 self.assertEqual(len(generated_json), 1)
799 802
800 output = generated_json[0]['tests']['Bindings']['tests']['event-target-w rapper']['metrics']['Time']['current'] 803 output = generated_json[0]['tests']['Bindings']['tests']['event-target-w rapper']['metrics']['Time']['current']
801 self.assertEqual(len(output), 3) 804 self.assertEqual(len(output), 3)
802 expectedMetrics = EventTargetWrapperTestData.results['metrics']['Time'][ 'current'][0] 805 expectedMetrics = EventTargetWrapperTestData.results['metrics']['Time'][ 'current'][0]
803 for metrics in output: 806 for metrics in output:
804 self.assertEqual(metrics, expectedMetrics) 807 self.assertEqual(metrics, expectedMetrics)
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698