| OLD | NEW |
| 1 # Copyright 2015 The Chromium Authors. All rights reserved. | 1 # Copyright 2015 The Chromium Authors. All rights reserved. |
| 2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
| 3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
| 4 | 4 |
| 5 import json | 5 import json |
| 6 import os | 6 import os |
| 7 import subprocess | 7 import subprocess |
| 8 import sys | 8 import sys |
| 9 import tempfile | 9 import tempfile |
| 10 import unittest | 10 import unittest |
| 11 | 11 |
| 12 | 12 |
class ScriptsSmokeTest(unittest.TestCase):
  """Smoke tests for the perf tooling entry-point scripts.

  Each test shells out to a script (run_benchmark, record_wpr) with the
  current Python interpreter and checks the exit code and the combined
  stdout/stderr text.
  """

  # Directory containing the scripts under test; used as the subprocess cwd.
  perf_dir = os.path.dirname(__file__)

  def RunPerfScript(self, command):
    """Runs |command| under sys.executable with cwd set to perf_dir.

    Args:
      command: Space-separated script name and arguments. Note this is a
        naive split(' '), so individual arguments must not contain spaces.

    Returns:
      A (return_code, stdout) tuple. stdout also contains stderr output,
      since the child's stderr is redirected into the stdout pipe.
    """
    args = [sys.executable] + command.split(' ')
    # universal_newlines=True makes stdout text (str) on Python 3 so that
    # the assertIn(...) string checks below work; it is a no-op on Python 2.
    proc = subprocess.Popen(args, stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT, cwd=self.perf_dir,
                            universal_newlines=True)
    stdout = proc.communicate()[0]
    return_code = proc.returncode
    return return_code, stdout

  def testRunBenchmarkHelp(self):
    return_code, stdout = self.RunPerfScript('run_benchmark help')
    # Pass stdout as msg so the script's output shows up on failure.
    self.assertEqual(return_code, 0, stdout)
    self.assertIn('Available commands are', stdout)

  def testRunBenchmarkRunListsOutBenchmarks(self):
    return_code, stdout = self.RunPerfScript('run_benchmark run')
    self.assertIn('Pass --browser to list benchmarks', stdout)
    self.assertNotEqual(return_code, 0)

  def testRunBenchmarkRunNonExistingBenchmark(self):
    return_code, stdout = self.RunPerfScript('run_benchmark foo')
    self.assertIn('No benchmark named "foo"', stdout)
    self.assertNotEqual(return_code, 0)

  def testRunBenchmarkListListsOutBenchmarks(self):
    return_code, stdout = self.RunPerfScript('run_benchmark list')
    self.assertEqual(return_code, 0, stdout)
    self.assertIn('Pass --browser to list benchmarks', stdout)
    self.assertIn('dummy_benchmark.stable_benchmark_1', stdout)

  def testRunRecordWprHelp(self):
    return_code, stdout = self.RunPerfScript('record_wpr')
    self.assertEqual(return_code, 0, stdout)
    self.assertIn('optional arguments:', stdout)

  def testRunRecordWprList(self):
    return_code, stdout = self.RunPerfScript('record_wpr --list-benchmarks')
    # TODO(nednguyen): Remove this once we figure out why importing
    # small_profile_extender fails on Android dbg.
    # crbug.com/561668
    if 'ImportError: cannot import name small_profile_extender' in stdout:
      self.skipTest('small_profile_extender is missing')
    self.assertEqual(return_code, 0, stdout)
    self.assertIn('kraken', stdout)

  def testRunBenchmarkListJSONListsOutBenchmarks(self):
    # delete=False so the child process can write to the file by name;
    # cleanup is done explicitly in the finally clause.
    tmp_file = tempfile.NamedTemporaryFile(delete=False)
    tmp_file_name = tmp_file.name
    tmp_file.close()
    try:
      return_code, _ = self.RunPerfScript(
          'run_benchmark list --json-output %s' % tmp_file_name)
      self.assertEqual(return_code, 0)
      with open(tmp_file_name, 'r') as f:
        benchmark_data = json.load(f)
      self.assertIn('dummy_benchmark.stable_benchmark_1',
                    benchmark_data['steps'])
    finally:
      os.remove(tmp_file_name)
| OLD | NEW |