Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(121)

Side by Side Diff: third_party/grpc/tools/run_tests/run_tests.py

Issue 1932353002: Initial checkin of gRPC to third_party/ Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 4 years, 7 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
(Empty)
1 #!/usr/bin/env python2.7
2 # Copyright 2015-2016, Google Inc.
3 # All rights reserved.
4 #
5 # Redistribution and use in source and binary forms, with or without
6 # modification, are permitted provided that the following conditions are
7 # met:
8 #
9 # * Redistributions of source code must retain the above copyright
10 # notice, this list of conditions and the following disclaimer.
11 # * Redistributions in binary form must reproduce the above
12 # copyright notice, this list of conditions and the following disclaimer
13 # in the documentation and/or other materials provided with the
14 # distribution.
15 # * Neither the name of Google Inc. nor the names of its
16 # contributors may be used to endorse or promote products derived from
17 # this software without specific prior written permission.
18 #
19 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21 # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
22 # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
23 # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24 # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
25 # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
31 """Run tests in parallel."""
32
33 import argparse
34 import ast
35 import glob
36 import hashlib
37 import itertools
38 import json
39 import multiprocessing
40 import os
41 import platform
42 import random
43 import re
44 import socket
45 import subprocess
46 import sys
47 import tempfile
48 import traceback
49 import time
50 import urllib2
51 import uuid
52
53 import jobset
54 import report_utils
55 import watch_dirs
56
57
58 _ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
59 os.chdir(_ROOT)
60
61
62 _FORCE_ENVIRON_FOR_WRAPPERS = {}
63
64
def platform_string():
  """Return the current platform name ('windows', 'mac', 'linux', ...).

  Thin delegate to jobset.platform_string() so the rest of this file does
  not depend on jobset directly for platform detection.
  """
  return jobset.platform_string()
68
# SimpleConfig: just compile with CONFIG=config, and run the binary to test
class Config(object):
  """A build/test configuration (e.g. 'dbg', 'opt', 'gcov').

  Knows how to turn a test command line into a jobset.JobSpec that runs
  under this configuration.
  """

  def __init__(self, config, environ=None, timeout_multiplier=1, tool_prefix=None):
    """Args:
      config: name of the build configuration (becomes $CONFIG for make).
      environ: extra environment variables applied to every test run.
      timeout_multiplier: scales each job's timeout (for slower configs).
      tool_prefix: argv prefix prepended to every test command line.
    """
    # Use None sentinels instead of mutable default arguments: a shared
    # dict/list default would be silently shared across Config instances.
    if environ is None:
      environ = {}
    if tool_prefix is None:
      tool_prefix = []
    self.build_config = config
    # gcov builds produce unstable binaries, so result caching keyed on a
    # binary hash is disabled for that config.
    self.allow_hashing = (config != 'gcov')
    self.environ = environ
    self.environ['CONFIG'] = config
    self.tool_prefix = tool_prefix
    self.timeout_multiplier = timeout_multiplier

  def job_spec(self, cmdline, hash_targets, timeout_seconds=5*60,
               shortname=None, environ=None, cpu_cost=1.0):
    """Construct a jobset.JobSpec for a test under this config

    Args:
      cmdline: a list of strings specifying the command line the test
               would like to run
      hash_targets: either None (don't do caching of test results), or
                    a list of strings specifying files to include in a
                    binary hash to check if a test has changed
                    -- if used, all artifacts needed to run the test must
                    be listed
      timeout_seconds: per-run timeout before the multiplier; None disables.
      shortname: human-readable job name used in reports.
      environ: per-job environment overrides, merged over self.environ.
      cpu_cost: relative CPU weight used by the job scheduler.
    """
    actual_environ = self.environ.copy()
    if environ:
      for k, v in environ.iteritems():
        actual_environ[k] = v
    return jobset.JobSpec(cmdline=self.tool_prefix + cmdline,
                          shortname=shortname,
                          environ=actual_environ,
                          cpu_cost=cpu_cost,
                          timeout_seconds=(self.timeout_multiplier * timeout_seconds
                                           if timeout_seconds else None),
                          hash_targets=hash_targets
                              if self.allow_hashing else None,
                          # 'args' is the module-level parsed CLI namespace.
                          flake_retries=5 if args.allow_flakes else 0,
                          timeout_retries=3 if args.allow_flakes else 0)
107
108
def get_c_tests(travis, test_lang):
  """Load C/C++ test targets for the current platform from tests.json.

  Args:
    travis: if True, filter by the CI platform list and drop flaky tests.
    test_lang: 'c' or 'c++'; only targets of this language are returned.
  Returns:
    A list of target dicts as found in tools/run_tests/tests.json.
  """
  # (removed an unused 'out = []' local left over from an earlier version)
  platforms_str = 'ci_platforms' if travis else 'platforms'
  with open('tools/run_tests/tests.json') as f:
    js = json.load(f)
    return [tgt
            for tgt in js
            if tgt['language'] == test_lang and
            platform_string() in tgt[platforms_str] and
            not (travis and tgt['flaky'])]
119
120
121 def _check_compiler(compiler, supported_compilers):
122 if compiler not in supported_compilers:
123 raise Exception('Compiler %s not supported.' % compiler)
124
125
126 def _is_use_docker_child():
127 """Returns True if running running as a --use_docker child."""
128 return True if os.getenv('RUN_TESTS_COMMAND') else False
129
130
131 class CLanguage(object):
132
133 def __init__(self, make_target, test_lang):
134 self.make_target = make_target
135 self.platform = platform_string()
136 self.test_lang = test_lang
137
138 def configure(self, config, args):
139 self.config = config
140 self.args = args
141 if self.platform == 'windows':
142 self._make_options = [_windows_toolset_option(self.args.compiler),
143 _windows_arch_option(self.args.arch)]
144 else:
145 self._make_options = []
146 self._docker_distro = self._get_docker_distro(self.args.use_docker,
147 self.args.compiler)
148
149 def test_specs(self):
150 out = []
151 binaries = get_c_tests(self.args.travis, self.test_lang)
152 for target in binaries:
153 if self.config.build_config in target['exclude_configs']:
154 continue
155 if self.platform == 'windows':
156 binary = 'vsprojects/%s%s/%s.exe' % (
157 'x64/' if self.args.arch == 'x64' else '',
158 _MSBUILD_CONFIG[self.config.build_config],
159 target['name'])
160 else:
161 binary = 'bins/%s/%s' % (self.config.build_config, target['name'])
162 if os.path.isfile(binary):
163 if 'gtest' in target and target['gtest']:
164 # here we parse the output of --gtest_list_tests to build up a
165 # complete list of the tests contained in a binary
166 # for each test, we then add a job to run, filtering for just that
167 # test
168 with open(os.devnull, 'w') as fnull:
169 tests = subprocess.check_output([binary, '--gtest_list_tests'],
170 stderr=fnull)
171 base = None
172 for line in tests.split('\n'):
173 i = line.find('#')
174 if i >= 0: line = line[:i]
175 if not line: continue
176 if line[0] != ' ':
177 base = line.strip()
178 else:
179 assert base is not None
180 assert line[1] == ' '
181 test = base + line.strip()
182 cmdline = [binary] + ['--gtest_filter=%s' % test]
183 out.append(self.config.job_spec(cmdline, [binary],
184 shortname='%s:%s' % (binary, test) ,
185 cpu_cost=target['cpu_cost'],
186 environ={'GRPC_DEFAULT_SSL_ROOTS_F ILE_PATH':
187 _ROOT + '/src/core/tsi/te st_creds/ca.pem'}))
188 else:
189 cmdline = [binary] + target['args']
190 out.append(self.config.job_spec(cmdline, [binary],
191 shortname=' '.join(cmdline),
192 cpu_cost=target['cpu_cost'],
193 environ={'GRPC_DEFAULT_SSL_ROOTS_FILE_ PATH':
194 _ROOT + '/src/core/tsi/test_c reds/ca.pem'}))
195 elif self.args.regex == '.*' or self.platform == 'windows':
196 print '\nWARNING: binary not found, skipping', binary
197 return sorted(out)
198
199 def make_targets(self):
200 test_regex = self.args.regex
201 if self.platform != 'windows' and self.args.regex != '.*':
202 # use the regex to minimize the number of things to build
203 return [os.path.basename(target['name'])
204 for target in get_c_tests(False, self.test_lang)
205 if re.search(test_regex, '/' + target['name'])]
206 if self.platform == 'windows':
207 # don't build tools on windows just yet
208 return ['buildtests_%s' % self.make_target]
209 return ['buildtests_%s' % self.make_target, 'tools_%s' % self.make_target]
210
211 def make_options(self):
212 return self._make_options;
213
214 def pre_build_steps(self):
215 if self.platform == 'windows':
216 return [['tools\\run_tests\\pre_build_c.bat']]
217 else:
218 return []
219
220 def build_steps(self):
221 return []
222
223 def post_tests_steps(self):
224 if self.platform == 'windows':
225 return []
226 else:
227 return [['tools/run_tests/post_tests_c.sh']]
228
229 def makefile_name(self):
230 return 'Makefile'
231
232 def _get_docker_distro(self, use_docker, compiler):
233 if _is_use_docker_child():
234 return "already_under_docker"
235 if not use_docker:
236 _check_compiler(compiler, ['default'])
237
238 if compiler == 'gcc4.9' or compiler == 'default':
239 return 'jessie'
240 elif compiler == 'gcc4.4':
241 return 'squeeze'
242 elif compiler == 'gcc5.3':
243 return 'ubuntu1604'
244 else:
245 raise Exception('Compiler %s not supported.' % compiler)
246
247 def dockerfile_dir(self):
248 return 'tools/dockerfile/test/cxx_%s_%s' % (self._docker_distro,
249 _docker_arch_suffix(self.args.ar ch))
250
251 def __str__(self):
252 return self.make_target
253
254
class NodeLanguage(object):
  """Build/test driver for the Node.js test suite."""

  def __init__(self):
    self.platform = platform_string()
    self.node_version = '0.12'

  def configure(self, config, args):
    """Bind Config and CLI args; node supports only the default compiler."""
    self.config = config
    self.args = args
    _check_compiler(self.args.compiler, ['default'])

  def test_specs(self):
    """The whole node suite runs as a single wrapper-script job."""
    if self.platform == 'windows':
      return [self.config.job_spec(['tools\\run_tests\\run_node.bat'], None)]
    return [self.config.job_spec(['tools/run_tests/run_node.sh', self.node_version],
                                 None,
                                 environ=_FORCE_ENVIRON_FOR_WRAPPERS)]

  def pre_build_steps(self):
    on_windows = self.platform == 'windows'
    return ([['tools\\run_tests\\pre_build_node.bat']] if on_windows
            else [['tools/run_tests/pre_build_node.sh', self.node_version]])

  def make_targets(self):
    # nothing built via make; the build_node scripts do the work
    return []

  def make_options(self):
    return []

  def build_steps(self):
    if self.platform == 'windows':
      return [['tools\\run_tests\\build_node.bat']]
    return [['tools/run_tests/build_node.sh', self.node_version]]

  def post_tests_steps(self):
    return []

  def makefile_name(self):
    return 'Makefile'

  def dockerfile_dir(self):
    return 'tools/dockerfile/test/node_jessie_%s' % _docker_arch_suffix(self.args.arch)

  def __str__(self):
    return 'node'
303
304
class PhpLanguage(object):
  """Build/test driver for the PHP test suite."""

  def configure(self, config, args):
    self.config = config
    self.args = args
    _check_compiler(self.args.compiler, ['default'])

  def test_specs(self):
    """The whole PHP suite runs as a single wrapper-script job."""
    return [self.config.job_spec(['src/php/bin/run_tests.sh'], None,
                                 environ=_FORCE_ENVIRON_FOR_WRAPPERS)]

  def pre_build_steps(self):
    return []

  def make_targets(self):
    # the PHP extension links against both the static and shared C core
    return ['static_c', 'shared_c']

  def make_options(self):
    return []

  def build_steps(self):
    return [['tools/run_tests/build_php.sh']]

  def post_tests_steps(self):
    return [['tools/run_tests/post_tests_php.sh']]

  def makefile_name(self):
    return 'Makefile'

  def dockerfile_dir(self):
    return 'tools/dockerfile/test/php_jessie_%s' % _docker_arch_suffix(self.args.arch)

  def __str__(self):
    return 'php'
339
340
class PythonLanguage(object):
  """Build/test driver for the Python test suite."""

  def __init__(self):
    # Versions we attempt to build for; versions actually present on the
    # machine are collected into _has_python_versions by build_steps().
    self._build_python_versions = ['2.7']
    self._has_python_versions = []

  def configure(self, config, args):
    self.config = config
    self.args = args
    _check_compiler(self.args.compiler, ['default'])

  def test_specs(self):
    """One JobSpec per test suite, or a single combined job under gcov."""
    # load list of known test suites
    with open('src/python/grpcio/tests/tests.json') as tests_json_file:
      tests_json = json.load(tests_json_file)
    environment = dict(_FORCE_ENVIRON_FOR_WRAPPERS)
    environment['PYVER'] = '2.7'
    if self.config.build_config != 'gcov':
      return [self.config.job_spec(
          ['tools/run_tests/run_python.sh'],
          None,
          # dict(base, KEY=value) copies and overrides in one expression;
          # equivalent to (but cleaner than) dict(items + [(k, v)]) and
          # also valid on Python 3.
          environ=dict(environment,
                       GRPC_PYTHON_TESTRUNNER_FILTER=suite_name),
          shortname='py.test.%s' % suite_name,
          timeout_seconds=5*60)
          for suite_name in tests_json]
    else:
      # for code coverage, all tests need to run as one suite
      return [self.config.job_spec(['tools/run_tests/run_python.sh'],
                                   None,
                                   environ=environment,
                                   shortname='py.test.coverage',
                                   timeout_seconds=15*60)]

  def pre_build_steps(self):
    return []

  def make_targets(self):
    return ['static_c', 'grpc_python_plugin', 'shared_c']

  def make_options(self):
    return []

  def build_steps(self):
    """Emit a build command for each configured Python found on this host."""
    commands = []
    for python_version in self._build_python_versions:
      try:
        with open(os.devnull, 'w') as output:
          subprocess.check_call(['which', 'python' + python_version],
                                stdout=output, stderr=output)
        commands.append(['tools/run_tests/build_python.sh', python_version])
        self._has_python_versions.append(python_version)
      # only 'which' failures should be tolerated here; the previous bare
      # except also swallowed KeyboardInterrupt/SystemExit
      except (OSError, subprocess.CalledProcessError):
        jobset.message('WARNING', 'Missing Python ' + python_version,
                       do_newline=True)
    return commands

  def post_tests_steps(self):
    return []

  def makefile_name(self):
    return 'Makefile'

  def dockerfile_dir(self):
    return 'tools/dockerfile/test/python_jessie_%s' % _docker_arch_suffix(self.args.arch)

  def __str__(self):
    return 'python'
409
410
class RubyLanguage(object):
  """Build/test driver for the Ruby test suite."""

  def configure(self, config, args):
    self.config = config
    self.args = args
    _check_compiler(self.args.compiler, ['default'])

  def test_specs(self):
    """One JobSpec running the whole Ruby suite (10 minute timeout)."""
    return [self.config.job_spec(['tools/run_tests/run_ruby.sh'], None,
                                 timeout_seconds=10*60,
                                 environ=_FORCE_ENVIRON_FOR_WRAPPERS)]

  def pre_build_steps(self):
    return [['tools/run_tests/pre_build_ruby.sh']]

  def make_targets(self):
    # nothing built via make; the ruby scripts handle building
    return []

  def make_options(self):
    return []

  def build_steps(self):
    return [['tools/run_tests/build_ruby.sh']]

  def post_tests_steps(self):
    return [['tools/run_tests/post_tests_ruby.sh']]

  def makefile_name(self):
    return 'Makefile'

  def dockerfile_dir(self):
    return 'tools/dockerfile/test/ruby_jessie_%s' % _docker_arch_suffix(self.args.arch)

  def __str__(self):
    return 'ruby'
446
447
class CSharpLanguage(object):
  """Build/test driver for the C# test suite."""

  def __init__(self):
    self.platform = platform_string()

  def configure(self, config, args):
    self.config = config
    self.args = args
    _check_compiler(self.args.compiler, ['default'])

  def test_specs(self):
    """Build JobSpecs from the checked-in manifest of assemblies/tests."""
    with open('src/csharp/tests.json') as f:
      tests_json = json.load(f)
      assemblies = tests_json['assemblies']
      tests = tests_json['tests']

    msbuild_config = _MSBUILD_CONFIG[self.config.build_config]
    # DLL path layout follows the msbuild output convention
    assembly_files = ['%s/bin/%s/%s.dll' % (a, msbuild_config, a)
                      for a in assemblies]

    extra_args = ['-labels'] + assembly_files

    if self.platform == 'windows':
      script_name = 'tools\\run_tests\\run_csharp.bat'
      extra_args += ['-domain=None']
    else:
      script_name = 'tools/run_tests/run_csharp.sh'

    if self.config.build_config == 'gcov':
      # On Windows, we only collect C# code coverage.
      # On Linux, we only collect coverage for native extension.
      # For code coverage all tests need to run as one suite.
      return [self.config.job_spec([script_name] + extra_args, None,
                                   shortname='csharp.coverage',
                                   environ=_FORCE_ENVIRON_FOR_WRAPPERS)]
    else:
      specs = []
      for test in tests:
        cmdline = [script_name, '-run=%s' % test] + extra_args
        if self.platform == 'windows':
          # use different output directory for each test to prevent
          # TestResult.xml clash between parallel test runs.
          cmdline += ['-work=test-result/%s' % uuid.uuid4()]
        specs.append(self.config.job_spec(cmdline, None,
                                          shortname='csharp.%s' % test,
                                          environ=_FORCE_ENVIRON_FOR_WRAPPERS))
      return specs

  def pre_build_steps(self):
    if self.platform == 'windows':
      return [['tools\\run_tests\\pre_build_csharp.bat']]
    else:
      return [['tools/run_tests/pre_build_csharp.sh']]

  def make_targets(self):
    # For Windows, this target doesn't really build anything,
    # everything is build by buildall script later.
    if self.platform == 'windows':
      return []
    else:
      return ['grpc_csharp_ext']

  def make_options(self):
    if self.platform == 'mac':
      # On Mac, official distribution of mono is 32bit.
      return ['CFLAGS=-arch i386', 'LDFLAGS=-arch i386']
    else:
      return []

  def build_steps(self):
    if self.platform == 'windows':
      return [['src\\csharp\\buildall.bat']]
    else:
      return [['tools/run_tests/build_csharp.sh']]

  def post_tests_steps(self):
    return []

  def makefile_name(self):
    return 'Makefile'

  def dockerfile_dir(self):
    return 'tools/dockerfile/test/csharp_jessie_%s' % _docker_arch_suffix(self.args.arch)

  def __str__(self):
    return 'csharp'
534
535
class ObjCLanguage(object):
  """Build/test driver for the Objective-C test suite."""

  def configure(self, config, args):
    self.config = config
    self.args = args
    _check_compiler(self.args.compiler, ['default'])

  def test_specs(self):
    """The whole Objective-C suite runs as one wrapper-script job."""
    return [self.config.job_spec(['src/objective-c/tests/run_tests.sh'], None,
                                 environ=_FORCE_ENVIRON_FOR_WRAPPERS)]

  def pre_build_steps(self):
    return []

  def make_targets(self):
    # the codegen plugin plus the interop server the tests talk to
    return ['grpc_objective_c_plugin', 'interop_server']

  def make_options(self):
    return []

  def build_steps(self):
    return [['src/objective-c/tests/build_tests.sh']]

  def post_tests_steps(self):
    return []

  def makefile_name(self):
    return 'Makefile'

  def dockerfile_dir(self):
    # Objective-C tests run on a Mac host only; there is no docker image
    return None

  def __str__(self):
    return 'objc'
570
571
class Sanity(object):
  """Driver for repository sanity checks (lint, generated-file checks, etc.)."""

  def configure(self, config, args):
    self.config = config
    self.args = args
    _check_compiler(self.args.compiler, ['default'])

  def test_specs(self):
    """One JobSpec per entry in sanity_tests.yaml (no timeout)."""
    import yaml
    with open('tools/run_tests/sanity/sanity_tests.yaml', 'r') as f:
      # NOTE(review): yaml.load without an explicit Loader can construct
      # arbitrary objects; the input here is a checked-in file, not
      # attacker-controlled, but yaml.safe_load would be safer -- confirm
      # the file uses no custom tags before switching.
      return [self.config.job_spec(cmd['script'].split(), None,
                                   timeout_seconds=None, environ={'TEST': 'true'},
                                   cpu_cost=cmd.get('cpu_cost', 1))
              for cmd in yaml.load(f)]

  def pre_build_steps(self):
    return []

  def make_targets(self):
    return ['run_dep_checks']

  def make_options(self):
    return []

  def build_steps(self):
    return []

  def post_tests_steps(self):
    return []

  def makefile_name(self):
    return 'Makefile'

  def dockerfile_dir(self):
    return 'tools/dockerfile/test/sanity'

  def __str__(self):
    return 'sanity'
610
611
# different configurations we can run under
with open('tools/run_tests/configs.json') as f:
  # NOTE(review): the file is JSON but is parsed with ast.literal_eval --
  # presumably it sticks to Python-literal-compatible JSON (no true/false/
  # null); verify before switching to json.load.
  _CONFIGS = dict((cfg['config'], Config(**cfg)) for cfg in ast.literal_eval(f.read()))
615
616
# maps -l/--language flag values to their language driver objects
_LANGUAGES = {
    'c++': CLanguage('cxx', 'c++'),
    'c': CLanguage('c', 'c'),
    'node': NodeLanguage(),
    'php': PhpLanguage(),
    'python': PythonLanguage(),
    'ruby': RubyLanguage(),
    'csharp': CSharpLanguage(),
    'objc' : ObjCLanguage(),
    'sanity': Sanity()
    }
628
629
# maps a build config name to the msbuild Configuration used on Windows
_MSBUILD_CONFIG = {
    'dbg': 'Debug',
    'opt': 'Release',
    'gcov': 'Debug',
    }
635
636
def _windows_arch_option(arch):
  """Returns msbuild cmdline option for selected architecture."""
  if arch == 'default' or arch == 'x86':
    return '/p:Platform=Win32'
  elif arch == 'x64':
    return '/p:Platform=x64'
  else:
    # invalid --arch value: report and abort the whole run
    print 'Architecture %s not supported.' % arch
    sys.exit(1)
646
647
648 def _check_arch_option(arch):
649 """Checks that architecture option is valid."""
650 if platform_string() == 'windows':
651 _windows_arch_option(arch)
652 elif platform_string() == 'linux':
653 # On linux, we need to be running under docker with the right architecture.
654 runtime_arch = platform.architecture()[0]
655 if arch == 'default':
656 return
657 elif runtime_arch == '64bit' and arch == 'x64':
658 return
659 elif runtime_arch == '32bit' and arch == 'x86':
660 return
661 else:
662 print 'Architecture %s does not match current runtime architecture.' % arc h
663 sys.exit(1)
664 else:
665 if args.arch != 'default':
666 print 'Architecture %s not supported on current platform.' % args.arch
667 sys.exit(1)
668
669
def _windows_build_bat(compiler):
  """Returns name of build.bat for selected compiler."""
  # vs2013 is the default Windows toolchain
  if compiler == 'default' or compiler == 'vs2013':
    return 'vsprojects\\build_vs2013.bat'
  elif compiler == 'vs2015':
    return 'vsprojects\\build_vs2015.bat'
  elif compiler == 'vs2010':
    return 'vsprojects\\build_vs2010.bat'
  else:
    # invalid --compiler value: report and abort the whole run
    print 'Compiler %s not supported.' % compiler
    sys.exit(1)
681
682
def _windows_toolset_option(compiler):
  """Returns msbuild PlatformToolset for selected compiler."""
  # vs2013 is the default Windows toolchain
  if compiler == 'default' or compiler == 'vs2013':
    return '/p:PlatformToolset=v120'
  elif compiler == 'vs2015':
    return '/p:PlatformToolset=v140'
  elif compiler == 'vs2010':
    return '/p:PlatformToolset=v100'
  else:
    # invalid --compiler value: report and abort the whole run
    print 'Compiler %s not supported.' % compiler
    sys.exit(1)
694
695
def _docker_arch_suffix(arch):
  """Returns suffix to dockerfile dir to use."""
  # 'default' resolves to x64 images
  if arch == 'default' or arch == 'x64':
    return 'x64'
  elif arch == 'x86':
    return 'x86'
  else:
    # invalid --arch value: report and abort the whole run
    print 'Architecture %s not supported with current settings.' % arch
    sys.exit(1)
705
706
def runs_per_test_type(arg_str):
  """Auxiliary function to parse the "runs_per_test" flag.

  Returns:
    A positive integer or 0, the latter indicating an infinite number of
    runs.

  Raises:
    argparse.ArgumentTypeError: Upon invalid input.
  """
  if arg_str == 'inf':
    return 0
  try:
    n = int(arg_str)
    if n <= 0: raise ValueError
    return n
  # was a bare 'except:', which would also swallow KeyboardInterrupt and
  # SystemExit; int() and the explicit raise above only produce ValueError
  except ValueError:
    msg = '\'{}\' is not a positive integer or \'inf\''.format(arg_str)
    raise argparse.ArgumentTypeError(msg)
726
# parse command line
argp = argparse.ArgumentParser(description='Run grpc tests.')
argp.add_argument('-c', '--config',
                  choices=sorted(_CONFIGS.keys()),
                  default='opt')
argp.add_argument('-n', '--runs_per_test', default=1, type=runs_per_test_type,
        help='A positive integer or "inf". If "inf", all tests will run in an '
             'infinite loop. Especially useful in combination with "-f"')
argp.add_argument('-r', '--regex', default='.*', type=str)
argp.add_argument('-j', '--jobs', default=multiprocessing.cpu_count(), type=int)
argp.add_argument('-s', '--slowdown', default=1.0, type=float)
# boolean flags below use store_const rather than store_true (equivalent)
argp.add_argument('-f', '--forever',
                  default=False,
                  action='store_const',
                  const=True)
argp.add_argument('-t', '--travis',
                  default=False,
                  action='store_const',
                  const=True)
argp.add_argument('--newline_on_success',
                  default=False,
                  action='store_const',
                  const=True)
argp.add_argument('-l', '--language',
                  choices=['all'] + sorted(_LANGUAGES.keys()),
                  nargs='+',
                  default=['all'])
argp.add_argument('-S', '--stop_on_failure',
                  default=False,
                  action='store_const',
                  const=True)
argp.add_argument('--use_docker',
                  default=False,
                  action='store_const',
                  const=True,
                  help='Run all the tests under docker. That provides ' +
                  'additional isolation and prevents the need to install ' +
                  'language specific prerequisites. Only available on Linux.')
argp.add_argument('--allow_flakes',
                  default=False,
                  action='store_const',
                  const=True,
                  help='Allow flaky tests to show as passing (re-runs failed tests up to five times)')
argp.add_argument('--arch',
                  choices=['default', 'x86', 'x64'],
                  default='default',
                  help='Selects architecture to target. For some platforms "default" is the only supported choice.')
argp.add_argument('--compiler',
                  choices=['default',
                           'gcc4.4', 'gcc4.9', 'gcc5.3',
                           'vs2010', 'vs2013', 'vs2015'],
                  default='default',
                  help='Selects compiler to use. Allowed values depend on the platform and language.')
argp.add_argument('--build_only',
                  default=False,
                  action='store_const',
                  const=True,
                  help='Perform all the build steps but dont run any tests.')
argp.add_argument('--measure_cpu_costs', default=False, action='store_const', const=True,
                  help='Measure the cpu costs of tests')
argp.add_argument('--update_submodules', default=[], nargs='*',
                  help='Update some submodules before building. If any are updated, also run generate_projects. ' +
                       'Submodules are specified as SUBMODULE_NAME:BRANCH; if BRANCH is omitted, master is assumed.')
argp.add_argument('-a', '--antagonists', default=0, type=int)
argp.add_argument('-x', '--xml_report', default=None, type=str,
                  help='Generates a JUnit-compatible XML report')
args = argp.parse_args()
794
jobset.measure_cpu_costs = args.measure_cpu_costs

# update submodules if necessary
need_to_regenerate_projects = False
for spec in args.update_submodules:
  # each spec is SUBMODULE_NAME[:BRANCH]; the branch defaults to master
  spec = spec.split(':', 1)
  if len(spec) == 1:
    submodule = spec[0]
    branch = 'master'
  elif len(spec) == 2:
    submodule = spec[0]
    branch = spec[1]
  cwd = 'third_party/%s' % submodule
  def git(cmd, cwd=cwd):
    # cwd is bound as a default argument so each loop iteration keeps
    # its own value (avoids the late-binding closure pitfall)
    print 'in %s: git %s' % (cwd, cmd)
    subprocess.check_call('git %s' % cmd, cwd=cwd, shell=True)
  git('fetch')
  git('checkout %s' % branch)
  git('pull origin %s' % branch)
  # a submodule with a gen_build_yaml script feeds project generation
  if os.path.exists('src/%s/gen_build_yaml.py' % submodule):
    need_to_regenerate_projects = True
if need_to_regenerate_projects:
  if jobset.platform_string() == 'linux':
    subprocess.check_call('tools/buildgen/generate_projects.sh', shell=True)
  else:
    print 'WARNING: may need to regenerate projects, but since we are not on'
    print '         Linux this step is being skipped. Compilation MAY fail.'
822
823
# grab config
run_config = _CONFIGS[args.config]
build_config = run_config.build_config

if args.travis:
  # on CI, force gRPC API tracing in every wrapper-driven test
  _FORCE_ENVIRON_FOR_WRAPPERS = {'GRPC_TRACE': 'api'}

if 'all' in args.language:
  lang_list = _LANGUAGES.keys()
else:
  lang_list = args.language
# We don't support code coverage on some languages
if 'gcov' in args.config:
  for bad in ['objc', 'sanity']:
    if bad in lang_list:
      lang_list.remove(bad)

languages = set(_LANGUAGES[l] for l in lang_list)
for l in languages:
  l.configure(run_config, args)

# languages with custom make options (e.g. csharp on mac) cannot share a
# make invocation with other languages
language_make_options=[]
if any(language.make_options() for language in languages):
  if len(languages) != 1:
    print 'languages with custom make options cannot be built simultaneously with other languages'
    sys.exit(1)
  else:
    language_make_options = next(iter(languages)).make_options()
852
if args.use_docker:
  if not args.travis:
    print 'Seen --use_docker flag, will run tests under docker.'
    print
    print 'IMPORTANT: The changes you are testing need to be locally committed'
    print 'because only the committed changes in the current branch will be'
    print 'copied to the docker environment.'
    time.sleep(5)

  # all selected languages must agree on a single docker image
  dockerfile_dirs = set([l.dockerfile_dir() for l in languages])
  if len(dockerfile_dirs) > 1:
    print 'Languages to be tested require running under different docker images.'
    sys.exit(1)
  dockerfile_dir = next(iter(dockerfile_dirs))

  # re-invoke this script inside the container, minus the --use_docker flag
  # (its presence is signalled to the child via RUN_TESTS_COMMAND)
  child_argv = [ arg for arg in sys.argv if not arg == '--use_docker' ]
  run_tests_cmd = 'python tools/run_tests/run_tests.py %s' % ' '.join(child_argv[1:])

  env = os.environ.copy()
  env['RUN_TESTS_COMMAND'] = run_tests_cmd
  env['DOCKERFILE_DIR'] = dockerfile_dir
  env['DOCKER_RUN_SCRIPT'] = 'tools/jenkins/docker_run_tests.sh'
  if args.xml_report:
    env['XML_REPORT'] = args.xml_report
  if not args.travis:
    env['TTY_FLAG'] = '-t'  # enables Ctrl-C when not on Jenkins.

  subprocess.check_call(['tools/jenkins/build_docker_and_run_tests.sh'],
                        shell=True,
                        env=env)
  sys.exit(0)

_check_arch_option(args.arch)
886
def make_jobspec(cfg, targets, makefile='Makefile'):
  """Return JobSpecs that build `targets` for config `cfg` via `makefile`."""
  if platform_string() != 'windows':
    if not targets:
      return []
    cmdline = [os.getenv('MAKE', 'make'),
               '-f', makefile,
               '-j', '%d' % args.jobs,
               'EXTRA_DEFINES=GRPC_TEST_SLOWDOWN_MACHINE_FACTOR=%f' % args.slowdown,
               'CONFIG=%s' % cfg]
    cmdline += language_make_options
    if args.travis:
      cmdline += ['JENKINS_BUILD=1']
    cmdline += targets
    return [jobset.JobSpec(cmdline, timeout_seconds=None)]
  # Windows: one msbuild invocation per solution.
  # better do parallel compilation: empirically /m:2 gives the best
  # performance/price and should prevent overloading the windows workers;
  # also disable PDB generation -- it's broken, and we don't need it during CI.
  extra_args = ['/m:2', '/p:Jenkins=true']
  specs = []
  for target in targets:
    specs.append(jobset.JobSpec([_windows_build_bat(args.compiler),
                                 'vsprojects\\%s.sln' % target,
                                 '/p:Configuration=%s' % _MSBUILD_CONFIG[cfg]] +
                                extra_args +
                                language_make_options,
                                shell=True, timeout_seconds=None))
  return specs
917
# collect build targets per makefile (several languages can share one)
make_targets = {}
for l in languages:
  makefile = l.makefile_name()
  make_targets[makefile] = make_targets.get(makefile, set()).union(
      set(l.make_targets()))
923
def build_step_environ(cfg):
  """Environment for a build step: CONFIG plus MSBUILD_CONFIG when mapped."""
  env = {'CONFIG': cfg}
  msbuild = _MSBUILD_CONFIG.get(cfg)
  if msbuild:
    env['MSBUILD_CONFIG'] = msbuild
  return env
930
# pre-build steps for every selected language, deduplicated
build_steps = list(set(
    jobset.JobSpec(cmdline, environ=build_step_environ(build_config), flake_retries=5)
    for l in languages
    for cmdline in l.pre_build_steps()))
if make_targets:
  # one make/msbuild invocation per makefile covering its union of targets
  make_commands = itertools.chain.from_iterable(make_jobspec(build_config, list(targets), makefile) for (makefile, targets) in make_targets.iteritems())
  build_steps.extend(set(make_commands))
build_steps.extend(set(
    jobset.JobSpec(cmdline, environ=build_step_environ(build_config), timeout_seconds=None)
    for l in languages
    for cmdline in l.build_steps()))

# steps run once after all tests finish (e.g. coverage collection)
post_tests_steps = list(set(
    jobset.JobSpec(cmdline, environ=build_step_environ(build_config))
    for l in languages
    for cmdline in l.post_tests_steps()))
runs_per_test = args.runs_per_test
forever = args.forever
949
950
class TestCache(object):
  """Cache for running tests."""

  def __init__(self, use_cache_results):
    # cmdline -> binary hash of the last run that succeeded
    self._last_successful_run = {}
    self._use_cache_results = use_cache_results
    self._last_save = time.time()

  def should_run(self, cmdline, bin_hash):
    """True unless this exact binary already passed and caching is enabled."""
    if cmdline not in self._last_successful_run:
      return True
    if self._last_successful_run[cmdline] != bin_hash:
      return True
    return not self._use_cache_results

  def finished(self, cmdline, bin_hash):
    """Record a successful run; persist at most about once per second."""
    self._last_successful_run[cmdline] = bin_hash
    if time.time() - self._last_save > 1:
      self.save()

  def dump(self):
    """Serialize the cache as a JSON-friendly list of records."""
    return [{'cmdline': k, 'hash': v}
            for k, v in self._last_successful_run.iteritems()]

  def parse(self, exdump):
    """Rebuild the cache from a dump()-style list."""
    self._last_successful_run = dict((o['cmdline'], o['hash']) for o in exdump)

  def save(self):
    """Write the cache to .run_tests_cache in the current directory."""
    with open('.run_tests_cache', 'w') as f:
      f.write(json.dumps(self.dump()))
    self._last_save = time.time()

  def maybe_load(self):
    """Load the cache file if one exists."""
    if os.path.exists('.run_tests_cache'):
      with open('.run_tests_cache') as f:
        self.parse(json.loads(f.read()))
989
990
991 def _start_port_server(port_server_port):
992 # check if a compatible port server is running
993 # if incompatible (version mismatch) ==> start a new one
994 # if not running ==> start a new one
995 # otherwise, leave it up
996 try:
997 version = int(urllib2.urlopen(
998 'http://localhost:%d/version_number' % port_server_port,
999 timeout=1).read())
1000 print 'detected port server running version %d' % version
1001 running = True
1002 except Exception as e:
1003 print 'failed to detect port server: %s' % sys.exc_info()[0]
1004 print e.strerror
1005 running = False
1006 if running:
1007 current_version = int(subprocess.check_output(
1008 [sys.executable, os.path.abspath('tools/run_tests/port_server.py'),
1009 'dump_version']))
1010 print 'my port server is version %d' % current_version
1011 running = (version >= current_version)
1012 if not running:
1013 print 'port_server version mismatch: killing the old one'
1014 urllib2.urlopen('http://localhost:%d/quitquitquit' % port_server_port).rea d()
1015 time.sleep(1)
1016 if not running:
1017 fd, logfile = tempfile.mkstemp()
1018 os.close(fd)
1019 print 'starting port_server, with log file %s' % logfile
1020 args = [sys.executable, os.path.abspath('tools/run_tests/port_server.py'),
1021 '-p', '%d' % port_server_port, '-l', logfile]
1022 env = dict(os.environ)
1023 env['BUILD_ID'] = 'pleaseDontKillMeJenkins'
1024 if platform_string() == 'windows':
1025 # Working directory of port server needs to be outside of Jenkins
1026 # workspace to prevent file lock issues.
1027 tempdir = tempfile.mkdtemp()
1028 port_server = subprocess.Popen(
1029 args,
1030 env=env,
1031 cwd=tempdir,
1032 creationflags = 0x00000008, # detached process
1033 close_fds=True)
1034 else:
1035 port_server = subprocess.Popen(
1036 args,
1037 env=env,
1038 preexec_fn=os.setsid,
1039 close_fds=True)
1040 time.sleep(1)
1041 # ensure port server is up
1042 waits = 0
1043 while True:
1044 if waits > 10:
1045 print 'killing port server due to excessive start up waits'
1046 port_server.kill()
1047 if port_server.poll() is not None:
1048 print 'port_server failed to start'
1049 # try one final time: maybe another build managed to start one
1050 time.sleep(1)
1051 try:
1052 urllib2.urlopen('http://localhost:%d/get' % port_server_port,
1053 timeout=1).read()
1054 print 'last ditch attempt to contact port server succeeded'
1055 break
1056 except:
1057 traceback.print_exc()
1058 port_log = open(logfile, 'r').read()
1059 print port_log
1060 sys.exit(1)
1061 try:
1062 urllib2.urlopen('http://localhost:%d/get' % port_server_port,
1063 timeout=1).read()
1064 print 'port server is up and ready'
1065 break
1066 except socket.timeout:
1067 print 'waiting for port_server: timeout'
1068 traceback.print_exc();
1069 time.sleep(1)
1070 waits += 1
1071 except urllib2.URLError:
1072 print 'waiting for port_server: urlerror'
1073 traceback.print_exc();
1074 time.sleep(1)
1075 waits += 1
1076 except:
1077 traceback.print_exc()
1078 port_server.kill()
1079 raise
1080
1081
1082 def _calculate_num_runs_failures(list_of_results):
1083 """Caculate number of runs and failures for a particular test.
1084
1085 Args:
1086 list_of_results: (List) of JobResult object.
1087 Returns:
1088 A tuple of total number of runs and failures.
1089 """
1090 num_runs = len(list_of_results) # By default, there is 1 run per JobResult.
1091 num_failures = 0
1092 for jobresult in list_of_results:
1093 if jobresult.retries > 0:
1094 num_runs += jobresult.retries
1095 if jobresult.num_failures > 0:
1096 num_failures += jobresult.num_failures
1097 return num_runs, num_failures
1098
1099
# _build_and_run results
class BuildAndRunError(object):
  """Sentinel markers for the failure kinds _build_and_run can report."""

  # Three distinct anonymous objects, compared by identity only.
  BUILD, TEST, POST_TEST = object(), object(), object()
1106
1107
# returns a list of things that failed (or an empty list on success)
def _build_and_run(
    check_cancelled, newline_on_success, cache, xml_report=None, build_only=False):
  """Do one pass of building & running tests.

  Args:
    check_cancelled: callable polled by jobset to abort early.
    newline_on_success: whether to emit a newline after each passing job.
    cache: TestCache used to skip unchanged, previously passing tests.
    xml_report: optional path for a JUnit XML report.
    build_only: if True, stop after the build step.
  Returns:
    A list of BuildAndRunError values (empty on success).
  """
  # build latest sequentially
  num_failures, resultset = jobset.run(
      build_steps, maxjobs=1, stop_on_failure=True,
      newline_on_success=newline_on_success, travis=args.travis)
  if num_failures:
    return [BuildAndRunError.BUILD]

  if build_only:
    if xml_report:
      report_utils.render_junit_xml_report(resultset, xml_report)
    return []

  # start antagonists
  antagonists = [subprocess.Popen(['tools/run_tests/antagonist.py'])
                 for _ in range(0, args.antagonists)]
  port_server_port = 32767
  _start_port_server(port_server_port)
  resultset = None
  num_test_failures = 0
  try:
    infinite_runs = runs_per_test == 0
    one_run = set(
        spec
        for language in languages
        for spec in language.test_specs()
        if re.search(args.regex, spec.shortname))
    # When running on travis, we want our test runs to be as similar as possible
    # for reproducibility purposes.
    if args.travis:
      massaged_one_run = sorted(one_run, key=lambda x: x.shortname)
    else:
      # whereas otherwise, we want to shuffle things up to give all tests a
      # chance to run.
      massaged_one_run = list(one_run)  # random.shuffle needs an indexable seq.
      random.shuffle(massaged_one_run)  # which it modifies in-place.
    if infinite_runs:
      assert len(massaged_one_run) > 0, 'Must have at least one test for a -n inf run'
    runs_sequence = (itertools.repeat(massaged_one_run) if infinite_runs
                     else itertools.repeat(massaged_one_run, runs_per_test))
    all_runs = itertools.chain.from_iterable(runs_sequence)

    num_test_failures, resultset = jobset.run(
        all_runs, check_cancelled, newline_on_success=newline_on_success,
        travis=args.travis, infinite_runs=infinite_runs, maxjobs=args.jobs,
        stop_on_failure=args.stop_on_failure,
        cache=cache if not xml_report else None,
        add_env={'GRPC_TEST_PORT_SERVER': 'localhost:%d' % port_server_port})
    if resultset:
      # .items() (not the Python-2-only .iteritems()) works on 2 and 3.
      for k, v in resultset.items():
        num_runs, num_failures = _calculate_num_runs_failures(v)
        if num_failures == num_runs:  # what about infinite_runs???
          jobset.message('FAILED', k, do_newline=True)
        elif num_failures > 0:
          jobset.message(
              'FLAKE', '%s [%d/%d runs flaked]' % (k, num_failures, num_runs),
              do_newline=True)
        else:
          jobset.message('PASSED', k, do_newline=True)
  finally:
    # Always reap antagonist processes, even when the run raises.
    for antagonist in antagonists:
      antagonist.kill()
    if xml_report and resultset:
      report_utils.render_junit_xml_report(resultset, xml_report)

  number_failures, _ = jobset.run(
      post_tests_steps, maxjobs=1, stop_on_failure=True,
      newline_on_success=newline_on_success, travis=args.travis)

  out = []
  if number_failures:
    out.append(BuildAndRunError.POST_TEST)
  if num_test_failures:
    out.append(BuildAndRunError.TEST)

  if cache:
    cache.save()

  return out
1189
1190
# Use cached results only for single-run invocations; load any saved cache.
test_cache = TestCache(runs_per_test == 1)
test_cache.maybe_load()

if forever:
  # Watch-and-rebuild loop: rerun the whole pass whenever sources change.
  success = True
  while True:
    dw = watch_dirs.DirWatcher(['src', 'include', 'test', 'examples'])
    initial_time = dw.most_recent_change()
    have_files_changed = lambda: dw.most_recent_change() != initial_time
    previous_success = success
    # BUG FIX: _build_and_run returns a list of errors, not a count; the old
    # "== 0" comparison was always False and `success` was never updated, so
    # the fail->pass transition message below could never fire.
    errors = _build_and_run(check_cancelled=have_files_changed,
                            newline_on_success=False,
                            cache=test_cache,
                            build_only=args.build_only)
    success = not errors
    if not previous_success and not errors:
      jobset.message('SUCCESS',
                     'All tests are now passing properly',
                     do_newline=True)
    jobset.message('IDLE', 'No change detected')
    while not have_files_changed():
      time.sleep(1)
else:
  errors = _build_and_run(check_cancelled=lambda: False,
                          newline_on_success=args.newline_on_success,
                          cache=test_cache,
                          xml_report=args.xml_report,
                          build_only=args.build_only)
  if not errors:
    jobset.message('SUCCESS', 'All tests passed', do_newline=True)
  else:
    jobset.message('FAILED', 'Some tests failed', do_newline=True)
  # Fold the distinct failure kinds into a bitmask exit code.
  exit_code = 0
  if BuildAndRunError.BUILD in errors:
    exit_code |= 1
  if BuildAndRunError.TEST in errors and not args.travis:
    exit_code |= 2
  if BuildAndRunError.POST_TEST in errors:
    exit_code |= 4
  sys.exit(exit_code)
OLDNEW
« no previous file with comments | « third_party/grpc/tools/run_tests/run_stress_tests.py ('k') | third_party/grpc/tools/run_tests/sanity/check_cache_mk.sh » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698