Index: tools/nanobench_flags.py
diff --git a/tools/nanobench_flags.py b/tools/nanobench_flags.py
deleted file mode 100755
index 4f8016b178f8225fc7952803b988316fb5934592..0000000000000000000000000000000000000000
--- a/tools/nanobench_flags.py
+++ /dev/null
@@ -1,205 +0,0 @@
-#!/usr/bin/env python
-
-#
-# Copyright 2015 Google Inc.
-#
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-#
-
-usage = '''
-Write extra flags to outfile for nanobench based on the bot name:
-  $ python nanobench_flags.py outfile Perf-Android-GCC-GalaxyS3-GPU-Mali400-Arm7-Release
-Or run self-tests:
-  $ python nanobench_flags.py test
-'''
-
-import inspect
-import json
-import os
-import sys
-
-
-def lineno():
-  caller = inspect.stack()[1]  # Up one level to our caller.
-  return inspect.getframeinfo(caller[0]).lineno
-
-
-cov_start = lineno()+1  # We care about coverage starting just past this def.
-def get_args(bot):
-  args = ['--pre_log']
-
-  if 'GPU' in bot:
-    args.append('--images')
-    args.extend(['--gpuStatsDump', 'true'])
-
-  if 'Android' in bot and 'GPU' in bot:
-    args.extend(['--useThermalManager', '1,1,10,1000'])
-
-  args.extend(['--scales', '1.0', '1.1'])
-
-  if 'iOS' in bot:
-    args.extend(['--skps', 'ignore_skps'])
-
-  config = ['565', '8888', 'gpu', 'nonrendering', 'angle', 'hwui']
-  config += ['f16', 'srgb']
-  # The S4 crashes and the NP produces a long error stream when we run with
-  # MSAA.
-  if ('GalaxyS4' not in bot and
-      'NexusPlayer' not in bot):
-    if 'Android' in bot:
-      # The TegraX1 has a regular OpenGL implementation. We bench that instead
-      # of ES.
-      if 'TegraX1' in bot:
-        config.remove('gpu')
-        config.extend(['gl', 'glmsaa4', 'glnvpr4', 'glnvprdit4'])
-      else:
-        config.extend(['msaa4', 'nvpr4', 'nvprdit4'])
-    else:
-      config.extend(['msaa16', 'nvpr16', 'nvprdit16'])
-
-  # Bench instanced rendering on a limited number of platforms
-  if 'Nexus6' in bot:
-    config.append('esinst')  # esinst4 isn't working yet on Adreno.
-  elif 'TegraX1' in bot:
-    config.extend(['glinst', 'glinst4'])
-  elif 'MacMini6.2' in bot:
-    config.extend(['glinst', 'glinst16'])
-
-  if 'Vulkan' in bot:
-    config = ['vk']
-
-  args.append('--config')
-  args.extend(config)
-
-  if 'Valgrind' in bot:
-    # Don't care about Valgrind performance.
-    args.extend(['--loops', '1'])
-    args.extend(['--samples', '1'])
-    # Ensure that the bot framework does not think we have timed out.
-    args.extend(['--keepAlive', 'true'])
-
-  if 'HD2000' in bot:
-    args.extend(['--GPUbenchTileW', '256'])
-    args.extend(['--GPUbenchTileH', '256'])
-
-  match = []
-  if 'Android' in bot:
-    # Segfaults when run as GPU bench. Very large texture?
-    match.append('~blurroundrect')
-    match.append('~patch_grid')  # skia:2847
-    match.append('~desk_carsvg')
-  if 'HD2000' in bot:
-    match.extend(['~gradient', '~etc1bitmap'])  # skia:2895
-  if 'NexusPlayer' in bot:
-    match.append('~desk_unicodetable')
-  if 'GalaxyS4' in bot:
-    match.append('~GLInstancedArraysBench')  # skia:4371
-  if 'Nexus5' in bot:
-    match.append('~keymobi_shop_mobileweb_ebay_com.skp')  # skia:5178
-  if 'iOS' in bot:
-    match.append('~blurroundrect')
-    match.append('~patch_grid')  # skia:2847
-    match.append('~desk_carsvg')
-    match.append('~keymobi')
-    match.append('~path_hairline')
-    match.append('~GLInstancedArraysBench')  # skia:4714
-
-  # the 32-bit GCE bots run out of memory in DM when running these large images
-  # so defensively disable them in nanobench, too.
-  # FIXME (scroggo): This may have just been due to SkImageDecoder's
-  # buildTileIndex leaking memory (https://bug.skia.org/4360). That is
-  # disabled by default for nanobench, so we may not need this.
-  # FIXME (scroggo): Share image blacklists between dm and nanobench?
-  if 'x86' in bot and 'x86-64' not in bot:
-    match.append('~interlaced1.png')
-    match.append('~interlaced2.png')
-    match.append('~interlaced3.png')
-
-  # This low-end Android bot crashes about 25% of the time while running the
-  # (somewhat intense) shapes benchmarks.
-  if 'Perf-Android-GCC-GalaxyS3-GPU-Mali400-Arm7-Release' in bot:
-    match.append('~shapes_')
-
-  # We do not need or want to benchmark the decodes of incomplete images.
-  # In fact, in nanobench we assert that the full image decode succeeds.
-  match.append('~inc0.gif')
-  match.append('~inc1.gif')
-  match.append('~incInterlaced.gif')
-  match.append('~inc0.jpg')
-  match.append('~incGray.jpg')
-  match.append('~inc0.wbmp')
-  match.append('~inc1.wbmp')
-  match.append('~inc0.webp')
-  match.append('~inc1.webp')
-  match.append('~inc0.ico')
-  match.append('~inc1.ico')
-  match.append('~inc0.png')
-  match.append('~inc1.png')
-  match.append('~inc2.png')
-  match.append('~inc12.png')
-  match.append('~inc13.png')
-  match.append('~inc14.png')
-  match.append('~inc0.webp')
-  match.append('~inc1.webp')
-
-  if match:
-    args.append('--match')
-    args.extend(match)
-
-  return args
-cov_end = lineno()  # Don't care about code coverage past here.
-
-
-def self_test():
-  args = {}
-  cases = [
-    'Perf-Android-GCC-Nexus6-GPU-Adreno420-Arm7-Release',
-    'Test-Android-GCC-Nexus6-GPU-Adreno420-Arm7-Debug',
-    'Perf-Android-Nexus7-Tegra3-Arm7-Release',
-    'Perf-Android-GCC-NexusPlayer-GPU-PowerVR-x86-Release',
-    'Perf-Android-GCC-GalaxyS3-GPU-Mali400-Arm7-Release',
-    'Test-Mac-Clang-MacMini6.2-GPU-HD4000-x86_64-Debug',
-    'Test-Ubuntu-GCC-ShuttleA-GPU-GTX550Ti-x86_64-Release-Valgrind',
-    'Test-Win7-MSVC-ShuttleA-GPU-HD2000-x86-Debug-ANGLE',
-    'Test-iOS-Clang-iPad4-GPU-SGX554-Arm7-Debug',
-    'Test-Android-GCC-GalaxyS4-GPU-SGX544-Arm7-Release',
-    'Perf-Android-GCC-NVIDIA_Shield-GPU-TegraX1-Arm64-Release',
-    'Perf-Android-GCC-NVIDIA_Shield-GPU-TegraX1-Arm64-Release-Vulkan',
-    'Perf-Android-GCC-Nexus5-GPU-Adreno330-Arm7-Release',
-  ]
-
-  this_file = os.path.basename(__file__)
-  try:
-    import coverage
-    cov = coverage.coverage()
-    cov.start()
-    for case in cases:
-      args[case] = get_args(case)
-    cov.stop()
-
-    _, _, not_run, _ = cov.analysis(this_file)
-    filtered = [line for line in not_run if line > cov_start and line < cov_end]
-    if filtered:
-      print 'Lines not covered by test cases: ', filtered
-      sys.exit(1)
-  except ImportError:
-    print ("We cannot guarantee that this file's tests are comprehensive " +
-           "without coverage.py. Please install it when you get a chance.")
-
-  golden = this_file.replace('.py', '.json')
-  with open(os.path.join(os.path.dirname(__file__), golden), 'w') as f:
-    json.dump(args, f, indent=2, sort_keys=True)
-
-
-if __name__ == '__main__':
-  if len(sys.argv) == 2 and sys.argv[1] == 'test':
-    self_test()
-    sys.exit(0)
-
-  if len(sys.argv) != 3:
-    print usage
-    sys.exit(1)
-
-  with open(sys.argv[1], 'w') as out:
-    json.dump(get_args(sys.argv[2]), out)
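
For context, a minimal sketch of how this script was driven and how its output could be consumed, based on the usage string and the json.dump call in the deleted file; the output filename and the consuming step are illustrative assumptions, not taken from the diff:

  import json
  import subprocess

  bot = 'Perf-Android-GCC-Nexus5-GPU-Adreno330-Arm7-Release'  # a bot name from the self-test cases
  # Write the extra nanobench flags for this bot to a JSON file (path is hypothetical).
  subprocess.check_call(
      ['python', 'tools/nanobench_flags.py', 'nanobench_flags.json', bot])
  with open('nanobench_flags.json') as f:
    extra_args = json.load(f)  # a flat list of flag strings, e.g. ['--pre_log', '--scales', '1.0', '1.1', ...]
  # A bot recipe would presumably append extra_args to its nanobench command line (assumption).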