Index: slave/skia_slave_scripts/check_for_regressions.py
diff --git a/slave/skia_slave_scripts/check_for_regressions.py b/slave/skia_slave_scripts/check_for_regressions.py
index a1c75efc9ad44be1299b703be1228e67aaeeea48..9c6d8fd7c5a63561ddba617709305a21414a86f3 100644
--- a/slave/skia_slave_scripts/check_for_regressions.py
+++ b/slave/skia_slave_scripts/check_for_regressions.py
@@ -6,10 +6,13 @@
""" Check for regressions in bench data. """
from build_step import BuildStep
+from config_private import AUTOGEN_SVN_BASEURL
+from slave import slave_utils
from utils import shell_utils
import builder_name_schema
import os
+import subprocess
import sys
@@ -21,16 +24,29 @@ class CheckForRegressions(BuildStep):
**kwargs)
def _RunInternal(self, representation):
+ # Reads expectations from skia-autogen svn repo using 'svn cat'.
+ expectations_filename = ('bench_expectations_' +
+ builder_name_schema.GetWaterfallBot(self._builder_name) + '.txt')
+ url = '%s/%s/%s' % (AUTOGEN_SVN_BASEURL, 'bench', expectations_filename)
+
+ svn_binary = slave_utils.SubversionExe()
+ proc = subprocess.Popen([svn_binary, 'cat', url],
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ (stdout, stderr) = proc.communicate()

borenet 2014/03/14 12:06:46
Please use shell_utils.run() for this.

benchen 2014/03/14 13:25:42
Done. Thanks for the suggestion.
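A minimal sketch of the change borenet asks for, assuming shell_utils.run() returns the command's output as a string and raises an exception on a non-zero exit code; the exact exception class is not visible in this review, so the broad except below is illustrative only:

  svn_binary = slave_utils.SubversionExe()
  try:
    # Hypothetical shell_utils.run() replacement for the Popen/communicate
    # pair above; 'url' is the expectations URL built earlier in this method.
    stdout = shell_utils.run([svn_binary, 'cat', url])
  except Exception:  # Assumed failure mode when the expectations file is absent.
    print 'Skip due to missing expectations: %s' % url
    return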
+ if proc.returncode != 0:
+ print 'Skip due to missing expectations: %s' % url
+ return
+
path_to_check_bench_regressions = os.path.join('bench',
'check_bench_regressions.py')
- # TODO(borenet): We should move these expectations into expectations/bench.
+
+ # Writes the expectations from svn repo to the local file.
path_to_bench_expectations = os.path.join(
- 'bench',
- 'bench_expectations_%s.txt' % builder_name_schema.GetWaterfallBot(
- self._builder_name))
- if not os.path.isfile(path_to_bench_expectations):
- print 'Skip due to missing expectations: %s' % path_to_bench_expectations
- return
+ self._perf_range_input_dir, expectations_filename)
+ os.makedirs(self._perf_range_input_dir)
+ with open(path_to_bench_expectations, 'w') as file_handle:
+ file_handle.write(stdout)
+
cmd = ['python', path_to_check_bench_regressions,
'-a', representation,
'-b', self._builder_name,