| OLD | NEW |
| 1 import json | 1 import json |
| 2 | 2 |
| 3 from slave import recipe_test_api | 3 from slave import recipe_test_api |
| 4 | 4 |
| 5 from .util import GTestResults, TestResults | 5 from .util import GTestResults, TestResults |
| 6 | 6 |
| 7 class JsonTestApi(recipe_test_api.RecipeTestApi): | 7 class JsonTestApi(recipe_test_api.RecipeTestApi): |
| 8 @recipe_test_api.placeholder_step_data | 8 @recipe_test_api.placeholder_step_data |
| 9 @staticmethod | 9 @staticmethod |
| 10 def output(data, retcode=None): | 10 def output(data, retcode=None): |
| 11 return json.dumps(data), retcode | 11 return json.dumps(data), retcode |
| 12 | 12 |
| 13 # TODO(phajdan.jr): Rename to layout_test_results. | |
| 14 @recipe_test_api.placeholder_step_data | 13 @recipe_test_api.placeholder_step_data |
| 15 def test_results(self, test_results, retcode=None): | 14 def test_results(self, test_results, retcode=None): |
| 16 return self.output(test_results.as_jsonish(), retcode) | 15 return self.output(test_results.as_jsonish(), retcode) |
| 17 | 16 |
| 18 # TODO(phajdan.jr): Rename to canned_layout_test_output. | |
| 19 def canned_test_output(self, passing, minimal=False, passes=9001, | 17 def canned_test_output(self, passing, minimal=False, passes=9001, |
| 20 num_additional_failures=0, | 18 num_additional_failures=0, |
| 19 path_separator=None, |
| 21 retcode=None): | 20 retcode=None): |
| 22 """Produces a 'json test results' compatible object with some canned tests. | 21 """Produces a 'json test results' compatible object with some canned tests. |
| 23 Args: | 22 Args: |
| 24 passing - Determines if this test result is passing or not. | 23 passing - Determines if this test result is passing or not. |
| 25 passes - The number of (theoretically) passing tests. | 24 passes - The number of (theoretically) passing tests. |
| 26 minimal - If True, the canned output will omit one test to emulate the | 25 minimal - If True, the canned output will omit one test to emulate the |
| 27 effect of running fewer than the total number of tests. | 26 effect of running fewer than the total number of tests. |
| 28 num_additional_failures - the number of failed tests to simulate in | 27 num_additional_failures - the number of failed tests to simulate in |
| 29 addition to the three generated if passing is False | 28 addition to the three generated if passing is False |
| 30 """ | 29 """ |
| 31 if_failing = lambda fail_val: None if passing else fail_val | 30 if_failing = lambda fail_val: None if passing else fail_val |
| 32 t = TestResults() | 31 t = TestResults() |
| 32 sep = path_separator or '/' |
| 33 if path_separator: |
| 34 t.raw['path_separator'] = path_separator |
| 33 t.raw['num_passes'] = passes | 35 t.raw['num_passes'] = passes |
| 34 t.raw['num_regressions'] = 0 | 36 t.raw['num_regressions'] = 0 |
| 35 t.add_result('flake/totally-flakey.html', 'PASS', | 37 t.add_result('flake%stotally-flakey.html' % sep, 'PASS', |
| 36 if_failing('TIMEOUT PASS')) | 38 if_failing('TIMEOUT PASS')) |
| 37 t.add_result('flake/timeout-then-crash.html', 'CRASH', | 39 t.add_result('flake%stimeout-then-crash.html' % sep, 'CRASH', |
| 38 if_failing('TIMEOUT CRASH')) | 40 if_failing('TIMEOUT CRASH')) |
| 39 t.add_result('flake/slow.html', 'SLOW', | 41 t.add_result('flake%sslow.html' % sep, 'SLOW', |
| 40 if_failing('TIMEOUT SLOW')) | 42 if_failing('TIMEOUT SLOW')) |
| 41 t.add_result('tricky/totally-maybe-not-awesome.html', 'PASS', | 43 t.add_result('tricky%stotally-maybe-not-awesome.html' % sep, 'PASS', |
| 42 if_failing('FAIL')) | 44 if_failing('FAIL')) |
| 43 t.add_result('bad/totally-bad-probably.html', 'PASS', | 45 t.add_result('bad%stotally-bad-probably.html' % sep, 'PASS', |
| 44 if_failing('FAIL')) | 46 if_failing('FAIL')) |
| 45 if not minimal: | 47 if not minimal: |
| 46 t.add_result('good/totally-awesome.html', 'PASS') | 48 t.add_result('good%stotally-awesome.html' % sep, 'PASS') |
| 47 for i in xrange(num_additional_failures): | 49 for i in xrange(num_additional_failures): |
| 48 t.add_result('bad/failing%d.html' %i, 'PASS', 'FAIL') | 50 t.add_result('bad%sfailing%d.html' % (sep, i), 'PASS', 'FAIL') |
| 49 ret = self.test_results(t) | 51 ret = self.test_results(t) |
| 50 if retcode is not None: | 52 if retcode is not None: |
| 51 ret.retcode = retcode | 53 ret.retcode = retcode |
| 52 else: | 54 else: |
| 53 ret.retcode = min(t.raw['num_regressions'], t.MAX_FAILURES_EXIT_STATUS) | 55 ret.retcode = min(t.raw['num_regressions'], t.MAX_FAILURES_EXIT_STATUS) |
| 54 return ret | 56 return ret |
| 55 | 57 |
| 56 @recipe_test_api.placeholder_step_data | 58 @recipe_test_api.placeholder_step_data |
| 57 def gtest_results(self, test_results, retcode=None): | 59 def gtest_results(self, test_results, retcode=None): |
| 58 return self.output(test_results.as_jsonish(), retcode) | 60 return self.output(test_results.as_jsonish(), retcode) |
| (...skipping 36 matching lines...) |
| 95 | 97 |
| 96 canned_jsonish = { | 98 canned_jsonish = { |
| 97 'per_iteration_data': [cur_iteration_data] | 99 'per_iteration_data': [cur_iteration_data] |
| 98 } | 100 } |
| 99 canned_jsonish.update(extra_json or {}) | 101 canned_jsonish.update(extra_json or {}) |
| 100 | 102 |
| 101 t = GTestResults(canned_jsonish) | 103 t = GTestResults(canned_jsonish) |
| 102 ret = self.gtest_results(t) | 104 ret = self.gtest_results(t) |
| 103 ret.retcode = None if passing else 1 | 105 ret.retcode = None if passing else 1 |
| 104 return ret | 106 return ret |
| OLD | NEW |