| OLD | NEW |
| 1 # Copyright 2017 The Chromium Authors. All rights reserved. | 1 # Copyright 2017 The Chromium Authors. All rights reserved. |
| 2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
| 3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
| 4 | 4 |
| 5 from google.appengine.ext import ndb | 5 from google.appengine.ext import ndb |
| 6 | 6 |
| 7 from common.pipeline_wrapper import BasePipeline | 7 from common.pipeline_wrapper import BasePipeline |
| 8 from gae_libs.http.http_client_appengine import HttpClientAppengine |
| 8 from model.flake.master_flake_analysis import DataPoint | 9 from model.flake.master_flake_analysis import DataPoint |
| 10 from waterfall import swarming_util |
| 11 |
| 12 |
| 13 def _GetSwarmingTaskIdForTryJob(report, revision, step_name, test_name): |
| 14 """Checks the json output of each task and returns the id of the one with test results.
| 15 """ |
| 16 if not report: |
| 17 return None |
| 18 |
| 19 http_client = HttpClientAppengine() |
| 20 |
| 21 step_result = report.get('result', {}).get(revision, {}).get( |
| 22 step_name, {}) |
| 23 pass_fail_counts = step_result.get('pass_fail_counts', {}).get(test_name) |
| 24 task_ids = step_result.get('step_metadata', {}).get('swarm_task_ids', []) |
| 25 |
| 26 if len(task_ids) == 1: |
| 27 return task_ids[0] |
| 28 |
| 29 if not pass_fail_counts: # Test doesn't exist. |
| 30 return task_ids[0] if task_ids else None |
| 31 |
| 32 for task_id in task_ids: |
| 33 output_json = swarming_util.GetIsolatedOutputForTask(task_id, http_client) |
| 34 if output_json: |
| 35 for data in output_json.get('per_iteration_data', []): |
| 36 # If this task doesn't have result, per_iteration_data will look like |
| 37 # [{}, {}, ...] |
| 38 if data: |
| 39 return task_id |
| 40 |
| 41 return None |
| 9 | 42 |
| 10 | 43 |
| 11 class ProcessFlakeTryJobResultPipeline(BasePipeline): | 44 class ProcessFlakeTryJobResultPipeline(BasePipeline): |
| 12 """A pipeline for processing a flake try job result.""" | 45 """A pipeline for processing a flake try job result.""" |
| 13 | 46 |
| 14 # Arguments number differs from overridden method - pylint: disable=W0221 | 47 # Arguments number differs from overridden method - pylint: disable=W0221 |
| 15 def run(self, revision, commit_position, try_job_result, urlsafe_try_job_key, | 48 def run(self, revision, commit_position, try_job_result, urlsafe_try_job_key, |
| 16 urlsafe_flake_analysis_key): | 49 urlsafe_flake_analysis_key): |
| 17 """Extracts pass rate information and updates flake analysis. | 50 """Extracts pass rate information and updates flake analysis. |
| 18 | 51 |
| 19 Args: | 52 Args: |
| 20 revision (str): The git hash the try job was run against. | 53 revision (str): The git hash the try job was run against. |
| 21 commit_position (int): The commit position corresponding to |revision|. | 54 commit_position (int): The commit position corresponding to |revision|. |
| 22 try_job_result (dict): The result dict reported by buildbucket. | 55 try_job_result (dict): The result dict reported by buildbucket. |
| 23 Example: | 56 Example (the 'report' dict within try_job_result):
| 24 { | 57 { |
| 25 'report': { | 58 'metadata': {}, |
| 26 'metadata': {}, | 59 'result': { |
| 27 'result': { | 60 'cafed52c5f3313646b8e04e05601b5cb98f305b3': { |
| 28 'cafed52c5f3313646b8e04e05601b5cb98f305b3': { | 61 'browser_tests': { |
| 29 'browser_tests': { | 62 'status': 'failed', |
| 30 'status': 'failed', | 63 'failures': ['TabCaptureApiTest.FullscreenEvents'], |
| 31 'failures': ['TabCaptureApiTest.FullscreenEvents'], | 64 'valid': True, |
| 32 'valid': True, | 65 'pass_fail_counts': { |
| 33 'pass_fail_counts': { | 66 'TabCaptureApiTest.FullscreenEvents': { |
| 34 'TabCaptureApiTest.FullscreenEvents': { | 67 'pass_count': 28, |
| 35 'pass_count': 28, | 68 'fail_count': 72 |
| 36 'fail_count': 72 | 69 } |
| 37 } | 70 }, |
| 38 } | 71 'step_metadata': { |
| 39 } | 72 'swarm_task_ids': [],
| 40 } | 73 ... |
| 41 } | 74 } |
| 42 } | 75 } |
| 76 } |
| 77 } |
| 78 } |
| 43 urlsafe_try_job_key (str): The urlsafe key to the corresponding try job | 79 urlsafe_try_job_key (str): The urlsafe key to the corresponding try job |
| 44 entity. | 80 entity. |
| 45 urlsafe_flake_analysis_key (str): The urlsafe key for the master flake | 81 urlsafe_flake_analysis_key (str): The urlsafe key for the master flake |
| 46 analysis entity to be updated. | 82 analysis entity to be updated. |
| 47 """ | 83 """ |
| 48 flake_analysis = ndb.Key(urlsafe=urlsafe_flake_analysis_key).get() | 84 flake_analysis = ndb.Key(urlsafe=urlsafe_flake_analysis_key).get() |
| 49 try_job = ndb.Key(urlsafe=urlsafe_try_job_key).get() | 85 try_job = ndb.Key(urlsafe=urlsafe_try_job_key).get() |
| 50 assert flake_analysis | 86 assert flake_analysis |
| 51 assert try_job | 87 assert try_job |
| 52 | 88 |
| 53 step_name = flake_analysis.step_name | 89 step_name = flake_analysis.step_name |
| 54 test_name = flake_analysis.test_name | 90 test_name = flake_analysis.test_name |
| 55 result = try_job_result['report']['result'] | 91 result = try_job_result['report']['result'] |
| 56 pass_fail_counts = result[revision][step_name].get('pass_fail_counts', {}) | 92 pass_fail_counts = result[revision][step_name].get('pass_fail_counts', {}) |
| 57 | 93 |
| 58 if pass_fail_counts: | 94 if pass_fail_counts: |
| 59 test_results = pass_fail_counts[test_name] | 95 test_results = pass_fail_counts[test_name] |
| 60 pass_count = test_results['pass_count'] | 96 pass_count = test_results['pass_count'] |
| 61 fail_count = test_results['fail_count'] | 97 fail_count = test_results['fail_count'] |
| 62 tries = pass_count + fail_count | 98 tries = pass_count + fail_count |
| 63 pass_rate = float(pass_count) / tries | 99 pass_rate = float(pass_count) / tries |
| 64 else: # Test does not exist. | 100 else: # Test does not exist. |
| 65 pass_rate = -1 | 101 pass_rate = -1 |
| 66 | 102 |
| 67 data_point = DataPoint() | 103 data_point = DataPoint() |
| 68 data_point.commit_position = commit_position | 104 data_point.commit_position = commit_position |
| 69 data_point.git_hash = revision | 105 data_point.git_hash = revision |
| 70 data_point.pass_rate = pass_rate | 106 data_point.pass_rate = pass_rate |
| 71 data_point.try_job_url = try_job.flake_results[-1].get('url') | 107 data_point.try_job_url = try_job.flake_results[-1].get('url') |
| 72 # TODO(chanli): Add swarming task data. | 108 data_point.task_id = _GetSwarmingTaskIdForTryJob( |
| 109 try_job.flake_results[-1].get('report'), revision, step_name, test_name) |
| 73 flake_analysis.data_points.append(data_point) | 110 flake_analysis.data_points.append(data_point) |
| 74 flake_analysis.put() | 111 flake_analysis.put() |
| OLD | NEW |