# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import json

from common.git_repository import GitRepository
from common.pipeline_wrapper import BasePipeline
from common.pipeline_wrapper import pipeline_handlers
from common.waterfall import buildbucket_client
from model import result_status
from model.wf_analysis import WfAnalysis
from model.wf_swarming_task import WfSwarmingTask
from model.wf_try_job import WfTryJob
from waterfall import send_notification_for_culprit_pipeline
from waterfall import swarming_util
from waterfall import trigger_swarming_task_pipeline
from waterfall.swarming_task_request import SwarmingTaskRequest
from waterfall.swarming_tasks_to_try_job_pipeline import (
    SwarmingTasksToTryJobPipeline)
from waterfall.test import wf_testcase
from waterfall.try_job_type import TryJobType

| 25 _ISOLATED_SERVER = 'https://isolateserver.appspot.com' | |
| 26 _ISOLATED_STORAGE_URL = 'isolateserver.storage.googleapis.com' | |
| 27 _SAMPLE_FAILURE_LOG = { | |
| 28 'per_iteration_data': [ | |
| 29 { | |
| 30 'TestSuite1.test1': [ | |
| 31 { | |
| 32 'status': 'SUCCESS', | |
| 33 'other_info': 'N/A' | |
| 34 } | |
| 35 ], | |
| 36 'TestSuite1.test2': [ | |
| 37 { | |
| 38 'status': 'FAILURE', | |
| 39 'other_info': 'N/A' | |
| 40 }, | |
| 41 { | |
| 42 'status': 'FAILURE', | |
| 43 'other_info': 'N/A' | |
| 44 }, | |
| 45 { | |
| 46 'status': 'SUCCESS', | |
| 47 'other_info': 'N/A' | |
| 48 } | |
| 49 ], | |
| 50 'TestSuite1.test3': [ | |
| 51 { | |
| 52 'status': 'FAILURE', | |
| 53 'other_info': 'N/A' | |
| 54 }, | |
| 55 { | |
| 56 'status': 'FAILURE', | |
| 57 'other_info': 'N/A' | |
| 58 }, | |
| 59 { | |
| 60 'status': 'FAILURE', | |
| 61 'other_info': 'N/A' | |
| 62 } | |
| 63 ] | |
| 64 }, | |
| 65 { | |
| 66 'TestSuite1.test1': [ | |
| 67 { | |
| 68 'status': 'SUCCESS', | |
| 69 'other_info': 'N/A' | |
| 70 } | |
| 71 ], | |
| 72 'TestSuite1.test2': [ | |
| 73 { | |
| 74 'status': 'SUCCESS', | |
| 75 'other_info': 'N/A' | |
| 76 } | |
| 77 ], | |
| 78 'TestSuite1.test3': [ | |
| 79 { | |
| 80 'status': 'FAILURE', | |
| 81 'other_info': 'N/A' | |
| 82 } | |
| 83 ] | |
| 84 } | |
| 85 ] | |
| 86 } | |


| 89 class SwarmingTasksToTryJobPipelineTest(wf_testcase.WaterfallTestCase): | |
| 90 app_module = pipeline_handlers._APP | |
| 91 | |
| 92 def _MockTriggerTryJobs(self, responses): | |
| 93 def MockedTriggerTryJobs(*_): | |
| 94 try_job_results = [] | |
| 95 for response in responses: | |
| 96 if response.get('error'): # pragma: no cover | |
| 97 try_job_results.append(( | |
| 98 buildbucket_client.BuildbucketError(response['error']), None)) | |
| 99 else: | |
| 100 try_job_results.append(( | |
| 101 None, buildbucket_client.BuildbucketBuild(response['build']))) | |
| 102 return try_job_results | |
| 103 self.mock(buildbucket_client, 'TriggerTryJobs', MockedTriggerTryJobs) | |
| 104 | |
| 105 def _MockGetTryJobs(self, build_id): | |
| 106 def MockedGetTryJobs(*_): | |
| 107 data = { | |
| 108 '1': { | |
| 109 'build': { | |
| 110 'id': '1', | |
| 111 'url': 'url', | |
| 112 'status': 'COMPLETED', | |
| 113 'result_details_json': json.dumps({ | |
| 114 'properties': { | |
| 115 'report': { | |
| 116 'result': { | |
| 117 'rev1': 'passed', | |
| 118 'rev2': 'failed' | |
| 119 }, | |
| 120 'metadata': { | |
| 121 'regression_range_size': 2 | |
| 122 } | |
| 123 } | |
| 124 } | |
| 125 }) | |
| 126 } | |
| 127 }, | |
| 128 '2': { | |
| 129 'build': { | |
| 130 'id': '2', | |
| 131 'url': 'url', | |
| 132 'status': 'COMPLETED', | |
| 133 'result_details_json': json.dumps({ | |
| 134 'properties': { | |
| 135 'report': { | |
| 136 'result': { | |
| 137 'rev1': { | |
| 138 'a_test': { | |
| 139 'status': 'failed', | |
| 140 'valid': True, | |
| 141 'failures': ['TestSuite1.test3'] | |
| 142 }, | |
| 143 'b_test': { | |
| 144 'status': 'passed', | |
| 145 'valid': True, | |
| 146 'failures': [], | |
| 147 }, | |
| 148 } | |
| 149 }, | |
| 150 'metadata': { | |
| 151 'regression_range_size': 2 | |
| 152 } | |
| 153 } | |
| 154 } | |
| 155 }) | |
| 156 } | |
| 157 }, | |
| 158 '3': { | |
| 159 'error': { | |
| 160 'reason': 'BUILD_NOT_FOUND', | |
| 161 'message': 'message', | |
| 162 } | |
| 163 } | |
| 164 } | |
| 165 try_job_results = [] | |
| 166 build_error = data.get(build_id) | |
| 167 if build_error.get('error'): # pragma: no cover | |
| 168 try_job_results.append(( | |
| 169 buildbucket_client.BuildbucketError(build_error['error']), None)) | |
| 170 else: | |
| 171 try_job_results.append(( | |
| 172 None, buildbucket_client.BuildbucketBuild(build_error['build']))) | |
| 173 return try_job_results | |
| 174 self.mock(buildbucket_client, 'GetTryJobs', MockedGetTryJobs) | |
| 175 | |
| 176 def _MockGetChangeLog(self, revision): | |
| 177 def MockedGetChangeLog(*_): | |
| 178 class MockedChangeLog(object): | |
| 179 | |
| 180 def __init__(self, commit_position, code_review_url): | |
| 181 self.commit_position = commit_position | |
| 182 self.code_review_url = code_review_url | |
| 183 | |
| 184 mock_change_logs = {} | |
| 185 mock_change_logs['rev1'] = MockedChangeLog('1', 'url_1') | |
| 186 mock_change_logs['rev2'] = MockedChangeLog('2', 'url_2') | |
| 187 return mock_change_logs.get(revision) | |
| 188 self.mock(GitRepository, 'GetChangeLog', MockedGetChangeLog) | |
| 189 | |
| 190 def _Mock_SendNotificationForCulpritPipeline(self): | |
| 191 class Mocked_Pipeline(BasePipeline): | |
| 192 def run(self, *args, **kwargs): # unused arg - pylint: disable=W0612 | |
| 193 pass | |
| 194 self.mock(send_notification_for_culprit_pipeline, | |
| 195 'SendNotificationForCulpritPipeline', Mocked_Pipeline) | |
| 196 | |
| 197 def testSuccessfullyScheduleNewTryJobForCompile(self): | |
| 198 master_name = 'm' | |
| 199 builder_name = 'b' | |
| 200 build_number = 1 | |
| 201 | |
| 202 responses = [ | |
| 203 { | |
| 204 'build': { | |
| 205 'id': '1', | |
| 206 'url': 'url', | |
| 207 'status': 'SCHEDULED', | |
| 208 } | |
| 209 } | |
| 210 ] | |
| 211 self._MockTriggerTryJobs(responses) | |
| 212 self._MockGetTryJobs('1') | |
| 213 self._MockGetChangeLog('rev2') | |
| 214 self._Mock_SendNotificationForCulpritPipeline() | |
| 215 | |
| 216 WfTryJob.Create(master_name, builder_name, build_number).put() | |
| 217 analysis = WfAnalysis.Create(master_name, builder_name, build_number) | |
| 218 analysis.put() | |
| 219 | |
| 220 root_pipeline = SwarmingTasksToTryJobPipeline( | |
| 221 master_name, builder_name, build_number, 'rev1', 'rev2', ['rev2'], | |
| 222 TryJobType.COMPILE) | |
| 223 root_pipeline.start() | |
| 224 self.execute_queued_tasks() | |
| 225 | |
| 226 try_job = WfTryJob.Get(master_name, builder_name, build_number) | |
| 227 analysis = WfAnalysis.Get(master_name, builder_name, build_number) | |
| 228 | |
| 229 expected_suspected_cl = { | |
| 230 'revision': 'rev2', | |
| 231 'commit_position': '2', | |
| 232 'url': 'url_2', | |
| 233 'repo_name': 'chromium' | |
| 234 } | |
| 235 | |
| 236 expected_try_job_results = [ | |
| 237 { | |
| 238 'report': { | |
| 239 'result': { | |
| 240 'rev1': 'passed', | |
| 241 'rev2': 'failed' | |
| 242 }, | |
| 243 'metadata': { | |
| 244 'regression_range_size': 2 | |
| 245 } | |
| 246 }, | |
| 247 'url': 'url', | |
| 248 'try_job_id': '1', | |
| 249 'culprit': { | |
| 250 'compile': expected_suspected_cl | |
| 251 } | |
| 252 } | |
| 253 ] | |
| 254 | |
| 255 self.assertEqual(expected_try_job_results, try_job.compile_results) | |
| 256 self.assertEqual(analysis.result_status, | |
| 257 result_status.FOUND_UNTRIAGED) | |
| 258 self.assertEqual(analysis.suspected_cls, | |
| 259 [expected_suspected_cl]) | |
| 260 | |
| 261 def testSuccessfullyScheduleNewTryJobForTest(self): | |
| 262 master_name = 'm' | |
| 263 builder_name = 'b' | |
| 264 build_number = 1 | |
| 265 targeted_tests = { | |
| 266 'a_test': ['TestSuite1.test1', 'TestSuite1.test3'], | |
| 267 'b_test': [], # Non-swarming test. | |
| 268 } | |
| 269 | |
| 270 # Mocks for ProcessSwarmingTaskResultPipeline. | |
| 271 def MockedGetSwarmingTaskResultById(task_id, _): | |
| 272 swarming_task_results = { | |
| 273 'task_id1': { | |
| 274 'state': 'COMPLETED', | |
| 275 'outputs_ref': { | |
| 276 'isolatedserver': _ISOLATED_SERVER, | |
| 277 'namespace': 'default-gzip', | |
| 278 'isolated': 'shard1_isolated' | |
| 279 } | |
| 280 } | |
| 281 } | |
| 282 mocked_result = swarming_task_results.get(task_id) | |
| 283 return mocked_result | |
| 284 self.mock(swarming_util, 'GetSwarmingTaskResultById', | |
| 285 MockedGetSwarmingTaskResultById) | |
| 286 | |
| 287 def MockedGetSwarmingTaskFailureLog(*_): | |
| 288 return _SAMPLE_FAILURE_LOG | |
| 289 self.mock(swarming_util, 'GetSwarmingTaskFailureLog', | |
| 290 MockedGetSwarmingTaskFailureLog) | |
| 291 | |
| 292 # Mocks for try job pipelines. | |
| 293 responses = [ | |
| 294 { | |
| 295 'build': { | |
| 296 'id': '2', | |
| 297 'url': 'url', | |
| 298 'status': 'SCHEDULED', | |
| 299 } | |
| 300 } | |
| 301 ] | |
| 302 self._MockTriggerTryJobs(responses) | |
| 303 self._MockGetTryJobs('2') | |
| 304 self._MockGetChangeLog('rev1') | |
| 305 self._Mock_SendNotificationForCulpritPipeline() | |
| 306 | |
| 307 task = WfSwarmingTask.Create( | |
| 308 master_name, builder_name, build_number, 'a_test') | |
| 309 task.task_id = 'task_id1' | |
| 310 task.put() | |
| 311 WfTryJob.Create(master_name, builder_name, build_number).put() | |
| 312 analysis = WfAnalysis.Create(master_name, builder_name, build_number) | |
| 313 analysis.put() | |
| 314 | |
| 315 root_pipeline = SwarmingTasksToTryJobPipeline( | |
| 316 master_name, builder_name, build_number, 'rev0', 'rev1', ['rev1'], | |
| 317 TryJobType.TEST, None, targeted_tests) | |
| 318 root_pipeline.start() | |
| 319 self.execute_queued_tasks() | |
| 320 | |
| 321 try_job = WfTryJob.Get(master_name, builder_name, build_number) | |
| 322 analysis = WfAnalysis.Get(master_name, builder_name, build_number) | |
| 323 | |
| 324 expected_suspected_cl = { | |
| 325 'revision': 'rev1', | |
| 326 'commit_position': '1', | |
| 327 'url': 'url_1', | |
| 328 'repo_name': 'chromium' | |
| 329 } | |
| 330 | |
| 331 expected_try_job_results = [ | |
| 332 { | |
| 333 'report': { | |
| 334 'result': { | |
| 335 'rev1': { | |
| 336 'a_test': { | |
| 337 'status': 'failed', | |
| 338 'valid': True, | |
| 339 'failures': ['TestSuite1.test3'] | |
| 340 }, | |
| 341 'b_test': { | |
| 342 'status': 'passed', | |
| 343 'valid': True, | |
| 344 'failures': [], | |
| 345 }, | |
| 346 } | |
| 347 }, | |
| 348 'metadata': { | |
| 349 'regression_range_size': 2 | |
| 350 } | |
| 351 }, | |
| 352 'url': 'url', | |
| 353 'try_job_id': '2', | |
| 354 'culprit': { | |
| 355 'a_test': { | |
| 356 'tests': { | |
| 357 'TestSuite1.test3': expected_suspected_cl | |
| 358 } | |
| 359 } | |
| 360 } | |
| 361 } | |
| 362 ] | |
| 363 | |
| 364 self.assertEqual(expected_try_job_results, try_job.test_results) | |
| 365 self.assertEqual(analysis.result_status, | |
| 366 result_status.FOUND_UNTRIAGED) | |
| 367 self.assertEqual(analysis.suspected_cls, [expected_suspected_cl]) | |