Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(59)

Side by Side Diff: appengine/findit/waterfall/flake/initialize_flake_pipeline.py

Issue 2438673004: [Findit] Post analysis results of flakes to bug filed by chromium-try-flakes. (Closed)
Patch Set: Add a config flag to enable/disable updating monorail bug. Created 4 years, 1 month ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 # Copyright 2016 The Chromium Authors. All rights reserved. 1 # Copyright 2016 The Chromium Authors. All rights reserved.
2 # Use of this source code is governed by a BSD-style license that can be 2 # Use of this source code is governed by a BSD-style license that can be
3 # found in the LICENSE file. 3 # found in the LICENSE file.
4 4
5 import logging 5 import logging
6 6
7 from common import appengine_util 7 from common import appengine_util
8 from common import constants 8 from common import constants
9 from common import time_util 9 from common import time_util
10 from model import analysis_status 10 from model import analysis_status
11 from model.flake.master_flake_analysis import MasterFlakeAnalysis 11 from model.flake.master_flake_analysis import MasterFlakeAnalysis
12 from waterfall import waterfall_config 12 from waterfall import waterfall_config
13 from waterfall.flake import triggering_sources 13 from waterfall.flake import triggering_sources
14 from waterfall.flake.recursive_flake_pipeline import RecursiveFlakePipeline 14 from waterfall.flake.recursive_flake_pipeline import RecursiveFlakePipeline
15 15
16 16
def _NeedANewAnalysis(
    normalized_test, original_test, algorithm_parameters,
    bug_id=None, allow_new_analysis=False, force=False,
    user_email='', triggering_source=triggering_sources.FINDIT_PIPELINE):
  """Checks status of analysis for the test and decides if a new one is needed.

  A MasterFlakeAnalysis entity for the given parameters will be created if none
  exists. When a new analysis is needed, this function will create and
  save a MasterFlakeAnalysis entity to the datastore.

  Args:
    normalized_test (TestInfo): Info of the normalized flaky test after mapping
      a CQ trybot step to a Waterfall buildbot step, stripping prefix "PRE_"
      from a gtest, etc.
    original_test (TestInfo): Info of the original flaky test.
    algorithm_parameters (dict): Algorithm parameters to run the analysis.
    bug_id (int): The monorail bug id to update when analysis is done.
    allow_new_analysis (bool): Indicate whether a new analysis is allowed.
    force (bool): Indicate whether to force a rerun of current analysis.
    user_email (str): The user triggering this analysis.
    triggering_source (int): The source from which this analysis was triggered.

  Returns:
    (need_new_analysis, analysis)
    need_new_analysis (bool): True if an analysis is needed, otherwise False.
    analysis (MasterFlakeAnalysis): The MasterFlakeAnalysis entity.
  """
  # Look up any existing analysis keyed by the normalized test location.
  analysis = MasterFlakeAnalysis.GetVersion(
      normalized_test.master_name, normalized_test.builder_name,
      normalized_test.build_number, normalized_test.step_name,
      normalized_test.test_name)

  def PopulateAnalysisInfo(analysis):
    # Reset the entity and stamp it with the metadata for this (re)run,
    # including where the flake was originally observed and the bug to update.
    analysis.Reset()
    analysis.request_time = time_util.GetUTCNow()
    analysis.status = analysis_status.PENDING
    analysis.algorithm_parameters = algorithm_parameters
    analysis.version = appengine_util.GetCurrentVersion()
    analysis.triggering_user_email = user_email
    analysis.triggering_source = triggering_source
    analysis.original_master_name = original_test.master_name
    analysis.original_builder_name = original_test.builder_name
    analysis.original_build_number = original_test.build_number
    analysis.original_step_name = original_test.step_name
    analysis.original_test_name = original_test.test_name
    analysis.bug_id = bug_id

  if not analysis:
    # No prior analysis: create one only if the caller is allowed to.
    if not allow_new_analysis:
      return False, None
    analysis = MasterFlakeAnalysis.Create(
        normalized_test.master_name, normalized_test.builder_name,
        normalized_test.build_number, normalized_test.step_name,
        normalized_test.test_name)
    PopulateAnalysisInfo(analysis)
    _, saved = analysis.Save()
    return saved, analysis
  elif (analysis.status == analysis_status.PENDING or
        analysis.status == analysis_status.RUNNING):
    # An analysis is already queued or in progress; don't start another.
    return False, analysis
  elif allow_new_analysis and force and analysis.status in (
      analysis_status.ERROR, analysis_status.COMPLETED):
    # A forced rerun of a finished (or failed) analysis: reset and resave.
    PopulateAnalysisInfo(analysis)
    _, saved = analysis.Save()
    return saved, analysis
  else:
    return False, analysis
76 85
def ScheduleAnalysisIfNeeded(
    normalized_test, original_test, bug_id=None,
    allow_new_analysis=False, force=False, manually_triggered=False,
    user_email=None, triggering_source=triggering_sources.FINDIT_PIPELINE,
    queue_name=constants.DEFAULT_QUEUE):
  """Schedules an analysis if needed and returns the MasterFlakeAnalysis.

  When the build failure was already analyzed and a new analysis is scheduled,
  the returned WfAnalysis will still have the result of last completed analysis.

  Args:
    normalized_test (TestInfo): Info of the normalized flaky test after mapping
      a CQ trybot step to a Waterfall buildbot step, stripping prefix "PRE_"
      from a gtest, etc.
    original_test (TestInfo): Info of the original flaky test.
    bug_id (int): The monorail bug id to update when analysis is done.
    allow_new_analysis (bool): Indicate whether a new analysis is allowed.
    force (bool): Indicate whether to force a rerun of current analysis.
    manually_triggered (bool): True if the analysis was requested manually,
      such as by a Chromium sheriff.
    user_email (str): The email of the user requesting the analysis.
    triggering_source (int): From where this analysis was triggered, such as
      through Findit pipeline, UI, or through Findit API.
    queue_name (str): The App Engine queue to run the analysis.

  Returns:
    A MasterFlakeAnalysis instance.
    None if no analysis was scheduled and the user has no permission to.
  """
  algorithm_parameters = waterfall_config.GetCheckFlakeSettings()

  # NOTE(review): the default here is user_email=None while _NeedANewAnalysis
  # declares user_email='' — confirm downstream consumers of
  # triggering_user_email tolerate None.
  need_new_analysis, analysis = _NeedANewAnalysis(
      normalized_test, original_test, algorithm_parameters, bug_id=bug_id,
      allow_new_analysis=allow_new_analysis, force=force, user_email=user_email,
      triggering_source=triggering_source)

  if need_new_analysis:
    # _NeedANewAnalysis just created master_flake_analysis. Use the latest
    # version number and pass that along to the other pipelines for updating
    # results and data.
    logging.info(
        'A new master flake analysis was successfully saved for %s (%s) and '
        'will be captured in version %s', repr(normalized_test),
        repr(original_test), analysis.version_number)

    max_build_numbers_to_look_back = algorithm_parameters.get(
        'max_build_numbers_to_look_back')
    # Seed state for the regression-range search over earlier builds.
    flakiness_algorithm_results_dict = {
        'flakes_in_a_row': 0,
        'stable_in_a_row': 0,
        'stabled_out': False,
        'flaked_out': False,
        'last_build_number': max(
            0, normalized_test.build_number - max_build_numbers_to_look_back),
        'lower_boundary': None,
        'upper_boundary': None,
        'lower_boundary_result': None,
        'sequential_run_index': 0
    }

    pipeline_job = RecursiveFlakePipeline(
        normalized_test.master_name, normalized_test.builder_name,
        normalized_test.build_number, normalized_test.step_name,
        normalized_test.test_name, analysis.version_number,
        master_build_number=normalized_test.build_number,
        flakiness_algorithm_results_dict=flakiness_algorithm_results_dict,
        manually_triggered=manually_triggered)
    # Run the analysis on the waterfall backend module, off PST peak hours.
    pipeline_job.target = appengine_util.GetTargetNameForModule(
        constants.WATERFALL_BACKEND)
    pipeline_job.StartOffPSTPeakHours(queue_name=queue_name)

  return analysis
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698