Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(926)

Unified Diff: appengine/findit/waterfall/flake/recursive_flake_pipeline.py

Issue 2243673002: [Findit] Added algorithm to analysis (Closed) Base URL: https://chromium.googlesource.com/infra/infra.git@master
Patch Set: Created 4 years, 4 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View side-by-side diff with in-line comments
Download patch
Index: appengine/findit/waterfall/flake/recursive_flake_pipeline.py
diff --git a/appengine/findit/waterfall/flake/recursive_flake_pipeline.py b/appengine/findit/waterfall/flake/recursive_flake_pipeline.py
index 332a54897f63632062efe28164a47b8612d01d08..d192f30014ac8d85b45c1a5c783728b845b7c315 100644
--- a/appengine/findit/waterfall/flake/recursive_flake_pipeline.py
+++ b/appengine/findit/waterfall/flake/recursive_flake_pipeline.py
@@ -3,13 +3,13 @@
# found in the LICENSE file.
from datetime import datetime
chanli 2016/08/11 23:59:39 datetime should be in a separate group
caiw 2016/08/13 00:09:15 Done.
-
from common import appengine_util
from common import constants
from common.pipeline_wrapper import BasePipeline
from model import analysis_status
from model.flake.master_flake_analysis import MasterFlakeAnalysis
+from model.flake.flake_swarming_task import FlakeSwarmingTask
from waterfall.trigger_flake_swarming_task_pipeline import (
TriggerFlakeSwarmingTaskPipeline)
from waterfall.process_flake_swarming_task_result_pipeline import (
@@ -20,37 +20,73 @@ class RecursiveFlakePipeline(BasePipeline):
# Arguments number differs from overridden method - pylint: disable=W0221
def run(self, master_name, builder_name, run_build_number, step_name,
- test_name, master_build_number, queue_name=constants.DEFAULT_QUEUE):
+ test_name, master_build_number, algo_dict,
+ queue_name=constants.DEFAULT_QUEUE):
chanli 2016/08/11 23:59:39 Could you add a docstring to explain the args? For example, what algo_dict is and which keys it is expected to contain.
caiw 2016/08/13 00:09:15 Done.
# Call trigger pipeline (flake style).
- task_id = yield TriggerFlakeSwarmingTaskPipeline(master_name, builder_name,
- run_build_number, step_name, [test_name])
+ task_id = yield TriggerFlakeSwarmingTaskPipeline(
+ master_name, builder_name, run_build_number, step_name, [test_name])
# Pass the trigger pipeline into a process pipeline.
test_result_future = yield ProcessFlakeSwarmingTaskResultPipeline(
master_name, builder_name, run_build_number,
step_name, task_id, master_build_number, test_name)
yield NextBuildNumberPipeline(
- master_name, builder_name, master_build_number,
- step_name, test_name, test_result_future, queue_name)
+ master_name, builder_name, master_build_number, run_build_number,
+ step_name, test_name, test_result_future, queue_name, algo_dict)
+
class NextBuildNumberPipeline(BasePipeline):
+
# Arguments number differs from overridden method - pylint: disable=W0221
# Unused argument - pylint: disable=W0613
- def run(self, master_name, builder_name, master_build_number, step_name,
- test_name, test_result_future, queue_name):
+ def run(self, master_name, builder_name, master_build_number,
+ run_build_number, step_name, test_name, test_result_future,
+ queue_name, algo_dict):
+
# Get MasterFlakeAnalysis success list corresponding to parameters.
master = MasterFlakeAnalysis.Get(master_name, builder_name,
master_build_number, step_name, test_name)
+
+ # Don't call another pipeline if we fail.
+ flake_swarming_task = FlakeSwarmingTask.Get(
+ master_name, builder_name, run_build_number, step_name, test_name)
+ if flake_swarming_task.status == analysis_status.ERROR:
+ return
+
# Figure out what build_number we should call, if any
# This is a placeholder for testing:
- next_run = False
- if len(master.build_numbers) < 10:
- # TODO(caiw): Develop algorithm to optimize this.
- next_run = min(master.build_numbers) - 10
+
+ # Get the last result.
+ last_result = master.success_rates[-1]
+ if last_result < .02 or last_result > .98:
lijeffrey 2016/08/11 22:22:25 move all these values to constants at the top of the file
caiw 2016/08/13 00:09:15 Done.
+ algo_dict['stable_in_a_row'] += 1
+ if algo_dict['stable_in_a_row'] > 4:
+ algo_dict['stabled_out'] = True
+ algo_dict['flakes_in_a_row'] = 0
+ step_size = algo_dict['stable_in_a_row'] + 1
+ else:
+ algo_dict['flakes_in_a_row'] += 1
+ if algo_dict['flakes_in_a_row'] > 4:
+ algo_dict['flaked_out'] = True
+ algo_dict['stable_in_a_row'] = 0
+ step_size = algo_dict['flakes_in_a_row'] + 1
+ next_run = min(master.build_numbers) - step_size
+
+ if (next_run < algo_dict['last_build_number'] or
+ (algo_dict['stabled_out'] and algo_dict['flaked_out'])):
+ next_run = False
chanli 2016/08/11 23:59:39 Maybe set next_run to 0 or -1 instead of False?
caiw 2016/08/13 00:09:15 Done.
+
if next_run:
+ new_algo_dict = {
lijeffrey 2016/08/11 22:22:25 it looks like this is a direct copy. You can do new_algo_dict = algo_dict.copy() instead.
caiw 2016/08/13 00:09:15 Done.
+ 'flakes_in_a_row': algo_dict['flakes_in_a_row'],
+ 'stable_in_a_row': algo_dict['stable_in_a_row'],
+ 'stabled_out': algo_dict['stabled_out'],
+ 'flaked_out': algo_dict['flaked_out'],
+ 'last_build_number': algo_dict['last_build_number']
+ }
pipeline_job = RecursiveFlakePipeline(
master_name, builder_name, next_run, step_name, test_name,
- master_build_number)
- #pylint: disable=W0201
+ master_build_number, algo_dict=new_algo_dict)
+ # pylint: disable=W0201
pipeline_job.target = appengine_util.GetTargetNameForModule(
constants.WATERFALL_BACKEND)
pipeline_job.start(queue_name=queue_name)

Powered by Google App Engine
This is Rietveld 408576698