Index: frontend/planner/rpc_interface.py |
diff --git a/frontend/planner/rpc_interface.py b/frontend/planner/rpc_interface.py |
index 219436f7fd7be8809ab57161fb08dea6337c1697..a974b9f13f70e389fa63b64cc42e6afb6a6b74af 100644 |
--- a/frontend/planner/rpc_interface.py |
+++ b/frontend/planner/rpc_interface.py |
@@ -12,7 +12,8 @@ from autotest_lib.frontend import thread_local |
from autotest_lib.frontend.afe import model_logic, models as afe_models |
from autotest_lib.frontend.afe import rpc_utils as afe_rpc_utils |
from autotest_lib.frontend.tko import models as tko_models |
-from autotest_lib.frontend.planner import models, rpc_utils |
+from autotest_lib.frontend.planner import models, rpc_utils, model_attributes |
+from autotest_lib.frontend.planner import failure_actions |
from autotest_lib.client.common_lib import utils |
# basic getter/setter calls |
@@ -27,12 +28,6 @@ def modify_plan(id, **data): |
models.Plan.smart_get(id).update_object(data) |
-def get_test_runs(**filter_data): |
- return afe_rpc_utils.prepare_for_serialization( |
- [test_run.get_object_dict() for test_run |
- in models.TestRun.objects.filter(**filter_data)]) |
- |
- |
def modify_test_run(id, **data): |
models.TestRun.objects.get(id=id).update_object(data) |
@@ -70,7 +65,7 @@ def submit_plan(name, hosts, host_labels, tests, |
is_server: True if is a server-side control file |
estimated_runtime: estimated number of hours this test |
will run |
- @param support: the global support object |
+ @param support: the global support script |
@param label_override: label to prepend to all AFE jobs for this test plan. |
Defaults to the plan name. |
""" |
@@ -185,9 +180,10 @@ def get_next_test_configs(plan_id): |
rpc_utils.update_hosts_table(plan) |
for host in models.Host.objects.filter(plan=plan): |
- next_test_config_id = rpc_utils.compute_next_test_config(plan, host) |
- if next_test_config_id: |
- config = {'next_test_config_id': next_test_config_id, |
+ next_test_config = rpc_utils.compute_next_test_config(plan, host) |
+ if next_test_config: |
+ config = {'next_test_config_id': next_test_config.id, |
+ 'next_test_config_alias': next_test_config.alias, |
'host': host.host.hostname} |
result['next_configs'].append(config) |
@@ -212,7 +208,7 @@ def update_test_runs(plan_id): |
tko_test_idx: the ID of the TKO test added |
hostname: the host added |
""" |
- plan = models.Plan.objects.get(id=plan_id) |
+ plan = models.Plan.smart_get(plan_id) |
updated = [] |
for planner_job in plan.job_set.all(): |
@@ -235,3 +231,158 @@ def update_test_runs(plan_id): |
'hostname': hostname}) |
return updated |
+ |
+ |
+def get_failures(plan_id): |
+ """ |
+    Gets the untriaged failures associated with this plan |
+ |
+    @return a dictionary keyed by failure group (normally the same as |
+            the test failure reason, but possibly different for custom |
+            queries); each value is a list of dictionaries: |
+        id: the failure ID, for passing back to triage the failure |
+ machine: the failed machine |
+ blocked: True if the failure caused the machine to block |
+ test_name: Concatenation of the Planner alias and the TKO test |
+ name for the failed test |
+ reason: test failure reason |
+ seen: True if the failure is marked as "seen" |
+ """ |
+ plan = models.Plan.smart_get(plan_id) |
+ result = {} |
+ |
+ failures = plan.testrun_set.filter( |
+ finalized=True, triaged=False, |
+ status=model_attributes.TestRunStatus.FAILED) |
+ failures = failures.select_related('test_job__test', 'host__host', |
+ 'tko_test') |
+ for failure in failures: |
+ test_name = '%s:%s' % ( |
+ failure.test_job.test_config.alias, failure.tko_test.test) |
+ |
+ group_failures = result.setdefault(failure.tko_test.reason, []) |
+ failure_dict = {'id': failure.id, |
+ 'machine': failure.host.host.hostname, |
+ 'blocked': bool(failure.host.blocked), |
+ 'test_name': test_name, |
+ 'reason': failure.tko_test.reason, |
+ 'seen': bool(failure.seen)} |
+ group_failures.append(failure_dict) |
+ |
+ return result |
+ |
+ |
+def get_test_runs(**filter_data): |
+ """ |
+ Gets a list of test runs that match the filter data. |
+ |
+ Returns a list of expanded TestRun object dictionaries. Specifically, the |
+ "host" and "test_job" fields are expanded. Additionally, the "test_config" |
+ field of the "test_job" expansion is also expanded. |
+ """ |
+ result = [] |
+ for test_run in models.TestRun.objects.filter(**filter_data): |
+ test_run_dict = test_run.get_object_dict() |
+ test_run_dict['host'] = test_run.host.get_object_dict() |
+ test_run_dict['test_job'] = test_run.test_job.get_object_dict() |
+ test_run_dict['test_job']['test_config'] = ( |
+ test_run.test_job.test_config.get_object_dict()) |
+ result.append(test_run_dict) |
+ return result |
+ |
+ |
+def skip_test(test_config_id, hostname): |
+ """ |
+ Marks a test config as "skipped" for a given host |
+ """ |
+ config = models.TestConfig.objects.get(id=test_config_id) |
+ config.skipped_hosts.add(afe_models.Host.objects.get(hostname=hostname)) |
+ |
+ |
+def mark_failures_as_seen(failure_ids): |
+ """ |
+ Marks a set of failures as 'seen' |
+ |
+ @param failure_ids: A list of failure IDs, as returned by get_failures(), to |
+ mark as seen |
+ """ |
+ models.TestRun.objects.filter(id__in=failure_ids).update(seen=True) |
+ |
+ |
+def process_failure(failure_id, host_action, test_action, labels=(), |
+ keyvals=None, bugs=(), reason=None, invalidate=False): |
+ """ |
+ Triage a failure |
+ |
+ @param failure_id: The failure ID, as returned by get_failures() |
+ @param host_action: One of 'Block', 'Unblock', 'Reinstall' |
+ @param test_action: One of 'Skip', 'Rerun' |
+ |
+ @param labels: Test labels to apply, by name |
+ @param keyvals: Dictionary of job keyvals to add (or replace) |
+ @param bugs: List of bug IDs to associate with this failure |
+ @param reason: An override for the test failure reason |
+ @param invalidate: True if failure should be invalidated for the purposes of |
+ reporting. Defaults to False. |
+ """ |
+ if keyvals is None: |
+ keyvals = {} |
+ |
+ host_choices = failure_actions.HostAction.values |
+ test_choices = failure_actions.TestAction.values |
+ if host_action not in host_choices: |
+ raise model_logic.ValidationError( |
+ {'host_action': ('host action %s not valid; must be one of %s' |
+ % (host_action, ', '.join(host_choices)))}) |
+ if test_action not in test_choices: |
+ raise model_logic.ValidationError( |
+ {'test_action': ('test action %s not valid; must be one of %s' |
+ % (test_action, ', '.join(test_choices)))}) |
+ |
+ failure = models.TestRun.objects.get(id=failure_id) |
+ |
+ rpc_utils.process_host_action(failure.host, host_action) |
+ rpc_utils.process_test_action(failure.test_job, test_action) |
+ |
+ # Add the test labels |
+ for label in labels: |
+ tko_test_label, _ = ( |
+ tko_models.TestLabel.objects.get_or_create(name=label)) |
+ failure.tko_test.testlabel_set.add(tko_test_label) |
+ |
+ # Set the job keyvals |
+ for key, value in keyvals.iteritems(): |
+ keyval, created = tko_models.JobKeyval.objects.get_or_create( |
+ job=failure.tko_test.job, key=key) |
+ if not created: |
+ tko_models.JobKeyval.objects.create(job=failure.tko_test.job, |
+ key='original_' + key, |
+ value=keyval.value) |
+ keyval.value = value |
+ keyval.save() |
+ |
+ # Add the bugs |
+ for bug_id in bugs: |
+ bug, _ = models.Bug.objects.get_or_create(external_uid=bug_id) |
+ failure.bugs.add(bug) |
+ |
+ # Set the failure reason |
+ if reason is not None: |
+ tko_models.TestAttribute.objects.create(test=failure.tko_test, |
+ attribute='original_reason', |
+ value=failure.tko_test.reason) |
+ failure.tko_test.reason = reason |
+ failure.tko_test.save() |
+ |
+ # Set 'invalidated', 'seen', and 'triaged' |
+ failure.invalidated = invalidate |
+ failure.seen = True |
+ failure.triaged = True |
+ failure.save() |
+ |
+ |
+def get_static_data(): |
+ result = {'motd': afe_rpc_utils.get_motd(), |
+ 'host_actions': sorted(failure_actions.HostAction.values), |
+ 'test_actions': sorted(failure_actions.TestAction.values)} |
+ return result |