Index: telemetry/telemetry/internal/story_runner.py
diff --git a/telemetry/telemetry/internal/story_runner.py b/telemetry/telemetry/internal/story_runner.py
index 3e6593799bdee33f563a6a6f39e6409057b844d4..d3c2815f470948c7d0a4815750f4ea0138d19b77 100644
--- a/telemetry/telemetry/internal/story_runner.py
+++ b/telemetry/telemetry/internal/story_runner.py
@@ -383,10 +383,8 @@ def RunBenchmark(benchmark, finder_options):
         expectations=expectations)
     return_code = min(254, len(results.failures))
     # We want to make sure that all expectations are linked to real stories,
-    # but we also do not want bad expectations to stop a run completely. Doing
-    # the check here allows us to return a failure code for bad expectations,
-    # while still collecting perf data.
-    benchmark.ValidateExpectations(stories)
+    # this will log error messages if names do not match what is in the set.
+    benchmark.GetBrokenExpectations(stories)
   except Exception:
     exception_formatter.PrintFormattedException()
     return_code = 255
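
Note: the patch swaps a hard validation step (ValidateExpectations, which could fail the run) for a soft check (GetBrokenExpectations, which only logs). The real method lives on Telemetry's Benchmark class and its body is not shown in this hunk; the sketch below is only an illustration of the behavior described by the new comment, with expected_story_names standing in for however the benchmark's expectation data actually exposes its story names.

import logging


def GetBrokenExpectations(expected_story_names, story_set):
  """Illustrative sketch, not the real Telemetry implementation.

  Returns the expectation names that do not match any story in `story_set`,
  logging an error for each one instead of raising, so a stale or mistyped
  expectation does not abort the perf run.
  """
  actual_names = set(story.name for story in story_set)
  broken = [name for name in expected_story_names
            if name not in actual_names]
  for name in broken:
    # Log rather than raise: perf data is still collected for valid stories.
    logging.error('Expectation refers to unknown story: %s', name)
  return broken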