Index: tools/telemetry/telemetry/results/json_output_formatter_unittest.py |
diff --git a/tools/telemetry/telemetry/results/json_output_formatter_unittest.py b/tools/telemetry/telemetry/results/json_output_formatter_unittest.py |
index 8c5af58e07ecb990163c90fb75e3caaae640911e..5601d01cd4e7c2b5ec8a49ae7d0dfee292dd2f39 100644 |
--- a/tools/telemetry/telemetry/results/json_output_formatter_unittest.py |
+++ b/tools/telemetry/telemetry/results/json_output_formatter_unittest.py |
@@ -3,14 +3,18 @@ |
# found in the LICENSE file. |
import json |
import os |
+import shutil |
import StringIO |
+import tempfile |
import unittest |
from telemetry import benchmark |
from telemetry.page import page_set |
from telemetry.results import json_output_formatter |
from telemetry.results import page_test_results |
from telemetry.value import scalar |
+from telemetry.value import trace |
+from telemetry.timeline import tracing_timeline_data |
def _MakePageSet(): |
@@ -28,10 +32,15 @@ def _HasValueNamed(values, name): |
class JsonOutputFormatterTest(unittest.TestCase): |
def setUp(self): |
self._output = StringIO.StringIO() |
+ self._output_dir = tempfile.mkdtemp() |
self._page_set = _MakePageSet() |
- self._formatter = json_output_formatter.JsonOutputFormatter(self._output, |
+ self._formatter = json_output_formatter.JsonOutputFormatter( |
+ self._output, self._output_dir, |
benchmark.BenchmarkMetadata('benchmark_name')) |
+ def tearDown(self): |
+ shutil.rmtree(self._output_dir) |
+ |
def testOutputAndParse(self): |
results = page_test_results.PageTestResults() |
@@ -48,7 +57,7 @@ class JsonOutputFormatterTest(unittest.TestCase): |
def testAsDictBaseKeys(self): |
results = page_test_results.PageTestResults() |
d = json_output_formatter.ResultsAsDict(results, |
- self._formatter.benchmark_metadata) |
+ self._formatter.benchmark_metadata, self._output_dir) |
self.assertEquals(d['format_version'], '0.2') |
self.assertEquals(d['benchmark_name'], 'benchmark_name') |
@@ -61,11 +70,31 @@ class JsonOutputFormatterTest(unittest.TestCase): |
results.DidRunPage(self._page_set[0]) |
d = json_output_formatter.ResultsAsDict(results, |
- self._formatter.benchmark_metadata) |
+ self._formatter.benchmark_metadata, self._output_dir) |
self.assertTrue(_HasPage(d['pages'], self._page_set[0])) |
self.assertTrue(_HasValueNamed(d['per_page_values'], 'foo')) |
+ def testAsDictWithTraceValue(self): |
+ results = page_test_results.PageTestResults() |
+ results.WillRunPage(self._page_set[0]) |
+ v0 = trace.TraceValue( |
+ results.current_page, |
+ tracing_timeline_data.TracingTimelineData({'event': 'test'})) |
+ results.AddValue(v0) |
+ results.DidRunPage(self._page_set[0]) |
+ |
+ d = json_output_formatter.ResultsAsDict(results, |
+ self._formatter.benchmark_metadata, self._output_dir) |
+ |
+ self.assertTrue(_HasPage(d['pages'], self._page_set[0])) |
+ self.assertTrue(_HasValueNamed(d['per_page_values'], 'trace')) |
+ self.assertEquals(len(d['files']), 1) |
+ output_trace_path = d['files'].values()[0] |
+ self.assertTrue(output_trace_path.startswith(self._output_dir)) |
+ self.assertTrue(os.path.exists(output_trace_path)) |
+ |
+ |
def testAsDictWithTwoPages(self): |
results = page_test_results.PageTestResults() |
results.WillRunPage(self._page_set[0]) |
@@ -79,7 +108,7 @@ class JsonOutputFormatterTest(unittest.TestCase): |
results.DidRunPage(self._page_set[1]) |
d = json_output_formatter.ResultsAsDict(results, |
- self._formatter.benchmark_metadata) |
+ self._formatter.benchmark_metadata, self._output_dir) |
self.assertTrue(_HasPage(d['pages'], self._page_set[0])) |
self.assertTrue(_HasPage(d['pages'], self._page_set[1])) |
@@ -92,7 +121,7 @@ class JsonOutputFormatterTest(unittest.TestCase): |
results.AddSummaryValue(v) |
d = json_output_formatter.ResultsAsDict(results, |
- self._formatter.benchmark_metadata) |
+ self._formatter.benchmark_metadata, self._output_dir) |
self.assertFalse(d['pages']) |
self.assertTrue(_HasValueNamed(d['summary_values'], 'baz')) |
self.assertFalse(d['pages']) |
self.assertTrue(_HasValueNamed(d['summary_values'], 'baz')) |