Chromium Code Reviews

Unified Diff: tracing/tracing/metrics/compare_samples_unittest.py

Issue 2089833002: Entry point for bisect sample comparison. (Closed) Base URL: https://github.com/catapult-project/catapult.git@mann
Patch Set: Adding tests that pass Created 4 years, 5 months ago
Index: tracing/tracing/metrics/compare_samples_unittest.py
diff --git a/tracing/tracing/metrics/compare_samples_unittest.py b/tracing/tracing/metrics/compare_samples_unittest.py
new file mode 100644
index 0000000000000000000000000000000000000000..b6dd68356aad97149df0e65a86cdace9bb86b386
--- /dev/null
+++ b/tracing/tracing/metrics/compare_samples_unittest.py
@@ -0,0 +1,150 @@
+# Copyright (c) 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import json
+import os
+import random
+import tempfile
+import unittest
+
+from tracing.metrics import compare_samples
+
+class CompareSamplesUnittest(unittest.TestCase):
+  def setUp(self):
+    self._tempfiles = []
+    self._tempdir = tempfile.mkdtemp()
+
+  def tearDown(self):
+    for tf in self._tempfiles:
+      try:
+        os.remove(tf)
+      except OSError:
+        pass
+    try:
+      os.rmdir(self._tempdir)
+    except OSError:
+      pass
+
+  def NewJsonTempfile(self, jsonable_contents):
+    """Dumps |jsonable_contents| to a temp .json file and returns its path."""
+    _, new_json_file = tempfile.mkstemp(
+        suffix='.json',
+        dir=self._tempdir,
+        text=True)
+    self._tempfiles.append(new_json_file)
+    with open(new_json_file, 'w') as f:
+      json.dump(jsonable_contents, f)
+    return new_json_file
+
+  def MakeChart(self, metric, seed, mu, sigma, n):
+    """Creates a normally distributed (continuous) pseudo-random sample.
+
+    This function creates a deterministic pseudo-random sample and stores it
+    in chartjson format to facilitate the testing of the sample comparison
+    logic.
+
+    Args:
+      metric (str pair): name of the chart and name of the trace.
+      seed (hashable obj): seed for the RNG to make the sequence
+          deterministic.
+      mu (float): desired mean for the sample.
+      sigma (float): desired standard deviation for the sample.
+      n (int): number of values to generate.
+
+    Returns:
+      The path to a temp file containing the sample in chartjson format.
+    """
+    chart_name, trace_name = metric
+    random.seed(seed)
+    values = [random.gauss(mu, sigma) for _ in range(n)]
+    charts = {
+        'charts': {
+            chart_name: {
+                trace_name: {
+                    'type': 'list_of_scalar_values',
+                    'values': values}
+            }
+        }
+    }
+    return self.NewJsonTempfile(charts)
+
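+  # Each test below writes one or more chartjson samples to temp files and
+  # passes their comma-separated paths to compare_samples.CompareSamples,
+  # addressing the metric under test as 'chart_name/trace_name'.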
+  def testCompareClearRegression(self):
+    metric = ('some_chart', 'some_trace')
+    lower_values = ','.join([self.MakeChart(metric=metric, seed='lower',
+                                            mu=10, sigma=1, n=5)])
+    higher_values = ','.join([self.MakeChart(metric=metric, seed='higher',
+                                             mu=20, sigma=2, n=5)])
+    result = json.loads(compare_samples.CompareSamples(
+        lower_values, higher_values, '/'.join(metric))[1])
+    self.assertTrue(result['result'])
+
+  def testCompareUnlikelyRegressionWithMultipleRuns(self):
+    metric = ('some_chart', 'some_trace')
+    lower_values = ','.join([
+        self.MakeChart(
+            metric=metric, seed='lower%d' % i, mu=10, sigma=1, n=5)
+        for i in range(4)])
+    higher_values = ','.join([
+        self.MakeChart(
+            metric=metric, seed='higher%d' % i, mu=10.01, sigma=0.95, n=5)
+        for i in range(4)])
+    result = json.loads(compare_samples.CompareSamples(
+        lower_values, higher_values, '/'.join(metric))[1])
+    self.assertFalse(result['result'])
+
+  def testCompareInsufficientData(self):
+    metric = ('some_chart', 'some_trace')
+    lower_values = ','.join([self.MakeChart(metric=metric, seed='lower',
+                                            mu=10, sigma=1, n=5)])
+    higher_values = ','.join([self.MakeChart(metric=metric, seed='higher',
+                                             mu=10.40, sigma=0.95, n=5)])
+    result = json.loads(compare_samples.CompareSamples(
+        lower_values, higher_values, '/'.join(metric))[1])
+    self.assertEqual(result['result'], 'needMoreData')
+
+  def testCompareMissingFile(self):
+    metric = ('some_chart', 'some_trace')
+    lower_values = ','.join([self.MakeChart(metric=metric, seed='lower',
+                                            mu=10, sigma=1, n=5)])
+    higher_values = '/path/does/not/exist.json'
+    with self.assertRaises(RuntimeError):
+      compare_samples.CompareSamples(
+          lower_values, higher_values, '/'.join(metric))
+
+  def testCompareMissingMetric(self):
+    metric = ('some_chart', 'some_trace')
+    lower_values = ','.join([self.MakeChart(metric=metric, seed='lower',
+                                            mu=10, sigma=1, n=5)])
+    higher_values = ','.join([self.MakeChart(metric=metric, seed='higher',
+                                             mu=20, sigma=2, n=5)])
+    metric = ('some_chart', 'missing_trace')
+    with self.assertRaises(RuntimeError):
+      compare_samples.CompareSamples(
+          lower_values, higher_values, '/'.join(metric))
+
+  def testCompareBadChart(self):
+    metric = ('some_chart', 'some_trace')
+    lower_values = ','.join([self.MakeChart(metric=metric, seed='lower',
+                                            mu=10, sigma=1, n=5)])
+    higher_values = self.NewJsonTempfile(['obviously', 'not', 'a', 'chart'])
+    with self.assertRaises(RuntimeError):
+      compare_samples.CompareSamples(
+          lower_values, higher_values, '/'.join(metric))
+
+  def testCompareValuesets(self):
+    vs = os.path.join(os.path.dirname(__file__), 'filtered.json')
+    error_code, result = compare_samples.CompareSamples(
+        vs, vs, 'load_media-memory:chrome:all_processes:reported_by_chrome:'
+        'leveldb:effective_size_sum/load:media:9gag',
+        method='compareValuesets')
+    result = json.loads(result)
+    self.assertEqual(0, error_code)
+    self.assertEqual(result['result'], 'needMoreData')
+    self.assertEqual(result['sample_a']['mean'], 4104)
+    self.assertEqual(result['sample_b']['mean'], 4104)
+
+  def testCompareValuesetsNumeric(self):
+    vs = os.path.join(os.path.dirname(__file__), 'filtered.json')
+    error_code, result = compare_samples.CompareSamples(
+        vs, vs, 'load_media-memory:chrome:renderer_processes:'
+        'reported_by_chrome:tracing:effective_size/load:media:9gag',
+        method='compareValuesets')
+    result = json.loads(result)
+    self.assertEqual(0, error_code)
+    self.assertEqual(result['result'], 'needMoreData')
+    self.assertEqual(result['sample_a']['mean'], 64010768)
+    self.assertEqual(result['sample_b']['mean'], 64010768)
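For context, a minimal usage sketch of the entry point exercised above (the
file names here are hypothetical; the (error_code, json_string) return shape
and the possible 'result' values are inferred from the assertions in these
tests):

import json

from tracing.metrics import compare_samples

# Each sample argument is a comma-separated list of chartjson files; the
# metric is addressed as 'chart_name/trace_name'.
error_code, result_json = compare_samples.CompareSamples(
    'before_run1.json,before_run2.json',  # sample A (hypothetical paths)
    'after_run1.json,after_run2.json',    # sample B (hypothetical paths)
    'some_chart/some_trace')
result = json.loads(result_json)
# result['result'] is truthy when the samples clearly differ, falsy when a
# difference is unlikely, and 'needMoreData' when more runs are required.
# Valueset-format results are compared by passing method='compareValuesets'.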
