Chromium Code Reviews

Unified Diff: log_parser/perf_expectations/tests/perf_expectations_unittest.py

Issue 194032: Add more unit tests for perf expectations.... (Closed) Base URL: svn://chrome-svn.corp.google.com/chrome/trunk/tools/buildbot/scripts/master/
Patch Set: Created 11 years, 3 months ago
Index: log_parser/perf_expectations/tests/perf_expectations_unittest.py
===================================================================
--- log_parser/perf_expectations/tests/perf_expectations_unittest.py (revision 25340)
+++ log_parser/perf_expectations/tests/perf_expectations_unittest.py (working copy)
@@ -14,6 +14,7 @@
 import sys
 import os
 import unittest
+import re
 
 simplejson = None
@@ -58,24 +59,73 @@
   sys.path = old_path
   return True
+def LoadData():
+  perf_file = open(PERF_EXPECTATIONS, 'r')
+  try:
+    perf_data = simplejson.load(perf_file)
+  except ValueError, e:
+    perf_file.seek(0)
+    print "Error reading %s:\n%s" % (PERF_EXPECTATIONS,
+                                     perf_file.read()[:50]+'...')
+    raise e
+  return perf_data
+
 OnTestsLoad()
 PERF_EXPECTATIONS = os.path.join(os.path.dirname(sys.argv[0]),
                                  '../perf_expectations.json')
-class SimplejsonUnittest(unittest.TestCase):
-  def testFormat(self):
-    perf_file = open(PERF_EXPECTATIONS, 'r')
-    try:
-      perf_data = simplejson.load(perf_file)
-    except ValueError, e:
-      perf_file.seek(0)
-      print "Error reading %s:\n%s" % (PERF_EXPECTATIONS,
-                                       perf_file.read()[:50]+'...')
-      raise e
-    print ("Successfully loaded perf_expectations: %d keys found." %
-           len(perf_data))
-    return
+class PerfExpectationsUnittest(unittest.TestCase):
+  def testPerfExpectations(self):
+    perf_data = LoadData()
+
+    # Test that the loaded data is a dictionary.
+    if not isinstance(perf_data, dict):
+      raise Exception('perf expectations is not a dict')
+
+    # Test the 'load' key.
+    if 'load' not in perf_data:
+      raise Exception("perf expectations is missing a load key")
+    if not isinstance(perf_data['load'], bool):
+      raise Exception("perf expectations load key has non-bool value")
+
+    # Test that all other key values are dictionaries.
+    bad_keys = []
+    for key in perf_data:
+      if key == 'load':
+        continue
+      if not isinstance(perf_data[key], dict):
+        bad_keys.append(key)
+    if len(bad_keys) > 0:
+      msg = "perf expectations keys have non-dict values"
+      raise Exception("%s: %s" % (msg, bad_keys))
+
+    # Test that all other key values have numeric delta and var keys.
+    for key in perf_data:
+      if key == 'load':
+        continue
+      if 'delta' not in perf_data[key] or 'var' not in perf_data[key]:
+        bad_keys.append(key)
+        continue
+      if (not isinstance(perf_data[key]['delta'], int) and
+          not isinstance(perf_data[key]['delta'], float)):
+        bad_keys.append(key)
+      if (not isinstance(perf_data[key]['var'], int) and
+          not isinstance(perf_data[key]['var'], float)):
+        bad_keys.append(key)
+    if len(bad_keys) > 0:
+      msg = "perf expectations key values missing or invalid delta/var"
+      raise Exception("%s: %s" % (msg, bad_keys))
+
+    # Test that all keys have the correct format.
+    for key in perf_data:
+      if key == 'load':
+        continue
+      if not re.match(r"^([\w-]+)/([\w-]+)/([\w-]+)/([\w-]+)$", key):
+        bad_keys.append(key)
+    if len(bad_keys) > 0:
+      msg = "perf expectations keys in bad format, expected a/b/c/d"
+      raise Exception("%s: %s" % (msg, bad_keys))
+
 if __name__ == '__main__':
   unittest.main()
Property changes on: log_parser/perf_expectations/tests/perf_expectations_unittest.py
___________________________________________________________________
Added: svn:mergeinfo
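
For reference, the new checks imply a particular shape for perf_expectations.json: a boolean 'load' entry, plus trace keys of the form a/b/c/d whose values are dicts with numeric 'delta' and 'var' entries. A minimal sketch of data that would pass testPerfExpectations is shown below; the key names and numbers are invented for illustration and are not taken from the real perf_expectations.json.

# Hypothetical data satisfying the checks in testPerfExpectations above;
# the trace names and values here are made up.
example_expectations = {
  "load": True,                                            # required bool key
  "linux-release/moz/times/t": {"delta": 50, "var": 10},   # a/b/c/d key
  "xp-release/morejs/vm_peak_b/vm_pk": {"delta": 1500.5, "var": 500.0},
}

Each non-'load' key must match ^([\w-]+)/([\w-]+)/([\w-]+)/([\w-]+)$, and each value must be a dict whose 'delta' and 'var' are ints or floats.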
