| Index: media/tools/layout_tests/layouttest_analyzer_helpers.py
|
| diff --git a/media/tools/layout_tests/layouttest_analyzer_helpers.py b/media/tools/layout_tests/layouttest_analyzer_helpers.py
|
| new file mode 100644
|
| index 0000000000000000000000000000000000000000..2151e90763356949e11b7d11ed867ac49da39f25
|
| --- /dev/null
|
| +++ b/media/tools/layout_tests/layouttest_analyzer_helpers.py
|
| @@ -0,0 +1,429 @@
|
| +#!/usr/bin/python
|
| +# Copyright (c) 2011 The Chromium Authors. All rights reserved.
|
| +# Use of this source code is governed by a BSD-style license that can be
|
| +# found in the LICENSE file.
|
| +
|
| +"""A module for helper functions for the layouttest analyzer."""
|
| +
|
| +import copy
|
| +from datetime import datetime
|
| +from email.mime.multipart import MIMEMultipart
|
| +from email.mime.text import MIMEText
|
| +import os
|
| +import pickle
|
| +import smtplib
|
| +import time
|
| +import urllib
|
| +
|
| +from bug import Bug
|
| +from test_expectations_history import TestExpectationsHistory
|
| +
|
| +
|
class AnalyzerResultMap:
  """A class to deal with the joined result produced by the analyzer.

  The join is done between layouttests and the test_expectations object
  (based on the test expectation file). The instance variable |result_map|
  contains the following keys: 'whole', 'skip', 'nonskip'. The value of
  'whole' contains information about all layouttests. The value of 'skip'
  contains information about skipped layouttests, i.e., tests that have
  'SKIP' in their entry in the test expectation file. The value of
  'nonskip' contains all information about layout tests that are in the
  test expectation file but not skipped. The information is exactly the
  same as the one parsed by the analyzer.
  """

  def __init__(self, test_info_map):
    """Initialize the result map based on |test_info_map|.

    |test_info_map| contains all layouttest information. The job here is
    to classify each test as 'whole', 'skip' or 'nonskip' based on that
    information.

    Args:
      test_info_map: the result map of layouttests.JoinWithTestExpectation.
          The key of the map is a test name such as 'media/media-foo.html'.
          The value of the map is a map that contains the following keys:
          'desc' (description) and 'te_info' (test expectation information),
          which is a list of test expectation information maps keyed by
          test expectation keywords such as 'SKIP'. May be None, in which
          case all three groups are left empty.
    """
    self.result_map = {'whole': {}, 'skip': {}, 'nonskip': {}}
    if test_info_map is None:
      return
    for (test_name, test_info) in test_info_map.items():
      self.result_map['whole'][test_name] = test_info
      if 'te_info' not in test_info:
        continue
      # A test is 'skip' when any of its expectation entries carries
      # SKIP; otherwise it is a failing but non-skipped test.
      if any('SKIP' in entry for entry in test_info['te_info']):
        self.result_map['skip'][test_name] = test_info
      else:
        self.result_map['nonskip'][test_name] = test_info

  @staticmethod
  def GetDiffString(diff_map_element, type_str):
    """Get a difference string out of a diff map element.

    This is used for generating the email message.

    Args:
      diff_map_element: the compared map generated by |CompareResultMaps()|
          for one test group ('whole', 'skip', 'nonskip'). Element 0 holds
          (name, info) tuples present only in the current result; element 1
          holds tuples present only in the previous result.
      type_str: the test group name ('whole', 'skip' or 'nonskip').

    Returns:
      an HTML string describing the difference, or 'No Change'.
    """
    diff = len(diff_map_element[0]) - len(diff_map_element[1])
    if diff == 0:
      return 'No Change'
    # More failing tests is bad (red) except for the 'whole' group, where
    # growth just means more tests exist overall.
    if diff > 0 and type_str != 'whole':
      color = 'red'
    else:
      color = 'green'
    # Use %+d so a negative diff renders as '-N' instead of '+-N'
    # (the previous '+%d' produced '+-N' for negative values).
    diff_str = '<font color="%s">%+d</font>' % (color, diff)
    cur_names = ','.join(name for (name, _) in diff_map_element[0])
    prev_names = ','.join(name for (name, _) in diff_map_element[1])
    if cur_names or prev_names:
      diff_str += ':'
      if cur_names:
        diff_str += '<font color="%s">%s</font> ' % (color, cur_names)
      if prev_names:
        diff_str += '<font color="%s">%s</font>' % (color, prev_names)
    return diff_str

  def ConvertToString(self, prev_time, diff_map, bug_anno_map):
    """Convert this result to an HTML string for email.

    Args:
      prev_time: the previous time string that is compared against.
      diff_map: the compared map generated by |CompareResultMaps()|.
      bug_anno_map: an annotation map where keys are bug names and values
          are annotations for the bug. Unknown bugs are added to this map
          with a 'needs investigation' annotation (the map is mutated).

    Returns:
      an analyzer result string in HTML format.
    """
    # Skipped tests do not run, so they are excluded from the passing
    # rate denominator.
    denominator = (len(self.result_map['whole']) -
                   len(self.result_map['skip']))
    if denominator > 0:
      # Integer percentage ('//' keeps the historical truncating result).
      passing_rate = (100 -
                      len(self.result_map['nonskip']) * 100 // denominator)
    else:
      # No runnable tests at all; avoid a ZeroDivisionError.
      passing_rate = 0
    output = ('<b>Statistics (Diff Compared to %s):</b><ul>'
              '<li>The number of tests: %d (%s)</li>'
              '<li>The number of failing skipped tests: %d (%s)</li>'
              '<li>The number of failing non-skipped tests: %d (%s)</li>'
              '<li>Passing rate: %d %%</li></ul>') % (
                  prev_time,
                  len(self.result_map['whole']),
                  AnalyzerResultMap.GetDiffString(diff_map['whole'],
                                                  'whole'),
                  len(self.result_map['skip']),
                  AnalyzerResultMap.GetDiffString(diff_map['skip'], 'skip'),
                  len(self.result_map['nonskip']),
                  AnalyzerResultMap.GetDiffString(diff_map['nonskip'],
                                                  'nonskip'),
                  passing_rate)
    output += '<b>Current issues about failing non-skipped tests:</b>'
    for (bug_txt, test_info_list) in (
        self.GetListOfBugsForNonSkippedTests().items()):
      if bug_txt not in bug_anno_map:
        bug_anno_map[bug_txt] = '<font color="red">Needs investigation!</font>'
      output += '<ul>%s (%s)' % (Bug(bug_txt).ToString(),
                                 bug_anno_map[bug_txt])
      for (test_name, te_info) in test_info_list:
        gpu_link = ''
        if 'GPU' in te_info:
          gpu_link = 'group=%40ToT%20GPU%20Mesa%20-%20chromium.org&'
        dashboard_link = ('http://test-results.appspot.com/dashboards/'
                          'flakiness_dashboard.html#%stests='
                          '%s' % (gpu_link, test_name))
        output += '<li><a href="%s">%s</a> (%s) </li>' % (
            dashboard_link, test_name, ' '.join(te_info.keys()))
      output += '</ul>\n'
    return output

  def CompareResultMaps(self, result_map2):
    """Compare this result map with another to see any difference.

    The comparison is done for layouttests which belong to 'whole',
    'skip', or 'nonskip'.

    Args:
      result_map2: another result map to be compared against this result.

    Returns:
      a comp_result_map, which contains 'whole', 'skip' and 'nonskip' as
      keys. The values are the result of |GetDiffBetweenMaps()|: a tuple
      of two lists, one for the current tests diff and the other for the
      previous tests diff. For example (test expectation information is
      omitted for simplicity),
        comp_result_map['whole'][0] = ['foo1.html']
        comp_result_map['whole'][1] = ['foo2.html']
      means the current result has 'foo1.html' but the previous result
      does not, and the previous result has 'foo2.html' but the current
      one does not.
    """
    comp_result_map = {}
    for name in ('whole', 'skip', 'nonskip'):
      # Only for non-skipped tests is the test expectation information
      # itself compared; otherwise only test names are compared.
      look_into_te_info = (name == 'nonskip')
      comp_result_map[name] = GetDiffBetweenMaps(
          self.result_map[name], result_map2.result_map[name],
          look_into_te_info)
    return comp_result_map

  @staticmethod
  def Load(file_path):
    """Load an AnalyzerResultMap from |file_path| using pickle.

    Args:
      file_path: the file path the result is read from.

    Returns:
      an AnalyzerResultMap object read from |file_path|.
    """
    # Open in binary mode: pickle data is binary and |Save()| writes
    # with 'wb'; text mode corrupts the stream on some platforms.
    file_object = open(file_path, 'rb')
    try:
      return pickle.load(file_object)
    finally:
      file_object.close()

  def Save(self, file_path):
    """Save this object to |file_path| using pickle.

    Args:
      file_path: the file path the result is written to.
    """
    file_object = open(file_path, 'wb')
    try:
      pickle.dump(self, file_object)
    finally:
      # Close even when pickling fails.
      file_object.close()

  def GetListOfBugsForNonSkippedTests(self):
    """Get a map of bugs for non-skipped layout tests.

    This is used for generating email content.

    Returns:
      a map whose keys are bug names and whose values are lists of
      (test_name, te_info) tuples, where te_info contains only the
      expectation keywords (the 'Comments' and 'Bugs' entries are
      stripped).
    """
    bug_map = {}
    for (name, test_info) in self.result_map['nonskip'].items():
      for te_info in test_info['te_info']:
        # Keep only the expectation keywords; drop bookkeeping entries.
        main_te_info = {}
        for key in te_info.keys():
          if key != 'Comments' and key != 'Bugs':
            main_te_info[key] = True
        for bug in te_info['Bugs']:
          bug_map.setdefault(bug, []).append((name, main_te_info))
    return bug_map
|
| +
|
| +
|
def SendStatusEmail(prev_time, analyzer_result_map, prev_analyzer_result_map,
                    bug_anno_map, receiver_email_address):
  """Send a status email summarizing the analyzer result.

  Args:
    prev_time: the date string such as '2011-10-09-11'. This format has
        been used in this analyzer.
    analyzer_result_map: current analyzer result.
    prev_analyzer_result_map: previous analyzer result, which is read
        from a file.
    bug_anno_map: bug annotation map where bug names and annotations
        are stored.
    receiver_email_address: receiver's email address.
  """
  diff_map = analyzer_result_map.CompareResultMaps(prev_analyzer_result_map)
  email_body = analyzer_result_map.ConvertToString(prev_time, diff_map,
                                                   bug_anno_map)
  # Add diff info about skipped/non-skipped tests. Convert the previous
  # time string to seconds since the epoch for the history lookup below.
  prev_time_secs = datetime.strptime(prev_time, "%Y-%m-%d-%H")
  prev_time_secs = time.mktime(prev_time_secs.timetuple())
  # Collect the names of all tests whose skip/nonskip membership changed
  # in either direction (current-only and previous-only lists).
  testname_map = {}
  for test_group in ('skip', 'nonskip'):
    for diff_list in diff_map[test_group]:
      for (test_name, _) in diff_list:
        testname_map[test_name] = True
  now = time.time()

  rev_infos = TestExpectationsHistory.GetDiffBetweenTimes(
      now, prev_time_secs, testname_map.keys())
  if len(rev_infos) > 0:
    email_body += '<br><b>Revision Information:</b>'
    for rev_info in rev_infos:
      # The commit message element is currently unused in the email.
      (old_rev, new_rev, author, date, _commit_message,
       target_lines) = rev_info
      link_template = urllib.unquote(
          'http://trac.webkit.org/changeset?new=%d%40trunk%2F'
          'LayoutTests%2Fplatform%2Fchromium%2F'
          'test_expectations.txt&old=%d%40trunk%2FLayoutTests%2F'
          'platform%2Fchromium%2Ftest_expectations.txt')
      link = link_template % (new_rev, old_rev)
      email_body += '<ul><a href="%s">%s->%s</a>\n' % (link, old_rev,
                                                       new_rev)
      email_body += '<li>%s</li>\n' % author
      email_body += '<li>%s</li>\n<ul>' % date
      for line in target_lines:
        email_body += '<li>%s</li>\n' % line
      email_body += '</ul></ul>'
  localtime = time.asctime(time.localtime(time.time()))
  # TODO(imasaki): remove my name from here.
  SendEmail('imasaki@chromium.org', 'Kenji Imasaki',
            [receiver_email_address], ['Layout Test Analyzer Result'],
            'Layout Test Analyzer Result : ' + localtime, email_body)
|
| +
|
| +
|
def SendEmail(sender_email_address, sender_name, receivers_email_addresses,
              receivers_names, subject, message):
  """Send an HTML email using localhost's mail server.

  Args:
    sender_email_address: sender's email address.
    sender_name: sender's name (kept for interface compatibility; the
        MIME headers use only the address).
    receivers_email_addresses: receivers' email addresses. Only the
        first entry appears in the 'To' header, but all addresses
        receive the mail.
    receivers_names: receivers' names (kept for interface compatibility).
    subject: subject string.
    message: email message body (HTML fragment).
  """
  try:
    html_top = """
    <html>
    <head></head>
    <body>
    """
    html_bot = """
    </body>
    </html>
    """
    html = html_top + message + html_bot
    msg = MIMEMultipart('alternative')
    msg['Subject'] = subject
    msg['From'] = sender_email_address
    msg['To'] = receivers_email_addresses[0]
    msg.attach(MIMEText(html, 'html'))
    smtp_obj = smtplib.SMTP('localhost')
    try:
      smtp_obj.sendmail(sender_email_address,
                        receivers_email_addresses,
                        msg.as_string())
      print('Successfully sent email')
    finally:
      # Always close the SMTP connection, even when sendmail() raises;
      # the previous code leaked the connection.
      smtp_obj.quit()
  except smtplib.SMTPException:
    print('Error: unable to send email')
|
| +
|
| +
|
def FindLatestTime(time_list):
  """Find the latest time from |time_list|.

  The current status is compared to the status of the latest file in
  |RESULT_DIR|.

  Args:
    time_list: a list of time strings in the form of '2011-10-23-23'.

  Returns:
    a string representing the latest time among the time_list, or None
    when |time_list| is empty.

  Raises:
    ValueError: if an entry does not match the '%Y-%m-%d-%H' format.
  """
  if not time_list:
    # The previous code crashed with an AttributeError here
    # (None.strftime); return None so callers can handle 'no results'.
    return None
  latest_date = max(datetime.strptime(t, "%Y-%m-%d-%H") for t in time_list)
  return latest_date.strftime("%Y-%m-%d-%H")
|
| +
|
| +
|
def FindLatestResult(result_dir):
  """Find, load and return the latest result in |result_dir|.

  This is used to compare the current analyzer result against the most
  recent known result.

  Args:
    result_dir: the result directory.

  Returns:
    a (file_name, analyzer_result_map) tuple, where file_name is the
    latest time string and analyzer_result_map is the result loaded
    from that file.
  """
  entries = os.listdir(result_dir)
  latest_name = FindLatestTime(entries)
  latest_path = os.path.join(result_dir, latest_name)
  return (latest_name, AnalyzerResultMap.Load(latest_path))
|
| +
|
| +
|
def GetTestExpectationDiffBetweenLists(list1, list2):
  """Get the test expectation entries in |list1| but not in |list2|.

  Args:
    list1: a list of test expectation information.
    list2: a list of test expectation information.

  Returns:
    a list of the elements of |list1| that do not appear in |list2|,
    in their original order.
  """
  # Entries are dicts (unhashable), so a set cannot be used; the lists
  # are small, so the linear membership test is fine.
  return [entry for entry in list1 if entry not in list2]
|
| +
|
| +
|
def GetDiffBetweenMaps(map1, map2, lookintoTestExpectaionInfo=False):
  """Get the difference between two analyzer result maps.

  Args:
    map1: analyzer result map to be compared.
    map2: analyzer result map to be compared.
    lookintoTestExpectaionInfo: a boolean to indicate whether to compare
        test expectation information as well as test names.

  Returns:
    a tuple of |name1_list| and |name2_list|. |name1_list| contains all
    test names and the test expectation information in |map1| but not
    in |map2|. |name2_list| contains all test names and the test
    expectation information in |map2| but not in |map1|.
  """

  def OneWayDiff(src_map, dst_map):
    """Return (name, info) tuples for entries of src_map not covered by
    dst_map (the two passes below used to be duplicated code)."""
    name_list = []
    for (name, info) in src_map.items():
      if name in dst_map:
        if lookintoTestExpectaionInfo and 'te_info' in info:
          # The name exists in both maps; report it only when its test
          # expectation entries differ.
          te_diff = GetTestExpectationDiffBetweenLists(
              info['te_info'], dst_map[name]['te_info'])
          if te_diff:
            name_list.append((name, te_diff))
      else:
        name_list.append((name, info))
    return name_list

  return (OneWayDiff(map1, map2), OneWayDiff(map2, map1))
|
|
|