OLD | NEW |
1 #!/usr/bin/env python | 1 #!/usr/bin/env python |
2 # Copyright 2014 The Chromium Authors. All rights reserved. | 2 # Copyright 2014 The Chromium Authors. All rights reserved. |
3 # Use of this source code is governed by a BSD-style license that can be | 3 # Use of this source code is governed by a BSD-style license that can be |
4 # found in the LICENSE file. | 4 # found in the LICENSE file. |
5 | 5 |
6 """Rebase DumpAccessibilityTree Tests. | 6 """Rebase DumpAccessibilityTree Tests. |
7 | 7 |
8 This script is intended to be run when you make a change that could affect the | 8 This script is intended to be run when you make a change that could affect the |
9 expected results of tests in: | 9 expected results of tests in: |
10 | 10 |
11 content/test/data/accessibility | 11 content/test/data/accessibility |
12 | 12 |
13 It assumes that you've already uploaded a change and the try jobs have finished. | 13 It assumes that you've already uploaded a change and the try jobs have finished. |
14 It collects all of the results from try jobs on all platforms and updates the | 14 It collects all of the results from try jobs on all platforms and updates the |
15 expectation files locally. From there you can run 'git diff' to make sure all | 15 expectation files locally. From there you can run 'git diff' to make sure all |
16 of the changes look reasonable, then upload the change for code review. | 16 of the changes look reasonable, then upload the change for code review. |
17 """ | 17 """ |
18 | 18 |
19 import json | |
20 import os | 19 import os |
21 import re | 20 import re |
22 import sys | 21 import sys |
23 import time | 22 import time |
24 import urllib | 23 import urllib |
25 | 24 |
26 # Load BeautifulSoup. It's checked into two places in the Chromium tree. | 25 # Load BeautifulSoup. It's checked into two places in the Chromium tree. |
27 sys.path.append( | 26 sys.path.append( |
28 'third_party/trace-viewer/third_party/tvcm/third_party/beautifulsoup') | 27 'third_party/trace-viewer/third_party/tvcm/third_party/beautifulsoup') |
29 from BeautifulSoup import BeautifulSoup | 28 from BeautifulSoup import BeautifulSoup |
30 | 29 |
31 # The location of the DumpAccessibilityTree html test files and expectations. | 30 # The location of the DumpAccessibilityTree html test files and expectations. |
32 TEST_DATA_PATH = os.path.join(os.getcwd(), 'content/test/data/accessibility') | 31 TEST_DATA_PATH = os.path.join(os.getcwd(), 'content/test/data/accessibility') |
33 | 32 |
34 # A global that keeps track of files we've already updated, so we don't | 33 # A global that keeps track of files we've already updated, so we don't |
35 # bother to update the same file twice. | 34 # bother to update the same file twice. |
36 completed_files = set() | 35 completed_files = set() |
37 | 36 |
38 def GitClIssue(): | 37 def GitClIssue(): |
39 '''Retrieve the current issue number as a string.''' | 38 '''Retrieve the current issue number as a string.''' |
40 result = os.popen('git cl issue').read() | 39 result = os.popen('git cl issue').read() |
41 # Returns string like: 'Issue number: 12345 (https://...)' | 40 # Returns string like: 'Issue number: 12345 (https://...)' |
42 return result.split()[2] | 41 return result.split()[2] |
43 | 42 |
44 def ParseFailure(name, url): | 43 def ParseFailure(name, url): |
45 '''Parse given the name of a failing trybot and the url of its build log.''' | 44 '''Parse given the name of a failing trybot and the url of its build log.''' |
46 print | |
47 print "Checking trybot: %s" % name | |
48 url = url.replace('/builders/', '/json/builders/') | |
49 response = urllib.urlopen(url) | |
50 if response.getcode() == 200: | |
51 jsondata = response.read() | |
52 | 45 |
53 if not jsondata: | 46 # Figure out the platform. |
54 print "Failed to fetch from: " + url | 47 if name.find('android') >= 0: |
| 48 platform_suffix = '-expected-android.txt' |
| 49 elif name.find('mac') >= 0: |
| 50 platform_suffix = '-expected-mac.txt' |
| 51 elif name.find('win') >= 0: |
| 52 platform_suffix = '-expected-win.txt' |
| 53 else: |
55 return | 54 return |
56 | 55 |
57 try: | 56 # Read the content_browsertests log file. |
58 data = json.loads(jsondata) | 57 data = None |
59 except: | 58 lines = None |
60 print "Failed to parse JSON from: " + url | 59 urls = [] |
| 60 for url_suffix in [ |
| 61 '/steps/content_browsertests%20(with%20patch)/logs/stdio/text', |
| 62 '/steps/content_browsertests/logs/stdio/text']: |
| 63 urls.append(url + url_suffix) |
| 64 for url in urls: |
| 65 response = urllib.urlopen(url) |
| 66 if response.getcode() == 200: |
| 67 data = response.read() |
| 68 lines = data.splitlines() |
| 69 break |
| 70 |
| 71 if not data: |
61 return | 72 return |
62 | 73 |
63 for step in data["steps"]: | 74 # Parse the log file for failing tests and overwrite the expected |
64 name = step["name"] | 75 # result file locally with the actual results from the log. |
65 if name[:len("content_browsertests")] == "content_browsertests": | 76 test_name = None |
66 if name.find("without") >= 0: | |
67 continue | |
68 if name.find("retry") >= 0: | |
69 continue | |
70 print "Found content_browsertests logs" | |
71 for log in step["logs"]: | |
72 (log_name, log_url) = log | |
73 if log_name == "stdio": | |
74 continue | |
75 log_url += '/text' | |
76 log_response = urllib.urlopen(log_url) | |
77 if log_response.getcode() == 200: | |
78 logdata = log_response.read() | |
79 ParseLog(logdata) | |
80 else: | |
81 print "Failed to fetch test log data from: " + url | |
82 | |
83 def Fix(line): | |
84 if line[:3] == '@@@': | |
85 try: | |
86 line = re.search('[^@]@([^@]*)@@@', line).group(1) | |
87 except: | |
88 pass | |
89 return line | |
90 | |
91 def ParseLog(logdata): | |
92 '''Parse the log file for failing tests and overwrite the expected | |
93 result file locally with the actual results from the log.''' | |
94 lines = logdata.splitlines() | |
95 test_file = None | |
96 expected_file = None | |
97 start = None | 77 start = None |
| 78 filename = None |
98 for i in range(len(lines)): | 79 for i in range(len(lines)): |
99 line = Fix(lines[i]) | 80 line = lines[i] |
100 if line.find('Testing:') >= 0: | 81 if line[:12] == '[ RUN      ]': |
101 test_file = re.search( | 82 test_name = line[13:] |
102 'content.test.*accessibility.([^@]*)', line).group(1) | 83 if test_name and line[:8] == 'Testing:': |
103 expected_file = None | 84 filename = re.search('content.test.*accessibility.(.*)', line).group(1) |
104 start = None | 85 if test_name and line == 'Actual': |
105 if line.find('Expected output:') >= 0: | |
106 expected_file = re.search( | |
107 'content.test.*accessibility.([^@]*)', line).group(1) | |
108 if line == 'Actual': | |
109 start = i + 2 | 86 start = i + 2 |
110 if start and test_file and expected_file and line.find('End-of-file') >= 0: | 87 if start and test_name and filename and line[:12] == '[  FAILED  ]': |
111 dst_fullpath = os.path.join(TEST_DATA_PATH, expected_file) | 88 # Get the path to the html file. |
| 89 dst_fullpath = os.path.join(TEST_DATA_PATH, filename) |
| 90 # Strip off .html and replace it with the platform expected suffix. |
| 91 dst_fullpath = dst_fullpath[:-5] + platform_suffix |
112 if dst_fullpath in completed_files: | 92 if dst_fullpath in completed_files: |
113 continue | 93 continue |
114 | 94 |
115 actual = [Fix(line) for line in lines[start : i] if line] | 95 actual = [line for line in lines[start : i - 1] if line] |
116 fp = open(dst_fullpath, 'w') | 96 fp = open(dst_fullpath, 'w') |
117 fp.write('\n'.join(actual)) | 97 fp.write('\n'.join(actual)) |
118 fp.close() | 98 fp.close() |
119 print "* %s" % os.path.relpath(dst_fullpath) | 99 print dst_fullpath |
120 completed_files.add(dst_fullpath) | 100 completed_files.add(dst_fullpath) |
121 start = None | 101 start = None |
122 test_file = None | 102 test_name = None |
123 expected_file = None | 103 filename = None |
124 | 104 |
125 def ParseTrybots(data): | 105 def ParseTrybots(data): |
126 '''Parse the code review page to find links to try bots.''' | 106 '''Parse the code review page to find links to try bots.''' |
127 soup = BeautifulSoup(data) | 107 soup = BeautifulSoup(data) |
128 failures = soup.findAll( | 108 failures = soup.findAll( |
129 'a', | 109 'a', |
130 { "class" : "build-result build-status-color-failure" }) | 110 { "class" : "build-result build-status-color-failure" }) |
131 print 'Found %d trybots that failed' % len(failures) | 111 print 'Found %d trybots that failed' % len(failures) |
132 for f in failures: | 112 for f in failures: |
133 name = f.text.replace(' ', '') | 113 name = f.text.replace(' ', '') |
134 url = f['href'] | 114 url = f['href'] |
135 ParseFailure(name, url) | 115 ParseFailure(name, url) |
136 | 116 |
137 def Run(): | 117 def Run(): |
138 '''Main. Get the issue number and parse the code review page.''' | 118 '''Main. Get the issue number and parse the code review page.''' |
139 if len(sys.argv) == 2: | 119 if len(sys.argv) == 2: |
140 issue = sys.argv[1] | 120 issue = sys.argv[1] |
141 else: | 121 else: |
142 issue = GitClIssue() | 122 issue = GitClIssue() |
143 | 123 |
144 url = 'https://codereview.chromium.org/%s' % issue | 124 url = 'https://codereview.chromium.org/%s' % issue |
145 print 'Fetching issue from %s' % url | 125 print 'Fetching issue from %s' % url |
146 response = urllib.urlopen(url) | 126 response = urllib.urlopen(url) |
147 if response.getcode() != 200: | 127 if response.getcode() != 200: |
148 print 'Error code %d accessing url: %s' % (response.getcode(), url) | 128 print 'Error code %d accessing url: %s' % (response.getcode(), url) |
149 return | |
150 data = response.read() | 129 data = response.read() |
151 ParseTrybots(data) | 130 ParseTrybots(data) |
152 | 131 |
153 print | |
154 if len(completed_files) == 0: | |
155 print "No output from DumpAccessibilityTree test results found." | |
156 return | |
157 else: | |
158 print "Summary: modified the following files:" | |
159 all_files = list(completed_files) | |
160 all_files.sort() | |
161 for f in all_files: | |
162 print "* %s" % os.path.relpath(f) | |
163 | |
164 if __name__ == '__main__': | 132 if __name__ == '__main__': |
165 sys.exit(Run()) | 133 sys.exit(Run()) |
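A brief usage sketch (not part of the review itself; the script filename and issue number are placeholders): per the docstring, the script is run after the try jobs for an uploaded change have finished, and it must be invoked from the Chromium src/ root, since both TEST_DATA_PATH and the BeautifulSoup sys.path entry are resolved relative to the current directory. The issue number argument is optional; when omitted, Run() falls back to GitClIssue().

  python rebase_dump_accessibility_tree_test.py 12345   # placeholder issue number; omit to use 'git cl issue'
  git diff content/test/data/accessibility              # review the rewritten -expected-*.txt files before re-uploading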