OLD | NEW |
| (Empty) |
1 # Copyright 2012 The Chromium Authors. All rights reserved. | |
2 # Use of this source code is governed by a BSD-style license that can be | |
3 # found in the LICENSE file. | |
4 | |
5 """Presubmit script for Chromium WebUI resources. | |
6 | |
7 See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts | |
8 for more details about the presubmit API built into depot_tools, and see | |
9 https://chromium.googlesource.com/chromium/src/+/master/styleguide/web/web.md | |
10 for the rules we're checking against here. | |
11 """ | |
12 | |
13 # TODO(dbeam): Real CSS parser? https://github.com/danbeam/css-py/tree/css3 | |
14 | |
class CSSChecker(object):
  """Presubmit checker for Chromium WebUI CSS.

  Runs a set of regex-based style checks over added or modified .css files
  (and inline <style> blocks inside .html files) and reports violations as
  presubmit prompt warnings, one warning per offending file.
  """

  def __init__(self, input_api, output_api, file_filter=None):
    self.input_api = input_api
    self.output_api = output_api
    self.file_filter = file_filter

  def RunChecks(self):
    """Checks affected .css/.html files against the WebUI style rules.

    Returns:
      A list of output_api.PresubmitPromptWarning objects, one per file
      that has at least one style violation.
    """
    # We use this a lot, so make a nick name variable.
    re = self.input_api.re

    def _collapseable_hex(s):
      # True when a 6-digit hex color (e.g. 'aabbcc') could be 3 digits.
      return (len(s) == 6 and s[0] == s[1] and s[2] == s[3] and s[4] == s[5])

    def _is_gray(s):
      # True when a 3- or 6-digit hex color is a shade of gray (r == g == b).
      return s[0] == s[1] == s[2] if len(s) == 3 else s[0:2] == s[2:4] == s[4:6]

    def _remove_all(s):
      # Strip everything the line-based checks shouldn't see.
      s = _remove_grit(s)  # Must be done first.
      s = _remove_ats(s)
      s = _remove_comments(s)
      s = _remove_mixins_and_valid_vars(s)
      s = _remove_template_expressions(s)
      return s

    def _extract_inline_style(s):
      # Concatenates the contents of every <style>...</style> block in HTML.
      return '\n'.join(re.findall(r'<style\b[^>]*>([^<]*)<\/style>', s))

    def _remove_ats(s):
      # Unwraps @at-rules (@media, @keyframes, ...) down to their inner rules.
      return re.sub(r"""
          @(?!apply)(?!\d+x\b)  # @at-keyword, not (apply|2x)
          \w+[^'"]*?{           # selector junk {
          (.*{.*?})+            # inner { curly } blocks, rules, and selector
          .*?}                  # stuff up to the first end curly }
          """, r'\1', s, flags=re.DOTALL | re.VERBOSE)

    def _remove_comments(s):
      return re.sub(r'/\*.*?\*/', '', s, flags=re.DOTALL)

    def _remove_grit(s):
      return re.sub(r"""
          <if[^>]+>.*?<\s*/\s*if[^>]*>|  # <if> contents </if>
          <include[^>]+>                 # <include>
          """, '', s, flags=re.DOTALL | re.VERBOSE)

    mixin_shim_reg = r'[\w-]+_-_[\w-]+'

    def _remove_mixins_and_valid_vars(s):
      # Custom property declarations are checked separately (no_mixin_shims),
      # so drop legitimate --var: declarations and mixin bodies here.
      valid_vars = r'--(?!' + mixin_shim_reg + r')[\w-]+:\s*'
      mixin_or_value = r'({.*?}|[^;}]+);?\s*'
      return re.sub(valid_vars + mixin_or_value, '', s, flags=re.DOTALL)

    def _remove_template_expressions(s):
      return re.sub(r'\$i18n(Raw)?{[^}]*}', '', s, flags=re.DOTALL)

    def _rgb_from_hex(s):
      # Converts a 3- or 6-digit hex color string to an (r, g, b) int tuple.
      if len(s) == 3:
        r, g, b = s[0] + s[0], s[1] + s[1], s[2] + s[2]
      else:
        r, g, b = s[0:2], s[2:4], s[4:6]
      return int(r, base=16), int(g, base=16), int(b, base=16)

    def _strip_prefix(s):
      return re.sub(r'^-(?:o|ms|moz|khtml|webkit)-', '', s)

    def alphabetize_props(contents):
      errors = []
      # TODO(dbeam): make this smart enough to detect issues in mixins.
      for rule in re.finditer(r'{(.*?)}', contents, re.DOTALL):
        # List comprehensions (not map/filter) so this also works on
        # Python 3, where map() returns a non-sliceable iterator and
        # comparing a map object against a list is always unequal.
        semis = [t.strip() for t in rule.group(1).split(';')][:-1]
        rules = [r for r in semis if ': ' in r]
        props = [r[0:r.find(':')] for r in rules]
        if props != sorted(props):
          errors.append(' %s;\n' % (';\n '.join(rules)))
      return errors

    def braces_have_space_before_and_nothing_after(line):
      brace_space_reg = re.compile(r"""
          (?:^|\S){|   # selector{ or selector\n{ or
          {\s*\S+\s*   # selector { with stuff after it
          $            # must be at the end of a line
          """, re.VERBOSE)
      return brace_space_reg.search(line)

    def classes_use_dashes(line):
      # Intentionally dumbed down version of CSS 2.1 grammar for class without
      # non-ASCII, escape chars, or whitespace.
      class_reg = re.compile(r"""
          \.(-?[\w-]+).*  # ., then maybe -, then alpha numeric and -
          [,{]\s*$        # selectors should end with a , or {
          """, re.VERBOSE)
      m = class_reg.search(line)
      if not m:
        return False
      class_name = m.group(1)
      return class_name.lower() != class_name or '_' in class_name

    end_mixin_reg = re.compile(r'\s*};\s*$')

    def close_brace_on_new_line(line):
      # Ignore single frames in a @keyframe, i.e. 0% { margin: 50px; }
      frame_reg = re.compile(r"""
          \s*(from|to|\d+%)\s*{   # 50% {
          \s*[\w-]+:              # rule:
          (\s*[\w\(\), -]+)+\s*;  # value;
          \s*}\s*                 # }
          """, re.VERBOSE)
      return ('}' in line and re.search(r'[^ }]', line) and
              not frame_reg.match(line) and not end_mixin_reg.match(line))

    def colons_have_space_after(line):
      colon_space_reg = re.compile(r"""
          (?<!data)    # ignore data URIs
          :(?!//)      # ignore url(http://), etc.
          \S[^;]+;\s*  # only catch one-line rules for now
          """, re.VERBOSE)
      return colon_space_reg.search(line)

    def favor_single_quotes(line):
      return '"' in line

    # Shared between hex_could_be_shorter and rgb_if_not_gray.
    hex_reg = re.compile(r"""
        \#([a-fA-F0-9]{3}|[a-fA-F0-9]{6})  # pound followed by 3 or 6 hex digits
        (?=[^\w-]|$)                       # no more alphanum chars or at EOL
        (?!.*(?:{.*|,\s*)$)                # not in a selector
        """, re.VERBOSE)

    def hex_could_be_shorter(line):
      m = hex_reg.search(line)
      return (m and _is_gray(m.group(1)) and _collapseable_hex(m.group(1)))

    def rgb_if_not_gray(line):
      m = hex_reg.search(line)
      return (m and not _is_gray(m.group(1)))

    small_seconds_reg = re.compile(r"""
        (?:^|[^\w-])  # start of a line or a non-alphanumeric char
        (0?\.[0-9]+)s # 1.0s
        (?!-?[\w-])   # no following - or alphanumeric chars
        """, re.VERBOSE)

    def milliseconds_for_small_times(line):
      return small_seconds_reg.search(line)

    def suggest_ms_from_s(line):
      ms = int(float(small_seconds_reg.search(line).group(1)) * 1000)
      return ' (replace with %dms)' % ms

    def no_data_uris_in_source_files(line):
      return re.search(r'\(\s*data:', line)

    def no_mixin_shims(line):
      return re.search(r'--' + mixin_shim_reg + r'\s*:', line)

    def no_quotes_in_url(line):
      # Raw string: \s and \( are regex escapes, not string escapes.
      return re.search(r'url\s*\(\s*["\']', line, re.IGNORECASE)

    def one_rule_per_line(line):
      one_rule_reg = re.compile(r"""
          [\w-](?<!data):  # a rule: but no data URIs
          (?!//)[^;]+;     # value; ignoring colons in protocols:// and };
          \s*[^ }]\s*      # any non-space after the end colon
          """, re.VERBOSE)
      return one_rule_reg.search(line) and not end_mixin_reg.match(line)

    def pseudo_elements_double_colon(contents):
      # Set for O(1) membership tests below.
      pseudo_elements = {
          'after',
          'before',
          'calendar-picker-indicator',
          'color-swatch',
          'color-swatch-wrapper',
          'date-and-time-container',
          'date-and-time-value',
          'datetime-edit',
          'datetime-edit-ampm-field',
          'datetime-edit-day-field',
          'datetime-edit-hour-field',
          'datetime-edit-millisecond-field',
          'datetime-edit-minute-field',
          'datetime-edit-month-field',
          'datetime-edit-second-field',
          'datetime-edit-text',
          'datetime-edit-week-field',
          'datetime-edit-year-field',
          'details-marker',
          'file-upload-button',
          'first-letter',
          'first-line',
          'inner-spin-button',
          'input-placeholder',
          'input-speech-button',
          'media-slider-container',
          'media-slider-thumb',
          'meter-bar',
          'meter-even-less-good-value',
          'meter-inner-element',
          'meter-optimum-value',
          'meter-suboptimum-value',
          'progress-bar',
          'progress-inner-element',
          'progress-value',
          'resizer',
          'scrollbar',
          'scrollbar-button',
          'scrollbar-corner',
          'scrollbar-thumb',
          'scrollbar-track',
          'scrollbar-track-piece',
          'search-cancel-button',
          'search-decoration',
          'search-results-button',
          'search-results-decoration',
          'selection',
          'slider-container',
          'slider-runnable-track',
          'slider-thumb',
          'textfield-decoration-container',
          'validation-bubble',
          'validation-bubble-arrow',
          'validation-bubble-arrow-clipper',
          'validation-bubble-heading',
          'validation-bubble-message',
          'validation-bubble-text-block',
      }
      pseudo_reg = re.compile(r"""
          (?<!:):       # a single colon, i.e. :after but not ::after
          ([a-zA-Z-]+)  # a pseudo element, class, or function
          (?=[^{}]+?{)  # make sure a selector, not inside { rules }
          """, re.MULTILINE | re.VERBOSE)
      errors = []
      for p in re.finditer(pseudo_reg, contents):
        pseudo = p.group(1).strip().splitlines()[0]
        if _strip_prefix(pseudo.lower()) in pseudo_elements:
          errors.append(' :%s (should be ::%s)' % (pseudo, pseudo))
      return errors

    def one_selector_per_line(contents):
      any_reg = re.compile(r"""
          :(?:-webkit-)?any\(.*?\)  # :-webkit-any(a, b, i) selector
          """, re.DOTALL | re.VERBOSE)
      multi_sels_reg = re.compile(r"""
          (?:}\s*)?             # ignore 0% { blah: blah; }, from @keyframes
          ([^,]+,(?=[^{}]+?{)   # selector junk {, not in a { rule }
          .*[,{])\s*$           # has to end with , or {
          """, re.MULTILINE | re.VERBOSE)
      errors = []
      for b in re.finditer(multi_sels_reg, re.sub(any_reg, '', contents)):
        errors.append(' ' + b.group(1).strip().splitlines()[-1])
      return errors

    def suggest_rgb_from_hex(line):
      suggestions = ['rgb(%d, %d, %d)' % _rgb_from_hex(h.group(1))
                     for h in re.finditer(hex_reg, line)]
      return ' (replace with %s)' % ', '.join(suggestions)

    def suggest_short_hex(line):
      h = hex_reg.search(line).group(1)
      return ' (replace with #%s)' % (h[0] + h[2] + h[4])

    webkit_before_or_after_reg = re.compile(r'-webkit-(\w+-)(after|before):')

    def suggest_top_or_bottom(line):
      prop, pos = webkit_before_or_after_reg.search(line).groups()
      top_or_bottom = 'top' if pos == 'before' else 'bottom'
      return ' (replace with %s)' % (prop + top_or_bottom)

    def webkit_before_or_after(line):
      return webkit_before_or_after_reg.search(line)

    def zero_width_lengths(contents):
      hsl_reg = re.compile(r"""
          hsl\([^\)]*       # hsl(maybestuff
          (?:[, ]|(?<=\())  # a comma or space not followed by a (
          (?:0?\.?)?0%      # some equivalent to 0%
          """, re.VERBOSE)
      zeros_reg = re.compile(r"""
          ^.*(?:^|[^0-9.])               # start/non-number
          (?:\.0|0(?:\.0?                # .0, 0, or 0.0
          |px|em|%|in|cm|mm|pc|pt|ex))   # a length unit
          (?:\D|$)                       # non-number/end
          (?=[^{}]+?}).*$                # only { rules }
          """, re.MULTILINE | re.VERBOSE)
      errors = []
      for z in re.finditer(zeros_reg, contents):
        first_line = z.group(0).strip().splitlines()[0]
        if not hsl_reg.search(first_line):
          errors.append(' ' + first_line)
      return errors

    # NOTE: Currently multi-line checks don't support 'after'. Instead, add
    # suggestions while parsing the file so another pass isn't necessary.
    added_or_modified_files_checks = [
        { 'desc': 'Alphabetize properties and list vendor specific (i.e. '
                  '-webkit) above standard.',
          'test': alphabetize_props,
          'multiline': True,
        },
        { 'desc': 'Start braces ({) end a selector, have a space before them '
                  'and no rules after.',
          'test': braces_have_space_before_and_nothing_after,
        },
        { 'desc': 'Classes use .dash-form.',
          'test': classes_use_dashes,
        },
        { 'desc': 'Always put a rule closing brace (}) on a new line.',
          'test': close_brace_on_new_line,
        },
        { 'desc': 'Colons (:) should have a space after them.',
          'test': colons_have_space_after,
        },
        { 'desc': 'Use single quotes (\') instead of double quotes (") in '
                  'strings.',
          'test': favor_single_quotes,
        },
        { 'desc': 'Use abbreviated hex (#rgb) when in form #rrggbb.',
          'test': hex_could_be_shorter,
          'after': suggest_short_hex,
        },
        { 'desc': 'Use milliseconds for time measurements under 1 second.',
          'test': milliseconds_for_small_times,
          'after': suggest_ms_from_s,
        },
        { 'desc': "Don't use data URIs in source files. Use grit instead.",
          'test': no_data_uris_in_source_files,
        },
        { 'desc': "Don't override custom properties created by Polymer's mixin "
                  "shim. Set mixins or documented custom properties directly.",
          'test': no_mixin_shims,
        },
        { 'desc': "Don't use quotes in url().",
          'test': no_quotes_in_url,
        },
        { 'desc': 'One rule per line (what not to do: color: red; margin: 0;).',
          'test': one_rule_per_line,
        },
        { 'desc': 'One selector per line (what not to do: a, b {}).',
          'test': one_selector_per_line,
          'multiline': True,
        },
        { 'desc': 'Pseudo-elements should use double colon (i.e. ::after).',
          'test': pseudo_elements_double_colon,
          'multiline': True,
        },
        { 'desc': 'Use rgb() over #hex when not a shade of gray (like #333).',
          'test': rgb_if_not_gray,
          'after': suggest_rgb_from_hex,
        },
        { 'desc': 'Use *-top/bottom instead of -webkit-*-before/after.',
          'test': suggest_top_or_bottom and webkit_before_or_after,
          'after': suggest_top_or_bottom,
        },
        { 'desc': 'Use "0" for zero-width lengths (i.e. 0px -> 0)',
          'test': zero_width_lengths,
          'multiline': True,
        },
    ]

    results = []
    affected_files = self.input_api.AffectedFiles(include_deletes=False,
                                                  file_filter=self.file_filter)
    files = []
    for f in affected_files:
      path = f.LocalPath()

      is_html = path.endswith('.html')
      if not is_html and not path.endswith('.css'):
        continue

      # Remove all /*comments*/, @at-keywords, and grit <if|include> tags; we're
      # not using a real parser. TODO(dbeam): Check alpha in <if> blocks.
      file_contents = _remove_all('\n'.join(f.NewContents()))

      # Handle CSS files and HTML files with inline styles.
      if is_html:
        file_contents = _extract_inline_style(file_contents)

      files.append((path, file_contents))

    for path, contents in files:
      file_errors = []
      for check in added_or_modified_files_checks:
        # If the check is multiline, it receives the whole file and gives us
        # back a list of things wrong. If the check isn't multiline, we pass it
        # each line and the check returns something truthy if there's an issue.
        if check.get('multiline'):
          assert 'after' not in check
          check_errors = check['test'](contents)
          if check_errors:
            file_errors.append('- %s\n%s' %
                (check['desc'], '\n'.join(check_errors).rstrip()))
        else:
          check_errors = []
          for line in contents.splitlines():
            if check['test'](line):
              error = ' ' + line.strip()
              if 'after' in check:
                error += check['after'](line)
              check_errors.append(error)
          if check_errors:
            file_errors.append('- %s\n%s' %
                (check['desc'], '\n'.join(check_errors)))
      if file_errors:
        results.append(self.output_api.PresubmitPromptWarning(
            '%s:\n%s' % (path, '\n\n'.join(file_errors))))

    return results
OLD | NEW |