Chromium Code Reviews

Side by Side Diff: third_party/closure_linter/closure_linter/error_fixer.py

Issue 2592193002: Remove closure_linter from Chrome (Closed)
Patch Set: Created 3 years, 12 months ago
#!/usr/bin/env python
#
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Main class responsible for automatically fixing simple style violations."""

# Allow non-Google copyright
# pylint: disable=g-bad-file-header

__author__ = 'robbyw@google.com (Robert Walker)'

import re

import gflags as flags
from closure_linter import errors
from closure_linter import javascriptstatetracker
from closure_linter import javascripttokens
from closure_linter import requireprovidesorter
from closure_linter import tokenutil
from closure_linter.common import errorhandler

# Shorthand
Token = javascripttokens.JavaScriptToken
Type = javascripttokens.JavaScriptTokenType

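# Matches an optional closing brace plus any trailing whitespace at the end of
# a JsDoc flag's type annotation. (Defined here but not referenced elsewhere
# in this file.)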
END_OF_FLAG_TYPE = re.compile(r'(}?\s*)$')

# Regex matching the common mistake of inverting the author name and email,
# as in: @author User Name (user@company)
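# For example, a hypothetical '  Jane Doe (jdoe@example.com)' is rewritten by
# the INVALID_AUTHOR_TAG_DESCRIPTION fix below as
# '  jdoe@example.com (Jane Doe)'.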
INVERTED_AUTHOR_SPEC = re.compile(r'(?P<leading_whitespace>\s*)'
                                  r'(?P<name>[^(]+)'
                                  r'(?P<whitespace_after_name>\s+)'
                                  r'\('
                                  r'(?P<email>[^\s]+@[^)\s]+)'
                                  r'\)'
                                  r'(?P<trailing_characters>.*)')

FLAGS = flags.FLAGS
flags.DEFINE_boolean('disable_indentation_fixing', False,
                     'Whether to disable automatic fixing of indentation.')
flags.DEFINE_list('fix_error_codes', [], 'A list of specific error codes to '
                  'fix. Defaults to all supported error codes when empty. '
                  'See errors.py for a list of error codes.')


class ErrorFixer(errorhandler.ErrorHandler):
  """Object that fixes simple style errors."""

  def __init__(self, external_file=None):
    """Initialize the error fixer.

    Args:
      external_file: If included, all output will be directed to this file
          instead of overwriting the files the errors are found in.
    """
    errorhandler.ErrorHandler.__init__(self)

    self._file_name = None
    self._file_token = None
    self._external_file = external_file

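    # Translate user-supplied error names into numeric codes up front so that
    # unknown names fail fast.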
    try:
      self._fix_error_codes = set([errors.ByName(error.upper()) for error in
                                   FLAGS.fix_error_codes])
    except KeyError as ke:
      raise ValueError('Unknown error code ' + ke.args[0])

  def HandleFile(self, filename, first_token):
    """Notifies this ErrorFixer that subsequent errors are in filename.

    Args:
      filename: The name of the file about to be checked.
      first_token: The first token in the file.
    """
    self._file_name = filename
    self._file_is_html = filename.endswith('.html') or filename.endswith('.htm')
    self._file_token = first_token
    self._file_fix_count = 0
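    # Lines modified by fixes; FinishFile uses this to warn when a fix pushes
    # a line past 80 characters.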
    self._file_changed_lines = set()

  def _AddFix(self, tokens):
    """Adds the fix to the internal count.

    Args:
      tokens: The token or sequence of tokens changed to fix an error.
    """
    self._file_fix_count += 1
    if hasattr(tokens, 'line_number'):
      self._file_changed_lines.add(tokens.line_number)
    else:
      for token in tokens:
        self._file_changed_lines.add(token.line_number)

  def _FixJsDocPipeNull(self, js_type):
    """Change number|null or null|number to ?number.

    Args:
      js_type: The typeannotation.TypeAnnotation instance to fix.
    """

    # Recurse into all sub_types if the error was at a deeper level.
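    # (Python 2's map() is eager, so the recursive fixes run immediately;
    # under Python 3 this would need list() or an explicit loop.)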
    map(self._FixJsDocPipeNull, js_type.IterTypes())

    if js_type.type_group and len(js_type.sub_types) == 2:
      # Find and remove the null sub_type:
      sub_type = None
      for sub_type in js_type.sub_types:
        if sub_type.identifier == 'null':
          map(tokenutil.DeleteToken, sub_type.tokens)
          self._AddFix(sub_type.tokens)
          break
      else:
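        # The for-else clause runs only when the loop finished without a
        # break, i.e. no 'null' sub_type was found, so there is nothing to fix.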
        return

      first_token = js_type.FirstToken()
      question_mark = Token('?', Type.DOC_TYPE_MODIFIER, first_token.line,
                            first_token.line_number)
      tokenutil.InsertTokenBefore(question_mark, first_token)
      js_type.tokens.insert(0, question_mark)
      js_type.tokens.remove(sub_type)
      js_type.sub_types.remove(sub_type)
      js_type.or_null = True

      # Now also remove the separator, which is in the parent's token list,
      # either before or after the sub_type; there is exactly one. Scan for it.
      for token in js_type.tokens:
        if (token and isinstance(token, Token) and
            token.type == Type.DOC_TYPE_MODIFIER and token.string == '|'):
          tokenutil.DeleteToken(token)
          js_type.tokens.remove(token)
          self._AddFix(token)
          break

  def HandleError(self, error):
    """Attempts to fix the error.

    Args:
      error: The error object.
    """
    code = error.code
    token = error.token

    if self._fix_error_codes and code not in self._fix_error_codes:
      return

    if code == errors.JSDOC_PREFER_QUESTION_TO_PIPE_NULL:
      self._FixJsDocPipeNull(token.attached_object.jstype)

    elif code == errors.JSDOC_MISSING_OPTIONAL_TYPE:
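      # Optional JsDoc parameter types must end with '='; e.g. a hypothetical
      # {number} annotation on an optional parameter becomes {number=}.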
      iterator = token.attached_object.type_end_token
      if iterator.type == Type.DOC_END_BRACE or iterator.string.isspace():
        iterator = iterator.previous

      ending_space = len(iterator.string) - len(iterator.string.rstrip())
      iterator.string = '%s=%s' % (iterator.string.rstrip(),
                                   ' ' * ending_space)

      # Create a new flag object with updated type info.
      token.attached_object = javascriptstatetracker.JsDocFlag(token)
      self._AddFix(token)

    elif code == errors.JSDOC_MISSING_VAR_ARGS_TYPE:
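      # Var-args JsDoc types must start with '...'; e.g. a hypothetical
      # {number} annotation on a rest parameter becomes {...number}.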
      iterator = token.attached_object.type_start_token
      if iterator.type == Type.DOC_START_BRACE or iterator.string.isspace():
        iterator = iterator.next

      starting_space = len(iterator.string) - len(iterator.string.lstrip())
      iterator.string = '%s...%s' % (' ' * starting_space,
                                     iterator.string.lstrip())

      # Create a new flag object with updated type info.
      token.attached_object = javascriptstatetracker.JsDocFlag(token)
      self._AddFix(token)

    elif code in (errors.MISSING_SEMICOLON_AFTER_FUNCTION,
                  errors.MISSING_SEMICOLON):
      semicolon_token = Token(';', Type.SEMICOLON, token.line,
                              token.line_number)
      tokenutil.InsertTokenAfter(semicolon_token, token)
      token.metadata.is_implied_semicolon = False
      semicolon_token.metadata.is_implied_semicolon = False
      self._AddFix(token)

    elif code in (errors.ILLEGAL_SEMICOLON_AFTER_FUNCTION,
                  errors.REDUNDANT_SEMICOLON,
                  errors.COMMA_AT_END_OF_LITERAL):
      self._DeleteToken(token)
      self._AddFix(token)

    elif code == errors.INVALID_JSDOC_TAG:
      if token.string == '@returns':
        token.string = '@return'
        self._AddFix(token)

    elif code == errors.FILE_MISSING_NEWLINE:
      # This error is fixed implicitly by the way we restore the file.
      self._AddFix(token)

    elif code == errors.MISSING_SPACE:
      if error.fix_data:
        token.string = error.fix_data
        self._AddFix(token)
      elif error.position:
        if error.position.IsAtBeginning():
          tokenutil.InsertSpaceTokenAfter(token.previous)
        elif error.position.IsAtEnd(token.string):
          tokenutil.InsertSpaceTokenAfter(token)
        else:
          token.string = error.position.Set(token.string, ' ')
        self._AddFix(token)

    elif code == errors.EXTRA_SPACE:
      if error.position:
        token.string = error.position.Set(token.string, '')
        self._AddFix(token)

    elif code == errors.MISSING_LINE:
      if error.position.IsAtBeginning():
        tokenutil.InsertBlankLineAfter(token.previous)
      else:
        tokenutil.InsertBlankLineAfter(token)
      self._AddFix(token)

    elif code == errors.EXTRA_LINE:
      self._DeleteToken(token)
      self._AddFix(token)

    elif code == errors.WRONG_BLANK_LINE_COUNT:
      if not token.previous:
        # TODO(user): Add an insertBefore method to tokenutil.
        return

      num_lines = error.fix_data
      should_delete = False

      if num_lines < 0:
        num_lines *= -1
        should_delete = True

      for unused_i in xrange(1, num_lines + 1):
        if should_delete:
          # TODO(user): DeleteToken should update line numbers.
          self._DeleteToken(token.previous)
        else:
          tokenutil.InsertBlankLineAfter(token.previous)
      self._AddFix(token)

    elif code == errors.UNNECESSARY_DOUBLE_QUOTED_STRING:
      end_quote = tokenutil.Search(token, Type.DOUBLE_QUOTE_STRING_END)
      if end_quote:
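        # Swap the double-quote delimiter tokens for single-quote ones; the
        # string's content tokens between them are left untouched.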
        single_quote_start = Token(
            "'", Type.SINGLE_QUOTE_STRING_START, token.line, token.line_number)
        single_quote_end = Token(
            "'", Type.SINGLE_QUOTE_STRING_END, end_quote.line,
            end_quote.line_number)

        tokenutil.InsertTokenAfter(single_quote_start, token)
        tokenutil.InsertTokenAfter(single_quote_end, end_quote)
        self._DeleteToken(token)
        self._DeleteToken(end_quote)
        self._AddFix([token, end_quote])

    elif code == errors.MISSING_BRACES_AROUND_TYPE:
      fixed_tokens = []
      start_token = token.attached_object.type_start_token

      if start_token.type != Type.DOC_START_BRACE:
        leading_space = (
            len(start_token.string) - len(start_token.string.lstrip()))
        if leading_space:
          start_token = tokenutil.SplitToken(start_token, leading_space)
          # Fix case where start and end token were the same.
          if token.attached_object.type_end_token == start_token.previous:
            token.attached_object.type_end_token = start_token

        new_token = Token('{', Type.DOC_START_BRACE, start_token.line,
                          start_token.line_number)
        tokenutil.InsertTokenAfter(new_token, start_token.previous)
        token.attached_object.type_start_token = new_token
        fixed_tokens.append(new_token)

      end_token = token.attached_object.type_end_token
      if end_token.type != Type.DOC_END_BRACE:
        # If the start token was a brace, the end token will be a
        # FLAG_ENDING_TYPE token; if there wasn't a starting brace, then
        # the end token is the last token of the actual type.
        last_type = end_token
        if not fixed_tokens:
          last_type = end_token.previous

        while last_type.string.isspace():
          last_type = last_type.previous

        # If there was no starting brace then a lone end brace wouldn't have
        # been the type end token. Now that we've added any missing start
        # brace, see if the last effective type token was an end brace.
        if last_type.type != Type.DOC_END_BRACE:
          trailing_space = (len(last_type.string) -
                            len(last_type.string.rstrip()))
          if trailing_space:
            tokenutil.SplitToken(last_type,
                                 len(last_type.string) - trailing_space)

          new_token = Token('}', Type.DOC_END_BRACE, last_type.line,
                            last_type.line_number)
          tokenutil.InsertTokenAfter(new_token, last_type)
          token.attached_object.type_end_token = new_token
          fixed_tokens.append(new_token)

      self._AddFix(fixed_tokens)

    elif code == errors.LINE_STARTS_WITH_OPERATOR:
      # Remove whitespace following the operator so the line starts clean.
      self._StripSpace(token, before=False)

      # Remove the operator.
      tokenutil.DeleteToken(token)
      self._AddFix(token)

      insertion_point = tokenutil.GetPreviousCodeToken(token)

      # Insert a space between the previous token and the new operator.
      space = Token(' ', Type.WHITESPACE, insertion_point.line,
                    insertion_point.line_number)
      tokenutil.InsertTokenAfter(space, insertion_point)

      # Insert the operator on the end of the previous line.
      new_token = Token(token.string, token.type, insertion_point.line,
                        insertion_point.line_number)
      tokenutil.InsertTokenAfter(new_token, space)
      self._AddFix(new_token)

    elif code == errors.LINE_ENDS_WITH_DOT:
      # Remove whitespace preceding the dot so the line has no trailing
      # whitespace.
      self._StripSpace(token, before=True)

      # Remove the dot.
      tokenutil.DeleteToken(token)
      self._AddFix(token)

      insertion_point = tokenutil.GetNextCodeToken(token)

      # Insert the dot at the beginning of the next line of code.
      new_token = Token(token.string, token.type, insertion_point.line,
                        insertion_point.line_number)
      tokenutil.InsertTokenBefore(new_token, insertion_point)
      self._AddFix(new_token)

    elif code == errors.GOOG_REQUIRES_NOT_ALPHABETIZED:
      require_start_token = error.fix_data
      sorter = requireprovidesorter.RequireProvideSorter()
      sorter.FixRequires(require_start_token)

      self._AddFix(require_start_token)

    elif code == errors.GOOG_PROVIDES_NOT_ALPHABETIZED:
      provide_start_token = error.fix_data
      sorter = requireprovidesorter.RequireProvideSorter()
      sorter.FixProvides(provide_start_token)

      self._AddFix(provide_start_token)

    elif code == errors.UNNECESSARY_BRACES_AROUND_INHERIT_DOC:
      if token.previous.string == '{' and token.next.string == '}':
        self._DeleteToken(token.previous)
        self._DeleteToken(token.next)
        self._AddFix([token])

    elif code == errors.INVALID_AUTHOR_TAG_DESCRIPTION:
      match = INVERTED_AUTHOR_SPEC.match(token.string)
      if match:
        token.string = '%s%s%s(%s)%s' % (match.group('leading_whitespace'),
                                         match.group('email'),
                                         match.group('whitespace_after_name'),
                                         match.group('name'),
                                         match.group('trailing_characters'))
        self._AddFix(token)

    elif (code == errors.WRONG_INDENTATION and
          not FLAGS.disable_indentation_fixing):
      token = tokenutil.GetFirstTokenInSameLine(token)
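      # As used here, error.position.start holds the actual indentation and
      # error.position.length the expected indentation, in spaces.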
      actual = error.position.start
      expected = error.position.length

      # Handle the case where the first token is a parameter but has leading
      # spaces.
      if (len(token.string.lstrip()) == len(token.string) - actual and
          token.string.lstrip()):
        token.string = token.string.lstrip()
        actual = 0

      if token.type in (Type.WHITESPACE, Type.PARAMETERS) and actual != 0:
        token.string = token.string.lstrip() + (' ' * expected)
        self._AddFix([token])
      else:
        # We need to add indentation.
        new_token = Token(' ' * expected, Type.WHITESPACE,
                          token.line, token.line_number)
        # Note that we'll never need to add indentation at the first line,
        # since it will never be indented. Therefore it's safe to assume
        # token.previous exists.
        tokenutil.InsertTokenAfter(new_token, token.previous)
        self._AddFix([token])

    elif code in [errors.MALFORMED_END_OF_SCOPE_COMMENT,
                  errors.MISSING_END_OF_SCOPE_COMMENT]:
      # Only fix cases where }); is found with no trailing content on the line
      # other than a comment. Value of 'token' is set to } for this error.
      if (token.type == Type.END_BLOCK and
          token.next.type == Type.END_PAREN and
          token.next.next.type == Type.SEMICOLON):
        current_token = token.next.next.next
        removed_tokens = []
        while current_token and current_token.line_number == token.line_number:
          if current_token.IsAnyType(Type.WHITESPACE,
                                     Type.START_SINGLE_LINE_COMMENT,
                                     Type.COMMENT):
            removed_tokens.append(current_token)
            current_token = current_token.next
          else:
            return

        if removed_tokens:
          self._DeleteTokens(removed_tokens[0], len(removed_tokens))

        whitespace_token = Token(' ', Type.WHITESPACE, token.line,
                                 token.line_number)
        start_comment_token = Token('//', Type.START_SINGLE_LINE_COMMENT,
                                    token.line, token.line_number)
        comment_token = Token(' goog.scope', Type.COMMENT, token.line,
                              token.line_number)
        insertion_tokens = [whitespace_token, start_comment_token,
                            comment_token]

        tokenutil.InsertTokensAfter(insertion_tokens, token.next.next)
        self._AddFix(removed_tokens + insertion_tokens)

    elif code in [errors.EXTRA_GOOG_PROVIDE, errors.EXTRA_GOOG_REQUIRE]:
      tokens_in_line = tokenutil.GetAllTokensInSameLine(token)
      num_delete_tokens = len(tokens_in_line)
      # If the line being deleted is both preceded and followed by blank
      # lines, delete one of the blank lines as well.
      if (tokens_in_line[0].previous and tokens_in_line[-1].next
          and tokens_in_line[0].previous.type == Type.BLANK_LINE
          and tokens_in_line[-1].next.type == Type.BLANK_LINE):
        num_delete_tokens += 1
      self._DeleteTokens(tokens_in_line[0], num_delete_tokens)
      self._AddFix(tokens_in_line)

    elif code in [errors.MISSING_GOOG_PROVIDE, errors.MISSING_GOOG_REQUIRE]:
      missing_namespaces = error.fix_data[0]
      need_blank_line = error.fix_data[1] or (not token.previous)

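      # A zero-width dummy token gives a stable place to insert before
      # 'token'; it is removed again at the end of this branch.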
      insert_location = Token('', Type.NORMAL, '', token.line_number - 1)
      dummy_first_token = insert_location
      tokenutil.InsertTokenBefore(insert_location, token)

      # If inserting a blank line, check that a blank line does not already
      # exist before the token, to avoid extra blank lines.
      if (need_blank_line and insert_location.previous
          and insert_location.previous.type != Type.BLANK_LINE):
        tokenutil.InsertBlankLineAfter(insert_location)
        insert_location = insert_location.next

      for missing_namespace in missing_namespaces:
        new_tokens = self._GetNewRequireOrProvideTokens(
            code == errors.MISSING_GOOG_PROVIDE,
            missing_namespace, insert_location.line_number + 1)
        tokenutil.InsertLineAfter(insert_location, new_tokens)
        insert_location = new_tokens[-1]
        self._AddFix(new_tokens)

      # If inserting a blank line, check that a blank line does not already
      # exist after the token, to avoid extra blank lines.
      if (need_blank_line and insert_location.next
          and insert_location.next.type != Type.BLANK_LINE):
        tokenutil.InsertBlankLineAfter(insert_location)

      tokenutil.DeleteToken(dummy_first_token)

  def _StripSpace(self, token, before):
    """Strip whitespace tokens either preceding or following the given token.

    Args:
      token: The token.
      before: If true, strip space before the token; if false, after it.
    """
    token = token.previous if before else token.next
    while token and token.type == Type.WHITESPACE:
      tokenutil.DeleteToken(token)
      token = token.previous if before else token.next

  def _GetNewRequireOrProvideTokens(self, is_provide, namespace, line_number):
    """Returns a list of tokens to create a goog.require/provide statement.

    Args:
      is_provide: True if getting tokens for a provide, False for require.
      namespace: The required or provided namespace to get tokens for.
      line_number: The line number the new require or provide statement will be
          on.

    Returns:
      Tokens to create a new goog.require or goog.provide statement.
    """
    string = 'goog.require'
    if is_provide:
      string = 'goog.provide'
    line_text = string + '(\'' + namespace + '\');\n'
    return [
        Token(string, Type.IDENTIFIER, line_text, line_number),
        Token('(', Type.START_PAREN, line_text, line_number),
        Token('\'', Type.SINGLE_QUOTE_STRING_START, line_text, line_number),
        Token(namespace, Type.STRING_TEXT, line_text, line_number),
        Token('\'', Type.SINGLE_QUOTE_STRING_END, line_text, line_number),
        Token(')', Type.END_PAREN, line_text, line_number),
        Token(';', Type.SEMICOLON, line_text, line_number)
    ]

  def _DeleteToken(self, token):
    """Deletes the specified token from the linked list of tokens.

    Updates instance variables pointing to tokens such as _file_token if
    they reference the deleted token.

    Args:
      token: The token to delete.
    """
    if token == self._file_token:
      self._file_token = token.next

    tokenutil.DeleteToken(token)

  def _DeleteTokens(self, token, token_count):
    """Deletes the given number of tokens starting with the given token.

    Updates instance variables pointing to tokens such as _file_token if
    they reference the deleted token.

    Args:
      token: The first token to delete.
      token_count: The total number of tokens to delete.
    """
    if token == self._file_token:
      for unused_i in xrange(token_count):
        self._file_token = self._file_token.next

    tokenutil.DeleteTokens(token, token_count)

  def FinishFile(self):
    """Called when the current file has finished style checking.

    Used to go back and fix any errors in the file. It currently supports both
    JS and HTML files. For JS files it does a simple dump of all tokens, but in
    order to support HTML files, it needs to merge the original file back
    together with the new token set. This works because the tokenized HTML file
    is the original HTML file with all non-JS lines kept, but blanked out to
    one blank-line token per line of HTML.
    """
    if self._file_fix_count:
      # Get the original file content for html.
      if self._file_is_html:
        f = open(self._file_name, 'r')
        original_lines = f.readlines()
        f.close()

      f = self._external_file
      if not f:
        error_noun = 'error' if self._file_fix_count == 1 else 'errors'
        print 'Fixed %d %s in %s' % (
            self._file_fix_count, error_noun, self._file_name)
        f = open(self._file_name, 'w')

      token = self._file_token
      # Find the first token that has not been deleted.
      while token.is_deleted:
        token = token.next
      # If something was inserted before the first token (e.g. due to
      # sorting), move back to the start. Bug 8398202.
      while token.previous:
        token = token.previous
      char_count = 0
      line = ''
      while token:
        line += token.string
        char_count += len(token.string)

        if token.IsLastInLine():
          # We distinguish whether a blank line in HTML came from the stripped
          # original file or from a newly added error fix by looking at the
          # "orig_line_number" field on the token. It is only set in the
          # tokenizer, so for all error fixes the value should be None.
          if (line or not self._file_is_html or
              token.orig_line_number is None):
            f.write(line)
            f.write('\n')
          else:
            f.write(original_lines[token.orig_line_number - 1])
          line = ''
          if char_count > 80 and token.line_number in self._file_changed_lines:
            print 'WARNING: Line %d of %s is now longer than 80 characters.' % (
                token.line_number, self._file_name)

          char_count = 0

        token = token.next

      if not self._external_file:
        # Close the file if we created it.
        f.close()