Chromium Code Reviews

Unified Diff: third_party/closure_linter/closure_linter/statetracker.py

Issue 2592193002: Remove closure_linter from Chrome (Closed)
Patch Set: Created 4 years ago
Index: third_party/closure_linter/closure_linter/statetracker.py
diff --git a/third_party/closure_linter/closure_linter/statetracker.py b/third_party/closure_linter/closure_linter/statetracker.py
deleted file mode 100755
index 5730facb87dc7ea2d89b423167e5f1a799884b02..0000000000000000000000000000000000000000
--- a/third_party/closure_linter/closure_linter/statetracker.py
+++ /dev/null
@@ -1,1300 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Light weight EcmaScript state tracker that reads tokens and tracks state."""
-
-__author__ = ('robbyw@google.com (Robert Walker)',
- 'ajp@google.com (Andy Perelson)')
-
-import re
-
-from closure_linter import javascripttokenizer
-from closure_linter import javascripttokens
-from closure_linter import tokenutil
-from closure_linter import typeannotation
-
-# Shorthand
-Type = javascripttokens.JavaScriptTokenType
-
-
-class DocFlag(object):
- """Generic doc flag object.
-
- Attributes:
- flag_type: param, return, define, type, etc.
- flag_token: The flag token.
- type_start_token: The first token specifying the flag type,
- including braces.
- type_end_token: The last token specifying the flag type,
- including braces.
- type: The type spec string.
- jstype: The type spec, a TypeAnnotation instance.
- name_token: The token specifying the flag name.
- name: The flag name.
- description_start_token: The first token in the description.
- description_end_token: The end token in the description.
- description: The description.
- """
-
- # Please keep these lists alphabetized.
-
- # The list of standard jsdoc tags is from
- STANDARD_DOC = frozenset([
- 'author',
- 'bug',
- 'classTemplate',
- 'consistentIdGenerator',
- 'const',
- 'constructor',
- 'define',
- 'deprecated',
- 'dict',
- 'enum',
- 'export',
- 'expose',
- 'extends',
- 'externs',
- 'fileoverview',
- 'idGenerator',
- 'implements',
- 'implicitCast',
- 'interface',
- 'lends',
- 'license',
- 'ngInject', # This annotation is specific to AngularJS.
- 'noalias',
- 'nocompile',
- 'nosideeffects',
- 'override',
- 'owner',
- 'nocollapse',
- 'package',
- 'param',
- 'polymerBehavior', # This annotation is specific to Polymer.
- 'preserve',
- 'private',
- 'protected',
- 'public',
- 'return',
- 'see',
- 'stableIdGenerator',
- 'struct',
- 'supported',
- 'template',
- 'this',
- 'type',
- 'typedef',
- 'unrestricted',
- ])
-
- ANNOTATION = frozenset(['preserveTry', 'suppress'])
-
- LEGAL_DOC = STANDARD_DOC | ANNOTATION
-
- # Includes all Closure Compiler @suppress types.
- # Not all of these annotations are interpreted by Closure Linter.
- #
- # Specific cases:
- # - accessControls is supported by the compiler at the expression
- # and method level to suppress warnings about private/protected
- # access (method level applies to all references in the method).
- # The linter mimics the compiler behavior.
- SUPPRESS_TYPES = frozenset([
- 'accessControls',
- 'ambiguousFunctionDecl',
- 'checkDebuggerStatement',
- 'checkRegExp',
- 'checkStructDictInheritance',
- 'checkTypes',
- 'checkVars',
- 'const',
- 'constantProperty',
- 'deprecated',
- 'duplicate',
- 'es5Strict',
- 'externsValidation',
- 'extraProvide',
- 'extraRequire',
- 'fileoverviewTags',
- 'globalThis',
- 'internetExplorerChecks',
- 'invalidCasts',
- 'missingProperties',
- 'missingProvide',
- 'missingRequire',
- 'missingReturn',
- 'nonStandardJsDocs',
- 'reportUnknownTypes',
- 'strictModuleDepCheck',
- 'suspiciousCode',
- 'tweakValidation',
- 'typeInvalidation',
- 'undefinedNames',
- 'undefinedVars',
- 'underscore',
- 'unknownDefines',
- 'unnecessaryCasts',
- 'unusedPrivateMembers',
- 'uselessCode',
- 'visibility',
- 'with',
- ])
-
- HAS_DESCRIPTION = frozenset([
- 'define',
- 'deprecated',
- 'desc',
- 'fileoverview',
- 'license',
- 'param',
- 'preserve',
- 'return',
- 'supported',
- ])
-
- # Docflags whose argument should be parsed using the typeannotation parser.
- HAS_TYPE = frozenset([
- 'const',
- 'define',
- 'enum',
- 'export',
- 'extends',
- 'final',
- 'implements',
- 'mods',
- 'package',
- 'param',
- 'private',
- 'protected',
- 'public',
- 'return',
- 'suppress',
- 'type',
- 'typedef',
- ])
-
- # Docflags for which it's ok to omit the type (flag without an argument).
- CAN_OMIT_TYPE = frozenset([
- 'const',
- 'enum',
- 'export',
- 'final',
- 'package',
- 'private',
- 'protected',
- 'public',
- 'suppress', # We'll raise a separate INCORRECT_SUPPRESS_SYNTAX instead.
- ])
-
- # Docflags that only take a type as an argument and should not parse a
- # following description.
- TYPE_ONLY = frozenset([
- 'const',
- 'enum',
- 'extends',
- 'implements',
- 'package',
- 'suppress',
- 'type',
- ])
-
- HAS_NAME = frozenset(['param'])
-
- EMPTY_COMMENT_LINE = re.compile(r'^\s*\*?\s*$')
- EMPTY_STRING = re.compile(r'^\s*$')
-
- def __init__(self, flag_token, error_handler=None):
- """Creates the DocFlag object and attaches it to the given start token.
-
- Args:
- flag_token: The starting token of the flag.
- error_handler: An optional error handler for errors occurring while
- parsing the doctype.
- """
- self.flag_token = flag_token
- self.flag_type = flag_token.string.strip().lstrip('@')
-
- # Extract type, if applicable.
- self.type = None
- self.jstype = None
- self.type_start_token = None
- self.type_end_token = None
- if self.flag_type in self.HAS_TYPE:
- brace = tokenutil.SearchUntil(flag_token, [Type.DOC_START_BRACE],
- Type.FLAG_ENDING_TYPES)
- if brace:
- end_token, contents = _GetMatchingEndBraceAndContents(brace)
- self.type = contents
- self.jstype = typeannotation.Parse(brace, end_token,
- error_handler)
- self.type_start_token = brace
- self.type_end_token = end_token
- elif (self.flag_type in self.TYPE_ONLY and
- flag_token.next.type not in Type.FLAG_ENDING_TYPES and
- flag_token.line_number == flag_token.next.line_number):
- # b/10407058. If the flag is expected to be followed by a type then
- # search for type in same line only. If no token after flag in same
- # line then conclude that no type is specified.
- self.type_start_token = flag_token.next
- self.type_end_token, self.type = _GetEndTokenAndContents(
- self.type_start_token)
- if self.type is not None:
- self.type = self.type.strip()
- self.jstype = typeannotation.Parse(flag_token, self.type_end_token,
- error_handler)
-
- # Extract name, if applicable.
- self.name_token = None
- self.name = None
- if self.flag_type in self.HAS_NAME:
- # Handle bad case, name could be immediately after flag token.
- self.name_token = _GetNextPartialIdentifierToken(flag_token)
-
- # Handle the good case: if the found token is after the type start, look for
- # an identifier (substring to cover cases like [cnt] b/4197272) after the
- # type end, since types contain identifiers.
- if (self.type and self.name_token and
- tokenutil.Compare(self.name_token, self.type_start_token) > 0):
- self.name_token = _GetNextPartialIdentifierToken(self.type_end_token)
-
- if self.name_token:
- self.name = self.name_token.string
-
- # Extract description, if applicable.
- self.description_start_token = None
- self.description_end_token = None
- self.description = None
- if self.flag_type in self.HAS_DESCRIPTION:
- search_start_token = flag_token
- if self.name_token and self.type_end_token:
- if tokenutil.Compare(self.type_end_token, self.name_token) > 0:
- search_start_token = self.type_end_token
- else:
- search_start_token = self.name_token
- elif self.name_token:
- search_start_token = self.name_token
- elif self.type:
- search_start_token = self.type_end_token
-
- interesting_token = tokenutil.Search(search_start_token,
- Type.FLAG_DESCRIPTION_TYPES | Type.FLAG_ENDING_TYPES)
- if interesting_token.type in Type.FLAG_DESCRIPTION_TYPES:
- self.description_start_token = interesting_token
- self.description_end_token, self.description = (
- _GetEndTokenAndContents(interesting_token))
-
- def HasType(self):
- """Returns whether this flag should have a type annotation."""
- return self.flag_type in self.HAS_TYPE
-
- def __repr__(self):
- return '<Flag: %s, type:%s>' % (self.flag_type, repr(self.jstype))
-
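For orientation, roughly how a common flag maps onto the attributes above (illustrative values only; real DocFlag instances are built from linter tokens, not from strings):

# Source comment line:   * @param {Array.<string>} names The names to process.
# Resulting DocFlag, approximately:
#   flag_type   == 'param'
#   type        == 'Array.<string>'
#   name        == 'names'
#   description == 'The names to process.'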
-
-class DocComment(object):
- """JavaScript doc comment object.
-
- Attributes:
- ordered_params: Ordered list of parameters documented.
- start_token: The token that starts the doc comment.
- end_token: The token that ends the doc comment.
- suppressions: Map of suppression type to the token that added it.
- """
- def __init__(self, start_token):
- """Create the doc comment object.
-
- Args:
- start_token: The first token in the doc comment.
- """
- self.__flags = []
- self.start_token = start_token
- self.end_token = None
- self.suppressions = {}
- self.invalidated = False
-
- @property
- def ordered_params(self):
- """Gives the list of parameter names as a list of strings."""
- params = []
- for flag in self.__flags:
- if flag.flag_type == 'param' and flag.name:
- params.append(flag.name)
- return params
-
- def Invalidate(self):
- """Indicate that the JSDoc is well-formed but we had problems parsing it.
-
- This is a short-circuiting mechanism so that we don't emit false
- positives about well-formed doc comments just because we don't support
- hot new syntaxes.
- """
- self.invalidated = True
-
- def IsInvalidated(self):
- """Test whether Invalidate() has been called."""
- return self.invalidated
-
- def AddSuppression(self, token):
- """Add a new error suppression flag.
-
- Args:
- token: The suppression flag token.
- """
- flag = token and token.attached_object
- if flag and flag.jstype:
- for suppression in flag.jstype.IterIdentifiers():
- self.suppressions[suppression] = token
-
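A hedged illustration of what AddSuppression records, assuming the type annotation parser yields each identifier of a union type:

# For a flag parsed from "@suppress {visibility|underscore}",
# flag.jstype.IterIdentifiers() would yield 'visibility' and 'underscore',
# and both become keys of self.suppressions, each mapped to the flag token.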
- def SuppressionOnly(self):
- """Returns whether this comment contains only suppression flags."""
- if not self.__flags:
- return False
-
- for flag in self.__flags:
- if flag.flag_type != 'suppress':
- return False
-
- return True
-
- def AddFlag(self, flag):
- """Add a new document flag.
-
- Args:
- flag: DocFlag object.
- """
- self.__flags.append(flag)
-
- def InheritsDocumentation(self):
- """Test if the jsdoc implies documentation inheritance.
-
- Returns:
- True if documentation may be pulled off the superclass.
- """
- return self.HasFlag('inheritDoc') or self.HasFlag('override')
-
- def HasFlag(self, flag_type):
- """Test if the given flag has been set.
-
- Args:
- flag_type: The type of the flag to check.
-
- Returns:
- True if the flag is set.
- """
- for flag in self.__flags:
- if flag.flag_type == flag_type:
- return True
- return False
-
- def GetFlag(self, flag_type):
- """Gets the last flag of the given type.
-
- Args:
- flag_type: The type of the flag to get.
-
- Returns:
- The last instance of the given flag type in this doc comment.
- """
- for flag in reversed(self.__flags):
- if flag.flag_type == flag_type:
- return flag
-
- def GetDocFlags(self):
- """Return the doc flags for this comment."""
- return list(self.__flags)
-
- def _YieldDescriptionTokens(self):
- for token in self.start_token:
-
- if (token is self.end_token or
- token.type is javascripttokens.JavaScriptTokenType.DOC_FLAG or
- token.type not in javascripttokens.JavaScriptTokenType.COMMENT_TYPES):
- return
-
- if token.type not in [
- javascripttokens.JavaScriptTokenType.START_DOC_COMMENT,
- javascripttokens.JavaScriptTokenType.END_DOC_COMMENT,
- javascripttokens.JavaScriptTokenType.DOC_PREFIX]:
- yield token
-
- @property
- def description(self):
- return tokenutil.TokensToString(
- self._YieldDescriptionTokens())
-
- def GetTargetIdentifier(self):
- """Returns the identifier (as a string) that this is a comment for.
-
- Note that this method uses GetIdentifierForToken to get the full
- identifier, even if it is broken up by whitespace, newlines, or comments,
- and thus could be longer than GetTargetToken().string.
-
- Returns:
- The identifier for the token this comment is for.
- """
- token = self.GetTargetToken()
- if token:
- return tokenutil.GetIdentifierForToken(token)
-
- def GetTargetToken(self):
- """Get this comment's target token.
-
- Returns:
- The token that is the target of this comment, or None if there isn't one.
- """
-
- # File overviews describe the file, not a token.
- if self.HasFlag('fileoverview'):
- return
-
- skip_types = frozenset([
- Type.WHITESPACE,
- Type.BLANK_LINE,
- Type.START_PAREN])
-
- target_types = frozenset([
- Type.FUNCTION_NAME,
- Type.IDENTIFIER,
- Type.SIMPLE_LVALUE])
-
- token = self.end_token.next
- while token:
- if token.type in target_types:
- return token
-
- # Handles the case of a comment on "var foo = ...'
- if token.IsKeyword('var'):
- next_code_token = tokenutil.CustomSearch(
- token,
- lambda t: t.type not in Type.NON_CODE_TYPES)
-
- if (next_code_token and
- next_code_token.IsType(Type.SIMPLE_LVALUE)):
- return next_code_token
-
- return
-
- # Handles the case of a comment on "function foo () {}"
- if token.type is Type.FUNCTION_DECLARATION:
- next_code_token = tokenutil.CustomSearch(
- token,
- lambda t: t.type not in Type.NON_CODE_TYPES)
-
- if next_code_token.IsType(Type.FUNCTION_NAME):
- return next_code_token
-
- return
-
- # Skip types will end the search.
- if token.type not in skip_types:
- return
-
- token = token.next
-
- def CompareParameters(self, params):
- """Computes the edit distance and list from the function params to the docs.
-
- Uses the Levenshtein edit distance algorithm, with code modified from
- http://en.wikibooks.org/wiki/Algorithm_implementation/Strings/Levenshtein_distance#Python
-
- Args:
- params: The parameter list for the function declaration.
-
- Returns:
- The edit distance, the edit list.
- """
- source_len, target_len = len(self.ordered_params), len(params)
- edit_lists = [[]]
- distance = [[]]
- for i in range(target_len+1):
- edit_lists[0].append(['I'] * i)
- distance[0].append(i)
-
- for j in range(1, source_len+1):
- edit_lists.append([['D'] * j])
- distance.append([j])
-
- for i in range(source_len):
- for j in range(target_len):
- cost = 1
- if self.ordered_params[i] == params[j]:
- cost = 0
-
- deletion = distance[i][j+1] + 1
- insertion = distance[i+1][j] + 1
- substitution = distance[i][j] + cost
-
- edit_list = None
- best = None
- if deletion <= insertion and deletion <= substitution:
- # Deletion is best.
- best = deletion
- edit_list = list(edit_lists[i][j+1])
- edit_list.append('D')
-
- elif insertion <= substitution:
- # Insertion is best.
- best = insertion
- edit_list = list(edit_lists[i+1][j])
- edit_list.append('I')
- edit_lists[i+1].append(edit_list)
-
- else:
- # Substitution is best.
- best = substitution
- edit_list = list(edit_lists[i][j])
- if cost:
- edit_list.append('S')
- else:
- edit_list.append('=')
-
- edit_lists[i+1].append(edit_list)
- distance[i+1].append(best)
-
- return distance[source_len][target_len], edit_lists[source_len][target_len]
-
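For intuition, a minimal standalone sketch of the same distance-plus-edit-list computation (the helper name and structure are illustrative, not part of closure_linter's API):

def _edit_distance_and_list(source, target):
    # Classic Levenshtein DP table, carrying the operation list alongside the
    # distance: 'D' = delete from source, 'I' = insert from target,
    # 'S' = substitute, '=' = match.
    dist = [[0] * (len(target) + 1) for _ in range(len(source) + 1)]
    ops = [[[] for _ in range(len(target) + 1)] for _ in range(len(source) + 1)]
    for j in range(1, len(target) + 1):
        dist[0][j], ops[0][j] = j, ['I'] * j
    for i in range(1, len(source) + 1):
        dist[i][0], ops[i][0] = i, ['D'] * i
    for i in range(1, len(source) + 1):
        for j in range(1, len(target) + 1):
            cost = 0 if source[i - 1] == target[j - 1] else 1
            candidates = [
                (dist[i - 1][j] + 1, ops[i - 1][j] + ['D']),
                (dist[i][j - 1] + 1, ops[i][j - 1] + ['I']),
                (dist[i - 1][j - 1] + cost,
                 ops[i - 1][j - 1] + (['S'] if cost else ['='])),
            ]
            dist[i][j], ops[i][j] = min(candidates, key=lambda c: c[0])
    return dist[-1][-1], ops[-1][-1]

# Documented params ['a', 'b', 'c'] vs. declared params ['a', 'x', 'c']
# gives (1, ['=', 'S', '=']): one substitution reconciles the two lists.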
- def __repr__(self):
- """Returns a string representation of this object.
-
- Returns:
- A string representation of this object.
- """
- return '<DocComment: %s, %s>' % (
- str(self.ordered_params), str(self.__flags))
-
-
-#
-# Helper methods used by DocFlag and DocComment to parse out flag information.
-#
-
-
-def _GetMatchingEndBraceAndContents(start_brace):
- """Returns the matching end brace and contents between the two braces.
-
- If any FLAG_ENDING_TYPE token is encountered before a matching end brace, then
- that token is used as the matching ending token. Contents will have all
- comment prefixes stripped out of them, and all comment prefixes in between the
- start and end tokens will be split out into separate DOC_PREFIX tokens.
-
- Args:
- start_brace: The DOC_START_BRACE token immediately before desired contents.
-
- Returns:
- The matching ending token (DOC_END_BRACE or FLAG_ENDING_TYPE) and a string
- of the contents between the matching tokens, minus any comment prefixes.
- """
- open_count = 1
- close_count = 0
- contents = []
-
- # We don't consider the start brace part of the type string.
- token = start_brace.next
- while open_count != close_count:
- if token.type == Type.DOC_START_BRACE:
- open_count += 1
- elif token.type == Type.DOC_END_BRACE:
- close_count += 1
-
- if token.type != Type.DOC_PREFIX:
- contents.append(token.string)
-
- if token.type in Type.FLAG_ENDING_TYPES:
- break
- token = token.next
-
- # Don't include the end token (end brace, end doc comment, etc.) in type.
- token = token.previous
- contents = contents[:-1]
-
- return token, ''.join(contents)
-
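The loop above is essentially balanced-brace matching over tokens; a character-level sketch of the same idea on a plain string (hypothetical helper, for illustration only):

def _matching_brace_contents(text):
    # text starts at a '{'; return everything inside the matching '}'.
    assert text.startswith('{')
    depth = 0
    for i, ch in enumerate(text):
        if ch == '{':
            depth += 1
        elif ch == '}':
            depth -= 1
            if depth == 0:
                return text[1:i]
    return text[1:]  # unterminated type: keep whatever followed the brace

# _matching_brace_contents('{function(string): {length: number}} rest of line')
# returns 'function(string): {length: number}'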
-
-def _GetNextPartialIdentifierToken(start_token):
- """Returns the first token having identifier as substring after a token.
-
- Searches each token after the start to see if it contains an identifier.
- If one is found, that token is returned; if no identifier is found, returns None.
- Search is abandoned when a FLAG_ENDING_TYPE token is found.
-
- Args:
- start_token: The token to start searching after.
-
- Returns:
- The token found containing identifier, None otherwise.
- """
- token = start_token.next
-
- while token and token.type not in Type.FLAG_ENDING_TYPES:
- match = javascripttokenizer.JavaScriptTokenizer.IDENTIFIER.search(
- token.string)
- if match is not None and token.type == Type.COMMENT:
- return token
-
- token = token.next
-
- return None
-
-
-def _GetEndTokenAndContents(start_token):
- """Returns last content token and all contents before FLAG_ENDING_TYPE token.
-
- Comment prefixes are split into DOC_PREFIX tokens and stripped from the
- returned contents.
-
- Args:
- start_token: The token immediately before the first content token.
-
- Returns:
- The last content token and a string of all contents including start and
- end tokens, with comment prefixes stripped.
- """
- iterator = start_token
- last_line = iterator.line_number
- last_token = None
- contents = ''
- doc_depth = 0
- while not iterator.type in Type.FLAG_ENDING_TYPES or doc_depth > 0:
- if (iterator.IsFirstInLine() and
- DocFlag.EMPTY_COMMENT_LINE.match(iterator.line)):
- # If we have a blank comment line, consider that an implicit
- # ending of the description. This handles a case like:
- #
- # * @return {boolean} True
- # *
- # * Note: This is a sentence.
- #
- # The note is not part of the @return description, but there was
- # no definitive ending token. Rather there was a line containing
- # only a doc comment prefix or whitespace.
- break
-
- # b/2983692
- # don't prematurely match against a @flag if inside a doc flag
- # need to think about what is the correct behavior for unterminated
- # inline doc flags
- if (iterator.type == Type.DOC_START_BRACE and
- iterator.next.type == Type.DOC_INLINE_FLAG):
- doc_depth += 1
- elif (iterator.type == Type.DOC_END_BRACE and
- doc_depth > 0):
- doc_depth -= 1
-
- if iterator.type in Type.FLAG_DESCRIPTION_TYPES:
- contents += iterator.string
- last_token = iterator
-
- iterator = iterator.next
- if iterator.line_number != last_line:
- contents += '\n'
- last_line = iterator.line_number
-
- end_token = last_token
- if DocFlag.EMPTY_STRING.match(contents):
- contents = None
- else:
- # Strip trailing newline.
- contents = contents[:-1]
-
- return end_token, contents
-
-
-class Function(object):
- """Data about a JavaScript function.
-
- Attributes:
- block_depth: Block depth the function began at.
- doc: The DocComment associated with the function.
- has_return: If the function has a return value.
- has_this: If the function references the 'this' object.
- is_assigned: If the function is part of an assignment.
- is_constructor: If the function is a constructor.
- name: The name of the function, whether given in the function keyword or
- as the lvalue the function is assigned to.
- start_token: First token of the function (the 'function' keyword token).
- end_token: Last token of the function (the closing '}' token).
- parameters: List of parameter names.
- """
-
- def __init__(self, block_depth, is_assigned, doc, name):
- self.block_depth = block_depth
- self.is_assigned = is_assigned
- self.is_constructor = doc and doc.HasFlag('constructor')
- self.is_interface = doc and doc.HasFlag('interface')
- self.has_return = False
- self.has_throw = False
- self.has_this = False
- self.name = name
- self.doc = doc
- self.start_token = None
- self.end_token = None
- self.parameters = None
-
-
-class StateTracker(object):
- """EcmaScript state tracker.
-
- Tracks block depth, function names, etc. within an EcmaScript token stream.
- """
-
- OBJECT_LITERAL = 'o'
- CODE = 'c'
-
- def __init__(self, doc_flag=DocFlag):
- """Initializes a JavaScript token stream state tracker.
-
- Args:
- doc_flag: An optional custom DocFlag used for validating
- documentation flags.
- """
- self._doc_flag = doc_flag
- self.Reset()
-
- def Reset(self):
- """Resets the state tracker to prepare for processing a new page."""
- self._block_depth = 0
- self._is_block_close = False
- self._paren_depth = 0
- self._function_stack = []
- self._functions_by_name = {}
- self._last_comment = None
- self._doc_comment = None
- self._cumulative_params = None
- self._block_types = []
- self._last_non_space_token = None
- self._last_line = None
- self._first_token = None
- self._documented_identifiers = set()
- self._variables_in_scope = []
-
- def DocFlagPass(self, start_token, error_handler):
- """Parses doc flags.
-
- This pass needs to be executed before the aliaspass and we don't want to do
- a full-blown statetracker dry run for these.
-
- Args:
- start_token: The token at which to start iterating
- error_handler: An error handler for error reporting.
- """
- if not start_token:
- return
- doc_flag_types = (Type.DOC_FLAG, Type.DOC_INLINE_FLAG)
- for token in start_token:
- if token.type in doc_flag_types:
- token.attached_object = self._doc_flag(token, error_handler)
-
- def InFunction(self):
- """Returns true if the current token is within a function.
-
- Returns:
- True if the current token is within a function.
- """
- return bool(self._function_stack)
-
- def InConstructor(self):
- """Returns true if the current token is within a constructor.
-
- Returns:
- True if the current token is within a constructor.
- """
- return self.InFunction() and self._function_stack[-1].is_constructor
-
- def InInterfaceMethod(self):
- """Returns true if the current token is within an interface method.
-
- Returns:
- True if the current token is within an interface method.
- """
- if self.InFunction():
- if self._function_stack[-1].is_interface:
- return True
- else:
- name = self._function_stack[-1].name
- prototype_index = name.find('.prototype.')
- if prototype_index != -1:
- class_function_name = name[0:prototype_index]
- if (class_function_name in self._functions_by_name and
- self._functions_by_name[class_function_name].is_interface):
- return True
-
- return False
-
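A small worked example of the name split InInterfaceMethod relies on (illustrative names only):

name = 'app.Iface.prototype.run'
prototype_index = name.find('.prototype.')
class_function_name = name[0:prototype_index]
# class_function_name == 'app.Iface'; the current function counts as an
# interface method if the Function recorded under 'app.Iface' was declared
# with @interface.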
- def InTopLevelFunction(self):
- """Returns true if the current token is within a top level function.
-
- Returns:
- True if the current token is within a top level function.
- """
- return len(self._function_stack) == 1 and self.InTopLevel()
-
- def InAssignedFunction(self):
- """Returns true if the current token is within a function variable.
-
- Returns:
- True if the current token is within a function variable.
- """
- return self.InFunction() and self._function_stack[-1].is_assigned
-
- def IsFunctionOpen(self):
- """Returns true if the current token is a function block open.
-
- Returns:
- True if the current token is a function block open.
- """
- return (self._function_stack and
- self._function_stack[-1].block_depth == self._block_depth - 1)
-
- def IsFunctionClose(self):
- """Returns true if the current token is a function block close.
-
- Returns:
- True if the current token is a function block close.
- """
- return (self._function_stack and
- self._function_stack[-1].block_depth == self._block_depth)
-
- def InBlock(self):
- """Returns true if the current token is within a block.
-
- Returns:
- True if the current token is within a block.
- """
- return bool(self._block_depth)
-
- def IsBlockClose(self):
- """Returns true if the current token is a block close.
-
- Returns:
- True if the current token is a block close.
- """
- return self._is_block_close
-
- def InObjectLiteral(self):
- """Returns true if the current token is within an object literal.
-
- Returns:
- True if the current token is within an object literal.
- """
- return self._block_depth and self._block_types[-1] == self.OBJECT_LITERAL
-
- def InObjectLiteralDescendant(self):
- """Returns true if the current token has an object literal ancestor.
-
- Returns:
- True if the current token has an object literal ancestor.
- """
- return self.OBJECT_LITERAL in self._block_types
-
- def InParentheses(self):
- """Returns true if the current token is within parentheses.
-
- Returns:
- True if the current token is within parentheses.
- """
- return bool(self._paren_depth)
-
- def ParenthesesDepth(self):
- """Returns the number of parens surrounding the token.
-
- Returns:
- The number of parenthesis surrounding the token.
- """
- return self._paren_depth
-
- def BlockDepth(self):
- """Returns the number of blocks in which the token is nested.
-
- Returns:
- The number of blocks in which the token is nested.
- """
- return self._block_depth
-
- def FunctionDepth(self):
- """Returns the number of functions in which the token is nested.
-
- Returns:
- The number of functions in which the token is nested.
- """
- return len(self._function_stack)
-
- def InTopLevel(self):
- """Whether we are at the top level in the class.
-
- This function call is language specific. In some languages like
- JavaScript, a function is top level if it is not inside any parenthesis.
- In languages such as ActionScript, a function is top level if it is directly
- within a class.
- """
- raise TypeError('Abstract method InTopLevel not implemented')
-
- def GetBlockType(self, token):
- """Determine the block type given a START_BLOCK token.
-
- Code blocks come after parameters, keywords like else, and closing parens.
-
- Args:
- token: The current token. Can be assumed to be type START_BLOCK.
- Returns:
- Code block type for current token.
- """
- raise TypeError('Abstract method GetBlockType not implemented')
-
- def GetParams(self):
- """Returns the accumulated input params as an array.
-
- In some EcmaScript languages, input params are specified like
- (param:Type, param2:Type2, ...)
- while in others they are specified just as
- (param, param2)
- We handle both formats for specifying parameters here and leave
- it to the compilers for each language to detect compile errors.
- This allows more code to be reused between lint checkers for various
- EcmaScript languages.
-
- Returns:
- The accumulated input params as an array.
- """
- params = []
- if self._cumulative_params:
- params = re.compile(r'\s+').sub('', self._cumulative_params).split(',')
- # Strip out the type from parameters of the form name:Type.
- params = map(lambda param: param.split(':')[0], params)
-
- return params
-
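A short self-contained sketch of the normalization GetParams applies to the accumulated parameter text (made-up input; a Python 3 spelling of the same steps):

import re

cumulative_params = ' a : Number ,\n    b : String '  # e.g. from '(a:Number, b:String)'
params = re.sub(r'\s+', '', cumulative_params).split(',')
params = [param.split(':')[0] for param in params]
# params == ['a', 'b']; an untyped list such as '(a, b)' passes through
# as ['a', 'b'] unchanged.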
- def GetLastComment(self):
- """Return the last plain comment that could be used as documentation.
-
- Returns:
- The last plain comment that could be used as documentation.
- """
- return self._last_comment
-
- def GetDocComment(self):
- """Return the most recent applicable documentation comment.
-
- Returns:
- The last applicable documentation comment.
- """
- return self._doc_comment
-
- def HasDocComment(self, identifier):
- """Returns whether the identifier has been documented yet.
-
- Args:
- identifier: The identifier.
-
- Returns:
- Whether the identifier has been documented yet.
- """
- return identifier in self._documented_identifiers
-
- def InDocComment(self):
- """Returns whether the current token is in a doc comment.
-
- Returns:
- Whether the current token is in a doc comment.
- """
- return self._doc_comment and self._doc_comment.end_token is None
-
- def GetDocFlag(self):
- """Returns the current documentation flags.
-
- Returns:
- The current documentation flags.
- """
- return self._doc_flag
-
- def IsTypeToken(self, t):
- if self.InDocComment() and t.type not in (Type.START_DOC_COMMENT,
- Type.DOC_FLAG, Type.DOC_INLINE_FLAG, Type.DOC_PREFIX):
- f = tokenutil.SearchUntil(t, [Type.DOC_FLAG], [Type.START_DOC_COMMENT],
- None, True)
- if (f and f.attached_object.type_start_token is not None and
- f.attached_object.type_end_token is not None):
- return (tokenutil.Compare(t, f.attached_object.type_start_token) > 0 and
- tokenutil.Compare(t, f.attached_object.type_end_token) < 0)
- return False
-
- def GetFunction(self):
- """Return the function the current code block is a part of.
-
- Returns:
- The current Function object.
- """
- if self._function_stack:
- return self._function_stack[-1]
-
- def GetBlockDepth(self):
- """Return the block depth.
-
- Returns:
- The current block depth.
- """
- return self._block_depth
-
- def GetLastNonSpaceToken(self):
- """Return the last non whitespace token."""
- return self._last_non_space_token
-
- def GetLastLine(self):
- """Return the last line."""
- return self._last_line
-
- def GetFirstToken(self):
- """Return the very first token in the file."""
- return self._first_token
-
- def IsVariableInScope(self, token_string):
- """Checks if string is variable in current scope.
-
- For given string it checks whether the string is a defined variable
- (including function param) in current state.
-
- E.g. if variables defined (variables in current scope) is docs
- then docs, docs.length etc will be considered as variable in current
- scope. This will help in avoding extra goog.require for variables.
-
- Args:
- token_string: String to check if it is a variable in the current scope.
-
- Returns:
- True if the given string is a variable in the current scope.
- """
- for variable in self._variables_in_scope:
- if (token_string == variable
- or token_string.startswith(variable + '.')):
- return True
-
- return False
-
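For example, with 'docs' present in _variables_in_scope (hypothetical tracker state):

# tracker.IsVariableInScope('docs')         -> True   (exact match)
# tracker.IsVariableInScope('docs.length')  -> True   (member access on a scoped variable)
# tracker.IsVariableInScope('documents')    -> False  ('docs' is a prefix, but not 'docs.')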
- def HandleToken(self, token, last_non_space_token):
- """Handles the given token and updates state.
-
- Args:
- token: The token to handle.
- last_non_space_token: The last token processed that was not whitespace.
- """
- self._is_block_close = False
-
- if not self._first_token:
- self._first_token = token
-
- # Track block depth.
- type = token.type
- if type == Type.START_BLOCK:
- self._block_depth += 1
-
- # Subclasses need to handle block start very differently because
- # whether a block is a CODE or OBJECT_LITERAL block varies significantly
- # by language.
- self._block_types.append(self.GetBlockType(token))
-
- # When entering a function body, record its parameters.
- if self.InFunction():
- function = self._function_stack[-1]
- if self._block_depth == function.block_depth + 1:
- function.parameters = self.GetParams()
-
- # Track block depth.
- elif type == Type.END_BLOCK:
- self._is_block_close = not self.InObjectLiteral()
- self._block_depth -= 1
- self._block_types.pop()
-
- # Track parentheses depth.
- elif type == Type.START_PAREN:
- self._paren_depth += 1
-
- # Track parentheses depth.
- elif type == Type.END_PAREN:
- self._paren_depth -= 1
-
- elif type == Type.COMMENT:
- self._last_comment = token.string
-
- elif type == Type.START_DOC_COMMENT:
- self._last_comment = None
- self._doc_comment = DocComment(token)
-
- elif type == Type.END_DOC_COMMENT:
- self._doc_comment.end_token = token
-
- elif type in (Type.DOC_FLAG, Type.DOC_INLINE_FLAG):
- # Don't overwrite flags if they were already parsed in a previous pass.
- if token.attached_object is None:
- flag = self._doc_flag(token)
- token.attached_object = flag
- else:
- flag = token.attached_object
- self._doc_comment.AddFlag(flag)
-
- if flag.flag_type == 'suppress':
- self._doc_comment.AddSuppression(token)
-
- elif type == Type.FUNCTION_DECLARATION:
- last_code = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES, None,
- True)
- doc = None
- # Only top-level functions are eligible for documentation.
- if self.InTopLevel():
- doc = self._doc_comment
-
- name = ''
- is_assigned = last_code and (last_code.IsOperator('=') or
- last_code.IsOperator('||') or last_code.IsOperator('&&') or
- (last_code.IsOperator(':') and not self.InObjectLiteral()))
- if is_assigned:
- # TODO(robbyw): This breaks for x[2] = ...
- # Must use loop to find full function name in the case of line-wrapped
- # declarations (bug 1220601) like:
- # my.function.foo.
- # bar = function() ...
- identifier = tokenutil.Search(last_code, Type.SIMPLE_LVALUE, None, True)
- while identifier and tokenutil.IsIdentifierOrDot(identifier):
- name = identifier.string + name
- # Traverse behind us, skipping whitespace and comments.
- while True:
- identifier = identifier.previous
- if not identifier or not identifier.type in Type.NON_CODE_TYPES:
- break
-
- else:
- next_token = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES)
- while next_token and next_token.IsType(Type.FUNCTION_NAME):
- name += next_token.string
- next_token = tokenutil.Search(next_token, Type.FUNCTION_NAME, 2)
-
- function = Function(self._block_depth, is_assigned, doc, name)
- function.start_token = token
-
- self._function_stack.append(function)
- self._functions_by_name[name] = function
-
- # Add a delimiter in stack for scope variables to define start of
- # function. This helps in popping variables of this function when
- # function declaration ends.
- self._variables_in_scope.append('')
-
- elif type == Type.START_PARAMETERS:
- self._cumulative_params = ''
-
- elif type == Type.PARAMETERS:
- self._cumulative_params += token.string
- self._variables_in_scope.extend(self.GetParams())
-
- elif type == Type.KEYWORD and token.string == 'return':
- next_token = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES)
- if not next_token.IsType(Type.SEMICOLON):
- function = self.GetFunction()
- if function:
- function.has_return = True
-
- elif type == Type.KEYWORD and token.string == 'throw':
- function = self.GetFunction()
- if function:
- function.has_throw = True
-
- elif type == Type.KEYWORD and token.string == 'var':
- function = self.GetFunction()
- next_token = tokenutil.Search(token, [Type.IDENTIFIER,
- Type.SIMPLE_LVALUE])
-
- if next_token:
- if next_token.type == Type.SIMPLE_LVALUE:
- self._variables_in_scope.append(next_token.values['identifier'])
- else:
- self._variables_in_scope.append(next_token.string)
-
- elif type == Type.SIMPLE_LVALUE:
- identifier = token.values['identifier']
- jsdoc = self.GetDocComment()
- if jsdoc:
- self._documented_identifiers.add(identifier)
-
- self._HandleIdentifier(identifier, True)
-
- elif type == Type.IDENTIFIER:
- self._HandleIdentifier(token.string, False)
-
- # Detect documented non-assignments.
- next_token = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES)
- if next_token and next_token.IsType(Type.SEMICOLON):
- if (self._last_non_space_token and
- self._last_non_space_token.IsType(Type.END_DOC_COMMENT)):
- self._documented_identifiers.add(token.string)
-
- def _HandleIdentifier(self, identifier, is_assignment):
- """Process the given identifier.
-
- Currently checks if it references 'this' and annotates the function
- accordingly.
-
- Args:
- identifier: The identifier to process.
- is_assignment: Whether the identifier is being written to.
- """
- if identifier == 'this' or identifier.startswith('this.'):
- function = self.GetFunction()
- if function:
- function.has_this = True
-
- def HandleAfterToken(self, token):
- """Handle updating state after a token has been checked.
-
- This function should be used for destructive state changes such as
- deleting a tracked object.
-
- Args:
- token: The token to handle.
- """
- type = token.type
- if type == Type.SEMICOLON or type == Type.END_PAREN or (
- type == Type.END_BRACKET and
- self._last_non_space_token.type not in (
- Type.SINGLE_QUOTE_STRING_END, Type.DOUBLE_QUOTE_STRING_END,
- Type.TEMPLATE_STRING_END)):
- # We end on any numeric array index, but keep going for string based
- # array indices so that we pick up manually exported identifiers.
- self._doc_comment = None
- self._last_comment = None
-
- elif type == Type.END_BLOCK:
- self._doc_comment = None
- self._last_comment = None
-
- if self.InFunction() and self.IsFunctionClose():
- # TODO(robbyw): Detect the function's name for better errors.
- function = self._function_stack.pop()
- function.end_token = token
-
- # Pop all variables up to the delimiter ('') that were defined in the
- # function being closed, so they go out of scope.
- while self._variables_in_scope and self._variables_in_scope[-1]:
- self._variables_in_scope.pop()
-
- # Pop delimiter
- if self._variables_in_scope:
- self._variables_in_scope.pop()
-
- elif type == Type.END_PARAMETERS and self._doc_comment:
- self._doc_comment = None
- self._last_comment = None
-
- if not token.IsAnyType(Type.WHITESPACE, Type.BLANK_LINE):
- self._last_non_space_token = token
-
- self._last_line = token.line
