Index: third_party/coverage-3.7.1/coverage/phystokens.py
diff --git a/third_party/coverage-3.6/coverage/phystokens.py b/third_party/coverage-3.7.1/coverage/phystokens.py
similarity index 93%
rename from third_party/coverage-3.6/coverage/phystokens.py
rename to third_party/coverage-3.7.1/coverage/phystokens.py
index 166020e1fac0bd4afe41b1ba2d96e80fd54a1769..99b1d5ba0c79771e43338cc8a37ce09e7085d7e2 100644
--- a/third_party/coverage-3.6/coverage/phystokens.py
+++ b/third_party/coverage-3.7.1/coverage/phystokens.py
@@ -1,7 +1,9 @@
 """Better tokenizing for coverage.py."""
 
 import codecs, keyword, re, sys, token, tokenize
-from coverage.backward import StringIO # pylint: disable=W0622
+from coverage.backward import set # pylint: disable=W0622
+from coverage.parser import generate_tokens
+
 
 def phys_tokens(toks):
     """Return all physical tokens, even line continuations.
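Note: `from coverage.backward import set` is there because coverage.py of this era still ran on Python 2.3, where `set` was not yet a builtin (hence the `pylint: disable=W0622` redefined-builtin suppression). The `StringIO` import goes away because tokenization now happens behind `coverage.parser.generate_tokens` (see the @@ -74,11 hunk below). A minimal sketch of the kind of shim coverage.backward provides for `set`, assuming the classic sets-module fallback; the shim itself is not part of this patch:

    try:
        set = set                       # the builtin, new in Python 2.4
    except NameError:
        from sets import Set as set     # Python 2.3 fallback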
@@ -18,7 +20,7 @@ def phys_tokens(toks):
     last_ttype = None
     for ttype, ttext, (slineno, scol), (elineno, ecol), ltext in toks:
         if last_lineno != elineno:
-            if last_line and last_line[-2:] == "\\\n":
+            if last_line and last_line.endswith("\\\n"):
                 # We are at the beginning of a new line, and the last line
                 # ended with a backslash. We probably have to inject a
                 # backslash token into the stream. Unfortunately, there's more
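Note: the two spellings above are equivalent; `endswith()` simply states the intent directly instead of through slice arithmetic. A quick illustration:

    # A physical line that ends in a backslash continuation.
    last_line = 'x = 1 + \\\n'
    assert last_line[-2:] == "\\\n"          # old spelling
    assert last_line.endswith("\\\n")        # new spelling, same result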
@@ -74,11 +76,11 @@ def source_token_lines(source):
     is indistinguishable from a final line with a newline.
 
     """
-    ws_tokens = [token.INDENT, token.DEDENT, token.NEWLINE, tokenize.NL]
+    ws_tokens = set([token.INDENT, token.DEDENT, token.NEWLINE, tokenize.NL])
    line = []
    col = 0
    source = source.expandtabs(8).replace('\r\n', '\n')
-    tokgen = tokenize.generate_tokens(StringIO(source).readline)
+    tokgen = generate_tokens(source)
    for ttype, ttext, (_, scol), (_, ecol), _ in phys_tokens(tokgen):
        mark_start = True
        for part in re.split('(\n)', ttext):
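Note: two independent cleanups in this hunk. `ws_tokens` becomes a set because it is only ever used for `ttype in ws_tokens` membership tests a few lines further down, which a set answers in constant time. More significantly, tokenization moves behind `coverage.parser.generate_tokens`, which wraps `tokenize.generate_tokens` in a one-element cache so that tokenizing the same source a second time (coverage.py tokenizes a file more than once while reporting) costs nothing. A sketch of roughly what that wrapper looks like, based on the cached tokenizer in coverage 3.7's parser.py; the StringIO import juggling is only there so the sketch runs standalone:

    import tokenize
    try:
        from cStringIO import StringIO      # Python 2, as coverage 3.x used
    except ImportError:
        from io import StringIO             # lets the sketch run on Python 3

    class CachedTokenizer(object):
        """A one-element cache around tokenize.generate_tokens()."""
        def __init__(self):
            self.last_text = None
            self.last_tokens = None

        def generate_tokens(self, text):
            """Tokenize text, reusing the last result if text is unchanged."""
            if text != self.last_text:
                self.last_text = text
                self.last_tokens = list(
                    tokenize.generate_tokens(StringIO(text).readline)
                )
            return self.last_tokens

    generate_tokens = CachedTokenizer().generate_tokens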
@@ -122,7 +124,7 @@ def source_encoding(source):
     cookie_re = re.compile(r"coding[:=]\s*([-\w.]+)")
 
     # Do this so the detect_encode code we copied will work.
-    readline = iter(source.splitlines()).next
+    readline = iter(source.splitlines(True)).next
 
     def _get_normal_name(orig_enc):
         """Imitates get_normal_name in tokenizer.c."""
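Note: the copied detect_encoding code expects a readline-style callable, and a real `readline()` keeps the trailing newline on every line it returns. Plain `splitlines()` strips the line endings; passing `True` ("keepends") preserves them:

    source = "# coding: utf-8\nx = 1\n"
    print(source.splitlines())       # ['# coding: utf-8', 'x = 1']
    print(source.splitlines(True))   # ['# coding: utf-8\n', 'x = 1\n']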
@@ -177,7 +179,9 @@ def source_encoding(source):
             raise SyntaxError("unknown encoding: " + encoding)
 
         if bom_found:
-            if codec.name != 'utf-8':
+            # codecs in 2.3 were raw tuples of functions, assume the best.
+            codec_name = getattr(codec, 'name', encoding)
+            if codec_name != 'utf-8':
                 # This behaviour mimics the Python interpreter
                 raise SyntaxError('encoding problem: utf-8')
             encoding += '-sig'
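Note: `codecs.lookup()` has returned a `CodecInfo` object (which carries a `.name` attribute) only since Python 2.5; on the 2.3-era interpreters this code still supported, it returned a bare tuple of functions. `getattr` with a default keeps the BOM sanity check working on both, falling back to the cookie's own spelling of the encoding:

    import codecs

    encoding = 'utf-8'                   # as parsed from the coding cookie
    codec = codecs.lookup(encoding)
    # CodecInfo has a .name; a raw 4-tuple does not, so trust the
    # declared encoding when the attribute is missing.
    codec_name = getattr(codec, 'name', encoding)
    assert codec_name == 'utf-8'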