Chromium Code Reviews

Unified Diff: third_party/coverage-3.7.1/coverage/phystokens.py

Issue 225633007: Upgrade to coverage 3.7.1 and have it auto-build itself on first use. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/tools/build
Patch Set: "sigh our imports are a mess" (created 6 years, 9 months ago)
Index: third_party/coverage-3.7.1/coverage/phystokens.py
diff --git a/third_party/coverage-3.6/coverage/phystokens.py b/third_party/coverage-3.7.1/coverage/phystokens.py
similarity index 93%
rename from third_party/coverage-3.6/coverage/phystokens.py
rename to third_party/coverage-3.7.1/coverage/phystokens.py
index 166020e1fac0bd4afe41b1ba2d96e80fd54a1769..99b1d5ba0c79771e43338cc8a37ce09e7085d7e2 100644
--- a/third_party/coverage-3.6/coverage/phystokens.py
+++ b/third_party/coverage-3.7.1/coverage/phystokens.py
@@ -1,7 +1,9 @@
"""Better tokenizing for coverage.py."""
import codecs, keyword, re, sys, token, tokenize
-from coverage.backward import StringIO # pylint: disable=W0622
+from coverage.backward import set # pylint: disable=W0622
+from coverage.parser import generate_tokens
+
def phys_tokens(toks):
"""Return all physical tokens, even line continuations.
@@ -18,7 +20,7 @@ def phys_tokens(toks):
last_ttype = None
for ttype, ttext, (slineno, scol), (elineno, ecol), ltext in toks:
if last_lineno != elineno:
- if last_line and last_line[-2:] == "\\\n":
+ if last_line and last_line.endswith("\\\n"):
# We are at the beginning of a new line, and the last line
# ended with a backslash. We probably have to inject a
# backslash token into the stream. Unfortunately, there's more
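[Note on this hunk: the old slice test last_line[-2:] == "\\\n" and the new endswith() call are equivalent; the change is purely idiomatic. The logic around it exists because Python's tokenize module silently swallows backslash line continuations, so phys_tokens has to re-inject them when rendering source. A minimal sketch of that tokenize behavior, Python 2 style to match the patched code, standard library only:

    import tokenize
    from StringIO import StringIO

    source = "x = 1 + \\\n    2\n"
    for tok in tokenize.generate_tokens(StringIO(source).readline):
        ttype, ttext = tok[0], tok[1]
        print("%s %r" % (tokenize.tok_name[ttype], ttext))
    # The backslash never appears in any ttext: tokenize reports
    # NAME/OP/NUMBER tokens spanning both physical lines, so a renderer
    # that only echoed token text would silently drop the continuation.
]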
@@ -74,11 +76,11 @@ def source_token_lines(source):
is indistinguishable from a final line with a newline.
"""
- ws_tokens = [token.INDENT, token.DEDENT, token.NEWLINE, tokenize.NL]
+ ws_tokens = set([token.INDENT, token.DEDENT, token.NEWLINE, tokenize.NL])
line = []
col = 0
source = source.expandtabs(8).replace('\r\n', '\n')
- tokgen = tokenize.generate_tokens(StringIO(source).readline)
+ tokgen = generate_tokens(source)
for ttype, ttext, (_, scol), (_, ecol), _ in phys_tokens(tokgen):
mark_start = True
for part in re.split('(\n)', ttext):
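[Two things change in this hunk. First, ws_tokens becomes a set, so the per-token test ttype in ws_tokens is a hash lookup instead of a list scan; the set import from coverage.backward at the top of the file back-fills set on pre-2.4 Pythons. Second, tokenization goes through coverage.parser.generate_tokens rather than calling tokenize directly, which centralizes, and in this release caches, tokenization of a file that gets reported more than once. An illustrative micro-benchmark of the membership test; nothing below is from the patch itself:

    import timeit, token, tokenize

    ws_list = [token.INDENT, token.DEDENT, token.NEWLINE, tokenize.NL]
    ws_set = set(ws_list)

    probe = token.NAME  # a non-whitespace type: worst case for the list
    print(timeit.timeit(lambda: probe in ws_list))
    print(timeit.timeit(lambda: probe in ws_set))
    # The set hashes once per test; the list compares against all four
    # entries, and source_token_lines runs this test for every token.
]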
@@ -122,7 +124,7 @@ def source_encoding(source):
cookie_re = re.compile(r"coding[:=]\s*([-\w.]+)")
# Do this so the detect_encode code we copied will work.
- readline = iter(source.splitlines()).next
+ readline = iter(source.splitlines(True)).next
def _get_normal_name(orig_enc):
"""Imitates get_normal_name in tokenizer.c."""
@@ -177,7 +179,9 @@ def source_encoding(source):
raise SyntaxError("unknown encoding: " + encoding)
if bom_found:
- if codec.name != 'utf-8':
+ # codecs in 2.3 were raw tuples of functions, assume the best.
+ codec_name = getattr(codec, 'name', encoding)
+ if codec_name != 'utf-8':
# This behaviour mimics the Python interpreter
raise SyntaxError('encoding problem: utf-8')
encoding += '-sig'
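[The final hunk hardens source_encoding() against very old Pythons: as the new comment says, codecs.lookup on 2.3-era interpreters returned a raw tuple of functions with no .name attribute, so codec.name would raise AttributeError there. getattr with the cookie's own spelling as the fallback keeps the BOM-vs-cookie consistency check working either way. A small sketch of the pattern on a modern interpreter:

    import codecs

    encoding = "utf-8"              # spelling taken from a coding cookie
    codec = codecs.lookup(encoding)

    # CodecInfo objects expose the canonical name ('utf-8'); on the old
    # tuple-based lookup API this getattr falls back to the cookie text.
    codec_name = getattr(codec, 'name', encoding)
    print(codec_name)
]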