Index: third_party/protobuf/python/google/protobuf/text_format.py |
diff --git a/third_party/protobuf/python/google/protobuf/text_format.py b/third_party/protobuf/python/google/protobuf/text_format.py |
index 0714c39d44d0aeb9477446b8b62d65b14b14107f..8d256076c28ad4c3245aaccbbbd8284aeb4bc961 100755 |
--- a/third_party/protobuf/python/google/protobuf/text_format.py |
+++ b/third_party/protobuf/python/google/protobuf/text_format.py |
@@ -1,6 +1,6 @@ |
# Protocol Buffers - Google's data interchange format |
# Copyright 2008 Google Inc. All rights reserved. |
-# http://code.google.com/p/protobuf/ |
+# https://developers.google.com/protocol-buffers/ |
# |
# Redistribution and use in source and binary forms, with or without |
# modification, are permitted provided that the following conditions are |
@@ -28,19 +28,34 @@ |
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
-"""Contains routines for printing protocol messages in text format.""" |
+"""Contains routines for printing protocol messages in text format. |
+ |
+Simple usage example: |
+ |
+ # Create a proto object and serialize it to a text proto string. |
+ message = my_proto_pb2.MyMessage(foo='bar') |
+ text_proto = text_format.MessageToString(message) |
+ |
+ # Parse a text proto string. |
+ message = text_format.Parse(text_proto, my_proto_pb2.MyMessage()) |
+""" |
__author__ = 'kenton@google.com (Kenton Varda)' |
-import cStringIO |
+import io |
import re |
-from collections import deque |
+import six |
+ |
+if six.PY3: |
+ long = int |
+ |
from google.protobuf.internal import type_checkers |
from google.protobuf import descriptor |
+from google.protobuf import text_encoding |
-__all__ = [ 'MessageToString', 'PrintMessage', 'PrintField', |
- 'PrintFieldValue', 'Merge' ] |
+__all__ = ['MessageToString', 'PrintMessage', 'PrintField', |
+ 'PrintFieldValue', 'Merge'] |
_INTEGER_CHECKERS = (type_checkers.Uint32ValueChecker(), |
@@ -49,15 +64,69 @@ _INTEGER_CHECKERS = (type_checkers.Uint32ValueChecker(), |
type_checkers.Int64ValueChecker()) |
_FLOAT_INFINITY = re.compile('-?inf(?:inity)?f?', re.IGNORECASE) |
_FLOAT_NAN = re.compile('nanf?', re.IGNORECASE) |
+_FLOAT_TYPES = frozenset([descriptor.FieldDescriptor.CPPTYPE_FLOAT, |
+ descriptor.FieldDescriptor.CPPTYPE_DOUBLE]) |
+_QUOTES = frozenset(("'", '"')) |
+ |
+ |
+class Error(Exception): |
+ """Top-level module error for text_format.""" |
+ |
+ |
+class ParseError(Error): |
+ """Thrown in case of text parsing error.""" |
-class ParseError(Exception): |
- """Thrown in case of ASCII parsing error.""" |
+class TextWriter(object): |
+ def __init__(self, as_utf8): |
+ if six.PY2: |
+ self._writer = io.BytesIO() |
+ else: |
+ self._writer = io.StringIO() |
+ |
+ def write(self, val): |
+ if six.PY2: |
+ if isinstance(val, six.text_type): |
+ val = val.encode('utf-8') |
+ return self._writer.write(val) |
+ |
+ def close(self): |
+ return self._writer.close() |
+ |
+ def getvalue(self): |
+ return self._writer.getvalue() |
+ |
+def MessageToString(message, as_utf8=False, as_one_line=False, |
+ pointy_brackets=False, use_index_order=False, |
+ float_format=None): |
+ """Convert protobuf message to text format. |
-def MessageToString(message, as_utf8=False, as_one_line=False): |
- out = cStringIO.StringIO() |
- PrintMessage(message, out, as_utf8=as_utf8, as_one_line=as_one_line) |
+ Floating point values can be formatted compactly with 15 digits of |
+ precision (which is the most that IEEE 754 "double" can guarantee) |
+ using float_format='.15g'. To ensure that converting to text and back to a |
+ proto will result in an identical value, float_format='.17g' should be used. |
+ |
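+  For example, to produce output that parses back to an identical message |
+  (assuming message is an already-populated protocol buffer message): |
+ |
+    text_proto = MessageToString(message, float_format='.17g') |
+ |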
+ Args: |
+ message: The protocol buffers message. |
+ as_utf8: Produce text output in UTF8 format. |
+ as_one_line: Don't introduce newlines between fields. |
+ pointy_brackets: If True, use angle brackets instead of curly braces for |
+ nesting. |
+ use_index_order: If True, print fields of a proto message using the order |
+ defined in source code instead of the field number. By default, use the |
+ field number order. |
+ float_format: If set, use this to specify floating point number formatting |
+ (per the "Format Specification Mini-Language"); otherwise, str() is used. |
+ |
+ Returns: |
+ A string of the text formatted protocol buffer message. |
+ """ |
+ out = TextWriter(as_utf8) |
+ PrintMessage(message, out, as_utf8=as_utf8, as_one_line=as_one_line, |
+ pointy_brackets=pointy_brackets, |
+ use_index_order=use_index_order, |
+ float_format=float_format) |
result = out.getvalue() |
out.close() |
if as_one_line: |
@@ -65,25 +134,55 @@ def MessageToString(message, as_utf8=False, as_one_line=False): |
return result |
-def PrintMessage(message, out, indent=0, as_utf8=False, as_one_line=False): |
- for field, value in message.ListFields(): |
- if field.label == descriptor.FieldDescriptor.LABEL_REPEATED: |
+def _IsMapEntry(field): |
+ return (field.type == descriptor.FieldDescriptor.TYPE_MESSAGE and |
+ field.message_type.has_options and |
+ field.message_type.GetOptions().map_entry) |
+ |
+ |
+def PrintMessage(message, out, indent=0, as_utf8=False, as_one_line=False, |
+ pointy_brackets=False, use_index_order=False, |
+ float_format=None): |
+ fields = message.ListFields() |
+ if use_index_order: |
+ fields.sort(key=lambda x: x[0].index) |
+ for field, value in fields: |
+ if _IsMapEntry(field): |
+ for key in sorted(value): |
+      # This is slow for maps with submessage entries because it copies the |
+ # entire tree. Unfortunately this would take significant refactoring |
+ # of this file to work around. |
+ # |
+ # TODO(haberman): refactor and optimize if this becomes an issue. |
+ entry_submsg = field.message_type._concrete_class( |
+ key=key, value=value[key]) |
+ PrintField(field, entry_submsg, out, indent, as_utf8, as_one_line, |
+ pointy_brackets=pointy_brackets, |
+ use_index_order=use_index_order, float_format=float_format) |
+ elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED: |
for element in value: |
- PrintField(field, element, out, indent, as_utf8, as_one_line) |
+ PrintField(field, element, out, indent, as_utf8, as_one_line, |
+ pointy_brackets=pointy_brackets, |
+ use_index_order=use_index_order, |
+ float_format=float_format) |
else: |
- PrintField(field, value, out, indent, as_utf8, as_one_line) |
+ PrintField(field, value, out, indent, as_utf8, as_one_line, |
+ pointy_brackets=pointy_brackets, |
+ use_index_order=use_index_order, |
+ float_format=float_format) |
-def PrintField(field, value, out, indent=0, as_utf8=False, as_one_line=False): |
+def PrintField(field, value, out, indent=0, as_utf8=False, as_one_line=False, |
+ pointy_brackets=False, use_index_order=False, float_format=None): |
"""Print a single field name/value pair. For repeated fields, the value |
- should be a single element.""" |
+ should be a single element. |
+ """ |
- out.write(' ' * indent); |
+ out.write(' ' * indent) |
if field.is_extension: |
out.write('[') |
if (field.containing_type.GetOptions().message_set_wire_format and |
field.type == descriptor.FieldDescriptor.TYPE_MESSAGE and |
- field.message_type == field.extension_scope and |
field.label == descriptor.FieldDescriptor.LABEL_OPTIONAL): |
out.write(field.message_type.full_name) |
else: |
@@ -100,27 +199,45 @@ def PrintField(field, value, out, indent=0, as_utf8=False, as_one_line=False): |
# don't include it. |
out.write(': ') |
- PrintFieldValue(field, value, out, indent, as_utf8, as_one_line) |
+ PrintFieldValue(field, value, out, indent, as_utf8, as_one_line, |
+ pointy_brackets=pointy_brackets, |
+ use_index_order=use_index_order, |
+ float_format=float_format) |
if as_one_line: |
out.write(' ') |
else: |
out.write('\n') |
-def PrintFieldValue(field, value, out, indent=0, |
- as_utf8=False, as_one_line=False): |
+def PrintFieldValue(field, value, out, indent=0, as_utf8=False, |
+ as_one_line=False, pointy_brackets=False, |
+ use_index_order=False, |
+ float_format=None): |
"""Print a single field value (not including name). For repeated fields, |
the value should be a single element.""" |
+ if pointy_brackets: |
+ openb = '<' |
+ closeb = '>' |
+ else: |
+ openb = '{' |
+ closeb = '}' |
+ |
if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE: |
if as_one_line: |
- out.write(' { ') |
- PrintMessage(value, out, indent, as_utf8, as_one_line) |
- out.write('}') |
+ out.write(' %s ' % openb) |
+ PrintMessage(value, out, indent, as_utf8, as_one_line, |
+ pointy_brackets=pointy_brackets, |
+ use_index_order=use_index_order, |
+ float_format=float_format) |
+ out.write(closeb) |
else: |
- out.write(' {\n') |
- PrintMessage(value, out, indent + 2, as_utf8, as_one_line) |
- out.write(' ' * indent + '}') |
+ out.write(' %s\n' % openb) |
+ PrintMessage(value, out, indent + 2, as_utf8, as_one_line, |
+ pointy_brackets=pointy_brackets, |
+ use_index_order=use_index_order, |
+ float_format=float_format) |
+ out.write(' ' * indent + closeb) |
elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_ENUM: |
enum_value = field.enum_type.values_by_number.get(value, None) |
if enum_value is not None: |
@@ -129,46 +246,155 @@ def PrintFieldValue(field, value, out, indent=0, |
out.write(str(value)) |
elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_STRING: |
out.write('\"') |
- if type(value) is unicode: |
- out.write(_CEscape(value.encode('utf-8'), as_utf8)) |
+ if isinstance(value, six.text_type): |
+ out_value = value.encode('utf-8') |
else: |
- out.write(_CEscape(value, as_utf8)) |
+ out_value = value |
+ if field.type == descriptor.FieldDescriptor.TYPE_BYTES: |
+      # We need to escape non-UTF8 chars in TYPE_BYTES fields. |
+ out_as_utf8 = False |
+ else: |
+ out_as_utf8 = as_utf8 |
+ out.write(text_encoding.CEscape(out_value, out_as_utf8)) |
out.write('\"') |
elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_BOOL: |
if value: |
- out.write("true") |
+ out.write('true') |
else: |
- out.write("false") |
+ out.write('false') |
+ elif field.cpp_type in _FLOAT_TYPES and float_format is not None: |
+ out.write('{1:{0}}'.format(float_format, value)) |
else: |
out.write(str(value)) |
-def Merge(text, message): |
- """Merges an ASCII representation of a protocol message into a message. |
+def Parse(text, message, allow_unknown_extension=False): |
+ """Parses an text representation of a protocol message into a message. |
+ |
+ Args: |
+ text: Message text representation. |
+ message: A protocol buffer message to merge into. |
+    allow_unknown_extension: If True, skip over missing extensions and keep |
+      parsing. |
+ |
+ Returns: |
+ The same message passed as argument. |
+ |
+ Raises: |
+ ParseError: On text parsing problems. |
+ """ |
+ if not isinstance(text, str): |
+ text = text.decode('utf-8') |
+ return ParseLines(text.split('\n'), message, allow_unknown_extension) |
+ |
+ |
+def Merge(text, message, allow_unknown_extension=False): |
+ """Parses an text representation of a protocol message into a message. |
+ |
+ Like Parse(), but allows repeated values for a non-repeated field, and uses |
+ the last one. |
+ |
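+  For example, with a proto2 message instance that has an optional int32 |
+  field named "foo" (an illustrative field for this sketch): |
+ |
+    Merge('foo: 1 foo: 2', message)   # message.foo is 2 afterwards. |
+    Parse('foo: 1 foo: 2', message)   # Would raise ParseError instead. |
+ |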
+ Args: |
+ text: Message text representation. |
+ message: A protocol buffer message to merge into. |
+    allow_unknown_extension: If True, skip over missing extensions and keep |
+      parsing. |
+ |
+ Returns: |
+ The same message passed as argument. |
+ |
+ Raises: |
+ ParseError: On text parsing problems. |
+ """ |
+ return MergeLines(text.split('\n'), message, allow_unknown_extension) |
+ |
+ |
+def ParseLines(lines, message, allow_unknown_extension=False): |
+ """Parses an text representation of a protocol message into a message. |
+ |
+ Args: |
+ lines: An iterable of lines of a message's text representation. |
+ message: A protocol buffer message to merge into. |
+    allow_unknown_extension: If True, skip over missing extensions and keep |
+      parsing. |
+ |
+ Returns: |
+ The same message passed as argument. |
+ |
+ Raises: |
+ ParseError: On text parsing problems. |
+ """ |
+ _ParseOrMerge(lines, message, False, allow_unknown_extension) |
+ return message |
+ |
+ |
+def MergeLines(lines, message, allow_unknown_extension=False): |
+ """Parses an text representation of a protocol message into a message. |
+ |
+ Args: |
+ lines: An iterable of lines of a message's text representation. |
+ message: A protocol buffer message to merge into. |
+    allow_unknown_extension: If True, skip over missing extensions and keep |
+      parsing. |
+ |
+ Returns: |
+ The same message passed as argument. |
+ |
+ Raises: |
+ ParseError: On text parsing problems. |
+ """ |
+ _ParseOrMerge(lines, message, True, allow_unknown_extension) |
+ return message |
+ |
+ |
+def _ParseOrMerge(lines, |
+ message, |
+ allow_multiple_scalars, |
+ allow_unknown_extension=False): |
+ """Converts an text representation of a protocol message into a message. |
Args: |
- text: Message ASCII representation. |
+ lines: Lines of a message's text representation. |
message: A protocol buffer message to merge into. |
+ allow_multiple_scalars: Determines if repeated values for a non-repeated |
+ field are permitted, e.g., the string "foo: 1 foo: 2" for a |
+ required/optional field named "foo". |
+    allow_unknown_extension: If True, skip over missing extensions and keep |
+      parsing. |
Raises: |
- ParseError: On ASCII parsing problems. |
+ ParseError: On text parsing problems. |
""" |
- tokenizer = _Tokenizer(text) |
+ tokenizer = _Tokenizer(lines) |
while not tokenizer.AtEnd(): |
- _MergeField(tokenizer, message) |
+ _MergeField(tokenizer, message, allow_multiple_scalars, |
+ allow_unknown_extension) |
-def _MergeField(tokenizer, message): |
+def _MergeField(tokenizer, |
+ message, |
+ allow_multiple_scalars, |
+ allow_unknown_extension=False): |
"""Merges a single protocol message field into a message. |
Args: |
tokenizer: A tokenizer to parse the field name and values. |
message: A protocol message to record the data. |
+ allow_multiple_scalars: Determines if repeated values for a non-repeated |
+ field are permitted, e.g., the string "foo: 1 foo: 2" for a |
+ required/optional field named "foo". |
+    allow_unknown_extension: If True, skip over missing extensions and keep |
+      parsing. |
Raises: |
- ParseError: In case of ASCII parsing problems. |
+ ParseError: In case of text parsing problems. |
""" |
message_descriptor = message.DESCRIPTOR |
+ if (hasattr(message_descriptor, 'syntax') and |
+ message_descriptor.syntax == 'proto3'): |
+ # Proto3 doesn't represent presence so we can't test if multiple |
+ # scalars have occurred. We have to allow them. |
+ allow_multiple_scalars = True |
if tokenizer.TryConsume('['): |
name = [tokenizer.ConsumeIdentifier()] |
while tokenizer.TryConsume('.'): |
@@ -179,15 +405,22 @@ def _MergeField(tokenizer, message): |
raise tokenizer.ParseErrorPreviousToken( |
'Message type "%s" does not have extensions.' % |
message_descriptor.full_name) |
+ # pylint: disable=protected-access |
field = message.Extensions._FindExtensionByName(name) |
+ # pylint: enable=protected-access |
if not field: |
- raise tokenizer.ParseErrorPreviousToken( |
- 'Extension "%s" not registered.' % name) |
+ if allow_unknown_extension: |
+ field = None |
+ else: |
+ raise tokenizer.ParseErrorPreviousToken( |
+ 'Extension "%s" not registered.' % name) |
elif message_descriptor != field.containing_type: |
raise tokenizer.ParseErrorPreviousToken( |
'Extension "%s" does not extend message type "%s".' % ( |
name, message_descriptor.full_name)) |
+ |
tokenizer.Consume(']') |
+ |
else: |
name = tokenizer.ConsumeIdentifier() |
field = message_descriptor.fields_by_name.get(name, None) |
@@ -209,7 +442,8 @@ def _MergeField(tokenizer, message): |
'Message type "%s" has no field named "%s".' % ( |
message_descriptor.full_name, name)) |
- if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE: |
+ if field and field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE: |
+ is_map_entry = _IsMapEntry(field) |
tokenizer.TryConsume(':') |
if tokenizer.TryConsume('<'): |
@@ -221,6 +455,8 @@ def _MergeField(tokenizer, message): |
if field.label == descriptor.FieldDescriptor.LABEL_REPEATED: |
if field.is_extension: |
sub_message = message.Extensions[field].add() |
+ elif is_map_entry: |
+ sub_message = field.message_type._concrete_class() |
else: |
sub_message = getattr(message, field.name).add() |
else: |
@@ -233,24 +469,137 @@ def _MergeField(tokenizer, message): |
while not tokenizer.TryConsume(end_token): |
if tokenizer.AtEnd(): |
raise tokenizer.ParseErrorPreviousToken('Expected "%s".' % (end_token)) |
- _MergeField(tokenizer, sub_message) |
+ _MergeField(tokenizer, sub_message, allow_multiple_scalars, |
+ allow_unknown_extension) |
+ |
+ if is_map_entry: |
+ value_cpptype = field.message_type.fields_by_name['value'].cpp_type |
+ if value_cpptype == descriptor.FieldDescriptor.CPPTYPE_MESSAGE: |
+ value = getattr(message, field.name)[sub_message.key] |
+ value.MergeFrom(sub_message.value) |
+ else: |
+ getattr(message, field.name)[sub_message.key] = sub_message.value |
+ elif field: |
+ tokenizer.Consume(':') |
+ if (field.label == descriptor.FieldDescriptor.LABEL_REPEATED and |
+ tokenizer.TryConsume('[')): |
+ # Short repeated format, e.g. "foo: [1, 2, 3]" |
+ while True: |
+ _MergeScalarField(tokenizer, message, field, allow_multiple_scalars) |
+ if tokenizer.TryConsume(']'): |
+ break |
+ tokenizer.Consume(',') |
+ else: |
+ _MergeScalarField(tokenizer, message, field, allow_multiple_scalars) |
+ else: # Proto field is unknown. |
+ assert allow_unknown_extension |
+ _SkipFieldContents(tokenizer) |
+ |
+ # For historical reasons, fields may optionally be separated by commas or |
+ # semicolons. |
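+  # e.g. "foo: 1, bar: 2;" and "foo: 1 bar: 2" are parsed the same way |
+  # (assuming the message defines scalar fields "foo" and "bar"). |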
+ if not tokenizer.TryConsume(','): |
+ tokenizer.TryConsume(';') |
+ |
+ |
+def _SkipFieldContents(tokenizer): |
+ """Skips over contents (value or message) of a field. |
+ |
+ Args: |
+ tokenizer: A tokenizer to parse the field name and values. |
+ """ |
+ # Try to guess the type of this field. |
+ # If this field is not a message, there should be a ":" between the |
+ # field name and the field value and also the field value should not |
+ # start with "{" or "<" which indicates the beginning of a message body. |
+ # If there is no ":" or there is a "{" or "<" after ":", this field has |
+ # to be a message or the input is ill-formed. |
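+  # e.g. ": 1" is skipped as a scalar value, while "{ ... }", "< ... >" and |
+  # ": { ... }" are all skipped as message bodies. |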
+ if tokenizer.TryConsume(':') and not tokenizer.LookingAt( |
+ '{') and not tokenizer.LookingAt('<'): |
+ _SkipFieldValue(tokenizer) |
+ else: |
+ _SkipFieldMessage(tokenizer) |
+ |
+ |
+def _SkipField(tokenizer): |
+ """Skips over a complete field (name and value/message). |
+ |
+ Args: |
+ tokenizer: A tokenizer to parse the field name and values. |
+ """ |
+ if tokenizer.TryConsume('['): |
+ # Consume extension name. |
+ tokenizer.ConsumeIdentifier() |
+ while tokenizer.TryConsume('.'): |
+ tokenizer.ConsumeIdentifier() |
+ tokenizer.Consume(']') |
else: |
- _MergeScalarField(tokenizer, message, field) |
+ tokenizer.ConsumeIdentifier() |
+ _SkipFieldContents(tokenizer) |
-def _MergeScalarField(tokenizer, message, field): |
+ # For historical reasons, fields may optionally be separated by commas or |
+ # semicolons. |
+ if not tokenizer.TryConsume(','): |
+ tokenizer.TryConsume(';') |
+ |
+ |
+def _SkipFieldMessage(tokenizer): |
+ """Skips over a field message. |
+ |
+ Args: |
+ tokenizer: A tokenizer to parse the field name and values. |
+ """ |
+ |
+ if tokenizer.TryConsume('<'): |
+ delimiter = '>' |
+ else: |
+ tokenizer.Consume('{') |
+ delimiter = '}' |
+ |
+ while not tokenizer.LookingAt('>') and not tokenizer.LookingAt('}'): |
+ _SkipField(tokenizer) |
+ |
+ tokenizer.Consume(delimiter) |
+ |
+ |
+def _SkipFieldValue(tokenizer): |
+ """Skips over a field value. |
+ |
+ Args: |
+ tokenizer: A tokenizer to parse the field name and values. |
+ |
+ Raises: |
+ ParseError: In case an invalid field value is found. |
+ """ |
+ # String tokens can come in multiple adjacent string literals. |
+ # If we can consume one, consume as many as we can. |
+ if tokenizer.TryConsumeString(): |
+ while tokenizer.TryConsumeString(): |
+ pass |
+ return |
+ |
+ if (not tokenizer.TryConsumeIdentifier() and |
+ not tokenizer.TryConsumeInt64() and |
+ not tokenizer.TryConsumeUint64() and |
+ not tokenizer.TryConsumeFloat()): |
+ raise ParseError('Invalid field value: ' + tokenizer.token) |
+ |
+ |
+def _MergeScalarField(tokenizer, message, field, allow_multiple_scalars): |
"""Merges a single protocol message scalar field into a message. |
Args: |
tokenizer: A tokenizer to parse the field value. |
message: A protocol message to record the data. |
field: The descriptor of the field to be merged. |
+ allow_multiple_scalars: Determines if repeated values for a non-repeated |
+ field are permitted, e.g., the string "foo: 1 foo: 2" for a |
+ required/optional field named "foo". |
Raises: |
- ParseError: In case of ASCII parsing problems. |
+ ParseError: In case of text parsing problems. |
RuntimeError: On runtime errors. |
""" |
- tokenizer.Consume(':') |
value = None |
if field.type in (descriptor.FieldDescriptor.TYPE_INT32, |
@@ -288,13 +637,23 @@ def _MergeScalarField(tokenizer, message, field): |
getattr(message, field.name).append(value) |
else: |
if field.is_extension: |
- message.Extensions[field] = value |
+ if not allow_multiple_scalars and message.HasExtension(field): |
+ raise tokenizer.ParseErrorPreviousToken( |
+ 'Message type "%s" should not have multiple "%s" extensions.' % |
+ (message.DESCRIPTOR.full_name, field.full_name)) |
+ else: |
+ message.Extensions[field] = value |
else: |
- setattr(message, field.name, value) |
+ if not allow_multiple_scalars and message.HasField(field.name): |
+ raise tokenizer.ParseErrorPreviousToken( |
+ 'Message type "%s" should not have multiple "%s" fields.' % |
+ (message.DESCRIPTOR.full_name, field.name)) |
+ else: |
+ setattr(message, field.name, value) |
class _Tokenizer(object): |
- """Protocol buffer ASCII representation tokenizer. |
+ """Protocol buffer text representation tokenizer. |
This class handles the lower level string parsing by splitting it into |
meaningful tokens. |
@@ -303,44 +662,51 @@ class _Tokenizer(object): |
""" |
_WHITESPACE = re.compile('(\\s|(#.*$))+', re.MULTILINE) |
- _TOKEN = re.compile( |
- '[a-zA-Z_][0-9a-zA-Z_+-]*|' # an identifier |
- '[0-9+-][0-9a-zA-Z_.+-]*|' # a number |
- '\"([^\"\n\\\\]|\\\\.)*(\"|\\\\?$)|' # a double-quoted string |
- '\'([^\'\n\\\\]|\\\\.)*(\'|\\\\?$)') # a single-quoted string |
- _IDENTIFIER = re.compile('\w+') |
+ _TOKEN = re.compile('|'.join([ |
+ r'[a-zA-Z_][0-9a-zA-Z_+-]*', # an identifier |
+ r'([0-9+-]|(\.[0-9]))[0-9a-zA-Z_.+-]*', # a number |
+ ] + [ # quoted str for each quote mark |
+ r'{qt}([^{qt}\n\\]|\\.)*({qt}|\\?$)'.format(qt=mark) for mark in _QUOTES |
+ ])) |
- def __init__(self, text_message): |
- self._text_message = text_message |
+ _IDENTIFIER = re.compile(r'\w+') |
+ def __init__(self, lines): |
self._position = 0 |
self._line = -1 |
self._column = 0 |
self._token_start = None |
self.token = '' |
- self._lines = deque(text_message.split('\n')) |
+ self._lines = iter(lines) |
self._current_line = '' |
self._previous_line = 0 |
self._previous_column = 0 |
+ self._more_lines = True |
self._SkipWhitespace() |
self.NextToken() |
+ def LookingAt(self, token): |
+ return self.token == token |
+ |
def AtEnd(self): |
"""Checks the end of the text was reached. |
Returns: |
True iff the end was reached. |
""" |
- return self.token == '' |
+ return not self.token |
def _PopLine(self): |
while len(self._current_line) <= self._column: |
- if not self._lines: |
+ try: |
+ self._current_line = next(self._lines) |
+ except StopIteration: |
self._current_line = '' |
+ self._more_lines = False |
return |
- self._line += 1 |
- self._column = 0 |
- self._current_line = self._lines.popleft() |
+ else: |
+ self._line += 1 |
+ self._column = 0 |
def _SkipWhitespace(self): |
while True: |
@@ -377,6 +743,13 @@ class _Tokenizer(object): |
if not self.TryConsume(token): |
raise self._ParseError('Expected "%s".' % token) |
+ def TryConsumeIdentifier(self): |
+ try: |
+ self.ConsumeIdentifier() |
+ return True |
+ except ParseError: |
+ return False |
+ |
def ConsumeIdentifier(self): |
"""Consumes protocol message field identifier. |
@@ -403,7 +776,7 @@ class _Tokenizer(object): |
""" |
try: |
result = ParseInteger(self.token, is_signed=True, is_long=False) |
- except ValueError, e: |
+ except ValueError as e: |
raise self._ParseError(str(e)) |
self.NextToken() |
return result |
@@ -419,11 +792,18 @@ class _Tokenizer(object): |
""" |
try: |
result = ParseInteger(self.token, is_signed=False, is_long=False) |
- except ValueError, e: |
+ except ValueError as e: |
raise self._ParseError(str(e)) |
self.NextToken() |
return result |
+ def TryConsumeInt64(self): |
+ try: |
+ self.ConsumeInt64() |
+ return True |
+ except ParseError: |
+ return False |
+ |
def ConsumeInt64(self): |
"""Consumes a signed 64bit integer number. |
@@ -435,11 +815,18 @@ class _Tokenizer(object): |
""" |
try: |
result = ParseInteger(self.token, is_signed=True, is_long=True) |
- except ValueError, e: |
+ except ValueError as e: |
raise self._ParseError(str(e)) |
self.NextToken() |
return result |
+ def TryConsumeUint64(self): |
+ try: |
+ self.ConsumeUint64() |
+ return True |
+ except ParseError: |
+ return False |
+ |
def ConsumeUint64(self): |
"""Consumes an unsigned 64bit integer number. |
@@ -451,11 +838,18 @@ class _Tokenizer(object): |
""" |
try: |
result = ParseInteger(self.token, is_signed=False, is_long=True) |
- except ValueError, e: |
+ except ValueError as e: |
raise self._ParseError(str(e)) |
self.NextToken() |
return result |
+ def TryConsumeFloat(self): |
+ try: |
+ self.ConsumeFloat() |
+ return True |
+ except ParseError: |
+ return False |
+ |
def ConsumeFloat(self): |
"""Consumes an floating point number. |
@@ -467,7 +861,7 @@ class _Tokenizer(object): |
""" |
try: |
result = ParseFloat(self.token) |
- except ValueError, e: |
+ except ValueError as e: |
raise self._ParseError(str(e)) |
self.NextToken() |
return result |
@@ -483,11 +877,18 @@ class _Tokenizer(object): |
""" |
try: |
result = ParseBool(self.token) |
- except ValueError, e: |
+ except ValueError as e: |
raise self._ParseError(str(e)) |
self.NextToken() |
return result |
+ def TryConsumeString(self): |
+ try: |
+ self.ConsumeString() |
+ return True |
+ except ParseError: |
+ return False |
+ |
def ConsumeString(self): |
"""Consumes a string value. |
@@ -497,10 +898,10 @@ class _Tokenizer(object): |
Raises: |
ParseError: If a string value couldn't be consumed. |
""" |
- bytes = self.ConsumeByteString() |
+ the_bytes = self.ConsumeByteString() |
try: |
- return unicode(bytes, 'utf-8') |
- except UnicodeDecodeError, e: |
+ return six.text_type(the_bytes, 'utf-8') |
+ except UnicodeDecodeError as e: |
raise self._StringParseError(e) |
def ConsumeByteString(self): |
@@ -512,10 +913,10 @@ class _Tokenizer(object): |
Raises: |
ParseError: If a byte array value couldn't be consumed. |
""" |
- list = [self._ConsumeSingleByteString()] |
- while len(self.token) > 0 and self.token[0] in ('\'', '"'): |
- list.append(self._ConsumeSingleByteString()) |
- return "".join(list) |
+ the_list = [self._ConsumeSingleByteString()] |
+ while self.token and self.token[0] in _QUOTES: |
+ the_list.append(self._ConsumeSingleByteString()) |
+ return b''.join(the_list) |
def _ConsumeSingleByteString(self): |
"""Consume one token of a string literal. |
@@ -523,17 +924,22 @@ class _Tokenizer(object): |
String literals (whether bytes or text) can come in multiple adjacent |
tokens which are automatically concatenated, like in C or Python. This |
method only consumes one token. |
+ |
+ Returns: |
+ The token parsed. |
+ Raises: |
+      ParseError: When malformed string data is found. |
""" |
text = self.token |
- if len(text) < 1 or text[0] not in ('\'', '"'): |
- raise self._ParseError('Expected string.') |
+ if len(text) < 1 or text[0] not in _QUOTES: |
+ raise self._ParseError('Expected string but found: %r' % (text,)) |
if len(text) < 2 or text[-1] != text[0]: |
- raise self._ParseError('String missing ending quote.') |
+ raise self._ParseError('String missing ending quote: %r' % (text,)) |
try: |
- result = _CUnescape(text[1:-1]) |
- except ValueError, e: |
+ result = text_encoding.CUnescape(text[1:-1]) |
+ except ValueError as e: |
raise self._ParseError(str(e)) |
self.NextToken() |
return result |
@@ -541,7 +947,7 @@ class _Tokenizer(object): |
def ConsumeEnum(self, field): |
try: |
result = ParseEnum(field, self.token) |
- except ValueError, e: |
+ except ValueError as e: |
raise self._ParseError(str(e)) |
self.NextToken() |
return result |
@@ -574,7 +980,7 @@ class _Tokenizer(object): |
self._column += len(self.token) |
self._SkipWhitespace() |
- if not self._lines and len(self._current_line) <= self._column: |
+ if not self._more_lines: |
self.token = '' |
return |
@@ -586,40 +992,6 @@ class _Tokenizer(object): |
self.token = self._current_line[self._column] |
-# text.encode('string_escape') does not seem to satisfy our needs as it |
-# encodes unprintable characters using two-digit hex escapes whereas our |
-# C++ unescaping function allows hex escapes to be any length. So, |
-# "\0011".encode('string_escape') ends up being "\\x011", which will be |
-# decoded in C++ as a single-character string with char code 0x11. |
-def _CEscape(text, as_utf8): |
- def escape(c): |
- o = ord(c) |
- if o == 10: return r"\n" # optional escape |
- if o == 13: return r"\r" # optional escape |
- if o == 9: return r"\t" # optional escape |
- if o == 39: return r"\'" # optional escape |
- |
- if o == 34: return r'\"' # necessary escape |
- if o == 92: return r"\\" # necessary escape |
- |
- # necessary escapes |
- if not as_utf8 and (o >= 127 or o < 32): return "\\%03o" % o |
- return c |
- return "".join([escape(c) for c in text]) |
- |
- |
-_CUNESCAPE_HEX = re.compile('\\\\x([0-9a-fA-F]{2}|[0-9a-fA-F])') |
- |
- |
-def _CUnescape(text): |
- def ReplaceHex(m): |
- return chr(int(m.group(0)[2:], 16)) |
- # This is required because the 'string_escape' encoding doesn't |
- # allow single-digit hex escapes (like '\xf'). |
- result = _CUNESCAPE_HEX.sub(ReplaceHex, text) |
- return result.decode('string_escape') |
- |
- |
def ParseInteger(text, is_signed=False, is_long=False): |
"""Parses an integer. |
@@ -636,7 +1008,13 @@ def ParseInteger(text, is_signed=False, is_long=False): |
""" |
# Do the actual parsing. Exception handling is propagated to caller. |
try: |
- result = int(text, 0) |
+ # We force 32-bit values to int and 64-bit values to long to make |
+ # alternate implementations where the distinction is more significant |
+ # (e.g. the C++ implementation) simpler. |
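+    # Base 0 also accepts hex literals, e.g. "0x7f" parses to 127. |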
+ if is_long: |
+ result = long(text, 0) |
+ else: |
+ result = int(text, 0) |
except ValueError: |
raise ValueError('Couldn\'t parse integer: %s' % text) |