Index: third_party/protobuf/python/google/protobuf/text_format.py |
diff --git a/third_party/protobuf/python/google/protobuf/text_format.py b/third_party/protobuf/python/google/protobuf/text_format.py |
index 90f6ce42a483e938825ee24a05bcd74c91463b5a..6f1e3c8b725ea2f90b27ad62714de4d4df24a94c 100755 |
--- a/third_party/protobuf/python/google/protobuf/text_format.py |
+++ b/third_party/protobuf/python/google/protobuf/text_format.py |
@@ -48,15 +48,15 @@ import re |
import six |
if six.PY3: |
- long = int # pylint: disable=redefined-builtin,invalid-name |
+ long = int |
-# pylint: disable=g-import-not-at-top |
from google.protobuf.internal import type_checkers |
from google.protobuf import descriptor |
from google.protobuf import text_encoding |
-__all__ = ['MessageToString', 'PrintMessage', 'PrintField', 'PrintFieldValue', |
- 'Merge'] |
+__all__ = ['MessageToString', 'PrintMessage', 'PrintField', |
+ 'PrintFieldValue', 'Merge'] |
+ |
_INTEGER_CHECKERS = (type_checkers.Uint32ValueChecker(), |
type_checkers.Int32ValueChecker(), |
@@ -67,7 +67,6 @@ _FLOAT_NAN = re.compile('nanf?', re.IGNORECASE) |
_FLOAT_TYPES = frozenset([descriptor.FieldDescriptor.CPPTYPE_FLOAT, |
descriptor.FieldDescriptor.CPPTYPE_DOUBLE]) |
_QUOTES = frozenset(("'", '"')) |
-_ANY_FULL_TYPE_NAME = 'google.protobuf.Any' |
class Error(Exception): |
@@ -75,30 +74,10 @@ class Error(Exception): |
class ParseError(Error): |
- """Thrown in case of text parsing or tokenizing error.""" |
- |
- def __init__(self, message=None, line=None, column=None): |
- if message is not None and line is not None: |
- loc = str(line) |
- if column is not None: |
- loc += ':{0}'.format(column) |
- message = '{0} : {1}'.format(loc, message) |
- if message is not None: |
- super(ParseError, self).__init__(message) |
- else: |
- super(ParseError, self).__init__() |
- self._line = line |
- self._column = column |
- |
- def GetLine(self): |
- return self._line |
- |
- def GetColumn(self): |
- return self._column |
+ """Thrown in case of text parsing error.""" |
class TextWriter(object): |
- |
def __init__(self, as_utf8): |
if six.PY2: |
self._writer = io.BytesIO() |
@@ -118,15 +97,9 @@ class TextWriter(object): |
return self._writer.getvalue() |
-def MessageToString(message, |
- as_utf8=False, |
- as_one_line=False, |
- pointy_brackets=False, |
- use_index_order=False, |
- float_format=None, |
- use_field_number=False, |
- descriptor_pool=None, |
- indent=0): |
+def MessageToString(message, as_utf8=False, as_one_line=False, |
+ pointy_brackets=False, use_index_order=False, |
+ float_format=None, use_field_number=False): |
"""Convert protobuf message to text format. |
Floating point values can be formatted compactly with 15 digits of |
@@ -146,16 +119,14 @@ def MessageToString(message, |
float_format: If set, use this to specify floating point number formatting |
(per the "Format Specification Mini-Language"); otherwise, str() is used. |
use_field_number: If True, print field numbers instead of names. |
- descriptor_pool: A DescriptorPool used to resolve Any types. |
- indent: The indent level, in terms of spaces, for pretty print. |
Returns: |
A string of the text formatted protocol buffer message. |
""" |
out = TextWriter(as_utf8) |
- printer = _Printer(out, indent, as_utf8, as_one_line, pointy_brackets, |
- use_index_order, float_format, use_field_number, |
- descriptor_pool) |
+ printer = _Printer(out, 0, as_utf8, as_one_line, |
+ pointy_brackets, use_index_order, float_format, |
+ use_field_number) |
printer.PrintMessage(message) |
result = out.getvalue() |
out.close() |
@@ -170,87 +141,39 @@ def _IsMapEntry(field): |
field.message_type.GetOptions().map_entry) |
-def PrintMessage(message, |
- out, |
- indent=0, |
- as_utf8=False, |
- as_one_line=False, |
- pointy_brackets=False, |
- use_index_order=False, |
- float_format=None, |
- use_field_number=False, |
- descriptor_pool=None): |
- printer = _Printer(out, indent, as_utf8, as_one_line, pointy_brackets, |
- use_index_order, float_format, use_field_number, |
- descriptor_pool) |
+def PrintMessage(message, out, indent=0, as_utf8=False, as_one_line=False, |
+ pointy_brackets=False, use_index_order=False, |
+ float_format=None, use_field_number=False): |
+ printer = _Printer(out, indent, as_utf8, as_one_line, |
+ pointy_brackets, use_index_order, float_format, |
+ use_field_number) |
printer.PrintMessage(message) |
-def PrintField(field, |
- value, |
- out, |
- indent=0, |
- as_utf8=False, |
- as_one_line=False, |
- pointy_brackets=False, |
- use_index_order=False, |
- float_format=None): |
+def PrintField(field, value, out, indent=0, as_utf8=False, as_one_line=False, |
+ pointy_brackets=False, use_index_order=False, float_format=None): |
"""Print a single field name/value pair.""" |
- printer = _Printer(out, indent, as_utf8, as_one_line, pointy_brackets, |
- use_index_order, float_format) |
+ printer = _Printer(out, indent, as_utf8, as_one_line, |
+ pointy_brackets, use_index_order, float_format) |
printer.PrintField(field, value) |
-def PrintFieldValue(field, |
- value, |
- out, |
- indent=0, |
- as_utf8=False, |
- as_one_line=False, |
- pointy_brackets=False, |
+def PrintFieldValue(field, value, out, indent=0, as_utf8=False, |
+ as_one_line=False, pointy_brackets=False, |
use_index_order=False, |
float_format=None): |
"""Print a single field value (not including name).""" |
- printer = _Printer(out, indent, as_utf8, as_one_line, pointy_brackets, |
- use_index_order, float_format) |
+ printer = _Printer(out, indent, as_utf8, as_one_line, |
+ pointy_brackets, use_index_order, float_format) |
printer.PrintFieldValue(field, value) |
-def _BuildMessageFromTypeName(type_name, descriptor_pool): |
- """Returns a protobuf message instance. |
- |
- Args: |
- type_name: Fully-qualified protobuf message type name string. |
- descriptor_pool: DescriptorPool instance. |
- |
- Returns: |
- A Message instance of type matching type_name, or None if the a Descriptor |
- wasn't found matching type_name. |
- """ |
- # pylint: disable=g-import-not-at-top |
- from google.protobuf import symbol_database |
- database = symbol_database.Default() |
- try: |
- message_descriptor = descriptor_pool.FindMessageTypeByName(type_name) |
- except KeyError: |
- return None |
- message_type = database.GetPrototype(message_descriptor) |
- return message_type() |
- |
- |
class _Printer(object): |
"""Text format printer for protocol message.""" |
- def __init__(self, |
- out, |
- indent=0, |
- as_utf8=False, |
- as_one_line=False, |
- pointy_brackets=False, |
- use_index_order=False, |
- float_format=None, |
- use_field_number=False, |
- descriptor_pool=None): |
+ def __init__(self, out, indent=0, as_utf8=False, as_one_line=False, |
+ pointy_brackets=False, use_index_order=False, float_format=None, |
+ use_field_number=False): |
"""Initialize the Printer. |
Floating point values can be formatted compactly with 15 digits of |
@@ -272,7 +195,6 @@ class _Printer(object): |
(per the "Format Specification Mini-Language"); otherwise, str() is |
used. |
use_field_number: If True, print field numbers instead of names. |
- descriptor_pool: A DescriptorPool used to resolve Any types. |
""" |
self.out = out |
self.indent = indent |
@@ -282,20 +204,6 @@ class _Printer(object): |
self.use_index_order = use_index_order |
self.float_format = float_format |
self.use_field_number = use_field_number |
- self.descriptor_pool = descriptor_pool |
- |
- def _TryPrintAsAnyMessage(self, message): |
- """Serializes if message is a google.protobuf.Any field.""" |
- packed_message = _BuildMessageFromTypeName(message.TypeName(), |
- self.descriptor_pool) |
- if packed_message: |
- packed_message.MergeFromString(message.value) |
- self.out.write('%s[%s]' % (self.indent * ' ', message.type_url)) |
- self._PrintMessageFieldValue(packed_message) |
- self.out.write(' ' if self.as_one_line else '\n') |
- return True |
- else: |
- return False |
def PrintMessage(self, message): |
"""Convert protobuf message to text format. |
@@ -303,9 +211,6 @@ class _Printer(object): |
Args: |
message: The protocol buffers message. |
""" |
- if (message.DESCRIPTOR.full_name == _ANY_FULL_TYPE_NAME and |
- self.descriptor_pool and self._TryPrintAsAnyMessage(message)): |
- return |
fields = message.ListFields() |
if self.use_index_order: |
fields.sort(key=lambda x: x[0].index) |
@@ -317,7 +222,8 @@ class _Printer(object): |
# of this file to work around. |
# |
# TODO(haberman): refactor and optimize if this becomes an issue. |
- entry_submsg = value.GetEntryClass()(key=key, value=value[key]) |
+ entry_submsg = field.message_type._concrete_class( |
+ key=key, value=value[key]) |
self.PrintField(field, entry_submsg) |
elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED: |
for element in value: |
@@ -358,25 +264,6 @@ class _Printer(object): |
else: |
out.write('\n') |
- def _PrintMessageFieldValue(self, value): |
- if self.pointy_brackets: |
- openb = '<' |
- closeb = '>' |
- else: |
- openb = '{' |
- closeb = '}' |
- |
- if self.as_one_line: |
- self.out.write(' %s ' % openb) |
- self.PrintMessage(value) |
- self.out.write(closeb) |
- else: |
- self.out.write(' %s\n' % openb) |
- self.indent += 2 |
- self.PrintMessage(value) |
- self.indent -= 2 |
- self.out.write(' ' * self.indent + closeb) |
- |
def PrintFieldValue(self, field, value): |
"""Print a single field value (not including name). |
@@ -387,8 +274,24 @@ class _Printer(object): |
value: The value of the field. |
""" |
out = self.out |
+ if self.pointy_brackets: |
+ openb = '<' |
+ closeb = '>' |
+ else: |
+ openb = '{' |
+ closeb = '}' |
+ |
if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE: |
- self._PrintMessageFieldValue(value) |
+ if self.as_one_line: |
+ out.write(' %s ' % openb) |
+ self.PrintMessage(value) |
+ out.write(closeb) |
+ else: |
+ out.write(' %s\n' % openb) |
+ self.indent += 2 |
+ self.PrintMessage(value) |
+ self.indent -= 2 |
+ out.write(' ' * self.indent + closeb) |
elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_ENUM: |
enum_value = field.enum_type.values_by_number.get(value, None) |
if enum_value is not None: |
@@ -419,11 +322,9 @@ class _Printer(object): |
out.write(str(value)) |
-def Parse(text, |
- message, |
- allow_unknown_extension=False, |
- allow_field_number=False): |
- """Parses a text representation of a protocol message into a message. |
+def Parse(text, message, |
+ allow_unknown_extension=False, allow_field_number=False): |
+ """Parses an text representation of a protocol message into a message. |
Args: |
text: Message text representation. |
@@ -440,16 +341,13 @@ def Parse(text, |
""" |
if not isinstance(text, str): |
text = text.decode('utf-8') |
- return ParseLines( |
- text.split('\n'), message, allow_unknown_extension, allow_field_number) |
+ return ParseLines(text.split('\n'), message, allow_unknown_extension, |
+ allow_field_number) |
-def Merge(text, |
- message, |
- allow_unknown_extension=False, |
- allow_field_number=False, |
- descriptor_pool=None): |
- """Parses a text representation of a protocol message into a message. |
+def Merge(text, message, allow_unknown_extension=False, |
+ allow_field_number=False): |
+ """Parses an text representation of a protocol message into a message. |
Like Parse(), but allows repeated values for a non-repeated field, and uses |
the last one. |
@@ -460,7 +358,6 @@ def Merge(text, |
allow_unknown_extension: if True, skip over missing extensions and keep |
parsing |
allow_field_number: if True, both field number and field name are allowed. |
- descriptor_pool: A DescriptorPool used to resolve Any types. |
Returns: |
The same message passed as argument. |
@@ -468,19 +365,13 @@ def Merge(text, |
Raises: |
ParseError: On text parsing problems. |
""" |
- return MergeLines( |
- text.split('\n'), |
- message, |
- allow_unknown_extension, |
- allow_field_number, |
- descriptor_pool=descriptor_pool) |
+ return MergeLines(text.split('\n'), message, allow_unknown_extension, |
+ allow_field_number) |
-def ParseLines(lines, |
- message, |
- allow_unknown_extension=False, |
+def ParseLines(lines, message, allow_unknown_extension=False, |
allow_field_number=False): |
- """Parses a text representation of a protocol message into a message. |
+ """Parses an text representation of a protocol message into a message. |
Args: |
lines: An iterable of lines of a message's text representation. |
@@ -488,7 +379,6 @@ def ParseLines(lines, |
allow_unknown_extension: if True, skip over missing extensions and keep |
parsing |
allow_field_number: if True, both field number and field name are allowed. |
- descriptor_pool: A DescriptorPool used to resolve Any types. |
Returns: |
The same message passed as argument. |
@@ -500,12 +390,9 @@ def ParseLines(lines, |
return parser.ParseLines(lines, message) |
-def MergeLines(lines, |
- message, |
- allow_unknown_extension=False, |
- allow_field_number=False, |
- descriptor_pool=None): |
- """Parses a text representation of a protocol message into a message. |
+def MergeLines(lines, message, allow_unknown_extension=False, |
+ allow_field_number=False): |
+ """Parses an text representation of a protocol message into a message. |
Args: |
lines: An iterable of lines of a message's text representation. |
@@ -520,47 +407,41 @@ def MergeLines(lines, |
Raises: |
ParseError: On text parsing problems. |
""" |
- parser = _Parser(allow_unknown_extension, |
- allow_field_number, |
- descriptor_pool=descriptor_pool) |
+ parser = _Parser(allow_unknown_extension, allow_field_number) |
return parser.MergeLines(lines, message) |
class _Parser(object): |
"""Text format parser for protocol message.""" |
- def __init__(self, |
- allow_unknown_extension=False, |
- allow_field_number=False, |
- descriptor_pool=None): |
+ def __init__(self, allow_unknown_extension=False, allow_field_number=False): |
self.allow_unknown_extension = allow_unknown_extension |
self.allow_field_number = allow_field_number |
- self.descriptor_pool = descriptor_pool |
def ParseFromString(self, text, message): |
- """Parses a text representation of a protocol message into a message.""" |
+ """Parses an text representation of a protocol message into a message.""" |
if not isinstance(text, str): |
text = text.decode('utf-8') |
return self.ParseLines(text.split('\n'), message) |
def ParseLines(self, lines, message): |
- """Parses a text representation of a protocol message into a message.""" |
+ """Parses an text representation of a protocol message into a message.""" |
self._allow_multiple_scalars = False |
self._ParseOrMerge(lines, message) |
return message |
def MergeFromString(self, text, message): |
- """Merges a text representation of a protocol message into a message.""" |
+ """Merges an text representation of a protocol message into a message.""" |
return self._MergeLines(text.split('\n'), message) |
def MergeLines(self, lines, message): |
- """Merges a text representation of a protocol message into a message.""" |
+ """Merges an text representation of a protocol message into a message.""" |
self._allow_multiple_scalars = True |
self._ParseOrMerge(lines, message) |
return message |
def _ParseOrMerge(self, lines, message): |
- """Converts a text representation of a protocol message into a message. |
+ """Converts an text representation of a protocol message into a message. |
Args: |
lines: Lines of a message's text representation. |
@@ -569,7 +450,7 @@ class _Parser(object): |
Raises: |
ParseError: On text parsing problems. |
""" |
- tokenizer = Tokenizer(lines) |
+ tokenizer = _Tokenizer(lines) |
while not tokenizer.AtEnd(): |
self._MergeField(tokenizer, message) |
@@ -610,13 +491,13 @@ class _Parser(object): |
'Extension "%s" not registered.' % name) |
elif message_descriptor != field.containing_type: |
raise tokenizer.ParseErrorPreviousToken( |
- 'Extension "%s" does not extend message type "%s".' % |
- (name, message_descriptor.full_name)) |
+ 'Extension "%s" does not extend message type "%s".' % ( |
+ name, message_descriptor.full_name)) |
tokenizer.Consume(']') |
else: |
- name = tokenizer.ConsumeIdentifierOrNumber() |
+ name = tokenizer.ConsumeIdentifier() |
if self.allow_field_number and name.isdigit(): |
number = ParseInteger(name, True, True) |
field = message_descriptor.fields_by_number.get(number, None) |
@@ -639,8 +520,8 @@ class _Parser(object): |
if not field: |
raise tokenizer.ParseErrorPreviousToken( |
- 'Message type "%s" has no field named "%s".' % |
- (message_descriptor.full_name, name)) |
+ 'Message type "%s" has no field named "%s".' % ( |
+ message_descriptor.full_name, name)) |
if field: |
if not self._allow_multiple_scalars and field.containing_oneof: |
@@ -651,9 +532,9 @@ class _Parser(object): |
if which_oneof is not None and which_oneof != field.name: |
raise tokenizer.ParseErrorPreviousToken( |
'Field "%s" is specified along with field "%s", another member ' |
- 'of oneof "%s" for message type "%s".' % |
- (field.name, which_oneof, field.containing_oneof.name, |
- message_descriptor.full_name)) |
+ 'of oneof "%s" for message type "%s".' % ( |
+ field.name, which_oneof, field.containing_oneof.name, |
+ message_descriptor.full_name)) |
if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE: |
tokenizer.TryConsume(':') |
@@ -662,13 +543,12 @@ class _Parser(object): |
tokenizer.Consume(':') |
merger = self._MergeScalarField |
- if (field.label == descriptor.FieldDescriptor.LABEL_REPEATED and |
- tokenizer.TryConsume('[')): |
+ if (field.label == descriptor.FieldDescriptor.LABEL_REPEATED |
+ and tokenizer.TryConsume('[')): |
# Short repeated format, e.g. "foo: [1, 2, 3]" |
while True: |
merger(tokenizer, message, field) |
- if tokenizer.TryConsume(']'): |
- break |
+ if tokenizer.TryConsume(']'): break |
tokenizer.Consume(',') |
else: |
@@ -683,21 +563,6 @@ class _Parser(object): |
if not tokenizer.TryConsume(','): |
tokenizer.TryConsume(';') |
- def _ConsumeAnyTypeUrl(self, tokenizer): |
- """Consumes a google.protobuf.Any type URL and returns the type name.""" |
- # Consume "type.googleapis.com/". |
- tokenizer.ConsumeIdentifier() |
- tokenizer.Consume('.') |
- tokenizer.ConsumeIdentifier() |
- tokenizer.Consume('.') |
- tokenizer.ConsumeIdentifier() |
- tokenizer.Consume('/') |
- # Consume the fully-qualified type name. |
- name = [tokenizer.ConsumeIdentifier()] |
- while tokenizer.TryConsume('.'): |
- name.append(tokenizer.ConsumeIdentifier()) |
- return '.'.join(name) |
- |
def _MergeMessageField(self, tokenizer, message, field): |
"""Merges a single scalar field into a message. |
@@ -717,38 +582,12 @@ class _Parser(object): |
tokenizer.Consume('{') |
end_token = '}' |
- if (field.message_type.full_name == _ANY_FULL_TYPE_NAME and |
- tokenizer.TryConsume('[')): |
- packed_type_name = self._ConsumeAnyTypeUrl(tokenizer) |
- tokenizer.Consume(']') |
- tokenizer.TryConsume(':') |
- if tokenizer.TryConsume('<'): |
- expanded_any_end_token = '>' |
- else: |
- tokenizer.Consume('{') |
- expanded_any_end_token = '}' |
- if not self.descriptor_pool: |
- raise ParseError('Descriptor pool required to parse expanded Any field') |
- expanded_any_sub_message = _BuildMessageFromTypeName(packed_type_name, |
- self.descriptor_pool) |
- if not expanded_any_sub_message: |
- raise ParseError('Type %s not found in descriptor pool' % |
- packed_type_name) |
- while not tokenizer.TryConsume(expanded_any_end_token): |
- if tokenizer.AtEnd(): |
- raise tokenizer.ParseErrorPreviousToken('Expected "%s".' % |
- (expanded_any_end_token,)) |
- self._MergeField(tokenizer, expanded_any_sub_message) |
- if field.label == descriptor.FieldDescriptor.LABEL_REPEATED: |
- any_message = getattr(message, field.name).add() |
- else: |
- any_message = getattr(message, field.name) |
- any_message.Pack(expanded_any_sub_message) |
- elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED: |
+ if field.label == descriptor.FieldDescriptor.LABEL_REPEATED: |
if field.is_extension: |
sub_message = message.Extensions[field].add() |
elif is_map_entry: |
- sub_message = getattr(message, field.name).GetEntryClass()() |
+ # pylint: disable=protected-access |
+ sub_message = field.message_type._concrete_class() |
else: |
sub_message = getattr(message, field.name).add() |
else: |
@@ -789,17 +628,17 @@ class _Parser(object): |
if field.type in (descriptor.FieldDescriptor.TYPE_INT32, |
descriptor.FieldDescriptor.TYPE_SINT32, |
descriptor.FieldDescriptor.TYPE_SFIXED32): |
- value = _ConsumeInt32(tokenizer) |
+ value = tokenizer.ConsumeInt32() |
elif field.type in (descriptor.FieldDescriptor.TYPE_INT64, |
descriptor.FieldDescriptor.TYPE_SINT64, |
descriptor.FieldDescriptor.TYPE_SFIXED64): |
- value = _ConsumeInt64(tokenizer) |
+ value = tokenizer.ConsumeInt64() |
elif field.type in (descriptor.FieldDescriptor.TYPE_UINT32, |
descriptor.FieldDescriptor.TYPE_FIXED32): |
- value = _ConsumeUint32(tokenizer) |
+ value = tokenizer.ConsumeUint32() |
elif field.type in (descriptor.FieldDescriptor.TYPE_UINT64, |
descriptor.FieldDescriptor.TYPE_FIXED64): |
- value = _ConsumeUint64(tokenizer) |
+ value = tokenizer.ConsumeUint64() |
elif field.type in (descriptor.FieldDescriptor.TYPE_FLOAT, |
descriptor.FieldDescriptor.TYPE_DOUBLE): |
value = tokenizer.ConsumeFloat() |
@@ -914,12 +753,13 @@ def _SkipFieldValue(tokenizer): |
return |
if (not tokenizer.TryConsumeIdentifier() and |
- not _TryConsumeInt64(tokenizer) and not _TryConsumeUint64(tokenizer) and |
+ not tokenizer.TryConsumeInt64() and |
+ not tokenizer.TryConsumeUint64() and |
not tokenizer.TryConsumeFloat()): |
raise ParseError('Invalid field value: ' + tokenizer.token) |
-class Tokenizer(object): |
+class _Tokenizer(object): |
"""Protocol buffer text representation tokenizer. |
This class handles the lower level string parsing by splitting it into |
@@ -928,20 +768,17 @@ class Tokenizer(object): |
It was directly ported from the Java protocol buffer API. |
""" |
- _WHITESPACE = re.compile(r'\s+') |
- _COMMENT = re.compile(r'(\s*#.*$)', re.MULTILINE) |
- _WHITESPACE_OR_COMMENT = re.compile(r'(\s|(#.*$))+', re.MULTILINE) |
+ _WHITESPACE = re.compile('(\\s|(#.*$))+', re.MULTILINE) |
_TOKEN = re.compile('|'.join([ |
- r'[a-zA-Z_][0-9a-zA-Z_+-]*', # an identifier |
+ r'[a-zA-Z_][0-9a-zA-Z_+-]*', # an identifier |
r'([0-9+-]|(\.[0-9]))[0-9a-zA-Z_.+-]*', # a number |
- ] + [ # quoted str for each quote mark |
+ ] + [ # quoted str for each quote mark |
r'{qt}([^{qt}\n\\]|\\.)*({qt}|\\?$)'.format(qt=mark) for mark in _QUOTES |
])) |
- _IDENTIFIER = re.compile(r'[^\d\W]\w*') |
- _IDENTIFIER_OR_NUMBER = re.compile(r'\w+') |
+ _IDENTIFIER = re.compile(r'\w+') |
- def __init__(self, lines, skip_comments=True): |
+ def __init__(self, lines): |
self._position = 0 |
self._line = -1 |
self._column = 0 |
@@ -952,9 +789,6 @@ class Tokenizer(object): |
self._previous_line = 0 |
self._previous_column = 0 |
self._more_lines = True |
- self._skip_comments = skip_comments |
- self._whitespace_pattern = (skip_comments and self._WHITESPACE_OR_COMMENT |
- or self._WHITESPACE) |
self._SkipWhitespace() |
self.NextToken() |
@@ -984,7 +818,7 @@ class Tokenizer(object): |
def _SkipWhitespace(self): |
while True: |
self._PopLine() |
- match = self._whitespace_pattern.match(self._current_line, self._column) |
+ match = self._WHITESPACE.match(self._current_line, self._column) |
if not match: |
break |
length = len(match.group(0)) |
@@ -1014,14 +848,7 @@ class Tokenizer(object): |
ParseError: If the text couldn't be consumed. |
""" |
if not self.TryConsume(token): |
- raise self.ParseError('Expected "%s".' % token) |
- |
- def ConsumeComment(self): |
- result = self.token |
- if not self._COMMENT.match(result): |
- raise self.ParseError('Expected comment.') |
- self.NextToken() |
- return result |
+ raise self._ParseError('Expected "%s".' % token) |
def TryConsumeIdentifier(self): |
try: |
@@ -1041,55 +868,85 @@ class Tokenizer(object): |
""" |
result = self.token |
if not self._IDENTIFIER.match(result): |
- raise self.ParseError('Expected identifier.') |
+ raise self._ParseError('Expected identifier.') |
self.NextToken() |
return result |
- def TryConsumeIdentifierOrNumber(self): |
+ def ConsumeInt32(self): |
+ """Consumes a signed 32bit integer number. |
+ |
+ Returns: |
+ The integer parsed. |
+ |
+ Raises: |
+ ParseError: If a signed 32bit integer couldn't be consumed. |
+ """ |
try: |
- self.ConsumeIdentifierOrNumber() |
+ result = ParseInteger(self.token, is_signed=True, is_long=False) |
+ except ValueError as e: |
+ raise self._ParseError(str(e)) |
+ self.NextToken() |
+ return result |
+ |
+ def ConsumeUint32(self): |
+ """Consumes an unsigned 32bit integer number. |
+ |
+ Returns: |
+ The integer parsed. |
+ |
+ Raises: |
+ ParseError: If an unsigned 32bit integer couldn't be consumed. |
+ """ |
+ try: |
+ result = ParseInteger(self.token, is_signed=False, is_long=False) |
+ except ValueError as e: |
+ raise self._ParseError(str(e)) |
+ self.NextToken() |
+ return result |
+ |
+ def TryConsumeInt64(self): |
+ try: |
+ self.ConsumeInt64() |
return True |
except ParseError: |
return False |
- def ConsumeIdentifierOrNumber(self): |
- """Consumes protocol message field identifier. |
+ def ConsumeInt64(self): |
+ """Consumes a signed 64bit integer number. |
Returns: |
- Identifier string. |
+ The integer parsed. |
Raises: |
- ParseError: If an identifier couldn't be consumed. |
+ ParseError: If a signed 64bit integer couldn't be consumed. |
""" |
- result = self.token |
- if not self._IDENTIFIER_OR_NUMBER.match(result): |
- raise self.ParseError('Expected identifier or number.') |
+ try: |
+ result = ParseInteger(self.token, is_signed=True, is_long=True) |
+ except ValueError as e: |
+ raise self._ParseError(str(e)) |
self.NextToken() |
return result |
- def TryConsumeInteger(self): |
+ def TryConsumeUint64(self): |
try: |
- # Note: is_long only affects value type, not whether an error is raised. |
- self.ConsumeInteger() |
+ self.ConsumeUint64() |
return True |
except ParseError: |
return False |
- def ConsumeInteger(self, is_long=False): |
- """Consumes an integer number. |
+ def ConsumeUint64(self): |
+ """Consumes an unsigned 64bit integer number. |
- Args: |
- is_long: True if the value should be returned as a long integer. |
Returns: |
The integer parsed. |
Raises: |
- ParseError: If an integer couldn't be consumed. |
+ ParseError: If an unsigned 64bit integer couldn't be consumed. |
""" |
try: |
- result = _ParseAbstractInteger(self.token, is_long=is_long) |
+ result = ParseInteger(self.token, is_signed=False, is_long=True) |
except ValueError as e: |
- raise self.ParseError(str(e)) |
+ raise self._ParseError(str(e)) |
self.NextToken() |
return result |
@@ -1112,7 +969,7 @@ class Tokenizer(object): |
try: |
result = ParseFloat(self.token) |
except ValueError as e: |
- raise self.ParseError(str(e)) |
+ raise self._ParseError(str(e)) |
self.NextToken() |
return result |
@@ -1128,7 +985,7 @@ class Tokenizer(object): |
try: |
result = ParseBool(self.token) |
except ValueError as e: |
- raise self.ParseError(str(e)) |
+ raise self._ParseError(str(e)) |
self.NextToken() |
return result |
@@ -1182,15 +1039,15 @@ class Tokenizer(object): |
""" |
text = self.token |
if len(text) < 1 or text[0] not in _QUOTES: |
- raise self.ParseError('Expected string but found: %r' % (text,)) |
+ raise self._ParseError('Expected string but found: %r' % (text,)) |
if len(text) < 2 or text[-1] != text[0]: |
- raise self.ParseError('String missing ending quote: %r' % (text,)) |
+ raise self._ParseError('String missing ending quote: %r' % (text,)) |
try: |
result = text_encoding.CUnescape(text[1:-1]) |
except ValueError as e: |
- raise self.ParseError(str(e)) |
+ raise self._ParseError(str(e)) |
self.NextToken() |
return result |
@@ -1198,7 +1055,7 @@ class Tokenizer(object): |
try: |
result = ParseEnum(field, self.token) |
except ValueError as e: |
- raise self.ParseError(str(e)) |
+ raise self._ParseError(str(e)) |
self.NextToken() |
return result |
@@ -1211,15 +1068,16 @@ class Tokenizer(object): |
Returns: |
A ParseError instance. |
""" |
- return ParseError(message, self._previous_line + 1, |
- self._previous_column + 1) |
+ return ParseError('%d:%d : %s' % ( |
+ self._previous_line + 1, self._previous_column + 1, message)) |
- def ParseError(self, message): |
+ def _ParseError(self, message): |
"""Creates and *returns* a ParseError for the current token.""" |
- return ParseError(message, self._line + 1, self._column + 1) |
+ return ParseError('%d:%d : %s' % ( |
+ self._line + 1, self._column + 1, message)) |
def _StringParseError(self, e): |
- return self.ParseError('Couldn\'t parse string: ' + str(e)) |
+ return self._ParseError('Couldn\'t parse string: ' + str(e)) |
def NextToken(self): |
"""Reads the next meaningful token.""" |
@@ -1234,124 +1092,12 @@ class Tokenizer(object): |
return |
match = self._TOKEN.match(self._current_line, self._column) |
- if not match and not self._skip_comments: |
- match = self._COMMENT.match(self._current_line, self._column) |
if match: |
token = match.group(0) |
self.token = token |
else: |
self.token = self._current_line[self._column] |
-# Aliased so it can still be accessed by current visibility violators. |
-# TODO(dbarnett): Migrate violators to textformat_tokenizer. |
-_Tokenizer = Tokenizer # pylint: disable=invalid-name |
- |
- |
-def _ConsumeInt32(tokenizer): |
- """Consumes a signed 32bit integer number from tokenizer. |
- |
- Args: |
- tokenizer: A tokenizer used to parse the number. |
- |
- Returns: |
- The integer parsed. |
- |
- Raises: |
- ParseError: If a signed 32bit integer couldn't be consumed. |
- """ |
- return _ConsumeInteger(tokenizer, is_signed=True, is_long=False) |
- |
- |
-def _ConsumeUint32(tokenizer): |
- """Consumes an unsigned 32bit integer number from tokenizer. |
- |
- Args: |
- tokenizer: A tokenizer used to parse the number. |
- |
- Returns: |
- The integer parsed. |
- |
- Raises: |
- ParseError: If an unsigned 32bit integer couldn't be consumed. |
- """ |
- return _ConsumeInteger(tokenizer, is_signed=False, is_long=False) |
- |
- |
-def _TryConsumeInt64(tokenizer): |
- try: |
- _ConsumeInt64(tokenizer) |
- return True |
- except ParseError: |
- return False |
- |
- |
-def _ConsumeInt64(tokenizer): |
- """Consumes a signed 32bit integer number from tokenizer. |
- |
- Args: |
- tokenizer: A tokenizer used to parse the number. |
- |
- Returns: |
- The integer parsed. |
- |
- Raises: |
- ParseError: If a signed 32bit integer couldn't be consumed. |
- """ |
- return _ConsumeInteger(tokenizer, is_signed=True, is_long=True) |
- |
- |
-def _TryConsumeUint64(tokenizer): |
- try: |
- _ConsumeUint64(tokenizer) |
- return True |
- except ParseError: |
- return False |
- |
- |
-def _ConsumeUint64(tokenizer): |
- """Consumes an unsigned 64bit integer number from tokenizer. |
- |
- Args: |
- tokenizer: A tokenizer used to parse the number. |
- |
- Returns: |
- The integer parsed. |
- |
- Raises: |
- ParseError: If an unsigned 64bit integer couldn't be consumed. |
- """ |
- return _ConsumeInteger(tokenizer, is_signed=False, is_long=True) |
- |
- |
-def _TryConsumeInteger(tokenizer, is_signed=False, is_long=False): |
- try: |
- _ConsumeInteger(tokenizer, is_signed=is_signed, is_long=is_long) |
- return True |
- except ParseError: |
- return False |
- |
- |
-def _ConsumeInteger(tokenizer, is_signed=False, is_long=False): |
- """Consumes an integer number from tokenizer. |
- |
- Args: |
- tokenizer: A tokenizer used to parse the number. |
- is_signed: True if a signed integer must be parsed. |
- is_long: True if a long integer must be parsed. |
- |
- Returns: |
- The integer parsed. |
- |
- Raises: |
- ParseError: If an integer with given characteristics couldn't be consumed. |
- """ |
- try: |
- result = ParseInteger(tokenizer.token, is_signed=is_signed, is_long=is_long) |
- except ValueError as e: |
- raise tokenizer.ParseError(str(e)) |
- tokenizer.NextToken() |
- return result |
- |
def ParseInteger(text, is_signed=False, is_long=False): |
"""Parses an integer. |
@@ -1368,39 +1114,22 @@ def ParseInteger(text, is_signed=False, is_long=False): |
ValueError: Thrown Iff the text is not a valid integer. |
""" |
# Do the actual parsing. Exception handling is propagated to caller. |
- result = _ParseAbstractInteger(text, is_long=is_long) |
- |
- # Check if the integer is sane. Exceptions handled by callers. |
- checker = _INTEGER_CHECKERS[2 * int(is_long) + int(is_signed)] |
- checker.CheckValue(result) |
- return result |
- |
- |
-def _ParseAbstractInteger(text, is_long=False): |
- """Parses an integer without checking size/signedness. |
- |
- Args: |
- text: The text to parse. |
- is_long: True if the value should be returned as a long integer. |
- |
- Returns: |
- The integer value. |
- |
- Raises: |
- ValueError: Thrown Iff the text is not a valid integer. |
- """ |
- # Do the actual parsing. Exception handling is propagated to caller. |
try: |
# We force 32-bit values to int and 64-bit values to long to make |
# alternate implementations where the distinction is more significant |
# (e.g. the C++ implementation) simpler. |
if is_long: |
- return long(text, 0) |
+ result = long(text, 0) |
else: |
- return int(text, 0) |
+ result = int(text, 0) |
except ValueError: |
raise ValueError('Couldn\'t parse integer: %s' % text) |
+ # Check if the integer is sane. Exceptions handled by callers. |
+ checker = _INTEGER_CHECKERS[2 * int(is_long) + int(is_signed)] |
+ checker.CheckValue(result) |
+ return result |
+ |
def ParseFloat(text): |
"""Parse a floating point number. |
@@ -1446,9 +1175,9 @@ def ParseBool(text): |
Raises: |
ValueError: If text is not a valid boolean. |
""" |
- if text in ('true', 't', '1', 'True'): |
+ if text in ('true', 't', '1'): |
return True |
- elif text in ('false', 'f', '0', 'False'): |
+ elif text in ('false', 'f', '0'): |
return False |
else: |
raise ValueError('Expected "true" or "false".') |
@@ -1477,12 +1206,14 @@ def ParseEnum(field, value): |
# Identifier. |
enum_value = enum_descriptor.values_by_name.get(value, None) |
if enum_value is None: |
- raise ValueError('Enum type "%s" has no value named %s.' % |
- (enum_descriptor.full_name, value)) |
+ raise ValueError( |
+ 'Enum type "%s" has no value named %s.' % ( |
+ enum_descriptor.full_name, value)) |
else: |
# Numeric value. |
enum_value = enum_descriptor.values_by_number.get(number, None) |
if enum_value is None: |
- raise ValueError('Enum type "%s" has no value with number %d.' % |
- (enum_descriptor.full_name, number)) |
+ raise ValueError( |
+ 'Enum type "%s" has no value with number %d.' % ( |
+ enum_descriptor.full_name, number)) |
return enum_value.number |
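
A minimal usage sketch (not part of the patch itself) of the module-level helpers after this revert is applied, assuming the reverted file is importable as google.protobuf.text_format: ParseBool once again accepts only the lowercase literals, and parse errors carry their line/column information inside the exception message rather than via GetLine()/GetColumn().

    from google.protobuf import text_format

    # Lowercase boolean literals still parse.
    assert text_format.ParseBool('true') is True
    assert text_format.ParseBool('0') is False

    # Capitalized literals are rejected again after this revert.
    try:
        text_format.ParseBool('True')
    except ValueError as err:
        print(err)  # prints: Expected "true" or "false".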