From d64a2d9941c36a7bc2a7959ea10ab8363192ac14 Mon Sep 17 00:00:00 2001
From: Adam Cozzette
Date: Wed, 29 Jun 2016 15:23:27 -0700
Subject: Integrated internal changes from Google

This includes all internal changes from around May 20 to now.
---
 python/google/protobuf/text_format.py | 613 ++++++++++++++++++++++++----------
 1 file changed, 442 insertions(+), 171 deletions(-)

(limited to 'python/google/protobuf/text_format.py')

diff --git a/python/google/protobuf/text_format.py b/python/google/protobuf/text_format.py
index 6f1e3c8b..c4b23c37 100755
--- a/python/google/protobuf/text_format.py
+++ b/python/google/protobuf/text_format.py
@@ -48,15 +48,15 @@ import re
 import six
 
 if six.PY3:
-  long = int
+  long = int  # pylint: disable=redefined-builtin,invalid-name
 
+# pylint: disable=g-import-not-at-top
 from google.protobuf.internal import type_checkers
 from google.protobuf import descriptor
 from google.protobuf import text_encoding
 
-__all__ = ['MessageToString', 'PrintMessage', 'PrintField',
-           'PrintFieldValue', 'Merge']
-
+__all__ = ['MessageToString', 'PrintMessage', 'PrintField', 'PrintFieldValue',
+           'Merge']
 
 _INTEGER_CHECKERS = (type_checkers.Uint32ValueChecker(),
                      type_checkers.Int32ValueChecker(),
@@ -67,6 +67,7 @@ _FLOAT_NAN = re.compile('nanf?', re.IGNORECASE)
 _FLOAT_TYPES = frozenset([descriptor.FieldDescriptor.CPPTYPE_FLOAT,
                           descriptor.FieldDescriptor.CPPTYPE_DOUBLE])
 _QUOTES = frozenset(("'", '"'))
+_ANY_FULL_TYPE_NAME = 'google.protobuf.Any'
 
 
 class Error(Exception):
@@ -74,10 +75,30 @@ class Error(Exception):
 
 
 class ParseError(Error):
-  """Thrown in case of text parsing error."""
+  """Thrown in case of text parsing or tokenizing error."""
+
+  def __init__(self, message=None, line=None, column=None):
+    if message is not None and line is not None:
+      loc = str(line)
+      if column is not None:
+        loc += ':{}'.format(column)
+      message = '{} : {}'.format(loc, message)
+    if message is not None:
+      super(ParseError, self).__init__(message)
+    else:
+      super(ParseError, self).__init__()
+    self._line = line
+    self._column = column
+
+  def GetLine(self):
+    return self._line
+
+  def GetColumn(self):
+    return self._column
 
 
 class TextWriter(object):
+
   def __init__(self, as_utf8):
     if six.PY2:
       self._writer = io.BytesIO()
@@ -97,9 +118,15 @@ class TextWriter(object):
     return self._writer.getvalue()
 
 
-def MessageToString(message, as_utf8=False, as_one_line=False,
-                    pointy_brackets=False, use_index_order=False,
-                    float_format=None, use_field_number=False):
+def MessageToString(message,
+                    as_utf8=False,
+                    as_one_line=False,
+                    pointy_brackets=False,
+                    use_index_order=False,
+                    float_format=None,
+                    use_field_number=False,
+                    descriptor_pool=None,
+                    indent=0):
   """Convert protobuf message to text format.
 
   Floating point values can be formatted compactly with 15 digits of
@@ -119,14 +146,16 @@ def MessageToString(message, as_utf8=False, as_one_line=False,
     float_format: If set, use this to specify floating point number formatting
       (per the "Format Specification Mini-Language"); otherwise, str() is used.
     use_field_number: If True, print field numbers instead of names.
+    descriptor_pool: A DescriptorPool used to resolve Any types.
+    indent: The indent level, in terms of spaces, for pretty print.
 
   Returns:
     A string of the text formatted protocol buffer message.
""" out = TextWriter(as_utf8) - printer = _Printer(out, 0, as_utf8, as_one_line, - pointy_brackets, use_index_order, float_format, - use_field_number) + printer = _Printer(out, indent, as_utf8, as_one_line, pointy_brackets, + use_index_order, float_format, use_field_number, + descriptor_pool) printer.PrintMessage(message) result = out.getvalue() out.close() @@ -141,39 +170,87 @@ def _IsMapEntry(field): field.message_type.GetOptions().map_entry) -def PrintMessage(message, out, indent=0, as_utf8=False, as_one_line=False, - pointy_brackets=False, use_index_order=False, - float_format=None, use_field_number=False): - printer = _Printer(out, indent, as_utf8, as_one_line, - pointy_brackets, use_index_order, float_format, - use_field_number) +def PrintMessage(message, + out, + indent=0, + as_utf8=False, + as_one_line=False, + pointy_brackets=False, + use_index_order=False, + float_format=None, + use_field_number=False, + descriptor_pool=None): + printer = _Printer(out, indent, as_utf8, as_one_line, pointy_brackets, + use_index_order, float_format, use_field_number, + descriptor_pool) printer.PrintMessage(message) -def PrintField(field, value, out, indent=0, as_utf8=False, as_one_line=False, - pointy_brackets=False, use_index_order=False, float_format=None): +def PrintField(field, + value, + out, + indent=0, + as_utf8=False, + as_one_line=False, + pointy_brackets=False, + use_index_order=False, + float_format=None): """Print a single field name/value pair.""" - printer = _Printer(out, indent, as_utf8, as_one_line, - pointy_brackets, use_index_order, float_format) + printer = _Printer(out, indent, as_utf8, as_one_line, pointy_brackets, + use_index_order, float_format) printer.PrintField(field, value) -def PrintFieldValue(field, value, out, indent=0, as_utf8=False, - as_one_line=False, pointy_brackets=False, +def PrintFieldValue(field, + value, + out, + indent=0, + as_utf8=False, + as_one_line=False, + pointy_brackets=False, use_index_order=False, float_format=None): """Print a single field value (not including name).""" - printer = _Printer(out, indent, as_utf8, as_one_line, - pointy_brackets, use_index_order, float_format) + printer = _Printer(out, indent, as_utf8, as_one_line, pointy_brackets, + use_index_order, float_format) printer.PrintFieldValue(field, value) +def _BuildMessageFromTypeName(type_name, descriptor_pool): + """Returns a protobuf message instance. + + Args: + type_name: Fully-qualified protobuf message type name string. + descriptor_pool: DescriptorPool instance. + + Returns: + A Message instance of type matching type_name, or None if the a Descriptor + wasn't found matching type_name. + """ + # pylint: disable=g-import-not-at-top + from google.protobuf import message_factory + factory = message_factory.MessageFactory(descriptor_pool) + try: + message_descriptor = descriptor_pool.FindMessageTypeByName(type_name) + except KeyError: + return None + message_type = factory.GetPrototype(message_descriptor) + return message_type() + + class _Printer(object): """Text format printer for protocol message.""" - def __init__(self, out, indent=0, as_utf8=False, as_one_line=False, - pointy_brackets=False, use_index_order=False, float_format=None, - use_field_number=False): + def __init__(self, + out, + indent=0, + as_utf8=False, + as_one_line=False, + pointy_brackets=False, + use_index_order=False, + float_format=None, + use_field_number=False, + descriptor_pool=None): """Initialize the Printer. 
 
    Floating point values can be formatted compactly with 15 digits of
@@ -195,6 +272,7 @@ class _Printer(object):
        (per the "Format Specification Mini-Language"); otherwise, str() is
        used.
      use_field_number: If True, print field numbers instead of names.
+      descriptor_pool: A DescriptorPool used to resolve Any types.
    """
    self.out = out
    self.indent = indent
@@ -204,6 +282,20 @@ class _Printer(object):
    self.use_index_order = use_index_order
    self.float_format = float_format
    self.use_field_number = use_field_number
+    self.descriptor_pool = descriptor_pool
+
+  def _TryPrintAsAnyMessage(self, message):
+    """Serializes if message is a google.protobuf.Any field."""
+    packed_message = _BuildMessageFromTypeName(message.TypeName(),
+                                               self.descriptor_pool)
+    if packed_message:
+      packed_message.MergeFromString(message.value)
+      self.out.write('%s[%s]' % (self.indent * ' ', message.type_url))
+      self._PrintMessageFieldValue(packed_message)
+      self.out.write(' ' if self.as_one_line else '\n')
+      return True
+    else:
+      return False
 
   def PrintMessage(self, message):
     """Convert protobuf message to text format.
@@ -211,6 +303,9 @@ class _Printer(object):
     Args:
       message: The protocol buffers message.
     """
+    if (message.DESCRIPTOR.full_name == _ANY_FULL_TYPE_NAME and
+        self.descriptor_pool and self._TryPrintAsAnyMessage(message)):
+      return
     fields = message.ListFields()
     if self.use_index_order:
       fields.sort(key=lambda x: x[0].index)
@@ -222,8 +317,8 @@ class _Printer(object):
           # of this file to work around.
           #
           # TODO(haberman): refactor and optimize if this becomes an issue.
-          entry_submsg = field.message_type._concrete_class(
-              key=key, value=value[key])
+          entry_submsg = field.message_type._concrete_class(key=key,
+                                                            value=value[key])
           self.PrintField(field, entry_submsg)
       elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
         for element in value:
@@ -264,6 +359,25 @@ class _Printer(object):
     else:
       out.write('\n')
 
+  def _PrintMessageFieldValue(self, value):
+    if self.pointy_brackets:
+      openb = '<'
+      closeb = '>'
+    else:
+      openb = '{'
+      closeb = '}'
+
+    if self.as_one_line:
+      self.out.write(' %s ' % openb)
+      self.PrintMessage(value)
+      self.out.write(closeb)
+    else:
+      self.out.write(' %s\n' % openb)
+      self.indent += 2
+      self.PrintMessage(value)
+      self.indent -= 2
+      self.out.write(' ' * self.indent + closeb)
+
   def PrintFieldValue(self, field, value):
     """Print a single field value (not including name).
 
@@ -274,24 +388,8 @@ class _Printer(object):
       value: The value of the field.
     """
     out = self.out
-    if self.pointy_brackets:
-      openb = '<'
-      closeb = '>'
-    else:
-      openb = '{'
-      closeb = '}'
-
     if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
-      if self.as_one_line:
-        out.write(' %s ' % openb)
-        self.PrintMessage(value)
-        out.write(closeb)
-      else:
-        out.write(' %s\n' % openb)
-        self.indent += 2
-        self.PrintMessage(value)
-        self.indent -= 2
-        out.write(' ' * self.indent + closeb)
+      self._PrintMessageFieldValue(value)
     elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_ENUM:
       enum_value = field.enum_type.values_by_number.get(value, None)
       if enum_value is not None:
@@ -322,9 +420,11 @@ class _Printer(object):
       out.write(str(value))
 
 
-def Parse(text, message,
-          allow_unknown_extension=False, allow_field_number=False):
-  """Parses an text representation of a protocol message into a message.
+def Parse(text,
+          message,
+          allow_unknown_extension=False,
+          allow_field_number=False):
+  """Parses a text representation of a protocol message into a message.
 
   Args:
     text: Message text representation.
@@ -341,13 +441,16 @@ def Parse(text, message,
   """
   if not isinstance(text, str):
     text = text.decode('utf-8')
-  return ParseLines(text.split('\n'), message, allow_unknown_extension,
-                    allow_field_number)
+  return ParseLines(
+      text.split('\n'), message, allow_unknown_extension, allow_field_number)
 
 
-def Merge(text, message, allow_unknown_extension=False,
-          allow_field_number=False):
-  """Parses an text representation of a protocol message into a message.
+def Merge(text,
+          message,
+          allow_unknown_extension=False,
+          allow_field_number=False,
+          descriptor_pool=None):
+  """Parses a text representation of a protocol message into a message.
 
   Like Parse(), but allows repeated values for a non-repeated field, and uses
   the last one.
@@ -358,6 +461,7 @@ def Merge(text, message, allow_unknown_extension=False,
     allow_unknown_extension: if True, skip over missing extensions and keep
       parsing
     allow_field_number: if True, both field number and field name are allowed.
+    descriptor_pool: A DescriptorPool used to resolve Any types.
 
   Returns:
     The same message passed as argument.
@@ -365,13 +469,19 @@ def Merge(text, message, allow_unknown_extension=False,
   Raises:
     ParseError: On text parsing problems.
   """
-  return MergeLines(text.split('\n'), message, allow_unknown_extension,
-                    allow_field_number)
+  return MergeLines(
+      text.split('\n'),
+      message,
+      allow_unknown_extension,
+      allow_field_number,
+      descriptor_pool=descriptor_pool)
 
 
-def ParseLines(lines, message, allow_unknown_extension=False,
+def ParseLines(lines,
+               message,
+               allow_unknown_extension=False,
                allow_field_number=False):
-  """Parses an text representation of a protocol message into a message.
+  """Parses a text representation of a protocol message into a message.
 
   Args:
     lines: An iterable of lines of a message's text representation.
@@ -379,6 +489,7 @@ def ParseLines(lines, message, allow_unknown_extension=False,
     allow_unknown_extension: if True, skip over missing extensions and keep
       parsing
     allow_field_number: if True, both field number and field name are allowed.
+    descriptor_pool: A DescriptorPool used to resolve Any types.
 
   Returns:
     The same message passed as argument.
@@ -390,9 +501,12 @@ def ParseLines(lines, message, allow_unknown_extension=False,
   return parser.ParseLines(lines, message)
 
 
-def MergeLines(lines, message, allow_unknown_extension=False,
-               allow_field_number=False):
-  """Parses an text representation of a protocol message into a message.
+def MergeLines(lines,
+               message,
+               allow_unknown_extension=False,
+               allow_field_number=False,
+               descriptor_pool=None):
+  """Parses a text representation of a protocol message into a message.
 
   Args:
     lines: An iterable of lines of a message's text representation.
@@ -407,41 +521,47 @@ def MergeLines(lines, message, allow_unknown_extension=False,
     allow_unknown_extension: if True, skip over missing extensions and keep
       parsing
     allow_field_number: if True, both field number and field name are allowed.
 
   Returns:
     The same message passed as argument.
 
   Raises:
     ParseError: On text parsing problems.
""" - parser = _Parser(allow_unknown_extension, allow_field_number) + parser = _Parser(allow_unknown_extension, + allow_field_number, + descriptor_pool=descriptor_pool) return parser.MergeLines(lines, message) class _Parser(object): """Text format parser for protocol message.""" - def __init__(self, allow_unknown_extension=False, allow_field_number=False): + def __init__(self, + allow_unknown_extension=False, + allow_field_number=False, + descriptor_pool=None): self.allow_unknown_extension = allow_unknown_extension self.allow_field_number = allow_field_number + self.descriptor_pool = descriptor_pool def ParseFromString(self, text, message): - """Parses an text representation of a protocol message into a message.""" + """Parses a text representation of a protocol message into a message.""" if not isinstance(text, str): text = text.decode('utf-8') return self.ParseLines(text.split('\n'), message) def ParseLines(self, lines, message): - """Parses an text representation of a protocol message into a message.""" + """Parses a text representation of a protocol message into a message.""" self._allow_multiple_scalars = False self._ParseOrMerge(lines, message) return message def MergeFromString(self, text, message): - """Merges an text representation of a protocol message into a message.""" + """Merges a text representation of a protocol message into a message.""" return self._MergeLines(text.split('\n'), message) def MergeLines(self, lines, message): - """Merges an text representation of a protocol message into a message.""" + """Merges a text representation of a protocol message into a message.""" self._allow_multiple_scalars = True self._ParseOrMerge(lines, message) return message def _ParseOrMerge(self, lines, message): - """Converts an text representation of a protocol message into a message. + """Converts a text representation of a protocol message into a message. Args: lines: Lines of a message's text representation. @@ -450,7 +570,7 @@ class _Parser(object): Raises: ParseError: On text parsing problems. """ - tokenizer = _Tokenizer(lines) + tokenizer = Tokenizer(lines) while not tokenizer.AtEnd(): self._MergeField(tokenizer, message) @@ -491,13 +611,13 @@ class _Parser(object): 'Extension "%s" not registered.' % name) elif message_descriptor != field.containing_type: raise tokenizer.ParseErrorPreviousToken( - 'Extension "%s" does not extend message type "%s".' % ( - name, message_descriptor.full_name)) + 'Extension "%s" does not extend message type "%s".' % + (name, message_descriptor.full_name)) tokenizer.Consume(']') else: - name = tokenizer.ConsumeIdentifier() + name = tokenizer.ConsumeIdentifierOrNumber() if self.allow_field_number and name.isdigit(): number = ParseInteger(name, True, True) field = message_descriptor.fields_by_number.get(number, None) @@ -520,8 +640,8 @@ class _Parser(object): if not field: raise tokenizer.ParseErrorPreviousToken( - 'Message type "%s" has no field named "%s".' % ( - message_descriptor.full_name, name)) + 'Message type "%s" has no field named "%s".' % + (message_descriptor.full_name, name)) if field: if not self._allow_multiple_scalars and field.containing_oneof: @@ -532,9 +652,9 @@ class _Parser(object): if which_oneof is not None and which_oneof != field.name: raise tokenizer.ParseErrorPreviousToken( 'Field "%s" is specified along with field "%s", another member ' - 'of oneof "%s" for message type "%s".' % ( - field.name, which_oneof, field.containing_oneof.name, - message_descriptor.full_name)) + 'of oneof "%s" for message type "%s".' 
+              (field.name, which_oneof, field.containing_oneof.name,
+               message_descriptor.full_name))
 
    if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
      tokenizer.TryConsume(':')
@@ -543,12 +663,13 @@ class _Parser(object):
      tokenizer.Consume(':')
      merger = self._MergeScalarField
 
-    if (field.label == descriptor.FieldDescriptor.LABEL_REPEATED
-        and tokenizer.TryConsume('[')):
+    if (field.label == descriptor.FieldDescriptor.LABEL_REPEATED and
+        tokenizer.TryConsume('[')):
      # Short repeated format, e.g. "foo: [1, 2, 3]"
      while True:
        merger(tokenizer, message, field)
-        if tokenizer.TryConsume(']'): break
+        if tokenizer.TryConsume(']'):
+          break
        tokenizer.Consume(',')
 
    else:
@@ -563,6 +684,21 @@ class _Parser(object):
    if not tokenizer.TryConsume(','):
      tokenizer.TryConsume(';')
 
+  def _ConsumeAnyTypeUrl(self, tokenizer):
+    """Consumes a google.protobuf.Any type URL and returns the type name."""
+    # Consume "type.googleapis.com/".
+    tokenizer.ConsumeIdentifier()
+    tokenizer.Consume('.')
+    tokenizer.ConsumeIdentifier()
+    tokenizer.Consume('.')
+    tokenizer.ConsumeIdentifier()
+    tokenizer.Consume('/')
+    # Consume the fully-qualified type name.
+    name = [tokenizer.ConsumeIdentifier()]
+    while tokenizer.TryConsume('.'):
+      name.append(tokenizer.ConsumeIdentifier())
+    return '.'.join(name)
+
  def _MergeMessageField(self, tokenizer, message, field):
    """Merges a single scalar field into a message.
 
@@ -582,7 +718,34 @@ class _Parser(object):
      tokenizer.Consume('{')
      end_token = '}'
 
-    if field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
+    if (field.message_type.full_name == _ANY_FULL_TYPE_NAME and
+        tokenizer.TryConsume('[')):
+      packed_type_name = self._ConsumeAnyTypeUrl(tokenizer)
+      tokenizer.Consume(']')
+      tokenizer.TryConsume(':')
+      if tokenizer.TryConsume('<'):
+        expanded_any_end_token = '>'
+      else:
+        tokenizer.Consume('{')
+        expanded_any_end_token = '}'
+      if not self.descriptor_pool:
+        raise ParseError('Descriptor pool required to parse expanded Any field')
+      expanded_any_sub_message = _BuildMessageFromTypeName(packed_type_name,
+                                                           self.descriptor_pool)
+      if not expanded_any_sub_message:
+        raise ParseError('Type %s not found in descriptor pool' %
+                         packed_type_name)
+      while not tokenizer.TryConsume(expanded_any_end_token):
+        if tokenizer.AtEnd():
+          raise tokenizer.ParseErrorPreviousToken('Expected "%s".' %
+                                                  (expanded_any_end_token,))
+        self._MergeField(tokenizer, expanded_any_sub_message)
+      if field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
+        any_message = getattr(message, field.name).add()
+      else:
+        any_message = getattr(message, field.name)
+      any_message.Pack(expanded_any_sub_message)
+    elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
      if field.is_extension:
        sub_message = message.Extensions[field].add()
      elif is_map_entry:
@@ -628,17 +791,17 @@ class _Parser(object):
    if field.type in (descriptor.FieldDescriptor.TYPE_INT32,
                      descriptor.FieldDescriptor.TYPE_SINT32,
                      descriptor.FieldDescriptor.TYPE_SFIXED32):
-      value = tokenizer.ConsumeInt32()
+      value = _ConsumeInt32(tokenizer)
    elif field.type in (descriptor.FieldDescriptor.TYPE_INT64,
                        descriptor.FieldDescriptor.TYPE_SINT64,
                        descriptor.FieldDescriptor.TYPE_SFIXED64):
-      value = tokenizer.ConsumeInt64()
+      value = _ConsumeInt64(tokenizer)
    elif field.type in (descriptor.FieldDescriptor.TYPE_UINT32,
                        descriptor.FieldDescriptor.TYPE_FIXED32):
-      value = tokenizer.ConsumeUint32()
+      value = _ConsumeUint32(tokenizer)
    elif field.type in (descriptor.FieldDescriptor.TYPE_UINT64,
                        descriptor.FieldDescriptor.TYPE_FIXED64):
-      value = tokenizer.ConsumeUint64()
+      value = _ConsumeUint64(tokenizer)
    elif field.type in (descriptor.FieldDescriptor.TYPE_FLOAT,
                        descriptor.FieldDescriptor.TYPE_DOUBLE):
      value = tokenizer.ConsumeFloat()
@@ -753,13 +916,12 @@ class _Parser(object):
    return
 
  if (not tokenizer.TryConsumeIdentifier() and
-      not tokenizer.TryConsumeInt64() and
-      not tokenizer.TryConsumeUint64() and
+      not _TryConsumeInt64(tokenizer) and not _TryConsumeUint64(tokenizer) and
      not tokenizer.TryConsumeFloat()):
    raise ParseError('Invalid field value: ' + tokenizer.token)
 
 
-class _Tokenizer(object):
+class Tokenizer(object):
  """Protocol buffer text representation tokenizer.
 
  This class handles the lower level string parsing by splitting it into
@@ -768,17 +930,20 @@ class _Tokenizer(object):
  It was directly ported from the Java protocol buffer API.
  """
 
-  _WHITESPACE = re.compile('(\\s|(#.*$))+', re.MULTILINE)
+  _WHITESPACE = re.compile(r'\s+')
+  _COMMENT = re.compile(r'(\s*#.*$)', re.MULTILINE)
+  _WHITESPACE_OR_COMMENT = re.compile(r'(\s|(#.*$))+', re.MULTILINE)
  _TOKEN = re.compile('|'.join([
      r'[a-zA-Z_][0-9a-zA-Z_+-]*',  # an identifier
      r'([0-9+-]|(\.[0-9]))[0-9a-zA-Z_.+-]*',  # a number
  ] + [  # quoted str for each quote mark
      r'{qt}([^{qt}\n\\]|\\.)*({qt}|\\?$)'.format(qt=mark) for mark in _QUOTES
  ]))
 
-  _IDENTIFIER = re.compile(r'\w+')
+  _IDENTIFIER = re.compile(r'[^\d\W]\w*')
+  _IDENTIFIER_OR_NUMBER = re.compile(r'\w+')
 
-  def __init__(self, lines):
+  def __init__(self, lines, skip_comments=True):
    self._position = 0
    self._line = -1
    self._column = 0
@@ -789,6 +954,9 @@ class _Tokenizer(object):
    self._previous_line = 0
    self._previous_column = 0
    self._more_lines = True
+    self._skip_comments = skip_comments
+    self._whitespace_pattern = (skip_comments and self._WHITESPACE_OR_COMMENT
+                                or self._WHITESPACE)
    self._SkipWhitespace()
    self.NextToken()
 
@@ -818,7 +986,7 @@ class _Tokenizer(object):
  def _SkipWhitespace(self):
    while True:
      self._PopLine()
-      match = self._WHITESPACE.match(self._current_line, self._column)
+      match = self._whitespace_pattern.match(self._current_line, self._column)
      if not match:
        break
      length = len(match.group(0))
@@ -848,7 +1016,14 @@ class _Tokenizer(object):
      ParseError: If the text couldn't be consumed.
""" if not self.TryConsume(token): - raise self._ParseError('Expected "%s".' % token) + raise self.ParseError('Expected "%s".' % token) + + def ConsumeComment(self): + result = self.token + if not self._COMMENT.match(result): + raise self.ParseError('Expected comment.') + self.NextToken() + return result def TryConsumeIdentifier(self): try: @@ -868,85 +1043,55 @@ class _Tokenizer(object): """ result = self.token if not self._IDENTIFIER.match(result): - raise self._ParseError('Expected identifier.') + raise self.ParseError('Expected identifier.') self.NextToken() return result - def ConsumeInt32(self): - """Consumes a signed 32bit integer number. - - Returns: - The integer parsed. - - Raises: - ParseError: If a signed 32bit integer couldn't be consumed. - """ + def TryConsumeIdentifierOrNumber(self): try: - result = ParseInteger(self.token, is_signed=True, is_long=False) - except ValueError as e: - raise self._ParseError(str(e)) - self.NextToken() - return result - - def ConsumeUint32(self): - """Consumes an unsigned 32bit integer number. - - Returns: - The integer parsed. - - Raises: - ParseError: If an unsigned 32bit integer couldn't be consumed. - """ - try: - result = ParseInteger(self.token, is_signed=False, is_long=False) - except ValueError as e: - raise self._ParseError(str(e)) - self.NextToken() - return result - - def TryConsumeInt64(self): - try: - self.ConsumeInt64() + self.ConsumeIdentifierOrNumber() return True except ParseError: return False - def ConsumeInt64(self): - """Consumes a signed 64bit integer number. + def ConsumeIdentifierOrNumber(self): + """Consumes protocol message field identifier. Returns: - The integer parsed. + Identifier string. Raises: - ParseError: If a signed 64bit integer couldn't be consumed. + ParseError: If an identifier couldn't be consumed. """ - try: - result = ParseInteger(self.token, is_signed=True, is_long=True) - except ValueError as e: - raise self._ParseError(str(e)) + result = self.token + if not self._IDENTIFIER_OR_NUMBER.match(result): + raise self.ParseError('Expected identifier or number.') self.NextToken() return result - def TryConsumeUint64(self): + def TryConsumeInteger(self): try: - self.ConsumeUint64() + # Note: is_long only affects value type, not whether an error is raised. + self.ConsumeInteger() return True except ParseError: return False - def ConsumeUint64(self): - """Consumes an unsigned 64bit integer number. + def ConsumeInteger(self, is_long=False): + """Consumes an integer number. + Args: + is_long: True if the value should be returned as a long integer. Returns: The integer parsed. Raises: - ParseError: If an unsigned 64bit integer couldn't be consumed. + ParseError: If an integer couldn't be consumed. 
""" try: - result = ParseInteger(self.token, is_signed=False, is_long=True) + result = _ParseAbstractInteger(self.token, is_long=is_long) except ValueError as e: - raise self._ParseError(str(e)) + raise self.ParseError(str(e)) self.NextToken() return result @@ -969,7 +1114,7 @@ class _Tokenizer(object): try: result = ParseFloat(self.token) except ValueError as e: - raise self._ParseError(str(e)) + raise self.ParseError(str(e)) self.NextToken() return result @@ -985,7 +1130,7 @@ class _Tokenizer(object): try: result = ParseBool(self.token) except ValueError as e: - raise self._ParseError(str(e)) + raise self.ParseError(str(e)) self.NextToken() return result @@ -1039,15 +1184,15 @@ class _Tokenizer(object): """ text = self.token if len(text) < 1 or text[0] not in _QUOTES: - raise self._ParseError('Expected string but found: %r' % (text,)) + raise self.ParseError('Expected string but found: %r' % (text,)) if len(text) < 2 or text[-1] != text[0]: - raise self._ParseError('String missing ending quote: %r' % (text,)) + raise self.ParseError('String missing ending quote: %r' % (text,)) try: result = text_encoding.CUnescape(text[1:-1]) except ValueError as e: - raise self._ParseError(str(e)) + raise self.ParseError(str(e)) self.NextToken() return result @@ -1055,7 +1200,7 @@ class _Tokenizer(object): try: result = ParseEnum(field, self.token) except ValueError as e: - raise self._ParseError(str(e)) + raise self.ParseError(str(e)) self.NextToken() return result @@ -1068,16 +1213,15 @@ class _Tokenizer(object): Returns: A ParseError instance. """ - return ParseError('%d:%d : %s' % ( - self._previous_line + 1, self._previous_column + 1, message)) + return ParseError(message, self._previous_line + 1, + self._previous_column + 1) - def _ParseError(self, message): + def ParseError(self, message): """Creates and *returns* a ParseError for the current token.""" - return ParseError('%d:%d : %s' % ( - self._line + 1, self._column + 1, message)) + return ParseError(message, self._line + 1, self._column + 1) def _StringParseError(self, e): - return self._ParseError('Couldn\'t parse string: ' + str(e)) + return self.ParseError('Couldn\'t parse string: ' + str(e)) def NextToken(self): """Reads the next meaningful token.""" @@ -1092,12 +1236,124 @@ class _Tokenizer(object): return match = self._TOKEN.match(self._current_line, self._column) + if not match and not self._skip_comments: + match = self._COMMENT.match(self._current_line, self._column) if match: token = match.group(0) self.token = token else: self.token = self._current_line[self._column] +# Aliased so it can still be accessed by current visibility violators. +# TODO(dbarnett): Migrate violators to textformat_tokenizer. +_Tokenizer = Tokenizer # pylint: disable=invalid-name + + +def _ConsumeInt32(tokenizer): + """Consumes a signed 32bit integer number from tokenizer. + + Args: + tokenizer: A tokenizer used to parse the number. + + Returns: + The integer parsed. + + Raises: + ParseError: If a signed 32bit integer couldn't be consumed. + """ + return _ConsumeInteger(tokenizer, is_signed=True, is_long=False) + + +def _ConsumeUint32(tokenizer): + """Consumes an unsigned 32bit integer number from tokenizer. + + Args: + tokenizer: A tokenizer used to parse the number. + + Returns: + The integer parsed. + + Raises: + ParseError: If an unsigned 32bit integer couldn't be consumed. 
+  """
+  return _ConsumeInteger(tokenizer, is_signed=False, is_long=False)
+
+
+def _TryConsumeInt64(tokenizer):
+  try:
+    _ConsumeInt64(tokenizer)
+    return True
+  except ParseError:
+    return False
+
+
+def _ConsumeInt64(tokenizer):
+  """Consumes a signed 64bit integer number from tokenizer.
+
+  Args:
+    tokenizer: A tokenizer used to parse the number.
+
+  Returns:
+    The integer parsed.
+
+  Raises:
+    ParseError: If a signed 64bit integer couldn't be consumed.
+  """
+  return _ConsumeInteger(tokenizer, is_signed=True, is_long=True)
+
+
+def _TryConsumeUint64(tokenizer):
+  try:
+    _ConsumeUint64(tokenizer)
+    return True
+  except ParseError:
+    return False
+
+
+def _ConsumeUint64(tokenizer):
+  """Consumes an unsigned 64bit integer number from tokenizer.
+
+  Args:
+    tokenizer: A tokenizer used to parse the number.
+
+  Returns:
+    The integer parsed.
+
+  Raises:
+    ParseError: If an unsigned 64bit integer couldn't be consumed.
+  """
+  return _ConsumeInteger(tokenizer, is_signed=False, is_long=True)
+
+
+def _TryConsumeInteger(tokenizer, is_signed=False, is_long=False):
+  try:
+    _ConsumeInteger(tokenizer, is_signed=is_signed, is_long=is_long)
+    return True
+  except ParseError:
+    return False
+
+
+def _ConsumeInteger(tokenizer, is_signed=False, is_long=False):
+  """Consumes an integer number from tokenizer.
+
+  Args:
+    tokenizer: A tokenizer used to parse the number.
+    is_signed: True if a signed integer must be parsed.
+    is_long: True if a long integer must be parsed.
+
+  Returns:
+    The integer parsed.
+
+  Raises:
+    ParseError: If an integer with given characteristics couldn't be consumed.
+  """
+  try:
+    result = ParseInteger(tokenizer.token, is_signed=is_signed, is_long=is_long)
+  except ValueError as e:
+    raise tokenizer.ParseError(str(e))
+  tokenizer.NextToken()
+  return result
+
+
 def ParseInteger(text, is_signed=False, is_long=False):
   """Parses an integer.
 
@@ -1110,6 +1366,28 @@ def ParseInteger(text, is_signed=False, is_long=False):
   Returns:
     The integer value.
 
+  Raises:
+    ValueError: Thrown Iff the text is not a valid integer.
+  """
+  # Do the actual parsing. Exception handling is propagated to caller.
+  result = _ParseAbstractInteger(text, is_long=is_long)
+
+  # Check if the integer is sane. Exceptions handled by callers.
+  checker = _INTEGER_CHECKERS[2 * int(is_long) + int(is_signed)]
+  checker.CheckValue(result)
+  return result
+
+
+def _ParseAbstractInteger(text, is_long=False):
+  """Parses an integer without checking size/signedness.
+
+  Args:
+    text: The text to parse.
+    is_long: True if the value should be returned as a long integer.
+
+  Returns:
+    The integer value.
+
@@ -1119,17 +1397,12 @@ def ParseInteger(text, is_signed=False, is_long=False):
   Raises:
     ValueError: Thrown Iff the text is not a valid integer.
   """
   try:
     # We force 32-bit values to int and 64-bit values to long to make
     # alternate implementations where the distinction is more significant
     # (e.g. the C++ implementation) simpler.
     if is_long:
-      result = long(text, 0)
+      return long(text, 0)
     else:
-      result = int(text, 0)
+      return int(text, 0)
   except ValueError:
     raise ValueError('Couldn\'t parse integer: %s' % text)
 
-  # Check if the integer is sane. Exceptions handled by callers.
-  checker = _INTEGER_CHECKERS[2 * int(is_long) + int(is_signed)]
-  checker.CheckValue(result)
-  return result
-
 
 def ParseFloat(text):
   """Parse a floating point number.
 
@@ -1206,14 +1479,12 @@ def ParseEnum(field, value):
     # Identifier.
     enum_value = enum_descriptor.values_by_name.get(value, None)
     if enum_value is None:
-      raise ValueError(
-          'Enum type "%s" has no value named %s.' % (
-              enum_descriptor.full_name, value))
+      raise ValueError('Enum type "%s" has no value named %s.' %
+                       (enum_descriptor.full_name, value))
   else:
     # Numeric value.
     enum_value = enum_descriptor.values_by_number.get(number, None)
     if enum_value is None:
-      raise ValueError(
-          'Enum type "%s" has no value with number %d.' % (
-              enum_descriptor.full_name, number))
+      raise ValueError('Enum type "%s" has no value with number %d.' %
+                       (enum_descriptor.full_name, number))
   return enum_value.number
-- 
cgit v1.2.3
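
Note on the headline change above: expanded google.protobuf.Any support. MessageToString()/PrintMessage() gain a descriptor_pool argument that lets the printer unpack an Any payload through _TryPrintAsAnyMessage(), and Merge()/MergeLines() accept the same argument so the parser can read the expanded "[type URL] { ... }" form back. The sketch below is not part of the patch; it only illustrates the printing side, it assumes descriptor_pool.Default() is available in the installed protobuf package, and the Timestamp payload value is arbitrary.

    from google.protobuf import any_pb2
    from google.protobuf import descriptor_pool
    from google.protobuf import text_format
    from google.protobuf import timestamp_pb2

    any_msg = any_pb2.Any()
    any_msg.Pack(timestamp_pb2.Timestamp(seconds=1467158400))  # arbitrary payload

    # Without a pool the Any is printed as its raw type_url/value fields.
    print(text_format.MessageToString(any_msg))

    # With a pool the payload is expanded into
    # [type.googleapis.com/google.protobuf.Timestamp] { seconds: ... } form.
    print(text_format.MessageToString(
        any_msg, descriptor_pool=descriptor_pool.Default()))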
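
A second behavioural change worth calling out: ParseError now carries structured location information. Tokenizer errors are constructed as ParseError(message, line, column), GetLine()/GetColumn() expose those values, and str(error) keeps the "line:column : message" prefix. A minimal sketch, not part of the patch; the field name and the malformed input are arbitrary.

    from google.protobuf import text_format
    from google.protobuf import timestamp_pb2

    try:
      text_format.Merge('seconds: not_a_number', timestamp_pb2.Timestamp())
    except text_format.ParseError as err:
      print(err.GetLine(), err.GetColumn())  # structured location accessors
      print(str(err))                        # message still starts with "line:column : "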