author     Tres Seaver <tseaver@palladion.com>    2015-01-13 14:21:29 -0500
committer  Tres Seaver <tseaver@palladion.com>    2015-01-13 14:22:05 -0500
commit     f336d4b7a5c1d369ed508e513d482c885705e939 (patch)
tree       4d0b2d191780864b6c787780193e071f2ea7f434 /python/google/protobuf
parent     052e0205a76717f39fc65e303fd2b92ab1df3028 (diff)
Prepare for Python2-Python3 straddle.
- Remove PY25 cruft.
- Selectively apply cleanups from 'python-modernize':
  - New exception syntax.
  - Use 'six' to handle module renames.
  - Use 'six' to handle text / binary stuff.

This PR covers most of the work from #66 which falls inside `python` (rather than the Python code generation stuff in 'src').
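For orientation, the straddle idioms applied throughout this diff look roughly like the following standalone sketch (illustrative only, not code from this commit; the helper names are hypothetical):

    from __future__ import print_function

    import six
    import six.moves.copyreg as copyreg    # 'six' handles the copy_reg -> copyreg rename

    def parse_or_report(text):
        try:
            return int(text)
        except ValueError as exc:          # new exception syntax, instead of "except ValueError, exc:"
            return 'could not parse: %s' % exc

    label = six.text_type('status')        # text type: unicode on Python 2, str on Python 3
    payload = b'\x00\x01'                  # bytes literals replace the old .encode('latin1') PY25 shims
    print(parse_or_report('42'), parse_or_report('n/a'), type(label), payload)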
Diffstat (limited to 'python/google/protobuf')
-rwxr-xr-x  python/google/protobuf/__init__.py | 2
-rwxr-xr-x  python/google/protobuf/descriptor.py | 4
-rw-r--r--  python/google/protobuf/descriptor_pool.py | 12
-rwxr-xr-x  python/google/protobuf/internal/cpp_message.py | 24
-rwxr-xr-x  python/google/protobuf/internal/decoder.py | 51
-rwxr-xr-x  python/google/protobuf/internal/encoder.py | 50
-rwxr-xr-x  python/google/protobuf/internal/generator_test.py | 2
-rw-r--r--  python/google/protobuf/internal/message_factory_test.py | 6
-rwxr-xr-x  python/google/protobuf/internal/python_message.py | 31
-rwxr-xr-x  python/google/protobuf/internal/reflection_test.py | 65
-rwxr-xr-x  python/google/protobuf/internal/text_format_test.py | 5
-rwxr-xr-x  python/google/protobuf/internal/type_checkers.py | 27
-rw-r--r--  python/google/protobuf/message_factory.py | 10
-rw-r--r--  python/google/protobuf/text_encoding.py | 20
-rwxr-xr-x  python/google/protobuf/text_format.py | 33
15 files changed, 144 insertions(+), 198 deletions(-)
diff --git a/python/google/protobuf/__init__.py b/python/google/protobuf/__init__.py
index ec3b0934..03eb74ef 100755
--- a/python/google/protobuf/__init__.py
+++ b/python/google/protobuf/__init__.py
@@ -28,8 +28,6 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-# Needs to stay compatible with Python 2.5 due to GAE.
-#
# Copyright 2007 Google Inc. All Rights Reserved.
__version__ = '3.0.0-pre'
diff --git a/python/google/protobuf/descriptor.py b/python/google/protobuf/descriptor.py
index af571b7c..e7acdacd 100755
--- a/python/google/protobuf/descriptor.py
+++ b/python/google/protobuf/descriptor.py
@@ -28,8 +28,6 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-# Needs to stay compatible with Python 2.5 due to GAE.
-#
# Copyright 2007 Google Inc. All Rights Reserved.
"""Descriptors essentially contain exactly the information found in a .proto
@@ -846,5 +844,5 @@ def MakeDescriptor(desc_proto, package='', build_file_if_cpp=True):
desc_name = '.'.join(full_message_name)
return Descriptor(desc_proto.name, desc_name, None, None, fields,
- nested_types.values(), enum_types.values(), [],
+ list(nested_types.values()), list(enum_types.values()), [],
options=desc_proto.options)
diff --git a/python/google/protobuf/descriptor_pool.py b/python/google/protobuf/descriptor_pool.py
index bcac513a..9a751bac 100644
--- a/python/google/protobuf/descriptor_pool.py
+++ b/python/google/protobuf/descriptor_pool.py
@@ -57,8 +57,6 @@ directly instead of this class.
__author__ = 'matthewtoia@google.com (Matt Toia)'
-import sys
-
from google.protobuf import descriptor
from google.protobuf import descriptor_database
from google.protobuf import text_encoding
@@ -175,8 +173,7 @@ class DescriptorPool(object):
try:
file_proto = self._internal_db.FindFileByName(file_name)
- except KeyError:
- _, error, _ = sys.exc_info() #PY25 compatible for GAE.
+ except KeyError as error:
if self._descriptor_db:
file_proto = self._descriptor_db.FindFileByName(file_name)
else:
@@ -211,8 +208,7 @@ class DescriptorPool(object):
try:
file_proto = self._internal_db.FindFileContainingSymbol(symbol)
- except KeyError:
- _, error, _ = sys.exc_info() #PY25 compatible for GAE.
+ except KeyError as error:
if self._descriptor_db:
file_proto = self._descriptor_db.FindFileContainingSymbol(symbol)
else:
@@ -282,9 +278,9 @@ class DescriptorPool(object):
# file proto.
for dependency in built_deps:
scope.update(self._ExtractSymbols(
- dependency.message_types_by_name.values()))
+ list(dependency.message_types_by_name.values())))
scope.update((_PrefixWithDot(enum.full_name), enum)
- for enum in dependency.enum_types_by_name.values())
+ for enum in list(dependency.enum_types_by_name.values()))
for message_type in file_proto.message_type:
message_desc = self._ConvertMessageDescriptor(
diff --git a/python/google/protobuf/internal/cpp_message.py b/python/google/protobuf/internal/cpp_message.py
index 0313cb0b..031b525d 100755
--- a/python/google/protobuf/internal/cpp_message.py
+++ b/python/google/protobuf/internal/cpp_message.py
@@ -34,8 +34,12 @@ Descriptor objects at runtime backed by the protocol buffer C++ API.
__author__ = 'petar@google.com (Petar Petrov)'
-import copy_reg
+import collections
import operator
+
+import six
+import six.moves.copyreg
+
from google.protobuf.internal import _net_proto2___python
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import message
@@ -146,7 +150,7 @@ class RepeatedScalarContainer(object):
def __eq__(self, other):
if self is other:
return True
- if not operator.isSequenceType(other):
+ if not isinstance(other, collections.Sequence):
raise TypeError(
'Can only compare repeated scalar fields against sequences.')
# We are presumably comparing against some other sequence type.
@@ -259,7 +263,7 @@ class RepeatedCompositeContainer(object):
index_key = lambda i: key(self[i])
# Sort the list of current indexes by the underlying object.
- indexes = range(len(self))
+ indexes = list(range(len(self)))
indexes.sort(cmp=cmp, key=index_key, reverse=reverse)
# Apply the transposition.
@@ -385,7 +389,7 @@ def InitMessage(message_descriptor, cls):
_AddInitMethod(message_descriptor, cls)
_AddMessageMethods(message_descriptor, cls)
_AddPropertiesForExtensions(message_descriptor, cls)
- copy_reg.pickle(cls, lambda obj: (cls, (), obj.__getstate__()))
+ six.moves.copyreg.pickle(cls, lambda obj: (cls, (), obj.__getstate__()))
def _AddDescriptors(message_descriptor, dictionary):
@@ -400,7 +404,7 @@ def _AddDescriptors(message_descriptor, dictionary):
dictionary['__descriptors'][field.name] = GetFieldDescriptor(
field.full_name)
- dictionary['__slots__'] = list(dictionary['__descriptors'].iterkeys()) + [
+ dictionary['__slots__'] = list(dictionary['__descriptors'].keys()) + [
'_cmsg', '_owner', '_composite_fields', 'Extensions', '_HACK_REFCOUNTS']
@@ -420,7 +424,7 @@ def _AddEnumValues(message_descriptor, dictionary):
def _AddClassAttributesForNestedExtensions(message_descriptor, dictionary):
"""Adds class attributes for the nested extensions."""
extension_dict = message_descriptor.extensions_by_name
- for extension_name, extension_field in extension_dict.iteritems():
+ for extension_name, extension_field in extension_dict.items():
assert extension_name not in dictionary
dictionary[extension_name] = extension_field
@@ -474,7 +478,7 @@ def _AddInitMethod(message_descriptor, cls):
self._HACK_REFCOUNTS = self
self._composite_fields = {}
- for field_name, field_value in kwargs.iteritems():
+ for field_name, field_value in kwargs.items():
field_cdescriptor = self.__descriptors.get(field_name, None)
if not field_cdescriptor:
raise ValueError('Protocol message has no "%s" field.' % field_name)
@@ -538,7 +542,7 @@ def _AddMessageMethods(message_descriptor, cls):
def Clear(self):
cmessages_to_release = []
- for field_name, child_field in self._composite_fields.iteritems():
+ for field_name, child_field in self._composite_fields.items():
child_cdescriptor = self.__descriptors[field_name]
# TODO(anuraag): Support clearing repeated message fields as well.
if (child_cdescriptor.label != _LABEL_REPEATED and
@@ -631,7 +635,7 @@ def _AddMessageMethods(message_descriptor, cls):
return text_format.MessageToString(self, as_utf8=True).decode('utf-8')
# Attach the local methods to the message class.
- for key, value in locals().copy().iteritems():
+ for key, value in locals().copy().items():
if key not in ('key', 'value', '__builtins__', '__name__', '__doc__'):
setattr(cls, key, value)
@@ -658,6 +662,6 @@ def _AddMessageMethods(message_descriptor, cls):
def _AddPropertiesForExtensions(message_descriptor, cls):
"""Adds properties for all fields in this protocol message type."""
extension_dict = message_descriptor.extensions_by_name
- for extension_name, extension_field in extension_dict.iteritems():
+ for extension_name, extension_field in extension_dict.items():
constant_name = extension_name.upper() + '_FIELD_NUMBER'
setattr(cls, constant_name, extension_field.number)
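The copy_reg to six.moves.copyreg switch above keeps pickling of generated classes working on both interpreters. In isolation the registration pattern is roughly as follows (a hedged sketch with a toy class, not the generated message type):

    import pickle
    import six.moves.copyreg

    class Toy(object):
        def __init__(self):
            self.value = None
        def __getstate__(self):
            return {'value': self.value}
        def __setstate__(self, state):
            self.value = state['value']

    # Same (callable, args, state) shape as the registration in cpp_message.py.
    six.moves.copyreg.pickle(Toy, lambda obj: (Toy, (), obj.__getstate__()))

    t = Toy()
    t.value = 42
    assert pickle.loads(pickle.dumps(t)).value == 42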
diff --git a/python/google/protobuf/internal/decoder.py b/python/google/protobuf/internal/decoder.py
index a4b90608..6b72adef 100755
--- a/python/google/protobuf/internal/decoder.py
+++ b/python/google/protobuf/internal/decoder.py
@@ -28,8 +28,6 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#PY25 compatible for GAE.
-#
# Copyright 2009 Google Inc. All Rights Reserved.
"""Code for decoding protocol buffer primitives.
@@ -85,8 +83,9 @@ we repeatedly read a tag, look up the corresponding decoder, and invoke it.
__author__ = 'kenton@google.com (Kenton Varda)'
import struct
-import sys ##PY25
-_PY2 = sys.version_info[0] < 3 ##PY25
+
+import six
+
from google.protobuf.internal import encoder
from google.protobuf.internal import wire_format
from google.protobuf import message
@@ -114,14 +113,11 @@ def _VarintDecoder(mask, result_type):
decoder returns a (value, new_pos) pair.
"""
- local_ord = ord
- py2 = _PY2 ##PY25
-##!PY25 py2 = str is bytes
def DecodeVarint(buffer, pos):
result = 0
shift = 0
while 1:
- b = local_ord(buffer[pos]) if py2 else buffer[pos]
+ b = six.indexbytes(buffer, pos)
result |= ((b & 0x7f) << shift)
pos += 1
if not (b & 0x80):
@@ -137,14 +133,11 @@ def _VarintDecoder(mask, result_type):
def _SignedVarintDecoder(mask, result_type):
"""Like _VarintDecoder() but decodes signed values."""
- local_ord = ord
- py2 = _PY2 ##PY25
-##!PY25 py2 = str is bytes
def DecodeVarint(buffer, pos):
result = 0
shift = 0
while 1:
- b = local_ord(buffer[pos]) if py2 else buffer[pos]
+ b = six.indexbytes(buffer, pos)
result |= ((b & 0x7f) << shift)
pos += 1
if not (b & 0x80):
@@ -164,8 +157,8 @@ def _SignedVarintDecoder(mask, result_type):
# alternate implementations where the distinction is more significant
# (e.g. the C++ implementation) simpler.
-_DecodeVarint = _VarintDecoder((1 << 64) - 1, long)
-_DecodeSignedVarint = _SignedVarintDecoder((1 << 64) - 1, long)
+_DecodeVarint = _VarintDecoder((1 << 64) - 1, int)
+_DecodeSignedVarint = _SignedVarintDecoder((1 << 64) - 1, int)
# Use these versions for values which must be limited to 32 bits.
_DecodeVarint32 = _VarintDecoder((1 << 32) - 1, int)
@@ -183,10 +176,8 @@ def ReadTag(buffer, pos):
use that, but not in Python.
"""
- py2 = _PY2 ##PY25
-##!PY25 py2 = str is bytes
start = pos
- while (ord(buffer[pos]) if py2 else buffer[pos]) & 0x80:
+ while six.indexbytes(buffer, pos) & 0x80:
pos += 1
pos += 1
return (buffer[start:pos], pos)
@@ -301,7 +292,6 @@ def _FloatDecoder():
"""
local_unpack = struct.unpack
- b = (lambda x:x) if _PY2 else lambda x:x.encode('latin1') ##PY25
def InnerDecode(buffer, pos):
# We expect a 32-bit value in little-endian byte order. Bit 1 is the sign
@@ -312,17 +302,12 @@ def _FloatDecoder():
# If this value has all its exponent bits set, then it's non-finite.
# In Python 2.4, struct.unpack will convert it to a finite 64-bit value.
# To avoid that, we parse it specially.
- if ((float_bytes[3:4] in b('\x7F\xFF')) ##PY25
-##!PY25 if ((float_bytes[3:4] in b'\x7F\xFF')
- and (float_bytes[2:3] >= b('\x80'))): ##PY25
-##!PY25 and (float_bytes[2:3] >= b'\x80')):
+ if (float_bytes[3:4] in b'\x7F\xFF' and float_bytes[2:3] >= b'\x80'):
# If at least one significand bit is set...
- if float_bytes[0:3] != b('\x00\x00\x80'): ##PY25
-##!PY25 if float_bytes[0:3] != b'\x00\x00\x80':
+ if float_bytes[0:3] != b'\x00\x00\x80':
return (_NAN, new_pos)
# If sign bit is set...
- if float_bytes[3:4] == b('\xFF'): ##PY25
-##!PY25 if float_bytes[3:4] == b'\xFF':
+ if float_bytes[3:4] == b'\xFF':
return (_NEG_INF, new_pos)
return (_POS_INF, new_pos)
@@ -341,7 +326,6 @@ def _DoubleDecoder():
"""
local_unpack = struct.unpack
- b = (lambda x:x) if _PY2 else lambda x:x.encode('latin1') ##PY25
def InnerDecode(buffer, pos):
# We expect a 64-bit value in little-endian byte order. Bit 1 is the sign
@@ -352,12 +336,9 @@ def _DoubleDecoder():
# If this value has all its exponent bits set and at least one significand
# bit set, it's not a number. In Python 2.4, struct.unpack will treat it
# as inf or -inf. To avoid that, we treat it specially.
-##!PY25 if ((double_bytes[7:8] in b'\x7F\xFF')
-##!PY25 and (double_bytes[6:7] >= b'\xF0')
-##!PY25 and (double_bytes[0:7] != b'\x00\x00\x00\x00\x00\x00\xF0')):
- if ((double_bytes[7:8] in b('\x7F\xFF')) ##PY25
- and (double_bytes[6:7] >= b('\xF0')) ##PY25
- and (double_bytes[0:7] != b('\x00\x00\x00\x00\x00\x00\xF0'))): ##PY25
+ if ((double_bytes[7:8] in b'\x7F\xFF')
+ and (double_bytes[6:7] >= b'\xF0')
+ and (double_bytes[0:7] != b'\x00\x00\x00\x00\x00\x00\xF0')):
return (_NAN, new_pos)
# Note that we expect someone up-stack to catch struct.error and convert
@@ -480,12 +461,12 @@ def StringDecoder(field_number, is_repeated, is_packed, key, new_default):
"""Returns a decoder for a string field."""
local_DecodeVarint = _DecodeVarint
- local_unicode = unicode
+ local_unicode = six.text_type
def _ConvertToUnicode(byte_str):
try:
return local_unicode(byte_str, 'utf-8')
- except UnicodeDecodeError, e:
+ except UnicodeDecodeError as e:
# add more information to the error message and re-raise it.
e.reason = '%s in field: %s' % (e, key.full_name)
raise
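six.indexbytes(buffer, pos) yields an int on both interpreters (Python 2 indexing of bytes returns a one-character str, Python 3 already returns an int), which is what lets the ord()/##PY25 branching above disappear. A minimal hedged sketch of the same varint idiom outside protobuf:

    import six

    def decode_varint(buf, pos=0):
        """Decode one unsigned varint from bytes; return (value, new_pos)."""
        result = 0
        shift = 0
        while True:
            b = six.indexbytes(buf, pos)   # int on Python 2 and Python 3
            result |= (b & 0x7f) << shift
            pos += 1
            if not b & 0x80:
                return result, pos
            shift += 7

    # 300 is encoded on the wire as b'\xac\x02':
    assert decode_varint(b'\xac\x02') == (300, 2)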
diff --git a/python/google/protobuf/internal/encoder.py b/python/google/protobuf/internal/encoder.py
index 38a5138a..21ed2ed7 100755
--- a/python/google/protobuf/internal/encoder.py
+++ b/python/google/protobuf/internal/encoder.py
@@ -28,8 +28,6 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#PY25 compatible for GAE.
-#
# Copyright 2009 Google Inc. All Rights Reserved.
"""Code for encoding protocol message primitives.
@@ -71,8 +69,9 @@ sizer rather than when calling them. In particular:
__author__ = 'kenton@google.com (Kenton Varda)'
import struct
-import sys ##PY25
-_PY2 = sys.version_info[0] < 3 ##PY25
+
+import six
+
from google.protobuf.internal import wire_format
@@ -346,16 +345,14 @@ def MessageSetItemSizer(field_number):
def _VarintEncoder():
"""Return an encoder for a basic varint value (does not include tag)."""
- local_chr = _PY2 and chr or (lambda x: bytes((x,))) ##PY25
-##!PY25 local_chr = chr if bytes is str else lambda x: bytes((x,))
def EncodeVarint(write, value):
bits = value & 0x7f
value >>= 7
while value:
- write(local_chr(0x80|bits))
+ write(six.int2byte(0x80|bits))
bits = value & 0x7f
value >>= 7
- return write(local_chr(bits))
+ return write(six.int2byte(bits))
return EncodeVarint
@@ -364,18 +361,16 @@ def _SignedVarintEncoder():
"""Return an encoder for a basic signed varint value (does not include
tag)."""
- local_chr = _PY2 and chr or (lambda x: bytes((x,))) ##PY25
-##!PY25 local_chr = chr if bytes is str else lambda x: bytes((x,))
def EncodeSignedVarint(write, value):
if value < 0:
value += (1 << 64)
bits = value & 0x7f
value >>= 7
while value:
- write(local_chr(0x80|bits))
+ write(six.int2byte(0x80|bits))
bits = value & 0x7f
value >>= 7
- return write(local_chr(bits))
+ return write(six.int2byte(bits))
return EncodeSignedVarint
@@ -390,8 +385,7 @@ def _VarintBytes(value):
pieces = []
_EncodeVarint(pieces.append, value)
- return "".encode("latin1").join(pieces) ##PY25
-##!PY25 return b"".join(pieces)
+ return b"".join(pieces)
def TagBytes(field_number, wire_type):
@@ -529,33 +523,26 @@ def _FloatingPointEncoder(wire_type, format):
format: The format string to pass to struct.pack().
"""
- b = _PY2 and (lambda x:x) or (lambda x:x.encode('latin1')) ##PY25
value_size = struct.calcsize(format)
if value_size == 4:
def EncodeNonFiniteOrRaise(write, value):
# Remember that the serialized form uses little-endian byte order.
if value == _POS_INF:
- write(b('\x00\x00\x80\x7F')) ##PY25
-##!PY25 write(b'\x00\x00\x80\x7F')
+ write(b'\x00\x00\x80\x7F')
elif value == _NEG_INF:
- write(b('\x00\x00\x80\xFF')) ##PY25
-##!PY25 write(b'\x00\x00\x80\xFF')
+ write(b'\x00\x00\x80\xFF')
elif value != value: # NaN
- write(b('\x00\x00\xC0\x7F')) ##PY25
-##!PY25 write(b'\x00\x00\xC0\x7F')
+ write(b'\x00\x00\xC0\x7F')
else:
raise
elif value_size == 8:
def EncodeNonFiniteOrRaise(write, value):
if value == _POS_INF:
- write(b('\x00\x00\x00\x00\x00\x00\xF0\x7F')) ##PY25
-##!PY25 write(b'\x00\x00\x00\x00\x00\x00\xF0\x7F')
+ write(b'\x00\x00\x00\x00\x00\x00\xF0\x7F')
elif value == _NEG_INF:
- write(b('\x00\x00\x00\x00\x00\x00\xF0\xFF')) ##PY25
-##!PY25 write(b'\x00\x00\x00\x00\x00\x00\xF0\xFF')
+ write(b'\x00\x00\x00\x00\x00\x00\xF0\xFF')
elif value != value: # NaN
- write(b('\x00\x00\x00\x00\x00\x00\xF8\x7F')) ##PY25
-##!PY25 write(b'\x00\x00\x00\x00\x00\x00\xF8\x7F')
+ write(b'\x00\x00\x00\x00\x00\x00\xF8\x7F')
else:
raise
else:
@@ -631,10 +618,8 @@ DoubleEncoder = _FloatingPointEncoder(wire_format.WIRETYPE_FIXED64, '<d')
def BoolEncoder(field_number, is_repeated, is_packed):
"""Returns an encoder for a boolean field."""
-##!PY25 false_byte = b'\x00'
-##!PY25 true_byte = b'\x01'
- false_byte = '\x00'.encode('latin1') ##PY25
- true_byte = '\x01'.encode('latin1') ##PY25
+ false_byte = b'\x00'
+ true_byte = b'\x01'
if is_packed:
tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
local_EncodeVarint = _EncodeVarint
@@ -770,8 +755,7 @@ def MessageSetItemEncoder(field_number):
}
}
"""
- start_bytes = "".encode("latin1").join([ ##PY25
-##!PY25 start_bytes = b"".join([
+ start_bytes = b"".join([
TagBytes(1, wire_format.WIRETYPE_START_GROUP),
TagBytes(2, wire_format.WIRETYPE_VARINT),
_VarintBytes(field_number),
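On the encoding side, six.int2byte(n) produces a one-byte bytes object on both interpreters (chr on Python 2, bytes((n,)) on Python 3), replacing the local_chr shim removed above. A hedged standalone sketch mirroring the varint encoder:

    import six

    def encode_varint(value):
        """Encode an unsigned int as a varint; return bytes."""
        pieces = []
        bits = value & 0x7f
        value >>= 7
        while value:
            pieces.append(six.int2byte(0x80 | bits))
            bits = value & 0x7f
            value >>= 7
        pieces.append(six.int2byte(bits))
        return b''.join(pieces)

    assert encode_varint(300) == b'\xac\x02'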
diff --git a/python/google/protobuf/internal/generator_test.py b/python/google/protobuf/internal/generator_test.py
index 03361e66..ac9808d1 100755
--- a/python/google/protobuf/internal/generator_test.py
+++ b/python/google/protobuf/internal/generator_test.py
@@ -294,7 +294,7 @@ class GeneratorTest(basetest.TestCase):
self.assertSameElements(
nested_names,
[field.name for field in desc.oneofs[0].fields])
- for field_name, field_desc in desc.fields_by_name.iteritems():
+ for field_name, field_desc in desc.fields_by_name.items():
if field_name in nested_names:
self.assertIs(desc.oneofs[0], field_desc.containing_oneof)
else:
diff --git a/python/google/protobuf/internal/message_factory_test.py b/python/google/protobuf/internal/message_factory_test.py
index fcf13410..f4617b85 100644
--- a/python/google/protobuf/internal/message_factory_test.py
+++ b/python/google/protobuf/internal/message_factory_test.py
@@ -107,14 +107,14 @@ class MessageFactoryTest(basetest.TestCase):
self.assertContainsSubset(
['google.protobuf.python.internal.Factory2Message',
'google.protobuf.python.internal.Factory1Message'],
- messages.keys())
+ list(messages.keys()))
self._ExerciseDynamicClass(
messages['google.protobuf.python.internal.Factory2Message'])
self.assertContainsSubset(
['google.protobuf.python.internal.Factory2Message.one_more_field',
'google.protobuf.python.internal.another_field'],
- (messages['google.protobuf.python.internal.Factory1Message']
- ._extensions_by_name.keys()))
+ (list(messages['google.protobuf.python.internal.Factory1Message']
+ ._extensions_by_name.keys())))
factory_msg1 = messages['google.protobuf.python.internal.Factory1Message']
msg1 = messages['google.protobuf.python.internal.Factory1Message']()
ext1 = factory_msg1._extensions_by_name[
diff --git a/python/google/protobuf/internal/python_message.py b/python/google/protobuf/internal/python_message.py
index 6fda6ae0..58c65db9 100755
--- a/python/google/protobuf/internal/python_message.py
+++ b/python/google/protobuf/internal/python_message.py
@@ -28,8 +28,6 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-# Keep it Python2.5 compatible for GAE.
-#
# Copyright 2007 Google Inc. All Rights Reserved.
#
# This code is meant to work on Python 2.4 and above only.
@@ -54,19 +52,14 @@ this file*.
__author__ = 'robinson@google.com (Will Robinson)'
+from io import BytesIO
import sys
-if sys.version_info[0] < 3:
- try:
- from cStringIO import StringIO as BytesIO
- except ImportError:
- from StringIO import StringIO as BytesIO
- import copy_reg as copyreg
-else:
- from io import BytesIO
- import copyreg
import struct
import weakref
+import six
+import six.moves.copyreg as copyreg
+
# We use "as" to avoid name collisions with variables.
from google.protobuf.internal import containers
from google.protobuf.internal import decoder
@@ -237,7 +230,7 @@ def _AttachFieldHelpers(cls, field_descriptor):
def _AddClassAttributesForNestedExtensions(descriptor, dictionary):
extension_dict = descriptor.extensions_by_name
- for extension_name, extension_field in extension_dict.iteritems():
+ for extension_name, extension_field in extension_dict.items():
assert extension_name not in dictionary
dictionary[extension_name] = extension_field
@@ -314,7 +307,7 @@ def _ReraiseTypeErrorWithFieldName(message_name, field_name):
exc = TypeError('%s for field %s.%s' % (str(exc), message_name, field_name))
# re-raise possibly-amended exception with original traceback:
- raise type(exc), exc, sys.exc_info()[2]
+ six.reraise(type(exc), exc, sys.exc_info()[2])
def _AddInitMethod(message_descriptor, cls):
@@ -334,7 +327,7 @@ def _AddInitMethod(message_descriptor, cls):
self._is_present_in_parent = False
self._listener = message_listener_mod.NullMessageListener()
self._listener_for_children = _Listener(self)
- for field_name, field_value in kwargs.iteritems():
+ for field_name, field_value in kwargs.items():
field = _GetFieldByName(message_descriptor, field_name)
if field is None:
raise TypeError("%s() got an unexpected keyword argument '%s'" %
@@ -563,7 +556,7 @@ def _AddPropertiesForNonRepeatedCompositeField(field, cls):
def _AddPropertiesForExtensions(descriptor, cls):
"""Adds properties for all fields in this protocol message type."""
extension_dict = descriptor.extensions_by_name
- for extension_name, extension_field in extension_dict.iteritems():
+ for extension_name, extension_field in extension_dict.items():
constant_name = extension_name.upper() + "_FIELD_NUMBER"
setattr(cls, constant_name, extension_field.number)
@@ -618,7 +611,7 @@ def _AddListFieldsMethod(message_descriptor, cls):
"""Helper for _AddMessageMethods()."""
def ListFields(self):
- all_fields = [item for item in self._fields.iteritems() if _IsPresent(item)]
+ all_fields = [item for item in self._fields.items() if _IsPresent(item)]
all_fields.sort(key = lambda item: item[0].number)
return all_fields
@@ -863,7 +856,7 @@ def _AddMergeFromStringMethod(message_descriptor, cls):
except (IndexError, TypeError):
# Now ord(buf[p:p+1]) == ord('') gets TypeError.
raise message_mod.DecodeError('Truncated message.')
- except struct.error, e:
+ except struct.error as e:
raise message_mod.DecodeError(e)
return length # Return this for legacy reasons.
cls.MergeFromString = MergeFromString
@@ -963,7 +956,7 @@ def _AddIsInitializedMethod(message_descriptor, cls):
name = field.name
if field.label == _FieldDescriptor.LABEL_REPEATED:
- for i in xrange(len(value)):
+ for i in range(len(value)):
element = value[i]
prefix = "%s[%d]." % (name, i)
sub_errors = element.FindInitializationErrors()
@@ -993,7 +986,7 @@ def _AddMergeFromMethod(cls):
fields = self._fields
- for field, value in msg._fields.iteritems():
+ for field, value in msg._fields.items():
if field.label == LABEL_REPEATED:
field_value = fields.get(field)
if field_value is None:
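The six.reraise call substituted above is the portable spelling of Python 2's three-argument raise statement. The amend-and-reraise pattern looks roughly like this in isolation (hedged sketch, hypothetical names):

    import sys

    import six

    def parse_field(field_name, raw):
        try:
            return int(raw)
        except ValueError as exc:
            amended = ValueError('%s for field %s' % (exc, field_name))
            # Re-raise the amended exception with the original traceback on 2 and 3.
            six.reraise(type(amended), amended, sys.exc_info()[2])

    try:
        parse_field('optional_int32', 'not-a-number')
    except ValueError as e:
        print(e)   # the message now names the offending field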
diff --git a/python/google/protobuf/internal/reflection_test.py b/python/google/protobuf/internal/reflection_test.py
index 6b24b092..52a08d7d 100755
--- a/python/google/protobuf/internal/reflection_test.py
+++ b/python/google/protobuf/internal/reflection_test.py
@@ -40,6 +40,8 @@ import gc
import operator
import struct
+import six
+
from google.apputils import basetest
from google.protobuf import unittest_import_pb2
from google.protobuf import unittest_mset_pb2
@@ -467,7 +469,7 @@ class ReflectionTest(basetest.TestCase):
proto.repeated_string.extend(['foo', 'bar'])
proto.repeated_string.extend([])
proto.repeated_string.append('baz')
- proto.repeated_string.extend(str(x) for x in xrange(2))
+ proto.repeated_string.extend(str(x) for x in range(2))
proto.optional_int32 = 21
proto.repeated_bool # Access but don't set anything; should not be listed.
self.assertEqual(
@@ -620,14 +622,18 @@ class ReflectionTest(basetest.TestCase):
if struct.calcsize('L') == 4:
# Python only has signed ints, so 32-bit python can't fit an uint32
# in an int.
- TestGetAndDeserialize('optional_uint32', 1 << 31, long)
+ TestGetAndDeserialize('optional_uint32', 1 << 31, int)
else:
# 64-bit python can fit uint32 inside an int
TestGetAndDeserialize('optional_uint32', 1 << 31, int)
- TestGetAndDeserialize('optional_int64', 1 << 30, long)
- TestGetAndDeserialize('optional_int64', 1 << 60, long)
- TestGetAndDeserialize('optional_uint64', 1 << 30, long)
- TestGetAndDeserialize('optional_uint64', 1 << 60, long)
+ try:
+ integer_64 = long
+ except NameError: # Python3
+ integer_64 = int
+ TestGetAndDeserialize('optional_int64', 1 << 30, integer_64)
+ TestGetAndDeserialize('optional_int64', 1 << 60, integer_64)
+ TestGetAndDeserialize('optional_uint64', 1 << 30, integer_64)
+ TestGetAndDeserialize('optional_uint64', 1 << 60, integer_64)
def testSingleScalarBoundsChecking(self):
def TestMinAndMaxIntegers(field_name, expected_min, expected_max):
@@ -753,18 +759,18 @@ class ReflectionTest(basetest.TestCase):
def testEnum_KeysAndValues(self):
self.assertEqual(['FOREIGN_FOO', 'FOREIGN_BAR', 'FOREIGN_BAZ'],
- unittest_pb2.ForeignEnum.keys())
+ list(unittest_pb2.ForeignEnum.keys()))
self.assertEqual([4, 5, 6],
- unittest_pb2.ForeignEnum.values())
+ list(unittest_pb2.ForeignEnum.values()))
self.assertEqual([('FOREIGN_FOO', 4), ('FOREIGN_BAR', 5),
('FOREIGN_BAZ', 6)],
- unittest_pb2.ForeignEnum.items())
+ list(unittest_pb2.ForeignEnum.items()))
proto = unittest_pb2.TestAllTypes()
- self.assertEqual(['FOO', 'BAR', 'BAZ', 'NEG'], proto.NestedEnum.keys())
- self.assertEqual([1, 2, 3, -1], proto.NestedEnum.values())
+ self.assertEqual(['FOO', 'BAR', 'BAZ', 'NEG'], list(proto.NestedEnum.keys()))
+ self.assertEqual([1, 2, 3, -1], list(proto.NestedEnum.values()))
self.assertEqual([('FOO', 1), ('BAR', 2), ('BAZ', 3), ('NEG', -1)],
- proto.NestedEnum.items())
+ list(proto.NestedEnum.items()))
def testRepeatedScalars(self):
proto = unittest_pb2.TestAllTypes()
@@ -803,7 +809,7 @@ class ReflectionTest(basetest.TestCase):
self.assertEqual([5, 25, 20, 15, 30], proto.repeated_int32[:])
# Test slice assignment with an iterator
- proto.repeated_int32[1:4] = (i for i in xrange(3))
+ proto.repeated_int32[1:4] = (i for i in range(3))
self.assertEqual([5, 0, 1, 2, 30], proto.repeated_int32)
# Test slice assignment.
@@ -1006,9 +1012,8 @@ class ReflectionTest(basetest.TestCase):
containing_type=None, nested_types=[], enum_types=[],
fields=[foo_field_descriptor], extensions=[],
options=descriptor_pb2.MessageOptions())
- class MyProtoClass(message.Message):
+ class MyProtoClass(six.with_metaclass(reflection.GeneratedProtocolMessageType, message.Message)):
DESCRIPTOR = mydescriptor
- __metaclass__ = reflection.GeneratedProtocolMessageType
myproto_instance = MyProtoClass()
self.assertEqual(0, myproto_instance.foo_field)
self.assertTrue(not myproto_instance.HasField('foo_field'))
@@ -1048,14 +1053,13 @@ class ReflectionTest(basetest.TestCase):
new_field.label = descriptor_pb2.FieldDescriptorProto.LABEL_REPEATED
desc = descriptor.MakeDescriptor(desc_proto)
- self.assertTrue(desc.fields_by_name.has_key('name'))
- self.assertTrue(desc.fields_by_name.has_key('year'))
- self.assertTrue(desc.fields_by_name.has_key('automatic'))
- self.assertTrue(desc.fields_by_name.has_key('price'))
- self.assertTrue(desc.fields_by_name.has_key('owners'))
-
- class CarMessage(message.Message):
- __metaclass__ = reflection.GeneratedProtocolMessageType
+ self.assertTrue('name' in desc.fields_by_name)
+ self.assertTrue('year' in desc.fields_by_name)
+ self.assertTrue('automatic' in desc.fields_by_name)
+ self.assertTrue('price' in desc.fields_by_name)
+ self.assertTrue('owners' in desc.fields_by_name)
+
+ class CarMessage(six.with_metaclass(reflection.GeneratedProtocolMessageType, message.Message)):
DESCRIPTOR = desc
prius = CarMessage()
@@ -1661,14 +1665,14 @@ class ReflectionTest(basetest.TestCase):
setattr, proto, 'optional_bytes', u'unicode object')
# Check that the default value is of python's 'unicode' type.
- self.assertEqual(type(proto.optional_string), unicode)
+ self.assertEqual(type(proto.optional_string), six.text_type)
- proto.optional_string = unicode('Testing')
+ proto.optional_string = six.text_type('Testing')
self.assertEqual(proto.optional_string, str('Testing'))
# Assign a value of type 'str' which can be encoded in UTF-8.
proto.optional_string = str('Testing')
- self.assertEqual(proto.optional_string, unicode('Testing'))
+ self.assertEqual(proto.optional_string, six.text_type('Testing'))
# Try to assign a 'bytes' object which contains non-UTF-8.
self.assertRaises(ValueError,
@@ -1715,7 +1719,7 @@ class ReflectionTest(basetest.TestCase):
bytes_read = message2.MergeFromString(raw.item[0].message)
self.assertEqual(len(raw.item[0].message), bytes_read)
- self.assertEqual(type(message2.str), unicode)
+ self.assertEqual(type(message2.str), six.text_type)
self.assertEqual(message2.str, test_utf8)
# The pure Python API throws an exception on MergeFromString(),
@@ -1739,7 +1743,7 @@ class ReflectionTest(basetest.TestCase):
def testBytesInTextFormat(self):
proto = unittest_pb2.TestAllTypes(optional_bytes=b'\x00\x7f\x80\xff')
self.assertEqual(u'optional_bytes: "\\000\\177\\200\\377"\n',
- unicode(proto))
+ six.text_type(proto))
def testEmptyNestedMessage(self):
proto = unittest_pb2.TestAllTypes()
@@ -2307,7 +2311,7 @@ class SerializationTest(basetest.TestCase):
test_util.SetAllFields(first_proto)
serialized = first_proto.SerializeToString()
- for truncation_point in xrange(len(serialized) + 1):
+ for truncation_point in range(len(serialized) + 1):
try:
second_proto = unittest_pb2.TestAllTypes()
unknown_fields = unittest_pb2.TestEmptyMessage()
@@ -2908,8 +2912,7 @@ class ClassAPITest(basetest.TestCase):
msg_descriptor = descriptor.MakeDescriptor(
file_descriptor.message_type[0])
- class MessageClass(message.Message):
- __metaclass__ = reflection.GeneratedProtocolMessageType
+ class MessageClass(six.with_metaclass(reflection.GeneratedProtocolMessageType, message.Message)):
DESCRIPTOR = msg_descriptor
msg = MessageClass()
msg_str = (
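The six.with_metaclass helper used in these tests replaces the Python-2-only __metaclass__ class attribute, which Python 3 silently ignores. A hedged sketch with a toy metaclass rather than GeneratedProtocolMessageType:

    import six

    class Registry(type):
        classes = []
        def __new__(mcs, name, bases, namespace):
            cls = super(Registry, mcs).__new__(mcs, name, bases, namespace)
            Registry.classes.append(cls)
            return cls

    # Works on both interpreters, unlike Python 2's "__metaclass__ = Registry":
    class Plugin(six.with_metaclass(Registry, object)):
        pass

    assert Plugin in Registry.classes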
diff --git a/python/google/protobuf/internal/text_format_test.py b/python/google/protobuf/internal/text_format_test.py
index 55e3c2c8..3455a000 100755
--- a/python/google/protobuf/internal/text_format_test.py
+++ b/python/google/protobuf/internal/text_format_test.py
@@ -36,9 +36,10 @@ __author__ = 'kenton@google.com (Kenton Varda)'
import re
+import six
+
from google.apputils import basetest
from google.protobuf import text_format
-from google.protobuf.internal import api_implementation
from google.protobuf.internal import test_util
from google.protobuf import unittest_pb2
from google.protobuf import unittest_mset_pb2
@@ -143,7 +144,7 @@ class TextFormatTest(basetest.TestCase):
'repeated_string: "\\303\\274\\352\\234\\237"\n')
def testPrintExoticUnicodeSubclass(self):
- class UnicodeSub(unicode):
+ class UnicodeSub(six.text_type):
pass
message = unittest_pb2.TestAllTypes()
message.repeated_string.append(UnicodeSub(u'\u00fc\ua71f'))
diff --git a/python/google/protobuf/internal/type_checkers.py b/python/google/protobuf/internal/type_checkers.py
index 118725da..8d10fbe0 100755
--- a/python/google/protobuf/internal/type_checkers.py
+++ b/python/google/protobuf/internal/type_checkers.py
@@ -28,8 +28,6 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#PY25 compatible for GAE.
-#
# Copyright 2008 Google Inc. All Rights Reserved.
"""Provides type checking routines.
@@ -49,9 +47,8 @@ TYPE_TO_DESERIALIZE_METHOD: A dictionary with field types and deserialization
__author__ = 'robinson@google.com (Will Robinson)'
-import sys ##PY25
-if sys.version < '2.6': bytes = str ##PY25
-from google.protobuf.internal import api_implementation
+import six
+
from google.protobuf.internal import decoder
from google.protobuf.internal import encoder
from google.protobuf.internal import wire_format
@@ -111,9 +108,9 @@ class IntValueChecker(object):
"""Checker used for integer fields. Performs type-check and range check."""
def CheckValue(self, proposed_value):
- if not isinstance(proposed_value, (int, long)):
+ if not isinstance(proposed_value, six.integer_types):
message = ('%.1024r has type %s, but expected one of: %s' %
- (proposed_value, type(proposed_value), (int, long)))
+ (proposed_value, type(proposed_value), six.integer_types))
raise TypeError(message)
if not self._MIN <= proposed_value <= self._MAX:
raise ValueError('Value out of range: %d' % proposed_value)
@@ -132,9 +129,9 @@ class EnumValueChecker(object):
self._enum_type = enum_type
def CheckValue(self, proposed_value):
- if not isinstance(proposed_value, (int, long)):
+ if not isinstance(proposed_value, six.integer_types):
message = ('%.1024r has type %s, but expected one of: %s' %
- (proposed_value, type(proposed_value), (int, long)))
+ (proposed_value, type(proposed_value), six.integer_types))
raise TypeError(message)
if proposed_value not in self._enum_type.values_by_number:
raise ValueError('Unknown enum value: %d' % proposed_value)
@@ -149,9 +146,9 @@ class UnicodeValueChecker(object):
"""
def CheckValue(self, proposed_value):
- if not isinstance(proposed_value, (bytes, unicode)):
+ if not isinstance(proposed_value, (bytes, six.text_type)):
message = ('%.1024r has type %s, but expected one of: %s' %
- (proposed_value, type(proposed_value), (bytes, unicode)))
+ (proposed_value, type(proposed_value), (bytes, six.text_type)))
raise TypeError(message)
# If the value is of type 'bytes' make sure that it is valid UTF-8 data.
@@ -183,13 +180,13 @@ class Uint32ValueChecker(IntValueChecker):
class Int64ValueChecker(IntValueChecker):
_MIN = -(1 << 63)
_MAX = (1 << 63) - 1
- _TYPE = long
+ _TYPE = int
class Uint64ValueChecker(IntValueChecker):
_MIN = 0
_MAX = (1 << 64) - 1
- _TYPE = long
+ _TYPE = int
# Type-checkers for all scalar CPPTYPEs.
@@ -199,9 +196,9 @@ _VALUE_CHECKERS = {
_FieldDescriptor.CPPTYPE_UINT32: Uint32ValueChecker(),
_FieldDescriptor.CPPTYPE_UINT64: Uint64ValueChecker(),
_FieldDescriptor.CPPTYPE_DOUBLE: TypeChecker(
- float, int, long),
+ float, int, int),
_FieldDescriptor.CPPTYPE_FLOAT: TypeChecker(
- float, int, long),
+ float, int, int),
_FieldDescriptor.CPPTYPE_BOOL: TypeChecker(bool, int),
_FieldDescriptor.CPPTYPE_STRING: TypeChecker(bytes),
}
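six.integer_types is (int, long) on Python 2 and (int,) on Python 3, and six.text_type is unicode and str respectively, which is what the checkers above lean on. A hedged sketch of the same checking style (hypothetical helper names):

    import six

    def check_int64(value):
        if not isinstance(value, six.integer_types):
            raise TypeError('%.1024r has type %s, but expected one of: %s'
                            % (value, type(value), six.integer_types))
        if not -(1 << 63) <= value <= (1 << 63) - 1:
            raise ValueError('Value out of range: %d' % value)
        return value

    def check_string(value):
        if not isinstance(value, (bytes, six.text_type)):
            raise TypeError('%.1024r is neither bytes nor text' % (value,))
        return value

    check_int64(1 << 60)
    check_string(u'caf\xe9')
    check_string(b'raw bytes are accepted too')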
diff --git a/python/google/protobuf/message_factory.py b/python/google/protobuf/message_factory.py
index 7fd7bec0..36062a56 100644
--- a/python/google/protobuf/message_factory.py
+++ b/python/google/protobuf/message_factory.py
@@ -28,8 +28,6 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#PY25 compatible for GAE.
-#
# Copyright 2012 Google Inc. All Rights Reserved.
"""Provides a factory class for generating dynamic messages.
@@ -43,7 +41,6 @@ my_proto_instance = message_classes['some.proto.package.MessageName']()
__author__ = 'matthewtoia@google.com (Matt Toia)'
-import sys ##PY25
from google.protobuf import descriptor_database
from google.protobuf import descriptor_pool
from google.protobuf import message
@@ -75,8 +72,7 @@ class MessageFactory(object):
"""
if descriptor.full_name not in self._classes:
descriptor_name = descriptor.name
- if sys.version_info[0] < 3: ##PY25
-##!PY25 if str is bytes: # PY2
+ if str is bytes: # PY2
descriptor_name = descriptor.name.encode('ascii', 'ignore')
result_class = reflection.GeneratedProtocolMessageType(
descriptor_name,
@@ -111,7 +107,7 @@ class MessageFactory(object):
result = {}
for file_name in files:
file_desc = self.pool.FindFileByName(file_name)
- for name, msg in file_desc.message_types_by_name.iteritems():
+ for name, msg in file_desc.message_types_by_name.items():
if file_desc.package:
full_name = '.'.join([file_desc.package, name])
else:
@@ -128,7 +124,7 @@ class MessageFactory(object):
# ignore the registration if the original was the same, or raise
# an error if they were different.
- for name, extension in file_desc.extensions_by_name.iteritems():
+ for name, extension in file_desc.extensions_by_name.items():
if extension.containing_type.full_name not in self._classes:
self.GetPrototype(extension.containing_type)
extended_class = self._classes[extension.containing_type.full_name]
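The `str is bytes` test that replaces the sys.version_info check above is a common straddle idiom: the two names refer to the same type only on Python 2. A hedged illustration (hypothetical helper):

    PY2 = str is bytes   # True on Python 2, False on Python 3

    def native_class_name(descriptor_name):
        # Python 2's type() wants a native (byte) string for the class name,
        # so downgrade a unicode descriptor name there; Python 3 uses it as-is.
        if PY2:
            return descriptor_name.encode('ascii', 'ignore')
        return descriptor_name

    print(native_class_name(u'Factory1Message'))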
diff --git a/python/google/protobuf/text_encoding.py b/python/google/protobuf/text_encoding.py
index 2d86a67c..a0728e3c 100644
--- a/python/google/protobuf/text_encoding.py
+++ b/python/google/protobuf/text_encoding.py
@@ -27,16 +27,13 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#PY25 compatible for GAE.
-#
"""Encoding related utilities."""
-
import re
-import sys ##PY25
+
+import six
# Lookup table for utf8
-_cescape_utf8_to_str = [chr(i) for i in xrange(0, 256)]
+_cescape_utf8_to_str = [chr(i) for i in range(0, 256)]
_cescape_utf8_to_str[9] = r'\t' # optional escape
_cescape_utf8_to_str[10] = r'\n' # optional escape
_cescape_utf8_to_str[13] = r'\r' # optional escape
@@ -46,9 +43,9 @@ _cescape_utf8_to_str[34] = r'\"' # necessary escape
_cescape_utf8_to_str[92] = r'\\' # necessary escape
# Lookup table for non-utf8, with necessary escapes at (o >= 127 or o < 32)
-_cescape_byte_to_str = ([r'\%03o' % i for i in xrange(0, 32)] +
- [chr(i) for i in xrange(32, 127)] +
- [r'\%03o' % i for i in xrange(127, 256)])
+_cescape_byte_to_str = ([r'\%03o' % i for i in range(0, 32)] +
+ [chr(i) for i in range(32, 127)] +
+ [r'\%03o' % i for i in range(127, 256)])
_cescape_byte_to_str[9] = r'\t' # optional escape
_cescape_byte_to_str[10] = r'\n' # optional escape
_cescape_byte_to_str[13] = r'\r' # optional escape
@@ -75,7 +72,7 @@ def CEscape(text, as_utf8):
"""
# PY3 hack: make Ord work for str and bytes:
# //platforms/networking/data uses unicode here, hence basestring.
- Ord = ord if isinstance(text, basestring) else lambda x: x
+ Ord = ord if isinstance(text, six.string_types) else lambda x: x
if as_utf8:
return ''.join(_cescape_utf8_to_str[Ord(c)] for c in text)
return ''.join(_cescape_byte_to_str[Ord(c)] for c in text)
@@ -100,8 +97,7 @@ def CUnescape(text):
# allow single-digit hex escapes (like '\xf').
result = _CUNESCAPE_HEX.sub(ReplaceHex, text)
- if sys.version_info[0] < 3: ##PY25
-##!PY25 if str is bytes: # PY2
+ if str is bytes: # PY2
return result.decode('string_escape')
result = ''.join(_cescape_highbit_to_str[ord(c)] for c in result)
return (result.encode('ascii') # Make it bytes to allow decode.
diff --git a/python/google/protobuf/text_format.py b/python/google/protobuf/text_format.py
index fb54c50c..87b5c222 100755
--- a/python/google/protobuf/text_format.py
+++ b/python/google/protobuf/text_format.py
@@ -28,8 +28,6 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#PY25 compatible for GAE.
-#
# Copyright 2007 Google Inc. All Rights Reserved.
"""Contains routines for printing protocol messages in text format."""
@@ -39,6 +37,8 @@ __author__ = 'kenton@google.com (Kenton Varda)'
import cStringIO
import re
+import six
+
from google.protobuf.internal import type_checkers
from google.protobuf import descriptor
from google.protobuf import text_encoding
@@ -195,7 +195,7 @@ def PrintFieldValue(field, value, out, indent=0, as_utf8=False,
out.write(str(value))
elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_STRING:
out.write('\"')
- if isinstance(value, unicode):
+ if isinstance(value, six.text_type):
out_value = value.encode('utf-8')
else:
out_value = value
@@ -505,7 +505,7 @@ class _Tokenizer(object):
def _PopLine(self):
while len(self._current_line) <= self._column:
try:
- self._current_line = self._lines.next()
+ self._current_line = next(self._lines)
except StopIteration:
self._current_line = ''
self._more_lines = False
@@ -575,7 +575,7 @@ class _Tokenizer(object):
"""
try:
result = ParseInteger(self.token, is_signed=True, is_long=False)
- except ValueError, e:
+ except ValueError as e:
raise self._ParseError(str(e))
self.NextToken()
return result
@@ -591,7 +591,7 @@ class _Tokenizer(object):
"""
try:
result = ParseInteger(self.token, is_signed=False, is_long=False)
- except ValueError, e:
+ except ValueError as e:
raise self._ParseError(str(e))
self.NextToken()
return result
@@ -607,7 +607,7 @@ class _Tokenizer(object):
"""
try:
result = ParseInteger(self.token, is_signed=True, is_long=True)
- except ValueError, e:
+ except ValueError as e:
raise self._ParseError(str(e))
self.NextToken()
return result
@@ -623,7 +623,7 @@ class _Tokenizer(object):
"""
try:
result = ParseInteger(self.token, is_signed=False, is_long=True)
- except ValueError, e:
+ except ValueError as e:
raise self._ParseError(str(e))
self.NextToken()
return result
@@ -639,7 +639,7 @@ class _Tokenizer(object):
"""
try:
result = ParseFloat(self.token)
- except ValueError, e:
+ except ValueError as e:
raise self._ParseError(str(e))
self.NextToken()
return result
@@ -655,7 +655,7 @@ class _Tokenizer(object):
"""
try:
result = ParseBool(self.token)
- except ValueError, e:
+ except ValueError as e:
raise self._ParseError(str(e))
self.NextToken()
return result
@@ -671,8 +671,8 @@ class _Tokenizer(object):
"""
the_bytes = self.ConsumeByteString()
try:
- return unicode(the_bytes, 'utf-8')
- except UnicodeDecodeError, e:
+ return six.text_type(the_bytes, 'utf-8')
+ except UnicodeDecodeError as e:
raise self._StringParseError(e)
def ConsumeByteString(self):
@@ -687,8 +687,7 @@ class _Tokenizer(object):
the_list = [self._ConsumeSingleByteString()]
while self.token and self.token[0] in ('\'', '"'):
the_list.append(self._ConsumeSingleByteString())
- return ''.encode('latin1').join(the_list) ##PY25
-##!PY25 return b''.join(the_list)
+ return b''.join(the_list)
def _ConsumeSingleByteString(self):
"""Consume one token of a string literal.
@@ -706,7 +705,7 @@ class _Tokenizer(object):
try:
result = text_encoding.CUnescape(text[1:-1])
- except ValueError, e:
+ except ValueError as e:
raise self._ParseError(str(e))
self.NextToken()
return result
@@ -714,7 +713,7 @@ class _Tokenizer(object):
def ConsumeEnum(self, field):
try:
result = ParseEnum(field, self.token)
- except ValueError, e:
+ except ValueError as e:
raise self._ParseError(str(e))
self.NextToken()
return result
@@ -779,7 +778,7 @@ def ParseInteger(text, is_signed=False, is_long=False):
# alternate implementations where the distinction is more significant
# (e.g. the C++ implementation) simpler.
if is_long:
- result = long(text, 0)
+ result = int(text, 0)
else:
result = int(text, 0)
except ValueError: