author     Akira Baruah <akira.baruah@gmail.com>  2017-12-01 10:10:08 -0800
committer  Ulf Adams <ulfjack@google.com>         2017-12-05 14:58:03 +0100
commit  4fcaf584746fef33aac4eb584e050a8b359bf134 (patch)
tree    35e015e281e17e61cfa1a85f526892627f9c01ba /third_party/py
parent  3d362fb9a122ceee6d781be127dfedbbff8051f8 (diff)
Add absl-py 0.1.1 as third_party/py/abseil
Diffstat (limited to 'third_party/py')
-rw-r--r--  third_party/py/abseil/BUILD                                  |   16
-rw-r--r--  third_party/py/abseil/PKG-INFO                               |   21
-rw-r--r--  third_party/py/abseil/README.md                              |    6
-rw-r--r--  third_party/py/abseil/absl/__init__.py                       |   13
-rw-r--r--  third_party/py/abseil/absl/app.py                            |  390
-rw-r--r--  third_party/py/abseil/absl/command_name.py                   |   67
-rw-r--r--  third_party/py/abseil/absl/flags/__init__.py                 |  148
-rw-r--r--  third_party/py/abseil/absl/flags/_argument_parser.py         |  491
-rw-r--r--  third_party/py/abseil/absl/flags/_defines.py                 |  559
-rw-r--r--  third_party/py/abseil/absl/flags/_exceptions.py              |  112
-rw-r--r--  third_party/py/abseil/absl/flags/_flag.py                    |  391
-rw-r--r--  third_party/py/abseil/absl/flags/_flagvalues.py              | 1244
-rw-r--r--  third_party/py/abseil/absl/flags/_helpers.py                 |  430
-rw-r--r--  third_party/py/abseil/absl/flags/_validators.py              |  424
-rw-r--r--  third_party/py/abseil/absl/logging/__init__.py               |  978
-rw-r--r--  third_party/py/abseil/absl/logging/converter.py              |  212
-rw-r--r--  third_party/py/abseil/absl/testing/__init__.py               |   13
-rw-r--r--  third_party/py/abseil/absl/testing/_bazelize_command.py      |   49
-rw-r--r--  third_party/py/abseil/absl/testing/absltest.py               | 1715
-rwxr-xr-x  third_party/py/abseil/absl/testing/flagsaver.py              |  183
-rwxr-xr-x  third_party/py/abseil/absl/testing/parameterized.py          |  520
-rwxr-xr-x  third_party/py/abseil/absl/testing/xml_reporter.py           |  445
-rw-r--r--  third_party/py/abseil/absl_py.egg-info/PKG-INFO              |   21
-rw-r--r--  third_party/py/abseil/absl_py.egg-info/SOURCES.txt           |   25
-rw-r--r--  third_party/py/abseil/absl_py.egg-info/dependency_links.txt  |    1
-rw-r--r--  third_party/py/abseil/absl_py.egg-info/requires.txt          |    1
-rw-r--r--  third_party/py/abseil/absl_py.egg-info/top_level.txt         |    1
-rw-r--r--  third_party/py/abseil/setup.cfg                              |    5
-rw-r--r--  third_party/py/abseil/setup.py                               |   61
29 files changed, 8542 insertions, 0 deletions
diff --git a/third_party/py/abseil/BUILD b/third_party/py/abseil/BUILD
new file mode 100644
index 0000000000..5bf0a22e4c
--- /dev/null
+++ b/third_party/py/abseil/BUILD
@@ -0,0 +1,16 @@
+licenses(["notice"])
+
+filegroup(
+ name = "srcs",
+ srcs = glob(["**"]),
+ visibility = ["//visibility:public"],
+)
+
+py_library(
+ name = "abseil",
+ deps = ["//third_party/py/six"],
+ srcs = glob(["**/*.py"]),
+ srcs_version = "PY2AND3",
+ imports = ["."],
+ visibility = ["//visibility:public"],
+)
diff --git a/third_party/py/abseil/PKG-INFO b/third_party/py/abseil/PKG-INFO
new file mode 100644
index 0000000000..c9b961612a
--- /dev/null
+++ b/third_party/py/abseil/PKG-INFO
@@ -0,0 +1,21 @@
+Metadata-Version: 1.1
+Name: absl-py
+Version: 0.1.1
+Summary: Abseil Python Common Libraries
+Home-page: https://github.com/abseil/abseil-py
+Author: The Abseil Authors
+Author-email: UNKNOWN
+License: Apache 2.0
+Description: UNKNOWN
+Platform: UNKNOWN
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Intended Audience :: Developers
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Operating System :: OS Independent
diff --git a/third_party/py/abseil/README.md b/third_party/py/abseil/README.md
new file mode 100644
index 0000000000..b4adbbb224
--- /dev/null
+++ b/third_party/py/abseil/README.md
@@ -0,0 +1,6 @@
+[abseil-py](https://github.com/abseil/abseil-py)
+--------
+
+* Version: 0.1.1
+* License: Apache 2.0
+* From: [https://pypi.python.org/packages/ce/7b/a15c0c6647010bae2b06698af7039db34f4d5c723cde14dea4446e746448/absl-py-0.1.1.tar.gz](https://pypi.python.org/packages/ce/7b/a15c0c6647010bae2b06698af7039db34f4d5c723cde14dea4446e746448/absl-py-0.1.1.tar.gz)
diff --git a/third_party/py/abseil/absl/__init__.py b/third_party/py/abseil/absl/__init__.py
new file mode 100644
index 0000000000..a3bd1cd518
--- /dev/null
+++ b/third_party/py/abseil/absl/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/third_party/py/abseil/absl/app.py b/third_party/py/abseil/absl/app.py
new file mode 100644
index 0000000000..370cfc5fb6
--- /dev/null
+++ b/third_party/py/abseil/absl/app.py
@@ -0,0 +1,390 @@
+# Copyright 2017 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Generic entry point for Abseil Python applications.
+
+To use this module, define a 'main' function with a single 'argv' argument and
+call app.run(main). For example:
+
+def main(argv):
+ del argv # Unused.
+
+if __name__ == '__main__':
+ app.run(main)
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import errno
+import os
+import pdb
+import sys
+import traceback
+
+from absl import command_name
+from absl import flags
+from absl import logging
+
+try:
+ import faulthandler
+except ImportError:
+ faulthandler = None
+
+
+FLAGS = flags.FLAGS
+
+flags.DEFINE_boolean('run_with_pdb', False, 'Set to true for PDB debug mode')
+flags.DEFINE_boolean('pdb_post_mortem', False,
+ 'Set to true to handle uncaught exceptions with PDB '
+ 'post mortem.')
+flags.DEFINE_boolean('run_with_profiling', False,
+ 'Set to true for profiling the script. '
+ 'Execution will be slower, and the output format might '
+ 'change over time.')
+flags.DEFINE_string('profile_file', None,
+ 'Dump profile information to a file (for python -m '
+ 'pstats). Implies --run_with_profiling.')
+flags.DEFINE_boolean('use_cprofile_for_profiling', True,
+ 'Use cProfile instead of the profile module for '
+ 'profiling. This has no effect unless '
+ '--run_with_profiling is set.')
+flags.DEFINE_boolean('only_check_args', False,
+ 'Set to true to validate args and exit.',
+ allow_hide_cpp=True)
+
+
+
+# If main() exits via an abnormal exception, call into these
+# handlers before exiting.
+EXCEPTION_HANDLERS = []
+
+
+class Error(Exception):
+ pass
+
+
+class UsageError(Error):
+ """Exception raised when the arguments supplied by the user are invalid.
+
+ Raise this when the arguments supplied are invalid from the point of
+ view of the application. For example when two mutually exclusive
+ flags have been supplied or when there are not enough non-flag
+ arguments. It is distinct from flags.Error which covers the lower
+ level of parsing and validating individual flags.
+ """
+
+ def __init__(self, message, exitcode=1):
+ super(UsageError, self).__init__(message)
+ self.exitcode = exitcode
+
+
+class HelpFlag(flags.BooleanFlag):
+ """Special boolean flag that displays usage and raises SystemExit."""
+ NAME = 'help'
+ SHORT_NAME = '?'
+
+ def __init__(self):
+ super(HelpFlag, self).__init__(
+ self.NAME, False, 'show this help',
+ short_name=self.SHORT_NAME, allow_hide_cpp=True)
+
+ def parse(self, arg):
+ if arg:
+ usage(shorthelp=True, writeto_stdout=True)
+ # Advertise --helpfull on stdout, since usage() was on stdout.
+ print()
+ print('Try --helpfull to get a list of all flags.')
+ sys.exit(1)
+
+
+class HelpshortFlag(HelpFlag):
+ """--helpshort is an alias for --help."""
+ NAME = 'helpshort'
+ SHORT_NAME = None
+
+
+class HelpfullFlag(flags.BooleanFlag):
+ """Display help for flags in this module and all dependent modules."""
+
+ def __init__(self):
+ super(HelpfullFlag, self).__init__(
+ 'helpfull', False, 'show full help', allow_hide_cpp=True)
+
+ def parse(self, arg):
+ if arg:
+ usage(writeto_stdout=True)
+ sys.exit(1)
+
+
+class HelpXMLFlag(flags.BooleanFlag):
+ """Similar to HelpfullFlag, but generates output in XML format."""
+
+ def __init__(self):
+ super(HelpXMLFlag, self).__init__(
+ 'helpxml', False, 'like --helpfull, but generates XML output',
+ allow_hide_cpp=True)
+
+ def parse(self, arg):
+ if arg:
+ flags.FLAGS.write_help_in_xml_format(sys.stdout)
+ sys.exit(1)
+
+
+def parse_flags_with_usage(args):
+ """Tries to parse the flags, print usage, and exit if unparseable.
+
+ Args:
+ args: [str], a non-empty list of the command line arguments including
+ program name.
+
+ Returns:
+ [str], a non-empty list of remaining command line arguments after parsing
+ flags, including program name.
+ """
+ try:
+ return FLAGS(args)
+ except flags.Error as error:
+ sys.stderr.write('FATAL Flags parsing error: %s\n' % error)
+ sys.stderr.write('Pass --helpshort or --helpfull to see help on flags.\n')
+ sys.exit(1)
+
+
+_define_help_flags_called = False
+
+
+def define_help_flags():
+ """Registers help flags. Idempotent."""
+ # Use a global to ensure idempotence.
+ global _define_help_flags_called
+
+ if not _define_help_flags_called:
+ flags.DEFINE_flag(HelpFlag())
+ flags.DEFINE_flag(HelpshortFlag()) # alias for --help
+ flags.DEFINE_flag(HelpfullFlag())
+ flags.DEFINE_flag(HelpXMLFlag())
+ _define_help_flags_called = True
+
+
+def register_and_parse_flags_with_usage(argv=None):
+ """Registers help flags, parses arguments and shows usage if appropriate.
+
+ This also calls sys.exit(0) if flag --only_check_args is True.
+
+ Args:
+ argv: [str], a non-empty list of the command line arguments including
+ program name, sys.argv is used if None.
+
+ Returns:
+ [str], a non-empty list of remaining command line arguments after parsing
+ flags, including program name.
+ """
+ define_help_flags()
+
+ argv = parse_flags_with_usage(sys.argv if argv is None else argv)
+ # Exit when told so.
+ if FLAGS.only_check_args:
+ sys.exit(0)
+ # Immediately after flags are parsed, bump verbosity to INFO if the flag has
+ # not been set.
+ if FLAGS['verbosity'].using_default_value:
+ FLAGS.verbosity = 0
+ return argv
+
+
+def _run_main(main, argv):
+ """Calls main, optionally with pdb or profiler."""
+ if FLAGS.run_with_pdb:
+ sys.exit(pdb.runcall(main, argv))
+ elif FLAGS.run_with_profiling or FLAGS.profile_file:
+ # Avoid import overhead since most apps (including performance-sensitive
+ # ones) won't be run with profiling.
+ import atexit
+ if FLAGS.use_cprofile_for_profiling:
+ import cProfile as profile
+ else:
+ import profile
+ profiler = profile.Profile()
+ if FLAGS.profile_file:
+ atexit.register(profiler.dump_stats, FLAGS.profile_file)
+ else:
+ atexit.register(profiler.print_stats)
+ retval = profiler.runcall(main, argv)
+ sys.exit(retval)
+ else:
+ sys.exit(main(argv))
+
+
+def _call_exception_handlers(exception):
+ """Calls any installed exception handlers."""
+ for handler in EXCEPTION_HANDLERS:
+ try:
+ if handler.wants(exception):
+ handler.handle(exception)
+ except: # pylint: disable=bare-except
+ try:
+ # We don't want to stop for exceptions in the exception handlers but
+ # we shouldn't hide them either.
+ logging.error(traceback.format_exc())
+ except: # pylint: disable=bare-except
+ # In case even the logging statement fails, ignore.
+ pass
+
+
+def run(main, argv=None):
+ """Begins executing the program.
+
+ Args:
+ main: The main function to execute. It takes an single argument "argv",
+ which is a list of command line arguments with parsed flags removed.
+ argv: A non-empty list of the command line arguments including program name,
+ sys.argv is used if None.
+ - Parses command line flags with the flag module.
+ - If there are any errors, prints usage().
+ - Calls main() with the remaining arguments.
+ - If main() raises a UsageError, prints usage and the error message.
+ """
+ try:
+ argv = _run_init(sys.argv if argv is None else argv)
+ try:
+ _run_main(main, argv)
+ except UsageError as error:
+ usage(shorthelp=True, detailed_error=error, exitcode=error.exitcode)
+ except:
+ if FLAGS.pdb_post_mortem:
+ traceback.print_exc()
+ pdb.post_mortem()
+ raise
+ except Exception as e:
+ _call_exception_handlers(e)
+ raise
+
+
+def _run_init(argv):
+ """Does one-time initialization and re-parses flags on rerun."""
+ if _run_init.done:
+ return parse_flags_with_usage(argv)
+ command_name.make_process_name_useful()
+ # Set up absl logging handler.
+ logging.use_absl_handler()
+ argv = register_and_parse_flags_with_usage(argv=argv)
+ if faulthandler:
+ try:
+ faulthandler.enable()
+ except Exception: # pylint: disable=broad-except
+ # Some tests verify stderr output very closely, so don't print anything.
+ # Disabled faulthandler is a low-impact error.
+ pass
+ _run_init.done = True
+ return argv
+
+
+_run_init.done = False
+
+
+def usage(shorthelp=False, writeto_stdout=False, detailed_error=None,
+ exitcode=None):
+ """Writes __main__'s docstring to stderr with some help text.
+
+ Args:
+ shorthelp: bool, if True, prints only flags from this module,
+ rather than all flags.
+ writeto_stdout: bool, if True, writes help message to stdout,
+ rather than to stderr.
+ detailed_error: str, additional detail about why usage info was presented.
+ exitcode: optional integer, if set, exits with this status code after
+ writing help.
+ """
+ if writeto_stdout:
+ stdfile = sys.stdout
+ else:
+ stdfile = sys.stderr
+
+ doc = sys.modules['__main__'].__doc__
+ if not doc:
+ doc = '\nUSAGE: %s [flags]\n' % sys.argv[0]
+ doc = flags.text_wrap(doc, indent=' ', firstline_indent='')
+ else:
+ # Replace all '%s' with sys.argv[0], and all '%%' with '%'.
+ num_specifiers = doc.count('%') - 2 * doc.count('%%')
+ try:
+ doc %= (sys.argv[0],) * num_specifiers
+ except (OverflowError, TypeError, ValueError):
+ # Just display the docstring as-is.
+ pass
+ if shorthelp:
+ flag_str = FLAGS.main_module_help()
+ else:
+ flag_str = str(FLAGS)
+ try:
+ stdfile.write(doc)
+ if flag_str:
+ stdfile.write('\nflags:\n')
+ stdfile.write(flag_str)
+ stdfile.write('\n')
+ if detailed_error is not None:
+ stdfile.write('\n%s\n' % detailed_error)
+ except IOError as e:
+ # We avoid printing a huge backtrace if we get EPIPE, because
+ # "foo.par --help | less" is a frequent use case.
+ if e.errno != errno.EPIPE:
+ raise
+ if exitcode is not None:
+ sys.exit(exitcode)
+
+
+class ExceptionHandler(object):
+ """Base exception handler from which other may inherit."""
+
+ def wants(self, exc):
+ """Returns whether this handler wants to handle the exception or not.
+
+ This base class returns True for all exceptions by default. Override in
+ subclass if it wants to be more selective.
+
+ Args:
+ exc: Exception, the current exception.
+ """
+ del exc # Unused.
+ return True
+
+ def handle(self, exc):
+ """Do something with the current exception.
+
+ Args:
+ exc: Exception, the current exception
+
+ This method must be overridden.
+ """
+ raise NotImplementedError()
+
+
+def install_exception_handler(handler):
+ """Installs an exception handler.
+
+ Args:
+ handler: ExceptionHandler, the exception handler to install.
+
+ Raises:
+ TypeError: Raised when the handler was not of the correct type.
+
+ All installed exception handlers will be called if main() exits via
+ an abnormal exception, i.e. not one of SystemExit, KeyboardInterrupt,
+ FlagsError or UsageError.
+ """
+ if not isinstance(handler, ExceptionHandler):
+ raise TypeError('handler of type %s does not inherit from ExceptionHandler'
+ % type(handler))
+ EXCEPTION_HANDLERS.append(handler)
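
For reference, a minimal sketch of how an application drives this entry point; the flag name and main() body below are hypothetical and not part of this import:

from absl import app
from absl import flags

FLAGS = flags.FLAGS
flags.DEFINE_string('greeting', 'hello', 'Greeting to print.')  # hypothetical flag


def main(argv):
  # argv is the remaining command line after flag parsing, including argv[0].
  if len(argv) > 2:
    raise app.UsageError('Too many command-line arguments.')
  print(FLAGS.greeting)


if __name__ == '__main__':
  app.run(main)  # parses flags, runs main(argv), and prints usage on UsageError
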
diff --git a/third_party/py/abseil/absl/command_name.py b/third_party/py/abseil/absl/command_name.py
new file mode 100644
index 0000000000..3bf9fad3d2
--- /dev/null
+++ b/third_party/py/abseil/absl/command_name.py
@@ -0,0 +1,67 @@
+# Copyright 2017 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""A tiny stand alone library to change the kernel process name on Linux."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import os
+import sys
+
+# This library must be kept small and stand alone. It is used by small things
+# that require no extension modules.
+
+
+def make_process_name_useful():
+ """Sets the process name to something better than 'python' if possible."""
+ set_kernel_process_name(os.path.basename(sys.argv[0]))
+
+
+def set_kernel_process_name(name):
+ """Changes the Kernel's /proc/self/status process name on Linux.
+
+ The kernel name is NOT what will be shown by the ps or top command.
+ It is a 15 character string stored in the kernel's process table that
+ is included in the kernel log when a process is OOM killed.
+ The first 15 bytes of name are used. Non-ASCII unicode is replaced with '?'.
+
+ Does nothing if /proc/self/comm cannot be written or prctl() fails.
+
+ Args:
+ name: bytes|unicode, the Linux kernel's command name to set.
+ """
+ if not isinstance(name, bytes):
+ name = name.encode('ascii', 'replace')
+ try:
+ # This is preferred to using ctypes to try and call prctl() when possible.
+ with open('/proc/self/comm', 'wb') as proc_comm:
+ proc_comm.write(name[:15])
+ except EnvironmentError:
+ try:
+ import ctypes
+ except ImportError:
+ return # No ctypes.
+ try:
+ libc = ctypes.CDLL('libc.so.6')
+ except EnvironmentError:
+ return # No libc.so.6.
+ pr_set_name = ctypes.c_ulong(15) # linux/prctl.h PR_SET_NAME value.
+ zero = ctypes.c_ulong(0)
+ try:
+ libc.prctl(pr_set_name, name, zero, zero, zero)
+ # Ignore the prctl return value. Nothing we can do if it errored.
+ except AttributeError:
+ return # No prctl.
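
A short sketch of calling this helper; the process name below is hypothetical, and both calls silently do nothing where /proc/self/comm and prctl() are unavailable:

from absl import command_name

command_name.make_process_name_useful()         # uses os.path.basename(sys.argv[0])
command_name.set_kernel_process_name('my_job')  # only the first 15 bytes are kept
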
diff --git a/third_party/py/abseil/absl/flags/__init__.py b/third_party/py/abseil/absl/flags/__init__.py
new file mode 100644
index 0000000000..8f78711398
--- /dev/null
+++ b/third_party/py/abseil/absl/flags/__init__.py
@@ -0,0 +1,148 @@
+# Copyright 2017 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""This package is used to define and parse command line flags.
+
+This package defines a *distributed* flag-definition policy: rather than
+an application having to define all flags in or near main(), each Python
+module defines flags that are useful to it. When one Python module
+imports another, it gains access to the other's flags. (This is
+implemented by having all modules share a common, global registry object
+containing all the flag information.)
+
+Flags are defined through the use of one of the DEFINE_xxx functions.
+The specific function used determines how the flag is parsed, checked,
+and optionally type-converted, when it's seen on the command line.
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import getopt
+import os
+import re
+import sys
+import types
+import warnings
+
+from absl.flags import _argument_parser
+from absl.flags import _defines
+from absl.flags import _exceptions
+from absl.flags import _flag
+from absl.flags import _flagvalues
+from absl.flags import _helpers
+from absl.flags import _validators
+import six
+
+# Initialize the FLAGS_MODULE as early as possible.
+# It's only used by adopt_module_key_flags to take SPECIAL_FLAGS into account.
+_helpers.FLAGS_MODULE = sys.modules[__name__]
+
+# Add current module to disclaimed module ids.
+_helpers.disclaim_module_ids.add(id(sys.modules[__name__]))
+
+# DEFINE functions. They are explained in more details in the module doc string.
+# pylint: disable=invalid-name
+DEFINE = _defines.DEFINE
+DEFINE_flag = _defines.DEFINE_flag
+DEFINE_string = _defines.DEFINE_string
+DEFINE_boolean = _defines.DEFINE_boolean
+DEFINE_bool = DEFINE_boolean # Match C++ API.
+DEFINE_float = _defines.DEFINE_float
+DEFINE_integer = _defines.DEFINE_integer
+DEFINE_enum = _defines.DEFINE_enum
+DEFINE_list = _defines.DEFINE_list
+DEFINE_spaceseplist = _defines.DEFINE_spaceseplist
+DEFINE_multi = _defines.DEFINE_multi
+DEFINE_multi_string = _defines.DEFINE_multi_string
+DEFINE_multi_integer = _defines.DEFINE_multi_integer
+DEFINE_multi_float = _defines.DEFINE_multi_float
+DEFINE_multi_enum = _defines.DEFINE_multi_enum
+DEFINE_alias = _defines.DEFINE_alias
+# pylint: enable=invalid-name
+
+
+# Flag validators.
+register_validator = _validators.register_validator
+validator = _validators.validator
+register_multi_flags_validator = _validators.register_multi_flags_validator
+multi_flags_validator = _validators.multi_flags_validator
+mark_flag_as_required = _validators.mark_flag_as_required
+mark_flags_as_required = _validators.mark_flags_as_required
+mark_flags_as_mutual_exclusive = _validators.mark_flags_as_mutual_exclusive
+
+
+# Key flag related functions.
+declare_key_flag = _defines.declare_key_flag
+adopt_module_key_flags = _defines.adopt_module_key_flags
+disclaim_key_flags = _defines.disclaim_key_flags
+
+
+# Module exceptions.
+# pylint: disable=invalid-name
+Error = _exceptions.Error
+CantOpenFlagFileError = _exceptions.CantOpenFlagFileError
+DuplicateFlagError = _exceptions.DuplicateFlagError
+IllegalFlagValueError = _exceptions.IllegalFlagValueError
+UnrecognizedFlagError = _exceptions.UnrecognizedFlagError
+UnparsedFlagAccessError = _exceptions.UnparsedFlagAccessError
+ValidationError = _exceptions.ValidationError
+FlagNameConflictsWithMethodError = _exceptions.FlagNameConflictsWithMethodError
+
+
+# Public classes.
+Flag = _flag.Flag
+BooleanFlag = _flag.BooleanFlag
+EnumFlag = _flag.EnumFlag
+MultiFlag = _flag.MultiFlag
+FlagValues = _flagvalues.FlagValues
+ArgumentParser = _argument_parser.ArgumentParser
+BooleanParser = _argument_parser.BooleanParser
+EnumParser = _argument_parser.EnumParser
+ArgumentSerializer = _argument_parser.ArgumentSerializer
+FloatParser = _argument_parser.FloatParser
+IntegerParser = _argument_parser.IntegerParser
+BaseListParser = _argument_parser.BaseListParser
+ListParser = _argument_parser.ListParser
+ListSerializer = _argument_parser.ListSerializer
+CsvListSerializer = _argument_parser.CsvListSerializer
+WhitespaceSeparatedListParser = _argument_parser.WhitespaceSeparatedListParser
+# pylint: enable=invalid-name
+
+
+# Helper functions.
+get_help_width = _helpers.get_help_width
+text_wrap = _helpers.text_wrap
+flag_dict_to_args = _helpers.flag_dict_to_args
+doc_to_help = _helpers.doc_to_help
+
+
+# Special flags.
+_helpers.SPECIAL_FLAGS = FlagValues()
+
+DEFINE_string(
+ 'flagfile', '',
+ 'Insert flag definitions from the given file into the command line.',
+ _helpers.SPECIAL_FLAGS)
+
+DEFINE_string(
+ 'undefok', '',
+ 'comma-separated list of flag names that it is okay to specify '
+ 'on the command line even if the program does not define a flag '
+ 'with that name. IMPORTANT: flags in this list that have '
+ 'arguments MUST use the --flag=value format.', _helpers.SPECIAL_FLAGS)
+
+# The global FlagValues instance.
+FLAGS = _flagvalues.FLAGS
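
A minimal sketch of the distributed flag-definition pattern described in the package docstring; the flag names, defaults, and validator below are hypothetical:

from absl import app
from absl import flags

FLAGS = flags.FLAGS
flags.DEFINE_integer('num_retries', 3, 'Number of retries.', lower_bound=0)
flags.DEFINE_enum('mode', None, ['fast', 'safe'], 'Execution mode.')
flags.mark_flag_as_required('mode')  # parsing fails unless --mode is given


@flags.validator('num_retries')
def _check_retries(value):
  # Returning False marks the supplied value as invalid.
  return value <= 10


def main(argv):
  del argv  # Unused.
  print(FLAGS.mode, FLAGS.num_retries)


if __name__ == '__main__':
  app.run(main)
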
diff --git a/third_party/py/abseil/absl/flags/_argument_parser.py b/third_party/py/abseil/absl/flags/_argument_parser.py
new file mode 100644
index 0000000000..cb57b8d44b
--- /dev/null
+++ b/third_party/py/abseil/absl/flags/_argument_parser.py
@@ -0,0 +1,491 @@
+# Copyright 2017 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Contains base classes used to parse and convert arguments.
+
+Do NOT import this module directly. Import the flags package and use the
+aliases defined at the package level instead.
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import csv
+import io
+import string
+
+from absl.flags import _helpers
+import six
+
+
+def _is_integer_type(instance):
+ """Returns True if instance is an integer, and not a bool."""
+ return (isinstance(instance, six.integer_types) and
+ not isinstance(instance, bool))
+
+
+class _ArgumentParserCache(type):
+ """Metaclass used to cache and share argument parsers among flags."""
+
+ _instances = {}
+
+ def __call__(cls, *args, **kwargs):
+ """Returns an instance of the argument parser cls.
+
+ This method overrides behavior of the __new__ methods in
+ all subclasses of ArgumentParser (inclusive). If an instance
+ for cls with the same set of arguments exists, this instance is
+ returned, otherwise a new instance is created.
+
+ If any keyword arguments are defined, or the values in args
+ are not hashable, this method always returns a new instance of
+ cls.
+
+ Args:
+ *args: Positional initializer arguments.
+ **kwargs: Initializer keyword arguments.
+
+ Returns:
+ An instance of cls, shared or new.
+ """
+ if kwargs:
+ return type.__call__(cls, *args, **kwargs)
+ else:
+ instances = cls._instances
+ key = (cls,) + tuple(args)
+ try:
+ return instances[key]
+ except KeyError:
+ # No cache entry for key exists, create a new one.
+ return instances.setdefault(key, type.__call__(cls, *args))
+ except TypeError:
+ # An object in args cannot be hashed, always return
+ # a new instance.
+ return type.__call__(cls, *args)
+
+
+class ArgumentParser(six.with_metaclass(_ArgumentParserCache, object)):
+ """Base class used to parse and convert arguments.
+
+ The parse() method checks to make sure that the string argument is a
+ legal value and convert it to a native type. If the value cannot be
+ converted, it should throw a 'ValueError' exception with a human
+ readable explanation of why the value is illegal.
+
+ Subclasses should also define a syntactic_help string which may be
+ presented to the user to describe the form of the legal values.
+
+ Argument parser classes must be stateless, since instances are cached
+ and shared between flags. Initializer arguments are allowed, but all
+ member variables must be derived from initializer arguments only.
+ """
+
+ syntactic_help = ''
+
+ def parse(self, argument):
+ """Parses the string argument and returns the native value.
+
+ By default it returns its argument unmodified.
+
+ Args:
+ argument: string argument passed in the commandline.
+
+ Raises:
+ ValueError: Raised when it fails to parse the argument.
+ TypeError: Raised when the argument has the wrong type.
+
+ Returns:
+ The parsed value in native type.
+ """
+ if not isinstance(argument, six.string_types):
+ raise TypeError('flag value must be a string, found "{}"'.format(
+ type(argument)))
+ return argument
+
+ def flag_type(self):
+ """Returns a string representing the type of the flag."""
+ return 'string'
+
+ def _custom_xml_dom_elements(self, doc):
+ """Returns a list of minidom.Element to add additional flag information.
+
+ Args:
+ doc: minidom.Document, the DOM document it should create nodes from.
+ """
+ del doc # Unused.
+ return []
+
+
+class ArgumentSerializer(object):
+ """Base class for generating string representations of a flag value."""
+
+ def serialize(self, value):
+ """Returns a serialized string of the value."""
+ return _helpers.str_or_unicode(value)
+
+
+class NumericParser(ArgumentParser):
+ """Parser of numeric values.
+
+ Parsed value may be bounded to a given upper and lower bound.
+ """
+
+ def is_outside_bounds(self, val):
+ """Returns whether the value is outside the bounds or not."""
+ return ((self.lower_bound is not None and val < self.lower_bound) or
+ (self.upper_bound is not None and val > self.upper_bound))
+
+ def parse(self, argument):
+ """See base class."""
+ val = self.convert(argument)
+ if self.is_outside_bounds(val):
+ raise ValueError('%s is not %s' % (val, self.syntactic_help))
+ return val
+
+ def _custom_xml_dom_elements(self, doc):
+ elements = []
+ if self.lower_bound is not None:
+ elements.append(_helpers.create_xml_dom_element(
+ doc, 'lower_bound', self.lower_bound))
+ if self.upper_bound is not None:
+ elements.append(_helpers.create_xml_dom_element(
+ doc, 'upper_bound', self.upper_bound))
+ return elements
+
+ def convert(self, argument):
+ """Returns the correct numeric value of argument.
+
+ Subclass must implement this method, and raise TypeError if argument is not
+ string or has the right numeric type.
+
+ Args:
+ argument: string argument passed in the commandline, or the numeric type.
+
+ Raises:
+ TypeError: Raised when argument is not a string or the right numeric type.
+ ValueError: Raised when failed to convert argument to the numeric value.
+ """
+ raise NotImplementedError
+
+
+class FloatParser(NumericParser):
+ """Parser of floating point values.
+
+ Parsed value may be bounded to a given upper and lower bound.
+ """
+ number_article = 'a'
+ number_name = 'number'
+ syntactic_help = ' '.join((number_article, number_name))
+
+ def __init__(self, lower_bound=None, upper_bound=None):
+ super(FloatParser, self).__init__()
+ self.lower_bound = lower_bound
+ self.upper_bound = upper_bound
+ sh = self.syntactic_help
+ if lower_bound is not None and upper_bound is not None:
+ sh = ('%s in the range [%s, %s]' % (sh, lower_bound, upper_bound))
+ elif lower_bound == 0:
+ sh = 'a non-negative %s' % self.number_name
+ elif upper_bound == 0:
+ sh = 'a non-positive %s' % self.number_name
+ elif upper_bound is not None:
+ sh = '%s <= %s' % (self.number_name, upper_bound)
+ elif lower_bound is not None:
+ sh = '%s >= %s' % (self.number_name, lower_bound)
+ self.syntactic_help = sh
+
+ def convert(self, argument):
+ """Returns the float value of argument."""
+ if (_is_integer_type(argument) or isinstance(argument, float) or
+ isinstance(argument, six.string_types)):
+ return float(argument)
+ else:
+ raise TypeError(
+ 'Expect argument to be a string, int, or float, found {}'.format(
+ type(argument)))
+
+ def flag_type(self):
+ """See base class."""
+ return 'float'
+
+
+class IntegerParser(NumericParser):
+ """Parser of an integer value.
+
+ Parsed value may be bounded to a given upper and lower bound.
+ """
+ number_article = 'an'
+ number_name = 'integer'
+ syntactic_help = ' '.join((number_article, number_name))
+
+ def __init__(self, lower_bound=None, upper_bound=None):
+ super(IntegerParser, self).__init__()
+ self.lower_bound = lower_bound
+ self.upper_bound = upper_bound
+ sh = self.syntactic_help
+ if lower_bound is not None and upper_bound is not None:
+ sh = ('%s in the range [%s, %s]' % (sh, lower_bound, upper_bound))
+ elif lower_bound == 1:
+ sh = 'a positive %s' % self.number_name
+ elif upper_bound == -1:
+ sh = 'a negative %s' % self.number_name
+ elif lower_bound == 0:
+ sh = 'a non-negative %s' % self.number_name
+ elif upper_bound == 0:
+ sh = 'a non-positive %s' % self.number_name
+ elif upper_bound is not None:
+ sh = '%s <= %s' % (self.number_name, upper_bound)
+ elif lower_bound is not None:
+ sh = '%s >= %s' % (self.number_name, lower_bound)
+ self.syntactic_help = sh
+
+ def convert(self, argument):
+ """Returns the int value of argument."""
+ if _is_integer_type(argument):
+ return argument
+ elif isinstance(argument, six.string_types):
+ base = 10
+ if len(argument) > 2 and argument[0] == '0':
+ if argument[1] == 'o':
+ base = 8
+ elif argument[1] == 'x':
+ base = 16
+ return int(argument, base)
+ else:
+ raise TypeError('Expect argument to be a string or int, found {}'.format(
+ type(argument)))
+
+ def flag_type(self):
+ """See base class."""
+ return 'int'
+
+
+class BooleanParser(ArgumentParser):
+ """Parser of boolean values."""
+
+ def parse(self, argument):
+ """See base class."""
+ if isinstance(argument, str):
+ if argument.lower() in ('true', 't', '1'):
+ return True
+ elif argument.lower() in ('false', 'f', '0'):
+ return False
+ elif isinstance(argument, six.integer_types):
+ # Only allow bool or integer 0, 1.
+ # Note that float 1.0 == True, 0.0 == False.
+ bool_value = bool(argument)
+ if argument == bool_value:
+ return bool_value
+
+ raise ValueError('Non-boolean argument to boolean flag', argument)
+
+ def flag_type(self):
+ """See base class."""
+ return 'bool'
+
+
+class EnumParser(ArgumentParser):
+ """Parser of a string enum value (a string value from a given set)."""
+
+ def __init__(self, enum_values, case_sensitive=True):
+ """Initializes EnumParser.
+
+ Args:
+ enum_values: [str], a non-empty list of string values in the enum.
+ case_sensitive: bool, whether or not the enum is to be case-sensitive.
+
+ Raises:
+ ValueError: When enum_values is empty.
+ """
+ if not enum_values:
+ raise ValueError(
+ 'enum_values cannot be empty, found "{}"'.format(enum_values))
+ super(EnumParser, self).__init__()
+ self.enum_values = enum_values
+ self.case_sensitive = case_sensitive
+
+ def parse(self, argument):
+ """Determines validity of argument and returns the correct element of enum.
+
+ Args:
+ argument: str, the supplied flag value.
+
+ Returns:
+ The first matching element from enum_values.
+
+ Raises:
+ ValueError: Raised when argument didn't match anything in enum.
+ """
+ if self.case_sensitive:
+ if argument not in self.enum_values:
+ raise ValueError('value should be one of <%s>' %
+ '|'.join(self.enum_values))
+ else:
+ return argument
+ else:
+ if argument.upper() not in [value.upper() for value in self.enum_values]:
+ raise ValueError('value should be one of <%s>' %
+ '|'.join(self.enum_values))
+ else:
+ return [value for value in self.enum_values
+ if value.upper() == argument.upper()][0]
+
+ def flag_type(self):
+ """See base class."""
+ return 'string enum'
+
+
+class ListSerializer(ArgumentSerializer):
+
+ def __init__(self, list_sep):
+ self.list_sep = list_sep
+
+ def serialize(self, value):
+ """See base class."""
+ return self.list_sep.join([_helpers.str_or_unicode(x) for x in value])
+
+
+class CsvListSerializer(ArgumentSerializer):
+
+ def __init__(self, list_sep):
+ self.list_sep = list_sep
+
+ def serialize(self, value):
+ """Serializes a list as a CSV string or unicode."""
+ if six.PY2:
+ # In Python2 csv.writer doesn't accept unicode, so we convert to UTF-8.
+ output = io.BytesIO()
+ csv.writer(output).writerow([unicode(x).encode('utf-8') for x in value])
+ serialized_value = output.getvalue().decode('utf-8').strip()
+ else:
+ # In Python3 csv.writer expects a text stream.
+ output = io.StringIO()
+ csv.writer(output).writerow([str(x) for x in value])
+ serialized_value = output.getvalue().strip()
+
+ # We need the returned value to be pure ascii or Unicodes so that
+ # when the xml help is generated they are usefully encodable.
+ return _helpers.str_or_unicode(serialized_value)
+
+
+class BaseListParser(ArgumentParser):
+ """Base class for a parser of lists of strings.
+
+ To extend, inherit from this class; from the subclass __init__, call
+
+ BaseListParser.__init__(self, token, name)
+
+ where token is a character used to tokenize, and name is a description
+ of the separator.
+ """
+
+ def __init__(self, token=None, name=None):
+ assert name
+ super(BaseListParser, self).__init__()
+ self._token = token
+ self._name = name
+ self.syntactic_help = 'a %s separated list' % self._name
+
+ def parse(self, argument):
+ """See base class."""
+ if isinstance(argument, list):
+ return argument
+ elif not argument:
+ return []
+ else:
+ return [s.strip() for s in argument.split(self._token)]
+
+ def flag_type(self):
+ """See base class."""
+ return '%s separated list of strings' % self._name
+
+
+class ListParser(BaseListParser):
+ """Parser for a comma-separated list of strings."""
+
+ def __init__(self):
+ super(ListParser, self).__init__(',', 'comma')
+
+ def parse(self, argument):
+ """Parses argument as comma-separated list of strings."""
+ if isinstance(argument, list):
+ return argument
+ elif not argument:
+ return []
+ else:
+ try:
+ return [s.strip() for s in list(csv.reader([argument], strict=True))[0]]
+ except csv.Error as e:
+ # Provide a helpful report for case like
+ # --listflag="$(printf 'hello,\nworld')"
+ # IOW, list flag values containing naked newlines. This error
+ # was previously "reported" by allowing csv.Error to
+ # propagate.
+ raise ValueError('Unable to parse the value %r as a %s: %s'
+ % (argument, self.flag_type(), e))
+
+ def _custom_xml_dom_elements(self, doc):
+ elements = super(ListParser, self)._custom_xml_dom_elements(doc)
+ elements.append(_helpers.create_xml_dom_element(
+ doc, 'list_separator', repr(',')))
+ return elements
+
+
+class WhitespaceSeparatedListParser(BaseListParser):
+ """Parser for a whitespace-separated list of strings."""
+
+ def __init__(self, comma_compat=False):
+ """Initializer.
+
+ Args:
+ comma_compat: bool, whether to support comma as an additional separator.
+ If False then only whitespace is supported. This is intended only for
+ backwards compatibility with flags that used to be comma-separated.
+ """
+ self._comma_compat = comma_compat
+ name = 'whitespace or comma' if self._comma_compat else 'whitespace'
+ super(WhitespaceSeparatedListParser, self).__init__(None, name)
+
+ def parse(self, argument):
+ """Parses argument as whitespace-separated list of strings.
+
+ It also parses argument as comma-separated list of strings if requested.
+
+ Args:
+ argument: string argument passed in the commandline.
+
+ Returns:
+ [str], the parsed flag value.
+ """
+ if isinstance(argument, list):
+ return argument
+ elif not argument:
+ return []
+ else:
+ if self._comma_compat:
+ argument = argument.replace(',', ' ')
+ return argument.split()
+
+ def _custom_xml_dom_elements(self, doc):
+ elements = super(WhitespaceSeparatedListParser, self
+ )._custom_xml_dom_elements(doc)
+ separators = list(string.whitespace)
+ if self._comma_compat:
+ separators.append(',')
+ separators.sort()
+ for sep_char in separators:
+ elements.append(_helpers.create_xml_dom_element(
+ doc, 'list_separator', repr(sep_char)))
+ return elements
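
The BaseListParser docstring above describes how to add a parser for a custom separator; a minimal sketch following those instructions (the colon separator and flag name are hypothetical):

from absl import flags


class ColonListParser(flags.BaseListParser):
  """Parses a colon-separated list of strings, e.g. 'a:b:c'."""

  def __init__(self):
    # token is the character used to tokenize; name describes the separator.
    super(ColonListParser, self).__init__(':', 'colon')


flags.DEFINE(
    ColonListParser(), 'search_path', [],
    'Colon-separated list of directories to search.',
    serializer=flags.ListSerializer(':'))

With this in place, passing --search_path=/usr/lib:/lib on the command line parses to ['/usr/lib', '/lib'].
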
diff --git a/third_party/py/abseil/absl/flags/_defines.py b/third_party/py/abseil/absl/flags/_defines.py
new file mode 100644
index 0000000000..35680db0d7
--- /dev/null
+++ b/third_party/py/abseil/absl/flags/_defines.py
@@ -0,0 +1,559 @@
+# Copyright 2017 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""This modules contains flags DEFINE functions.
+
+Do NOT import this module directly. Import the flags package and use the
+aliases defined at the package level instead.
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import logging
+import sys
+import types
+
+from absl.flags import _argument_parser
+from absl.flags import _exceptions
+from absl.flags import _flag
+from absl.flags import _flagvalues
+from absl.flags import _helpers
+from absl.flags import _validators
+
+
+_helpers.disclaim_module_ids.add(id(sys.modules[__name__]))
+
+
+def _register_bounds_validator_if_needed(parser, name, flag_values):
+ """Enforces lower and upper bounds for numeric flags.
+
+ Args:
+ parser: NumericParser (either FloatParser or IntegerParser), provides lower
+ and upper bounds, and help text to display.
+ name: str, name of the flag
+ flag_values: FlagValues.
+ """
+ if parser.lower_bound is not None or parser.upper_bound is not None:
+
+ def checker(value):
+ if value is not None and parser.is_outside_bounds(value):
+ message = '%s is not %s' % (value, parser.syntactic_help)
+ raise _exceptions.ValidationError(message)
+ return True
+
+ _validators.register_validator(name, checker, flag_values=flag_values)
+
+
+def DEFINE(parser, name, default, help, flag_values=_flagvalues.FLAGS, # pylint: disable=redefined-builtin,invalid-name
+ serializer=None, module_name=None, **args):
+ """Registers a generic Flag object.
+
+ NOTE: in the docstrings of all DEFINE* functions, "registers" is short
+ for "creates a new flag and registers it".
+
+ Auxiliary function: clients should use the specialized DEFINE_<type>
+ function instead.
+
+ Args:
+ parser: ArgumentParser, used to parse the flag arguments.
+ name: str, the flag name.
+ default: The default value of the flag.
+ help: str, the help message.
+ flag_values: FlagValues, the FlagValues instance with which the flag will
+ be registered. This should almost never need to be overridden.
+ serializer: ArgumentSerializer, the flag serializer instance.
+ module_name: str, the name of the Python module declaring this flag.
+ If not provided, it will be computed using the stack trace of this call.
+ **args: dict, the extra keyword args that are passed to Flag __init__.
+ """
+ DEFINE_flag(_flag.Flag(parser, serializer, name, default, help, **args),
+ flag_values, module_name)
+
+
+def DEFINE_flag(flag, flag_values=_flagvalues.FLAGS, module_name=None): # pylint: disable=invalid-name
+ """Registers a 'Flag' object with a 'FlagValues' object.
+
+ By default, the global FLAGS 'FlagValue' object is used.
+
+ Typical users will use one of the more specialized DEFINE_xxx
+ functions, such as DEFINE_string or DEFINE_integer. But developers
+ who need to create Flag objects themselves should use this function
+ to register their flags.
+
+ Args:
+ flag: Flag, a flag that is key to the module.
+ flag_values: FlagValues, the FlagValues instance with which the flag will
+ be registered. This should almost never need to be overridden.
+ module_name: str, the name of the Python module declaring this flag.
+ If not provided, it will be computed using the stack trace of this call.
+ """
+ # Copying the reference to flag_values prevents pychecker warnings.
+ fv = flag_values
+ fv[flag.name] = flag
+ # Tell flag_values who's defining the flag.
+ if module_name:
+ module = sys.modules.get(module_name)
+ else:
+ module, module_name = _helpers.get_calling_module_object_and_name()
+ flag_values.register_flag_by_module(module_name, flag)
+ flag_values.register_flag_by_module_id(id(module), flag)
+
+
+def _internal_declare_key_flags(
+ flag_names, flag_values=_flagvalues.FLAGS, key_flag_values=None):
+ """Declares a flag as key for the calling module.
+
+ Internal function. User code should call declare_key_flag or
+ adopt_module_key_flags instead.
+
+ Args:
+ flag_names: [str], a list of strings that are names of already-registered
+ Flag objects.
+ flag_values: FlagValues, the FlagValues instance with which the flags listed
+ in flag_names have registered (the value of the flag_values
+ argument from the DEFINE_* calls that defined those flags).
+ This should almost never need to be overridden.
+ key_flag_values: FlagValues, the FlagValues instance that (among possibly
+ many other things) keeps track of the key flags for each module.
+ Default None means "same as flag_values". This should almost
+ never need to be overridden.
+
+ Raises:
+ UnrecognizedFlagError: Raised when the flag is not defined.
+ """
+ key_flag_values = key_flag_values or flag_values
+
+ module = _helpers.get_calling_module()
+
+ for flag_name in flag_names:
+ flag = flag_values[flag_name]
+ key_flag_values.register_key_flag_for_module(module, flag)
+
+
+def declare_key_flag(flag_name, flag_values=_flagvalues.FLAGS):
+ """Declares one flag as key to the current module.
+
+ Key flags are flags that are deemed really important for a module.
+ They are important when listing help messages; e.g., if the
+ --helpshort command-line flag is used, then only the key flags of the
+ main module are listed (instead of all flags, as in the case of
+ --helpfull).
+
+ Sample usage:
+
+ flags.declare_key_flag('flag_1')
+
+ Args:
+ flag_name: str, the name of an already declared flag.
+ (Redeclaring flags as key, including flags implicitly key
+ because they were declared in this module, is a no-op.)
+ flag_values: FlagValues, the FlagValues instance in which the flag will
+ be declared as a key flag. This should almost never need to be
+ overridden.
+
+ Raises:
+ ValueError: Raised if flag_name not defined as a Python flag.
+ """
+ if flag_name in _helpers.SPECIAL_FLAGS:
+ # Take care of the special flags, e.g., --flagfile, --undefok.
+ # These flags are defined in _SPECIAL_FLAGS, and are treated
+ # specially during flag parsing, taking precedence over the
+ # user-defined flags.
+ _internal_declare_key_flags([flag_name],
+ flag_values=_helpers.SPECIAL_FLAGS,
+ key_flag_values=flag_values)
+ return
+ try:
+ _internal_declare_key_flags([flag_name], flag_values=flag_values)
+ except KeyError:
+ raise ValueError(
+ 'Key flag --%s needs to be defined in Python' % flag_name)
+
+
+def adopt_module_key_flags(module, flag_values=_flagvalues.FLAGS):
+ """Declares that all flags key to a module are key to the current module.
+
+ Args:
+ module: module, the module object from which all key flags will be declared
+ as key flags to the current module.
+ flag_values: FlagValues, the FlagValues instance in which the flags will
+ be declared as key flags. This should almost never need to be
+ overridden.
+
+ Raises:
+ Error: Raised when given an argument that is a module name (a string),
+ instead of a module object.
+ """
+ if not isinstance(module, types.ModuleType):
+ raise _exceptions.Error('Expected a module object, not %r.' % (module,))
+ _internal_declare_key_flags(
+ [f.name for f in flag_values.get_key_flags_for_module(module.__name__)],
+ flag_values=flag_values)
+ # If module is this flag module, take _helpers.SPECIAL_FLAGS into account.
+ if module == _helpers.FLAGS_MODULE:
+ _internal_declare_key_flags(
+ # As we associate flags with get_calling_module_object_and_name(), the
+ # special flags defined in this module are incorrectly registered with
+ # a different module. So, we can't use get_key_flags_for_module.
+ # Instead, we take all flags from _helpers.SPECIAL_FLAGS (a private
+ # FlagValues, where no other module should register flags).
+ [_helpers.SPECIAL_FLAGS[name].name for name in _helpers.SPECIAL_FLAGS],
+ flag_values=_helpers.SPECIAL_FLAGS,
+ key_flag_values=flag_values)
+
+
+def disclaim_key_flags():
+ """Declares that the current module will not define any more key flags.
+
+ Normally, the module that calls the DEFINE_xxx functions claims the
+ flag to be its key flag. This is undesirable for modules that
+ define additional DEFINE_yyy functions with its own flag parsers and
+ serializers, since that module will accidentally claim flags defined
+ by DEFINE_yyy as its key flags. After calling this function, the
+ module disclaims flag definitions thereafter, so the key flags will
+ be correctly attributed to the caller of DEFINE_yyy.
+
+ After calling this function, the module will not be able to define
+ any more flags. This function will affect all FlagValues objects.
+ """
+ globals_for_caller = sys._getframe(1).f_globals # pylint: disable=protected-access
+ module, _ = _helpers.get_module_object_and_name(globals_for_caller)
+ _helpers.disclaim_module_ids.add(id(module))
+
+
+def DEFINE_string( # pylint: disable=invalid-name,redefined-builtin
+ name, default, help, flag_values=_flagvalues.FLAGS, **args):
+ """Registers a flag whose value can be any string."""
+ parser = _argument_parser.ArgumentParser()
+ serializer = _argument_parser.ArgumentSerializer()
+ DEFINE(parser, name, default, help, flag_values, serializer, **args)
+
+
+def DEFINE_boolean( # pylint: disable=invalid-name,redefined-builtin
+ name, default, help, flag_values=_flagvalues.FLAGS, module_name=None,
+ **args):
+ """Registers a boolean flag.
+
+ Such a boolean flag does not take an argument. If a user wants to
+ specify a false value explicitly, the long option beginning with 'no'
+ must be used: i.e. --noflag
+
+ This flag will have a value of None, True or False. None is possible
+ if default=None and the user does not specify the flag on the command
+ line.
+
+ Args:
+ name: str, the flag name.
+ default: bool|str|None, the default value of the flag.
+ help: str, the help message.
+ flag_values: FlagValues, the FlagValues instance with which the flag will
+ be registered. This should almost never need to be overridden.
+ module_name: str, the name of the Python module declaring this flag.
+ If not provided, it will be computed using the stack trace of this call.
+ **args: dict, the extra keyword args that are passed to Flag __init__.
+ """
+ DEFINE_flag(_flag.BooleanFlag(name, default, help, **args),
+ flag_values, module_name)
+
+
+def DEFINE_float( # pylint: disable=invalid-name,redefined-builtin
+ name, default, help, lower_bound=None, upper_bound=None,
+ flag_values=_flagvalues.FLAGS, **args): # pylint: disable=invalid-name
+ """Registers a flag whose value must be a float.
+
+ If lower_bound or upper_bound are set, then this flag must be
+ within the given range.
+
+ Args:
+ name: str, the flag name.
+ default: float|str|None, the default value of the flag.
+ help: str, the help message.
+ lower_bound: float, min value of the flag.
+ upper_bound: float, max value of the flag.
+ flag_values: FlagValues, the FlagValues instance with which the flag will
+ be registered. This should almost never need to be overridden.
+ **args: dict, the extra keyword args that are passed to DEFINE.
+ """
+ parser = _argument_parser.FloatParser(lower_bound, upper_bound)
+ serializer = _argument_parser.ArgumentSerializer()
+ DEFINE(parser, name, default, help, flag_values, serializer, **args)
+ _register_bounds_validator_if_needed(parser, name, flag_values=flag_values)
+
+
+def DEFINE_integer( # pylint: disable=invalid-name,redefined-builtin
+ name, default, help, lower_bound=None, upper_bound=None,
+ flag_values=_flagvalues.FLAGS, **args):
+ """Registers a flag whose value must be an integer.
+
+ If lower_bound, or upper_bound are set, then this flag must be
+ within the given range.
+
+ Args:
+ name: str, the flag name.
+ default: int|str|None, the default value of the flag.
+ help: str, the help message.
+ lower_bound: int, min value of the flag.
+ upper_bound: int, max value of the flag.
+ flag_values: FlagValues, the FlagValues instance with which the flag will
+ be registered. This should almost never need to be overridden.
+ **args: dict, the extra keyword args that are passed to DEFINE.
+ """
+ parser = _argument_parser.IntegerParser(lower_bound, upper_bound)
+ serializer = _argument_parser.ArgumentSerializer()
+ DEFINE(parser, name, default, help, flag_values, serializer, **args)
+ _register_bounds_validator_if_needed(parser, name, flag_values=flag_values)
+
+
+def DEFINE_enum( # pylint: disable=invalid-name,redefined-builtin
+ name, default, enum_values, help, flag_values=_flagvalues.FLAGS,
+ module_name=None, **args):
+ """Registers a flag whose value can be any string from enum_values.
+
+ Args:
+ name: str, the flag name.
+ default: str|None, the default value of the flag.
+ enum_values: [str], a non-empty list of strings with the possible values for
+ the flag.
+ help: str, the help message.
+ flag_values: FlagValues, the FlagValues instance with which the flag will
+ be registered. This should almost never need to be overridden.
+ module_name: str, the name of the Python module declaring this flag.
+ If not provided, it will be computed using the stack trace of this call.
+ **args: dict, the extra keyword args that are passed to Flag __init__.
+ """
+ DEFINE_flag(_flag.EnumFlag(name, default, help, enum_values, **args),
+ flag_values, module_name)
+
+
+def DEFINE_list( # pylint: disable=invalid-name,redefined-builtin
+ name, default, help, flag_values=_flagvalues.FLAGS, **args):
+ """Registers a flag whose value is a comma-separated list of strings.
+
+ The flag value is parsed with a CSV parser.
+
+ Args:
+ name: str, the flag name.
+ default: list|str|None, the default value of the flag.
+ help: str, the help message.
+ flag_values: FlagValues, the FlagValues instance with which the flag will
+ be registered. This should almost never need to be overridden.
+ **args: Dictionary with extra keyword args that are passed to the
+ Flag __init__.
+ """
+ parser = _argument_parser.ListParser()
+ serializer = _argument_parser.CsvListSerializer(',')
+ DEFINE(parser, name, default, help, flag_values, serializer, **args)
+
+
+def DEFINE_spaceseplist( # pylint: disable=invalid-name,redefined-builtin
+ name, default, help, comma_compat=False, flag_values=_flagvalues.FLAGS,
+ **args):
+ """Registers a flag whose value is a whitespace-separated list of strings.
+
+ Any whitespace can be used as a separator.
+
+ Args:
+ name: str, the flag name.
+ default: list|str|None, the default value of the flag.
+ help: str, the help message.
+ comma_compat: bool - Whether to support comma as an additional separator.
+ If false then only whitespace is supported. This is intended only for
+ backwards compatibility with flags that used to be comma-separated.
+ flag_values: FlagValues, the FlagValues instance with which the flag will
+ be registered. This should almost never need to be overridden.
+ **args: Dictionary with extra keyword args that are passed to the
+ Flag __init__.
+ """
+ parser = _argument_parser.WhitespaceSeparatedListParser(
+ comma_compat=comma_compat)
+ serializer = _argument_parser.ListSerializer(' ')
+ DEFINE(parser, name, default, help, flag_values, serializer, **args)
+
+
+def DEFINE_multi( # pylint: disable=invalid-name,redefined-builtin
+ parser, serializer, name, default, help, flag_values=_flagvalues.FLAGS,
+ module_name=None, **args):
+ """Registers a generic MultiFlag that parses its args with a given parser.
+
+ Auxiliary function. Normal users should NOT use it directly.
+
+ Developers who need to create their own 'Parser' classes for options
+ which can appear multiple times can call this module function to
+ register their flags.
+
+ Args:
+ parser: ArgumentParser, used to parse the flag arguments.
+ serializer: ArgumentSerializer, the flag serializer instance.
+ name: str, the flag name.
+ default: list|str|None, the default value of the flag.
+ help: str, the help message.
+ flag_values: FlagValues, the FlagValues instance with which the flag will
+ be registered. This should almost never need to be overridden.
+ module_name: A string, the name of the Python module declaring this flag.
+ If not provided, it will be computed using the stack trace of this call.
+ **args: Dictionary with extra keyword args that are passed to the
+ Flag __init__.
+ """
+ DEFINE_flag(_flag.MultiFlag(parser, serializer, name, default, help, **args),
+ flag_values, module_name)
+
+
+def DEFINE_multi_string( # pylint: disable=invalid-name,redefined-builtin
+ name, default, help, flag_values=_flagvalues.FLAGS, **args):
+ """Registers a flag whose value can be a list of any strings.
+
+ Use the flag on the command line multiple times to place multiple
+ string values into the list. The 'default' may be a single string
+ (which will be converted into a single-element list) or a list of
+ strings.
+
+ Args:
+ name: str, the flag name.
+ default: [str]|str|None, the default value of the flag.
+ help: str, the help message.
+ flag_values: FlagValues, the FlagValues instance with which the flag will
+ be registered. This should almost never need to be overridden.
+ **args: Dictionary with extra keyword args that are passed to the
+ Flag __init__.
+ """
+ parser = _argument_parser.ArgumentParser()
+ serializer = _argument_parser.ArgumentSerializer()
+ DEFINE_multi(parser, serializer, name, default, help, flag_values, **args)
+
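+
+# NOTE: Editor-added sketch of the repeated-flag behavior documented above,
+# not part of upstream absl-py. The flag name 'include_dir' is hypothetical
+# and this helper is never called by the library.
+def _example_define_multi_string():
+  from absl import flags  # Public package aliases for the DEFINE_* helpers.
+  flags.DEFINE_multi_string('include_dir', [],
+                            'Directory to search; repeat to add more.')
+  # --include_dir=/usr/include --include_dir=/opt/include yields
+  #   flags.FLAGS.include_dir == ['/usr/include', '/opt/include']
+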
+
+def DEFINE_multi_integer( # pylint: disable=invalid-name,redefined-builtin
+ name, default, help, lower_bound=None, upper_bound=None,
+ flag_values=_flagvalues.FLAGS, **args):
+ """Registers a flag whose value can be a list of arbitrary integers.
+
+ Use the flag on the command line multiple times to place multiple
+ integer values into the list. The 'default' may be a single integer
+ (which will be converted into a single-element list) or a list of
+ integers.
+
+ Args:
+ name: str, the flag name.
+ default: [int]|str|None, the default value of the flag.
+ help: str, the help message.
+ lower_bound: int, the minimum value each list element may take.
+ upper_bound: int, the maximum value each list element may take.
+ flag_values: FlagValues, the FlagValues instance with which the flag will
+ be registered. This should almost never need to be overridden.
+ **args: Dictionary with extra keyword args that are passed to the
+ Flag __init__.
+ """
+ parser = _argument_parser.IntegerParser(lower_bound, upper_bound)
+ serializer = _argument_parser.ArgumentSerializer()
+ DEFINE_multi(parser, serializer, name, default, help, flag_values, **args)
+
+
+def DEFINE_multi_float( # pylint: disable=invalid-name,redefined-builtin
+ name, default, help, lower_bound=None, upper_bound=None,
+ flag_values=_flagvalues.FLAGS, **args):
+ """Registers a flag whose value can be a list of arbitrary floats.
+
+ Use the flag on the command line multiple times to place multiple
+ float values into the list. The 'default' may be a single float
+ (which will be converted into a single-element list) or a list of
+ floats.
+
+ Args:
+ name: str, the flag name.
+ default: [float]|str|None, the default value of the flag.
+ help: str, the help message.
+ lower_bound: float, the minimum value each list element may take.
+ upper_bound: float, the maximum value each list element may take.
+ flag_values: FlagValues, the FlagValues instance with which the flag will
+ be registered. This should almost never need to be overridden.
+ **args: Dictionary with extra keyword args that are passed to the
+ Flag __init__.
+ """
+ parser = _argument_parser.FloatParser(lower_bound, upper_bound)
+ serializer = _argument_parser.ArgumentSerializer()
+ DEFINE_multi(parser, serializer, name, default, help, flag_values, **args)
+
+
+def DEFINE_multi_enum( # pylint: disable=invalid-name,redefined-builtin
+ name, default, enum_values, help, flag_values=_flagvalues.FLAGS,
+ case_sensitive=True, **args):
+ """Registers a flag whose value can be a list of strings from enum_values.
+
+ Use the flag on the command line multiple times to place multiple
+ enum values into the list. The 'default' may be a single string
+ (which will be converted into a single-element list) or a list of
+ strings.
+
+ Args:
+ name: str, the flag name.
+ default: [str]|str|None, the default value of the flag.
+ enum_values: [str], a non-empty list of strings with the possible values for
+ the flag.
+ help: str, the help message.
+ flag_values: FlagValues, the FlagValues instance with which the flag will
+ be registered. This should almost never need to be overridden.
+ case_sensitive: Whether or not the enum is to be case-sensitive.
+ **args: Dictionary with extra keyword args that are passed to the
+ Flag __init__.
+ """
+ parser = _argument_parser.EnumParser(enum_values, case_sensitive)
+ serializer = _argument_parser.ArgumentSerializer()
+ DEFINE_multi(parser, serializer, name, default, help, flag_values, **args)
+
+
+def DEFINE_alias(name, original_name, flag_values=_flagvalues.FLAGS, # pylint: disable=invalid-name
+ module_name=None):
+ """Defines an alias flag for an existing one.
+
+ Args:
+ name: str, the flag name.
+ original_name: str, the original flag name.
+ flag_values: FlagValues, the FlagValues instance with which the flag will
+ be registered. This should almost never need to be overridden.
+ module_name: A string, the name of the module that defines this flag.
+
+ Raises:
+   UnrecognizedFlagError: if the referenced flag doesn't exist.
+   DuplicateFlagError: if the alias name is already used by an existing flag.
+ """
+ if original_name not in flag_values:
+ raise _exceptions.UnrecognizedFlagError(original_name)
+ flag = flag_values[original_name]
+
+ class _Parser(_argument_parser.ArgumentParser):
+ """The parser for the alias flag calls the original flag parser."""
+
+ def parse(self, argument):
+ flag.parse(argument)
+ return flag.value
+
+ class _FlagAlias(_flag.Flag):
+ """Overrides Flag class so alias value is copy of original flag value."""
+
+ @property
+ def value(self):
+ return flag.value
+
+ @value.setter
+ def value(self, value):
+ flag.value = value
+
+ help_msg = 'Alias for --%s.' % flag.name
+ # If the alias name is already in use, DuplicateFlagError will be raised.
+ DEFINE_flag(_FlagAlias(_Parser(), flag.serializer, name, flag.default,
+ help_msg, boolean=flag.boolean),
+ flag_values, module_name)
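+
+
+# NOTE: Editor-added sketch of DEFINE_alias, not part of upstream absl-py. The
+# flag names below are hypothetical and this helper is never called by the
+# library.
+def _example_define_alias():
+  from absl import flags  # Public package aliases for the DEFINE_* helpers.
+  flags.DEFINE_integer('num_workers', 4, 'Number of worker processes.')
+  flags.DEFINE_alias('workers', 'num_workers')
+  # --workers=8 and --num_workers=8 now set the same underlying value, and
+  # flags.FLAGS.workers always mirrors flags.FLAGS.num_workers.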
diff --git a/third_party/py/abseil/absl/flags/_exceptions.py b/third_party/py/abseil/absl/flags/_exceptions.py
new file mode 100644
index 0000000000..254eb9b3ce
--- /dev/null
+++ b/third_party/py/abseil/absl/flags/_exceptions.py
@@ -0,0 +1,112 @@
+# Copyright 2017 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Exception classes in ABSL flags library.
+
+Do NOT import this module directly. Import the flags package and use the
+aliases defined at the package level instead.
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import sys
+
+from absl.flags import _helpers
+
+
+_helpers.disclaim_module_ids.add(id(sys.modules[__name__]))
+
+
+class Error(Exception):
+ """The base class for all flags errors."""
+
+
+class CantOpenFlagFileError(Error):
+ """Raised when flagfile fails to open.
+
+ E.g. the file doesn't exist, or has wrong permissions.
+ """
+
+
+class DuplicateFlagError(Error):
+ """Raised if there is a flag naming conflict."""
+
+ @classmethod
+ def from_flag(cls, flagname, flag_values, other_flag_values=None):
+ """Creates a DuplicateFlagError by providing flag name and values.
+
+ Args:
+ flagname: str, the name of the flag being redefined.
+ flag_values: FlagValues, the FlagValues instance containing the first
+ definition of flagname.
+ other_flag_values: FlagValues, if it is not None, it should be the
+ FlagValues object where the second definition of flagname occurs.
+ If it is None, we assume that we're being called when attempting
+ to create the flag a second time, and we use the module calling
+ this one as the source of the second definition.
+
+ Returns:
+ An instance of DuplicateFlagError.
+ """
+ first_module = flag_values.find_module_defining_flag(
+ flagname, default='<unknown>')
+ if other_flag_values is None:
+ second_module = _helpers.get_calling_module()
+ else:
+ second_module = other_flag_values.find_module_defining_flag(
+ flagname, default='<unknown>')
+ flag_summary = flag_values[flagname].help
+ msg = ("The flag '%s' is defined twice. First from %s, second from %s. "
+ "Description from first occurrence: %s") % (
+ flagname, first_module, second_module, flag_summary)
+ return cls(msg)
+
+
+class IllegalFlagValueError(Error):
+ """Raised when the flag command line argument is illegal."""
+
+
+class UnrecognizedFlagError(Error):
+ """Raised when a flag is unrecognized.
+
+ Attributes:
+ flagname: str, the name of the unrecognized flag.
+ flagvalue: The value of the flag, empty if the flag is not defined.
+ """
+
+ def __init__(self, flagname, flagvalue='', suggestions=None):
+ self.flagname = flagname
+ self.flagvalue = flagvalue
+ if suggestions:
+ # Space before the question mark is intentional to not include it in the
+ # selection when copy-pasting the suggestion from (some) terminals.
+ tip = '. Did you mean: %s ?' % ', '.join(suggestions)
+ else:
+ tip = ''
+ super(UnrecognizedFlagError, self).__init__(
+ 'Unknown command line flag \'%s\'%s' % (flagname, tip))
+
+
+class UnparsedFlagAccessError(Error):
+ """Raised when accessing the flag value from unparsed FlagValues."""
+
+
+class ValidationError(Error):
+ """Raised when flag validator constraint is not satisfied."""
+
+
+class FlagNameConflictsWithMethodError(Error):
+ """Raised when a flag name conflicts with FlagValues methods."""
diff --git a/third_party/py/abseil/absl/flags/_flag.py b/third_party/py/abseil/absl/flags/_flag.py
new file mode 100644
index 0000000000..80845efa9b
--- /dev/null
+++ b/third_party/py/abseil/absl/flags/_flag.py
@@ -0,0 +1,391 @@
+# Copyright 2017 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Contains Flag class - information about single command-line flag.
+
+Do NOT import this module directly. Import the flags package and use the
+aliases defined at the package level instead.
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import functools
+
+from absl.flags import _argument_parser
+from absl.flags import _exceptions
+from absl.flags import _helpers
+
+
+@functools.total_ordering
+class Flag(object):
+ """Information about a command-line flag.
+
+ 'Flag' objects define the following fields:
+ .name - the name for this flag;
+ .default - the default value for this flag;
+ .default_unparsed - the unparsed default value for this flag;
+ .default_as_str - default value as repr'd string, e.g., "'true'" (or None);
+ .value - the most recent parsed value of this flag; set by parse();
+ .help - a help string or None if no help is available;
+ .short_name - the single letter alias for this flag (or None);
+ .boolean - if 'true', this flag does not accept arguments;
+ .present - true if this flag was parsed from command line flags;
+ .parser - an ArgumentParser object;
+ .serializer - an ArgumentSerializer object;
+ .allow_override - the flag may be redefined without raising an error, and
+ the newly defined flag overrides the old one;
+ .allow_override_cpp - the flag may be redefined in C++ without raising an
+ error, value "transferred" to C++, and the flag is
+ replaced by the C++ flag after init;
+ .allow_hide_cpp - the flag may be redefined despite hiding a C++ flag with
+ the same name;
+ .using_default_value - the flag value has not been set by user;
+ .allow_overwrite - the flag may be parsed more than once without raising
+ an error, the last set value will be used;
+ .allow_using_method_names - whether this flag can be defined even if it has
+ a name that conflicts with a FlagValues method.
+
+ The only public method of a 'Flag' object is parse(), but it is
+ typically only called by a 'FlagValues' object. The parse() method is
+ a thin wrapper around the 'ArgumentParser' parse() method. The parsed
+ value is saved in .value, and the .present attribute is updated. If
+ this flag was already present, an Error is raised.
+
+ parse() is also called during __init__ to parse the default value and
+ initialize the .value attribute. This enables other Python modules to
+ safely use flags even if the __main__ module neglects to parse the
+ command line arguments. The .present attribute is cleared after
+ __init__ parsing. If the default value is set to None, then the
+ __init__ parsing step is skipped and the .value attribute is
+ initialized to None.
+
+ Note: The default value is also presented to the user in the help
+ string, so it is important that it be a legal value for this flag.
+ """
+
+ def __init__(self, parser, serializer, name, default, help_string,
+ short_name=None, boolean=False, allow_override=False,
+ allow_override_cpp=False, allow_hide_cpp=False,
+ allow_overwrite=True, allow_using_method_names=False):
+ self.name = name
+
+ if not help_string:
+ help_string = '(no help available)'
+
+ self.help = help_string
+ self.short_name = short_name
+ self.boolean = boolean
+ self.present = 0
+ self.parser = parser
+ self.serializer = serializer
+ self.allow_override = allow_override
+ self.allow_override_cpp = allow_override_cpp
+ self.allow_hide_cpp = allow_hide_cpp
+ self.allow_overwrite = allow_overwrite
+ self.allow_using_method_names = allow_using_method_names
+
+ self.using_default_value = True
+ self._value = None
+ self.validators = []
+ if allow_hide_cpp and allow_override_cpp:
+ raise _exceptions.Error(
+ "Can't have both allow_hide_cpp (means use Python flag) and "
+ 'allow_override_cpp (means use C++ flag after InitGoogle)')
+
+ self._set_default(default)
+
+ @property
+ def value(self):
+ return self._value
+
+ @value.setter
+ def value(self, value):
+ self._value = value
+
+ def __hash__(self):
+ return hash(id(self))
+
+ def __eq__(self, other):
+ return self is other
+
+ def __lt__(self, other):
+ if isinstance(other, Flag):
+ return id(self) < id(other)
+ return NotImplemented
+
+ def _get_parsed_value_as_string(self, value):
+ """Returns parsed flag value as string."""
+ if value is None:
+ return None
+ if self.serializer:
+ return repr(self.serializer.serialize(value))
+ if self.boolean:
+ if value:
+ return repr('true')
+ else:
+ return repr('false')
+ return repr(_helpers.str_or_unicode(value))
+
+ def parse(self, argument):
+ """Parses string and sets flag value.
+
+ Args:
+ argument: str or the correct flag value type, argument to be parsed.
+ """
+ if self.present and not self.allow_overwrite:
+ raise _exceptions.IllegalFlagValueError(
+ 'flag --%s=%s: already defined as %s' % (
+ self.name, argument, self.value))
+ self.value = self._parse(argument)
+ self.present += 1
+
+ def _parse(self, argument):
+ """Internal parse function.
+
+ It returns the parsed value and does not modify the flag's state.
+
+ Args:
+ argument: str or the correct flag value type, argument to be parsed.
+
+ Returns:
+ The parsed value.
+ """
+ try:
+ return self.parser.parse(argument)
+ except (TypeError, ValueError) as e: # Recast as IllegalFlagValueError.
+ raise _exceptions.IllegalFlagValueError(
+ 'flag --%s=%s: %s' % (self.name, argument, e))
+
+ def unparse(self):
+ self.value = self.default
+ self.using_default_value = True
+ self.present = 0
+
+ def serialize(self):
+ if self.value is None:
+ return ''
+ if self.boolean:
+ if self.value:
+ return '--%s' % self.name
+ else:
+ return '--no%s' % self.name
+ else:
+ if not self.serializer:
+ raise _exceptions.Error(
+ 'Serializer not present for flag %s' % self.name)
+ return '--%s=%s' % (self.name, self.serializer.serialize(self.value))
+
+ def _set_default(self, value):
+ """Changes the default value (and current value too) for this Flag."""
+ self.default_unparsed = value
+ if value is None:
+ self.default = None
+ else:
+ self.default = self._parse(value)
+ self.default_as_str = self._get_parsed_value_as_string(self.default)
+ if self.using_default_value:
+ self.value = self.default
+
+ def flag_type(self):
+ """Returns a str that describes the type of the flag.
+
+ NOTE: we use strings, and not the types.*Type constants because
+ our flags can have more exotic types, e.g., 'comma separated list
+ of strings', 'whitespace separated list of strings', etc.
+ """
+ return self.parser.flag_type()
+
+ def _create_xml_dom_element(self, doc, module_name, is_key=False):
+ """Returns an XML element that contains this flag's information.
+
+ This is information that is relevant to all flags (e.g., name,
+ meaning, etc.). If you define a flag that carries some other pieces of
+ info, override _extra_xml_dom_elements instead.
+
+ Please do NOT override this method.
+
+ Args:
+ doc: minidom.Document, the DOM document it should create nodes from.
+ module_name: str, the name of the module that defines this flag.
+ is_key: boolean, True iff this flag is key for main module.
+
+ Returns:
+ A minidom.Element instance.
+ """
+ element = doc.createElement('flag')
+ if is_key:
+ element.appendChild(_helpers.create_xml_dom_element(doc, 'key', 'yes'))
+ element.appendChild(_helpers.create_xml_dom_element(
+ doc, 'file', module_name))
+ # Adds flag features that are relevant for all flags.
+ element.appendChild(_helpers.create_xml_dom_element(doc, 'name', self.name))
+ if self.short_name:
+ element.appendChild(_helpers.create_xml_dom_element(
+ doc, 'short_name', self.short_name))
+ if self.help:
+ element.appendChild(_helpers.create_xml_dom_element(
+ doc, 'meaning', self.help))
+ # The default flag value can either be represented as a string like on the
+ # command line, or as a Python object. We serialize this value in the
+ # latter case in order to remain consistent.
+ if self.serializer and not isinstance(self.default, str):
+ if self.default is not None:
+ default_serialized = self.serializer.serialize(self.default)
+ else:
+ default_serialized = ''
+ else:
+ default_serialized = self.default
+ element.appendChild(_helpers.create_xml_dom_element(
+ doc, 'default', default_serialized))
+ element.appendChild(_helpers.create_xml_dom_element(
+ doc, 'current', self.value))
+ element.appendChild(_helpers.create_xml_dom_element(
+ doc, 'type', self.flag_type()))
+ # Adds extra flag features this flag may have.
+ for e in self._extra_xml_dom_elements(doc):
+ element.appendChild(e)
+ return element
+
+ def _extra_xml_dom_elements(self, doc):
+ """Returns extra info about this flag in XML.
+
+ "Extra" means "not already included by _create_xml_dom_element above."
+
+ Args:
+ doc: minidom.Document, the DOM document it should create nodes from.
+
+ Returns:
+ A list of minidom.Element.
+ """
+ # Usually, the parser knows the extra details about the flag, so
+ # we just forward the call to it.
+ return self.parser._custom_xml_dom_elements(doc) # pylint: disable=protected-access
+
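+
+# NOTE: Editor-added sketch of the parse/value/present behavior described in
+# the Flag docstring, not part of upstream absl-py. Real code should use the
+# flags.DEFINE_* helpers; this helper is never called by the library.
+def _example_flag_usage():
+  parser = _argument_parser.ArgumentParser()
+  serializer = _argument_parser.ArgumentSerializer()
+  f = Flag(parser, serializer, 'greeting', 'hello', 'A greeting string.')
+  # __init__ parsed the default, so f.value == 'hello' and f.present == 0.
+  f.parse('bonjour')
+  return f.value, f.present  # ('bonjour', 1)
+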
+
+class BooleanFlag(Flag):
+ """Basic boolean flag.
+
+ Boolean flags do not take any arguments, and their value is either
+ True (1) or False (0). The false value is specified on the command
+ line by prepending the word 'no' to either the long or the short flag
+ name.
+
+ For example, if a Boolean flag was created whose long name was
+ 'update' and whose short name was 'x', then this flag could be
+ explicitly unset through either --noupdate or --nox.
+ """
+
+ def __init__(self, name, default, help, short_name=None, **args): # pylint: disable=redefined-builtin
+ p = _argument_parser.BooleanParser()
+ super(BooleanFlag, self).__init__(
+ p, None, name, default, help, short_name, 1, **args)
+
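+
+# NOTE: Editor-added sketch of the --update / --noupdate forms described
+# above, not part of upstream absl-py. The flag names are hypothetical and
+# this helper is never called by the library.
+def _example_boolean_flag_usage():
+  f = BooleanFlag('update', False, 'Whether to update.', short_name='x')
+  f.parse('true')   # what FlagValues does for --update or -x
+  f.parse('false')  # what FlagValues does for --noupdate or --nox
+  return f.value    # False
+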
+
+class EnumFlag(Flag):
+ """Basic enum flag; its value can be any string from list of enum_values."""
+
+ def __init__(self, name, default, help, enum_values, # pylint: disable=redefined-builtin
+ short_name=None, case_sensitive=True, **args):
+ p = _argument_parser.EnumParser(enum_values, case_sensitive)
+ g = _argument_parser.ArgumentSerializer()
+ super(EnumFlag, self).__init__(
+ p, g, name, default, help, short_name, **args)
+ self.help = '<%s>: %s' % ('|'.join(enum_values), self.help)
+
+ def _extra_xml_dom_elements(self, doc):
+ elements = []
+ for enum_value in self.parser.enum_values:
+ elements.append(_helpers.create_xml_dom_element(
+ doc, 'enum_value', enum_value))
+ return elements
+
+
+class MultiFlag(Flag):
+ """A flag that can appear multiple times on the command line.
+
+ The value of such a flag is a list that contains the individual values
+ from all the appearances of that flag on the command-line.
+
+ See the __doc__ for Flag for most behavior of this class. Only
+ differences in behavior are described here:
+
+ * The default value may be either a single value or a list of values.
+ A single value is interpreted as the [value] singleton list.
+
+ * The value of the flag is always a list, even if the option was
+ only supplied once, and even if the default value is a single
+ value.
+ """
+
+ def __init__(self, *args, **kwargs):
+ super(MultiFlag, self).__init__(*args, **kwargs)
+ self.help += ';\n repeat this option to specify a list of values'
+
+ def parse(self, arguments):
+ """Parses one or more arguments with the installed parser.
+
+ Args:
+ arguments: a single argument or a list of arguments (typically a
+ list of default values); a single argument is converted
+ internally into a list containing one item.
+ """
+ new_values = self._parse(arguments)
+ if self.present:
+ self.value.extend(new_values)
+ else:
+ self.value = new_values
+ self.present += len(new_values)
+
+ def _parse(self, arguments):
+ if not isinstance(arguments, list):
+ # Default value may be a list of values. Most other arguments
+ # will not be, so convert them into a single-item list to make
+ # processing simpler below.
+ arguments = [arguments]
+
+ return [super(MultiFlag, self)._parse(item) for item in arguments]
+
+ def serialize(self):
+ """See base class."""
+ if not self.serializer:
+ raise _exceptions.Error(
+ 'Serializer not present for flag %s' % self.name)
+ if self.value is None:
+ return ''
+
+ s = ''
+
+ multi_value = self.value
+
+ for self.value in multi_value:
+ if s: s += ' '
+ s += Flag.serialize(self)
+
+ self.value = multi_value
+
+ return s
+
+ def flag_type(self):
+ """See base class."""
+ return 'multi ' + self.parser.flag_type()
+
+ def _extra_xml_dom_elements(self, doc):
+ elements = []
+ if hasattr(self.parser, 'enum_values'):
+ for enum_value in self.parser.enum_values:
+ elements.append(_helpers.create_xml_dom_element(
+ doc, 'enum_value', enum_value))
+ return elements
diff --git a/third_party/py/abseil/absl/flags/_flagvalues.py b/third_party/py/abseil/absl/flags/_flagvalues.py
new file mode 100644
index 0000000000..61a5bb4566
--- /dev/null
+++ b/third_party/py/abseil/absl/flags/_flagvalues.py
@@ -0,0 +1,1244 @@
+# Copyright 2017 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Defines the FlagValues class - registry of 'Flag' objects.
+
+Do NOT import this module directly. Import the flags package and use the
+aliases defined at the package level instead.
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import itertools
+import logging
+import os
+import sys
+import warnings
+from xml.dom import minidom
+
+from absl.flags import _exceptions
+from absl.flags import _flag
+from absl.flags import _helpers
+import six
+
+# Add flagvalues module to disclaimed module ids.
+_helpers.disclaim_module_ids.add(id(sys.modules[__name__]))
+
+
+class FlagValues(object):
+ """Registry of 'Flag' objects.
+
+ A 'FlagValues' can then scan command line arguments, passing flag
+ arguments through to the 'Flag' objects that it owns. It also
+ provides easy access to the flag values. Typically only one
+ 'FlagValues' object is needed by an application: flags.FLAGS
+
+ This class is heavily overloaded:
+
+ 'Flag' objects are registered via __setitem__:
+ FLAGS['longname'] = x # register a new flag
+
+ The .value attribute of the registered 'Flag' objects can be accessed
+ as attributes of this 'FlagValues' object, through __getattr__. Both
+ the long and short name of the original 'Flag' objects can be used to
+ access its value:
+ FLAGS.longname # parsed flag value
+ FLAGS.x # parsed flag value (short name)
+
+ Command line arguments are scanned and passed to the registered 'Flag'
+ objects through the __call__ method. Unparsed arguments, including
+ argv[0] (i.e. the program name), are returned.
+ argv = FLAGS(sys.argv) # scan command line arguments
+
+ The original registered Flag objects can be retrieved through the use
+ of the dictionary-like operator, __getitem__:
+ x = FLAGS['longname'] # access the registered Flag object
+
+ The str() operator of a 'FlagValues' object provides help for all of
+ the registered 'Flag' objects.
+ """
+
+ # A note on collections.abc.Mapping:
+ # FlagValues defines __getitem__, __iter__, and __len__. It makes perfect
+ # sense to let it be a collections.abc.Mapping class. However, we are not
+ # able to do so. The mixin methods, e.g. keys, values, are not uncommon flag
+ # names. Those flag values would not be accessible via the FLAGS.xxx form.
+
+ def __init__(self):
+ # Since everything in this class is so heavily overloaded, the only
+ # way of defining and using fields is to access __dict__ directly.
+
+ # Dictionary: flag name (string) -> Flag object.
+ self.__dict__['__flags'] = {}
+
+ # Set: name of hidden flag (string).
+ # Holds flags that should not be directly accessible from Python.
+ self.__dict__['__hiddenflags'] = set()
+
+ # Dictionary: module name (string) -> list of Flag objects that are defined
+ # by that module.
+ self.__dict__['__flags_by_module'] = {}
+ # Dictionary: module id (int) -> list of Flag objects that are defined by
+ # that module.
+ self.__dict__['__flags_by_module_id'] = {}
+ # Dictionary: module name (string) -> list of Flag objects that are
+ # key for that module.
+ self.__dict__['__key_flags_by_module'] = {}
+
+ # Bool: True if flags were parsed.
+ self.__dict__['__flags_parsed'] = False
+
+ # Bool: True if unparse_flags() was called.
+ self.__dict__['__unparse_flags_called'] = False
+
+ # None or Method(name, value) to call from __setattr__ for an unknown flag.
+ self.__dict__['__set_unknown'] = None
+
+ # A set of banned flag names. This is to prevent users from accidentally
+ # defining a flag that has the same name as a method on this class.
+ # Users can still allow defining the flag by passing
+ # allow_using_method_names=True in DEFINE_xxx functions.
+ self.__dict__['__banned_flag_names'] = frozenset(dir(FlagValues))
+
+ # Bool: Whether to use GNU style scanning.
+ self.__dict__['__use_gnu_getopt'] = True
+
+ # Bool: Whether use_gnu_getopt has been explicitly set by the user.
+ self.__dict__['__use_gnu_getopt_explicitly_set'] = False
+
+ # Function: Takes a flag name as parameter, returns a tuple
+ # (is_retired, type_is_bool).
+ self.__dict__['__is_retired_flag_func'] = None
+
+ def set_gnu_getopt(self, gnu_getopt=True):
+ """Sets whether or not to use GNU style scanning.
+
+ GNU style allows mixing of flag and non-flag arguments. See
+ http://docs.python.org/library/getopt.html#getopt.gnu_getopt
+
+ Args:
+ gnu_getopt: bool, whether or not to use GNU style scanning.
+ """
+ self.__dict__['__use_gnu_getopt'] = gnu_getopt
+ self.__dict__['__use_gnu_getopt_explicitly_set'] = True
+
+ def is_gnu_getopt(self):
+ return self.__dict__['__use_gnu_getopt']
+
+ def _flags(self):
+ return self.__dict__['__flags']
+
+ def flags_by_module_dict(self):
+ """Returns the dictionary of module_name -> list of defined flags.
+
+ Returns:
+ A dictionary. Its keys are module names (strings). Its values
+ are lists of Flag objects.
+ """
+ return self.__dict__['__flags_by_module']
+
+ def flags_by_module_id_dict(self):
+ """Returns the dictionary of module_id -> list of defined flags.
+
+ Returns:
+ A dictionary. Its keys are module IDs (ints). Its values
+ are lists of Flag objects.
+ """
+ return self.__dict__['__flags_by_module_id']
+
+ def key_flags_by_module_dict(self):
+ """Returns the dictionary of module_name -> list of key flags.
+
+ Returns:
+ A dictionary. Its keys are module names (strings). Its values
+ are lists of Flag objects.
+ """
+ return self.__dict__['__key_flags_by_module']
+
+ def register_flag_by_module(self, module_name, flag):
+ """Records the module that defines a specific flag.
+
+ We keep track of which flag is defined by which module so that we
+ can later sort the flags by module.
+
+ Args:
+ module_name: str, the name of a Python module.
+ flag: Flag, the Flag instance to register for the module.
+ """
+ flags_by_module = self.flags_by_module_dict()
+ flags_by_module.setdefault(module_name, []).append(flag)
+
+ def register_flag_by_module_id(self, module_id, flag):
+ """Records the module that defines a specific flag.
+
+ Args:
+ module_id: int, the ID of the Python module.
+ flag: Flag, the Flag instance to register for the module.
+ """
+ flags_by_module_id = self.flags_by_module_id_dict()
+ flags_by_module_id.setdefault(module_id, []).append(flag)
+
+ def register_key_flag_for_module(self, module_name, flag):
+ """Specifies that a flag is a key flag for a module.
+
+ Args:
+ module_name: str, the name of a Python module.
+ flag: Flag, the Flag instance that is key to the module.
+ """
+ key_flags_by_module = self.key_flags_by_module_dict()
+ # The list of key flags for the module named module_name.
+ key_flags = key_flags_by_module.setdefault(module_name, [])
+ # Add flag, but avoid duplicates.
+ if flag not in key_flags:
+ key_flags.append(flag)
+
+ def _flag_is_registered(self, flag_obj):
+ """Checks whether a Flag object is registered under long name or short name.
+
+ Args:
+ flag_obj: Flag, the Flag instance to check for.
+
+ Returns:
+ bool, True iff flag_obj is registered under long name or short name.
+ """
+ flag_dict = self._flags()
+ # Check whether flag_obj is registered under its long name.
+ name = flag_obj.name
+ if flag_dict.get(name, None) == flag_obj:
+ return True
+ # Check whether flag_obj is registered under its short name.
+ short_name = flag_obj.short_name
+ if (short_name is not None and
+ flag_dict.get(short_name, None) == flag_obj):
+ return True
+ return False
+
+ def _cleanup_unregistered_flag_from_module_dicts(self, flag_obj):
+ """Cleans up unregistered flags from all module -> [flags] dictionaries.
+
+ If flag_obj is registered under either its long name or short name, it
+ won't be removed from the dictionaries.
+
+ Args:
+ flag_obj: Flag, the Flag instance to clean up for.
+ """
+ if self._flag_is_registered(flag_obj):
+ return
+ for flags_by_module_dict in (self.flags_by_module_dict(),
+ self.flags_by_module_id_dict(),
+ self.key_flags_by_module_dict()):
+ for flags_in_module in six.itervalues(flags_by_module_dict):
+ # While (as opposed to if) takes care of multiple occurrences of a
+ # flag in the list for the same module.
+ while flag_obj in flags_in_module:
+ flags_in_module.remove(flag_obj)
+
+ def _get_flags_defined_by_module(self, module):
+ """Returns the list of flags defined by a module.
+
+ Args:
+ module: module|str, the module to get flags from.
+
+ Returns:
+ [Flag], a new list of Flag instances. Caller may update this list as
+ desired: none of those changes will affect the internals of this
+ FlagValues instance.
+ """
+ if not isinstance(module, str):
+ module = module.__name__
+
+ return list(self.flags_by_module_dict().get(module, []))
+
+ def get_key_flags_for_module(self, module):
+ """Returns the list of key flags for a module.
+
+ Args:
+ module: module|str, the module to get key flags from.
+
+ Returns:
+ [Flag], a new list of Flag instances. Caller may update this list as
+ desired: none of those changes will affect the internals of this
+ FlagValues instance.
+ """
+ if not isinstance(module, str):
+ module = module.__name__
+
+ # Any flag is a key flag for the module that defined it. NOTE:
+ # key_flags is a fresh list: we can update it without affecting the
+ # internals of this FlagValues object.
+ key_flags = self._get_flags_defined_by_module(module)
+
+ # Take into account flags explicitly declared as key for a module.
+ for flag in self.key_flags_by_module_dict().get(module, []):
+ if flag not in key_flags:
+ key_flags.append(flag)
+ return key_flags
+
+ def find_module_defining_flag(self, flagname, default=None):
+ """Return the name of the module defining this flag, or default.
+
+ Args:
+ flagname: str, name of the flag to lookup.
+ default: Value to return if flagname is not defined. Defaults
+ to None.
+
+ Returns:
+ The name of the module which registered the flag with this name.
+ If no such module exists (i.e. no flag with this name exists),
+ we return default.
+ """
+ registered_flag = self._flags().get(flagname)
+ if registered_flag is None:
+ return default
+ for module, flags in six.iteritems(self.flags_by_module_dict()):
+ for flag in flags:
+ # It must compare the flag with the one in _flags. This is because a
+ # flag might be overridden only for its long name (or short name),
+ # and only its short name (or long name) is considered registered.
+ if (flag.name == registered_flag.name and
+ flag.short_name == registered_flag.short_name):
+ return module
+ return default
+
+ def find_module_id_defining_flag(self, flagname, default=None):
+ """Return the ID of the module defining this flag, or default.
+
+ Args:
+ flagname: str, name of the flag to lookup.
+ default: Value to return if flagname is not defined. Defaults
+ to None.
+
+ Returns:
+ The ID of the module which registered the flag with this name.
+ If no such module exists (i.e. no flag with this name exists),
+ we return default.
+ """
+ registered_flag = self._flags().get(flagname)
+ if registered_flag is None:
+ return default
+ for module_id, flags in six.iteritems(self.flags_by_module_id_dict()):
+ for flag in flags:
+ # It must compare the flag with the one in _flags. This is because a
+ # flag might be overridden only for its long name (or short name),
+ # and only its short name (or long name) is considered registered.
+ if (flag.name == registered_flag.name and
+ flag.short_name == registered_flag.short_name):
+ return module_id
+ return default
+
+ def _register_unknown_flag_setter(self, setter):
+ """Allows setting default values for undefined flags.
+
+ Args:
+ setter: Method(name, value) to call to __setattr__ an unknown flag.
+ Must raise NameError or ValueError for invalid name/value.
+ """
+ self.__dict__['__set_unknown'] = setter
+
+ def _set_unknown_flag(self, name, value):
+ """Sets an unknown flag |name| to |value| via the registered setter.
+
+ Args:
+ name: str, name of the flag to set.
+ value: Value to set.
+
+ Returns:
+ Flag value on successful call.
+
+ Raises:
+ UnrecognizedFlagError
+ IllegalFlagValueError
+ """
+ setter = self.__dict__['__set_unknown']
+ if setter:
+ try:
+ setter(name, value)
+ return value
+ except (TypeError, ValueError): # Flag value is not valid.
+ raise _exceptions.IllegalFlagValueError(
+ '"{1}" is not valid for --{0}'.format(name, value))
+ except NameError: # Flag name is not valid.
+ pass
+ raise _exceptions.UnrecognizedFlagError(name, value)
+
+ def append_flag_values(self, flag_values):
+ """Appends flags registered in another FlagValues instance.
+
+ Args:
+ flag_values: FlagValues, the FlagValues instance from which to copy flags.
+ """
+ for flag_name, flag in six.iteritems(flag_values._flags()): # pylint: disable=protected-access
+ # Each flag with a short_name appears here twice (once under its
+ # normal name, and again with its short name). To prevent
+ # problems (DuplicateFlagError) with double flag registration, we
+ # perform a check to make sure that the entry we're looking at is
+ # for its normal name.
+ if flag_name == flag.name:
+ try:
+ self[flag_name] = flag
+ except _exceptions.DuplicateFlagError:
+ raise _exceptions.DuplicateFlagError.from_flag(
+ flag_name, self, other_flag_values=flag_values)
+
+ def remove_flag_values(self, flag_values):
+ """Remove flags that were previously appended from another FlagValues.
+
+ Args:
+ flag_values: FlagValues, the FlagValues instance containing flags to
+ remove.
+ """
+ for flag_name in flag_values:
+ self.__delattr__(flag_name)
+
+ def __setitem__(self, name, flag):
+ """Registers a new flag variable."""
+ fl = self._flags()
+ if not isinstance(flag, _flag.Flag):
+ raise _exceptions.IllegalFlagValueError(flag)
+ if str is bytes and isinstance(name, unicode):
+ # When using Python 2 with unicode_literals, allow it but encode it
+ # into the bytes type we require.
+ name = name.encode('utf-8')
+ if not isinstance(name, type('')):
+ raise _exceptions.Error('Flag name must be a string')
+ if not name:
+ raise _exceptions.Error('Flag name cannot be empty')
+ self._check_method_name_conflicts(name, flag)
+ if name in fl and not flag.allow_override and not fl[name].allow_override:
+ module, module_name = _helpers.get_calling_module_object_and_name()
+ if (self.find_module_defining_flag(name) == module_name and
+ id(module) != self.find_module_id_defining_flag(name)):
+ # If the flag has already been defined by a module with the same name,
+ # but a different ID, we can stop here because it indicates that the
+ # module is simply being imported a subsequent time.
+ return
+ raise _exceptions.DuplicateFlagError.from_flag(name, self)
+ short_name = flag.short_name
+ # If a new flag overrides an old one, we need to cleanup the old flag's
+ # modules if it's not registered.
+ flags_to_cleanup = set()
+ if short_name is not None:
+ if (short_name in fl and not flag.allow_override and
+ not fl[short_name].allow_override):
+ raise _exceptions.DuplicateFlagError.from_flag(short_name, self)
+ if short_name in fl and fl[short_name] != flag:
+ flags_to_cleanup.add(fl[short_name])
+ fl[short_name] = flag
+ if (name not in fl # new flag
+ or fl[name].using_default_value
+ or not flag.using_default_value):
+ if name in fl and fl[name] != flag:
+ flags_to_cleanup.add(fl[name])
+ fl[name] = flag
+ for f in flags_to_cleanup:
+ self._cleanup_unregistered_flag_from_module_dicts(f)
+
+ def __dir__(self):
+ """Returns list of names of all defined flags.
+
+ Useful for TAB-completion in ipython.
+
+ Returns:
+ [str], a list of names of all defined flags.
+ """
+ return sorted(self.__dict__['__flags'])
+
+ def __getitem__(self, name):
+ """Returns the Flag object for the flag --name."""
+ return self._flags()[name]
+
+ def _hide_flag(self, name):
+ """Marks the flag --name as hidden."""
+ self.__dict__['__hiddenflags'].add(name)
+
+ # This exists for legacy reasons, and will be removed in the future.
+ def _is_unparsed_flag_access_allowed(self, name):
+ """Determine whether to allow unparsed flag access or not."""
+ del name
+ return False
+
+ def __getattr__(self, name):
+ """Retrieves the 'value' attribute of the flag --name."""
+ fl = self._flags()
+ if name not in fl:
+ raise AttributeError(name)
+ if name in self.__dict__['__hiddenflags']:
+ raise AttributeError(name)
+
+ if self.__dict__['__flags_parsed'] or fl[name].present:
+ return fl[name].value
+ else:
+ error_message = (
+ 'Trying to access flag --%s before flags were parsed.' % name)
+ if self._is_unparsed_flag_access_allowed(name):
+ # Print warning to stderr. Messages in logs are often ignored/unnoticed.
+ warnings.warn(
+ error_message + ' This will raise an exception in the future.',
+ RuntimeWarning,
+ stacklevel=2)
+ # Force logging.exception() to behave realistically, but don't propagate
+ # exception up. Allow flag value to be returned (for now).
+ try:
+ raise _exceptions.UnparsedFlagAccessError(error_message)
+ except _exceptions.UnparsedFlagAccessError:
+ logging.exception(error_message)
+ return fl[name].value
+ else:
+ if six.PY2:
+ # In Python 2, hasattr returns False if getattr raises any exception.
+ # That means if someone calls hasattr(FLAGS, 'flag'), it returns False
+ # instead of raises UnparsedFlagAccessError even if --flag is already
+ # defined. To make the error more visible, the best we can do is to
+ # log an error message before raising the exception.
+ # Don't log a full stacktrace here since that makes other callers
+ # get too much noise.
+ logging.error(error_message)
+ raise _exceptions.UnparsedFlagAccessError(error_message)
+
+ def __setattr__(self, name, value):
+ """Sets the 'value' attribute of the flag --name."""
+ fl = self._flags()
+ if name in self.__dict__['__hiddenflags']:
+ raise AttributeError(name)
+ if name not in fl:
+ return self._set_unknown_flag(name, value)
+ fl[name].value = value
+ self._assert_validators(fl[name].validators)
+ fl[name].using_default_value = False
+ return value
+
+ def _assert_all_validators(self):
+ all_validators = set()
+ for flag in six.itervalues(self._flags()):
+ for validator in flag.validators:
+ all_validators.add(validator)
+ self._assert_validators(all_validators)
+
+ def _assert_validators(self, validators):
+ """Verifies that all validators in the list are satisfied.
+
+ It asserts validators in the order they were created.
+
+ Args:
+ validators: Iterable(validators.Validator), validators to be
+ verified.
+ Raises:
+ AttributeError: Raised if validators work with a non-existing flag.
+ IllegalFlagValueError: Raised if validation fails for at least one
+ validator.
+ """
+ for validator in sorted(
+ validators, key=lambda validator: validator.insertion_index):
+ try:
+ validator.verify(self)
+ except _exceptions.ValidationError as e:
+ message = validator.print_flags_with_values(self)
+ raise _exceptions.IllegalFlagValueError('%s: %s' % (message, str(e)))
+
+ def __delattr__(self, flag_name):
+ """Deletes a previously-defined flag from a flag object.
+
+ This method makes sure we can delete a flag by using
+
+ del FLAGS.<flag_name>
+
+ E.g.,
+
+ flags.DEFINE_integer('foo', 1, 'Integer flag.')
+ del flags.FLAGS.foo
+
+ If a flag is also registered under its other name (long name or short
+ name), the other name won't be deleted.
+
+ Args:
+ flag_name: str, the name of the flag to be deleted.
+
+ Raises:
+ AttributeError: Raised when there is no registered flag named flag_name.
+ """
+ fl = self._flags()
+ if flag_name not in fl:
+ raise AttributeError(flag_name)
+
+ flag_obj = fl[flag_name]
+ del fl[flag_name]
+
+ self._cleanup_unregistered_flag_from_module_dicts(flag_obj)
+
+ def set_default(self, name, value):
+ """Changes the default value of the named flag object.
+
+ The flag's current value is also updated if the flag is currently using
+ the default value, i.e. not specified in the command line, and not set
+ by FLAGS.name = value.
+
+ Args:
+ name: str, the name of the flag to modify.
+ value: The new default value.
+
+ Raises:
+ UnrecognizedFlagError: Raised when there is no registered flag named name.
+ IllegalFlagValueError: Raised when value is not valid.
+ """
+ fl = self._flags()
+ if name not in fl:
+ self._set_unknown_flag(name, value)
+ return
+ fl[name]._set_default(value) # pylint: disable=protected-access
+ self._assert_validators(fl[name].validators)
+
+ def __contains__(self, name):
+ """Returns True if name is the name of a registered flag."""
+ return name in self._flags()
+
+ def __len__(self):
+ return len(self.__dict__['__flags'])
+
+ def __iter__(self):
+ return iter(self._flags())
+
+ def __call__(self, argv):
+ """Parses flags from argv; stores parsed flags into this FlagValues object.
+
+ All unparsed arguments are returned.
+
+ Args:
+ argv: a tuple/list of strings.
+
+ Returns:
+ The list of arguments not parsed as options, including argv[0].
+
+ Raises:
+ Error: Raised on any parsing error.
+ TypeError: Raised on passing wrong type of arguments.
+ ValueError: Raised on flag value parsing error.
+ """
+ if _helpers.is_bytes_or_string(argv):
+ raise TypeError(
+ 'argv should be a tuple/list of strings, not bytes or string.')
+ if not argv:
+ raise ValueError(
+ 'argv cannot be an empty list, and must contain the program name as '
+ 'the first element.')
+
+ # This pre-parses the argv list for --flagfile=<> options.
+ program_name = argv[0]
+ args = self.read_flags_from_files(argv[1:], force_gnu=False)
+
+ # Parse the arguments.
+ unknown_flags, unparsed_args, undefok = self._parse_args(args)
+
+ # Handle unknown flags by raising UnrecognizedFlagError.
+ # Note some users depend on us raising this particular error.
+ for name, value in unknown_flags:
+ if name in undefok:
+ continue
+
+ suggestions = _helpers.get_flag_suggestions(name, list(self))
+ raise _exceptions.UnrecognizedFlagError(
+ name, value, suggestions=suggestions)
+
+ self.mark_as_parsed()
+ self._assert_all_validators()
+ return [program_name] + unparsed_args
+
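+  # NOTE: Editor-added illustration of the __call__ contract above, kept as a
+  # comment so the class itself is unchanged; the flag and program names are
+  # hypothetical:
+  #
+  #   from absl import flags
+  #   flags.DEFINE_string('echo_text', 'hello', 'Text to echo.')
+  #   remaining = flags.FLAGS(['prog', '--echo_text=hi', 'extra_arg'])
+  #   # remaining == ['prog', 'extra_arg']; flags.FLAGS.echo_text == 'hi'
+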
+ def _set_is_retired_flag_func(self, is_retired_flag_func):
+ """Sets a function for checking retired flags.
+
+ Do not use it. This is a private absl API used to check retired flags
+ registered by the absl C++ flags library.
+
+ Args:
+ is_retired_flag_func: Callable(str) -> (bool, bool), a function takes flag
+ name as parameter, returns a tuple (is_retired, type_is_bool).
+ """
+ self.__dict__['__is_retired_flag_func'] = is_retired_flag_func
+
+ def _parse_args(self, args):
+ """Helper function to do the main argument parsing.
+
+ This function goes through args and does the bulk of the flag parsing.
+ It will find the corresponding flag in our flag dictionary, and call its
+ .parse() method on the flag value.
+
+ Args:
+ args: [str], a list of strings with the arguments to parse.
+
+ Returns:
+ A tuple with the following:
+ unknown_flags: List of (flag name, arg) for flags we don't know about.
+ unparsed_args: List of arguments we did not parse.
+ undefok: Set of flags that were given via --undefok.
+
+ Raises:
+ Error: Raised on any parsing error.
+ ValueError: Raised on flag value parsing error.
+ """
+ unknown_flags, unparsed_args, undefok = [], [], set()
+ retired_flag_func = self.__dict__['__is_retired_flag_func']
+
+ flag_dict = self._flags()
+ args = iter(args)
+ for arg in args:
+ value = None
+
+ def get_value():
+ # pylint: disable=cell-var-from-loop
+ try:
+ return next(args) if value is None else value
+ except StopIteration:
+ raise _exceptions.Error('Missing value for flag ' + arg) # pylint: disable=undefined-loop-variable
+
+ if not arg.startswith('-'):
+ # A non-argument: default is break, GNU is skip.
+ unparsed_args.append(arg)
+ if self.is_gnu_getopt():
+ continue
+ else:
+ break
+
+ if arg == '--':
+ break
+
+ # At this point, arg must start with '-'.
+ if arg.startswith('--'):
+ arg_without_dashes = arg[2:]
+ else:
+ arg_without_dashes = arg[1:]
+
+ if '=' in arg_without_dashes:
+ name, value = arg_without_dashes.split('=', 1)
+ else:
+ name, value = arg_without_dashes, None
+
+ if not name:
+ # The argument is all dashes (including one dash).
+ unparsed_args.append(arg)
+ if self.is_gnu_getopt():
+ continue
+ else:
+ break
+
+ # --undefok is a special case.
+ if name == 'undefok':
+ value = get_value()
+ undefok.update(v.strip() for v in value.split(','))
+ undefok.update('no' + v.strip() for v in value.split(','))
+ continue
+
+ flag = flag_dict.get(name)
+ if flag:
+ if flag.boolean and value is None:
+ value = 'true'
+ else:
+ value = get_value()
+ elif name.startswith('no') and len(name) > 2:
+ # Boolean flags can take the form of --noflag, with no value.
+ noflag = flag_dict.get(name[2:])
+ if noflag and noflag.boolean:
+ if value is not None:
+ raise ValueError(arg + ' does not take an argument')
+ flag = noflag
+ value = 'false'
+
+ if retired_flag_func and not flag:
+ is_retired, is_bool = retired_flag_func(name)
+
+ # If we didn't recognize that flag, but it starts with
+ # "no" then maybe it was a boolean flag specified in the
+ # --nofoo form.
+ if not is_retired and name.startswith('no'):
+ is_retired, is_bool = retired_flag_func(name[2:])
+ is_retired = is_retired and is_bool
+
+ if is_retired:
+ if not is_bool and value is None:
+ # This happens when a non-bool retired flag is specified
+ # in format of "--flag value".
+ get_value()
+ logging.error('Flag "%s" is retired and should no longer '
+ 'be specified. See go/totw/90.', name)
+ continue
+
+ if flag:
+ flag.parse(value)
+ flag.using_default_value = False
+ else:
+ unknown_flags.append((name, arg))
+
+ unparsed_args.extend(list(args))
+ return unknown_flags, unparsed_args, undefok
+
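+  # NOTE: Editor-added examples of the argument forms handled by _parse_args
+  # above, kept as comments so the class itself is unchanged; the flag names
+  # are hypothetical:
+  #
+  #   --learning_rate=0.1    single-token form, value follows '='
+  #   --learning_rate 0.1    two-token form, value taken from the next arg
+  #   --debug / --nodebug    boolean flags need no value
+  #   --undefok=old_flag     silently drop --old_flag / --noold_flag when
+  #                          they are not defined
+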
+ def is_parsed(self):
+ """Returns whether flags were parsed."""
+ return self.__dict__['__flags_parsed']
+
+ def mark_as_parsed(self):
+ """Explicitly marks flags as parsed.
+
+ Use this when the caller knows that this FlagValues has been parsed as if
+ a __call__() invocation has happened. This is only a public method for
+ use by things like appcommands which do additional command-like parsing.
+ """
+ self.__dict__['__flags_parsed'] = True
+
+ def unparse_flags(self):
+ """Unparses all flags to the point before any FLAGS(argv) was called."""
+ for f in self._flags().values():
+ f.unparse()
+ # We log this message before marking flags as unparsed to avoid a
+ # problem when the logging library causes flags access.
+ logging.info('unparse_flags() called; flags access will now raise errors.')
+ self.__dict__['__flags_parsed'] = False
+ self.__dict__['__unparse_flags_called'] = True
+
+ def flag_values_dict(self):
+ """Returns a dictionary that maps flag names to flag values."""
+ return {name: flag.value for name, flag in six.iteritems(self._flags())}
+
+ def __str__(self):
+ """Returns a help string for all known flags."""
+ return self.get_help()
+
+ def get_help(self, prefix='', include_special_flags=True):
+ """Returns a help string for all known flags.
+
+ Args:
+ prefix: str, per-line output prefix.
+ include_special_flags: bool, whether to include description of
+ _SPECIAL_FLAGS, i.e. --flagfile and --undefok.
+
+ Returns:
+ str, formatted help message.
+ """
+ helplist = []
+
+ flags_by_module = self.flags_by_module_dict()
+ if flags_by_module:
+ modules = sorted(flags_by_module)
+
+ # Print the help for the main module first, if possible.
+ main_module = sys.argv[0]
+ if main_module in modules:
+ modules.remove(main_module)
+ modules = [main_module] + modules
+
+ for module in modules:
+ self._render_our_module_flags(module, helplist, prefix)
+ if include_special_flags:
+ self._render_module_flags(
+ 'absl.flags',
+ _helpers.SPECIAL_FLAGS._flags().values(), # pylint: disable=protected-access
+ helplist,
+ prefix)
+ else:
+ # Just print one long list of flags.
+ values = six.itervalues(self._flags())
+ if include_special_flags:
+ values = itertools.chain(
+ values, six.itervalues(_helpers.SPECIAL_FLAGS._flags())) # pylint: disable=protected-access
+ self._render_flag_list(values, helplist, prefix)
+
+ return '\n'.join(helplist)
+
+ def _render_module_flags(self, module, flags, output_lines, prefix=''):
+ """Returns a help string for a given module."""
+ if not isinstance(module, str):
+ module = module.__name__
+ output_lines.append('\n%s%s:' % (prefix, module))
+ self._render_flag_list(flags, output_lines, prefix + ' ')
+
+ def _render_our_module_flags(self, module, output_lines, prefix=''):
+ """Returns a help string for a given module."""
+ flags = self._get_flags_defined_by_module(module)
+ if flags:
+ self._render_module_flags(module, flags, output_lines, prefix)
+
+ def _render_our_module_key_flags(self, module, output_lines, prefix=''):
+ """Returns a help string for the key flags of a given module.
+
+ Args:
+ module: module|str, the module to render key flags for.
+ output_lines: [str], a list of strings. The generated help message
+ lines will be appended to this list.
+ prefix: str, a string that is prepended to each generated help line.
+ """
+ key_flags = self.get_key_flags_for_module(module)
+ if key_flags:
+ self._render_module_flags(module, key_flags, output_lines, prefix)
+
+ def module_help(self, module):
+ """Describes the key flags of a module.
+
+ Args:
+ module: module|str, the module to describe the key flags for.
+
+ Returns:
+ str, describing the key flags of a module.
+ """
+ helplist = []
+ self._render_our_module_key_flags(module, helplist)
+ return '\n'.join(helplist)
+
+ def main_module_help(self):
+ """Describes the key flags of the main module.
+
+ Returns:
+ str, describing the key flags of the main module.
+ """
+ return self.module_help(sys.argv[0])
+
+ def _render_flag_list(self, flaglist, output_lines, prefix=' '):
+ fl = self._flags()
+ special_fl = _helpers.SPECIAL_FLAGS._flags() # pylint: disable=protected-access
+ flaglist = [(flag.name, flag) for flag in flaglist]
+ flaglist.sort()
+ flagset = {}
+ for (name, flag) in flaglist:
+ # It's possible this flag got deleted or overridden since being
+ # registered in the per-module flaglist. Check now against the
+ # canonical source of current flag information, the _flags.
+ if fl.get(name, None) != flag and special_fl.get(name, None) != flag:
+ # a different flag is using this name now
+ continue
+ # only print help once
+ if flag in flagset: continue
+ flagset[flag] = 1
+ flaghelp = ''
+ if flag.short_name: flaghelp += '-%s,' % flag.short_name
+ if flag.boolean:
+ flaghelp += '--[no]%s:' % flag.name
+ else:
+ flaghelp += '--%s:' % flag.name
+ flaghelp += ' '
+ if flag.help:
+ flaghelp += flag.help
+ flaghelp = _helpers.text_wrap(
+ flaghelp, indent=prefix+' ', firstline_indent=prefix)
+ if flag.default_as_str:
+ flaghelp += '\n'
+ flaghelp += _helpers.text_wrap(
+ '(default: %s)' % flag.default_as_str, indent=prefix+' ')
+ if flag.parser.syntactic_help:
+ flaghelp += '\n'
+ flaghelp += _helpers.text_wrap(
+ '(%s)' % flag.parser.syntactic_help, indent=prefix+' ')
+ output_lines.append(flaghelp)
+
+ def get_flag_value(self, name, default): # pylint: disable=invalid-name
+ """Returns the value of a flag (if not None) or a default value.
+
+ Args:
+ name: str, the name of a flag.
+ default: Default value to use if the flag value is None.
+
+ Returns:
+ Requested flag value or default.
+ """
+
+ value = self.__getattr__(name)
+ if value is not None: # Can't do if not value, b/c value might be '0' or ""
+ return value
+ else:
+ return default
+
+ def _is_flag_file_directive(self, flag_string):
+ """Checks whether flag_string contains a --flagfile=<foo> directive."""
+ if isinstance(flag_string, type('')):
+ if flag_string.startswith('--flagfile='):
+ return 1
+ elif flag_string == '--flagfile':
+ return 1
+ elif flag_string.startswith('-flagfile='):
+ return 1
+ elif flag_string == '-flagfile':
+ return 1
+ else:
+ return 0
+ return 0
+
+ def _extract_filename(self, flagfile_str):
+ """Returns filename from a flagfile_str of form -[-]flagfile=filename.
+
+ The cases of --flagfile foo and -flagfile foo shouldn't be hitting
+ this function, as they are dealt with in the level above this
+ function.
+
+ Args:
+ flagfile_str: str, the flagfile string.
+
+ Returns:
+ str, the filename from a flagfile_str of form -[-]flagfile=filename.
+
+ Raises:
+ Error: Raised when illegal --flagfile is provided.
+ """
+ if flagfile_str.startswith('--flagfile='):
+ return os.path.expanduser((flagfile_str[(len('--flagfile=')):]).strip())
+ elif flagfile_str.startswith('-flagfile='):
+ return os.path.expanduser((flagfile_str[(len('-flagfile=')):]).strip())
+ else:
+ raise _exceptions.Error(
+ 'Hit illegal --flagfile type: %s' % flagfile_str)
+
+ def _get_flag_file_lines(self, filename, parsed_file_stack=None):
+ """Returns the useful lines (i.e. not comments or blanks) from a flag file.
+
+ Args:
+ filename: str, the name of the flag file.
+ parsed_file_stack: [str], a list of the names of the files that we have
+ recursively encountered at the current depth. MUTATED BY THIS FUNCTION
+ (but the original value is preserved upon successfully returning from
+ function call).
+
+ Returns:
+ List of strings. See the note below.
+
+ NOTE(springer): This function checks for a nested --flagfile=<foo>
+ tag and handles the lower file recursively. It returns a list of
+ all the lines that _could_ contain command flags. This is
+ EVERYTHING except whitespace lines and comments (lines starting
+ with '#' or '//').
+ """
+ if parsed_file_stack is None:
+ parsed_file_stack = []
+ # We do a little safety check for reparsing a file we've already encountered
+ # at a previous depth.
+ if filename in parsed_file_stack:
+ sys.stderr.write('Warning: Hit circular flagfile dependency. Ignoring'
+ ' flagfile: %s\n' % (filename,))
+ return []
+ else:
+ parsed_file_stack.append(filename)
+
+ line_list = [] # All lines from the flagfile.
+ flag_line_list = [] # Subset of lines w/o comments, blanks, flagfile= tags.
+ try:
+ file_obj = open(filename, 'r')
+ except IOError as e_msg:
+ raise _exceptions.CantOpenFlagFileError(
+ 'ERROR:: Unable to open flagfile: %s' % e_msg)
+
+ with file_obj:
+ line_list = file_obj.readlines()
+
+ # This is where we check each line in the file we just read.
+ for line in line_list:
+ if line.isspace():
+ pass
+ # Checks for comment (a line that starts with '#').
+ elif line.startswith('#') or line.startswith('//'):
+ pass
+ # Checks for a nested "--flagfile=<bar>" flag in the current file.
+ # If we find one, recursively parse down into that file.
+ elif self._is_flag_file_directive(line):
+ sub_filename = self._extract_filename(line)
+ included_flags = self._get_flag_file_lines(
+ sub_filename, parsed_file_stack=parsed_file_stack)
+ flag_line_list.extend(included_flags)
+ else:
+ # Any line that's not a comment or a nested flagfile is a flag
+ # line; strip it and append it. Lines keep their file order, and
+ # since later flags win during parsing, later lines take priority.
+ flag_line_list.append(line.strip())
+
+ parsed_file_stack.pop()
+ return flag_line_list
+
+ def read_flags_from_files(self, argv, force_gnu=True):
+ """Processes command line args, but also allow args to be read from file.
+
+ Args:
+ argv: [str], a list of strings, usually sys.argv[1:], which may contain
+ one or more flagfile directives of the form --flagfile="./filename".
+ Note that the name of the program (sys.argv[0]) should be omitted.
+ force_gnu: bool, if False, --flagfile parsing obeys normal flag semantics.
+ If True, --flagfile parsing instead follows gnu_getopt semantics.
+ *** WARNING *** force_gnu=False may become the future default!
+
+ Returns:
+ A new list which has the original list combined with what we read
+ from any flagfile(s).
+
+ Raises:
+ IllegalFlagValueError: Raised when --flagfile is provided with no
+ argument.
+
+ This function is called by FLAGS(argv).
+ It scans the input list for a flag that looks like:
+ --flagfile=<somefile>. Then it opens <somefile>, reads all valid key
+ and value pairs and inserts them into the input list in exactly the
+ place where the --flagfile arg is found.
+
+ Note that your application's flags are still defined the usual way
+ using absl.flags DEFINE_flag() type functions.
+
+ Notes (assuming we're getting a commandline of some sort as our input):
+ --> For duplicate flags, the last one we hit should "win".
+ --> Since flags that appear later win, a flagfile's settings can be "weak"
+ if the --flagfile comes at the beginning of the argument sequence,
+ and it can be "strong" if the --flagfile comes at the end.
+ --> A further "--flagfile=<otherfile.cfg>" CAN be nested in a flagfile.
+ It will be expanded in exactly the spot where it is found.
+ --> In a flagfile, a line beginning with # or // is a comment.
+ --> Entirely blank lines _should_ be ignored.
+ """
+ rest_of_args = argv
+ new_argv = []
+ while rest_of_args:
+ current_arg = rest_of_args[0]
+ rest_of_args = rest_of_args[1:]
+ if self._is_flag_file_directive(current_arg):
+ # This handles the case of -(-)flagfile foo. In this case the
+ # next arg really is part of this one.
+ if current_arg == '--flagfile' or current_arg == '-flagfile':
+ if not rest_of_args:
+ raise _exceptions.IllegalFlagValueError(
+ '--flagfile with no argument')
+ flag_filename = os.path.expanduser(rest_of_args[0])
+ rest_of_args = rest_of_args[1:]
+ else:
+ # This handles the case of (-)-flagfile=foo.
+ flag_filename = self._extract_filename(current_arg)
+ new_argv.extend(self._get_flag_file_lines(flag_filename))
+ else:
+ new_argv.append(current_arg)
+ # Stop parsing after '--', like getopt and gnu_getopt.
+ if current_arg == '--':
+ break
+ # Stop parsing after a non-flag, like getopt.
+ if not current_arg.startswith('-'):
+ if not force_gnu and not self.__dict__['__use_gnu_getopt']:
+ break
+ else:
+ if ('=' not in current_arg and
+ rest_of_args and not rest_of_args[0].startswith('-')):
+ # If this is an occurrence of a legitimate --x y, skip the value
+ # so that it won't be mistaken for a standalone arg.
+ fl = self._flags()
+ name = current_arg.lstrip('-')
+ if name in fl and not fl[name].boolean:
+ current_arg = rest_of_args[0]
+ rest_of_args = rest_of_args[1:]
+ new_argv.append(current_arg)
+
+ if rest_of_args:
+ new_argv.extend(rest_of_args)
+
+ return new_argv
+
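+ # Usage sketch (illustrative; the file path and flag names are
+ # hypothetical): given a flagfile '/tmp/example.flags' containing
+ # --name=alice
+ # --retries=3
+ # a call like
+ # FLAGS.read_flags_from_files(['--retries=1', '--flagfile=/tmp/example.flags'])
+ # would, assuming both flags are defined, return
+ # ['--retries=1', '--name=alice', '--retries=3']
+ # so the flagfile's later --retries=3 wins once the list is parsed.
+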
+ def flags_into_string(self):
+ """Returns a string with the flags assignments from this FlagValues object.
+
+ This function ignores flags whose value is None. Each flag
+ assignment is separated by a newline.
+
+ NOTE: MUST mirror the behavior of the C++ CommandlineFlagsIntoString
+ from https://github.com/gflags/gflags.
+
+ Returns:
+ str, the string with the flags assignments from this FlagValues object.
+ """
+ s = ''
+ for flag in self._flags().values():
+ if flag.value is not None:
+ s += flag.serialize() + '\n'
+ return s
+
+ def append_flags_into_file(self, filename):
+ """Appends all flags assignments from this FlagInfo object to a file.
+
+ Output will be in the format of a flagfile.
+
+ NOTE: MUST mirror the behavior of the C++ AppendFlagsIntoFile
+ from https://github.com/gflags/gflags.
+
+ Args:
+ filename: str, name of the file.
+ """
+ with open(filename, 'a') as out_file:
+ out_file.write(self.flags_into_string())
+
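+ # Round-trip sketch (illustrative; the file name is hypothetical):
+ # flags_into_string() emits one flag assignment per line, so the output of
+ # FLAGS.append_flags_into_file('/tmp/saved.flags')
+ # can later be replayed with
+ # FLAGS(['prog', '--flagfile=/tmp/saved.flags'])
+ # because a flagfile is read back line by line by _get_flag_file_lines().
+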
+ def write_help_in_xml_format(self, outfile=None):
+ """Outputs flag documentation in XML format.
+
+ NOTE: We use element names that are consistent with those used by
+ the C++ command-line flag library, from
+ https://github.com/gflags/gflags.
+ We also use a few new elements (e.g., <key>), but we do not
+ interfere / overlap with existing XML elements used by the C++
+ library. Please maintain this consistency.
+
+ Args:
+ outfile: File object we write to. Default None means sys.stdout.
+ """
+ doc = minidom.Document()
+ all_flag = doc.createElement('AllFlags')
+ doc.appendChild(all_flag)
+
+ all_flag.appendChild(_helpers.create_xml_dom_element(
+ doc, 'program', os.path.basename(sys.argv[0])))
+
+ usage_doc = sys.modules['__main__'].__doc__
+ if not usage_doc:
+ usage_doc = '\nUSAGE: %s [flags]\n' % sys.argv[0]
+ else:
+ usage_doc = usage_doc.replace('%s', sys.argv[0])
+ all_flag.appendChild(_helpers.create_xml_dom_element(
+ doc, 'usage', usage_doc))
+
+ # Get list of key flags for the main module.
+ key_flags = self.get_key_flags_for_module(sys.argv[0])
+
+ # Sort flags by declaring module name and next by flag name.
+ flags_by_module = self.flags_by_module_dict()
+ all_module_names = list(flags_by_module.keys())
+ all_module_names.sort()
+ for module_name in all_module_names:
+ flag_list = [(f.name, f) for f in flags_by_module[module_name]]
+ flag_list.sort()
+ for unused_flag_name, flag in flag_list:
+ is_key = flag in key_flags
+ all_flag.appendChild(flag._create_xml_dom_element( # pylint: disable=protected-access
+ doc, module_name, is_key=is_key))
+
+ outfile = outfile or sys.stdout
+ if six.PY2:
+ outfile.write(doc.toprettyxml(indent=' ', encoding='utf-8'))
+ else:
+ outfile.write(
+ doc.toprettyxml(indent=' ', encoding='utf-8').decode('utf-8'))
+ outfile.flush()
+
+ def _check_method_name_conflicts(self, name, flag):
+ if flag.allow_using_method_names:
+ return
+ short_name = flag.short_name
+ flag_names = {name} if short_name is None else {name, short_name}
+ for flag_name in flag_names:
+ if flag_name in self.__dict__['__banned_flag_names']:
+ raise _exceptions.FlagNameConflictsWithMethodError(
+ 'Cannot define a flag named "{name}". It conflicts with a method '
+ 'on class "{class_name}". To allow defining it, use '
+ 'allow_using_method_names and access the flag value with '
+ "FLAGS['{name}'].value. FLAGS.{name} returns the method, "
+ 'not the flag value.'.format(
+ name=flag_name, class_name=type(self).__name__))
+
+
+
+
+FLAGS = FlagValues()
diff --git a/third_party/py/abseil/absl/flags/_helpers.py b/third_party/py/abseil/absl/flags/_helpers.py
new file mode 100644
index 0000000000..a3734189bf
--- /dev/null
+++ b/third_party/py/abseil/absl/flags/_helpers.py
@@ -0,0 +1,430 @@
+# Copyright 2017 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Internal helper functions for Abseil Python flags library."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import collections
+import os
+import re
+import struct
+import sys
+import textwrap
+try:
+ import fcntl
+except ImportError:
+ fcntl = None
+try:
+ # Importing termios will fail on non-unix platforms.
+ import termios
+except ImportError:
+ termios = None
+
+import six
+from six.moves import range # pylint: disable=redefined-builtin
+
+
+_DEFAULT_HELP_WIDTH = 80 # Default width of help output.
+_MIN_HELP_WIDTH = 40 # Minimal "sane" width of help output. We assume that any
+ # value below 40 is unreasonable.
+
+# Define the allowed error rate in an input string to get suggestions.
+#
+# We lean towards a high threshold because we tend to be matching a phrase,
+# and the simple algorithm used here is geared towards correcting word
+# spellings.
+#
+# For manual testing, consider "<command> --list" which produced a large number
+# of spurious suggestions when we used "least_errors > 0.5" instead of
+# "least_erros >= 0.5".
+_SUGGESTION_ERROR_RATE_THRESHOLD = 0.50
+
+# Characters that cannot appear or are highly discouraged in an XML 1.0
+# document. (See http://www.w3.org/TR/REC-xml/#charsets or
+# https://en.wikipedia.org/wiki/Valid_characters_in_XML#XML_1.0)
+_ILLEGAL_XML_CHARS_REGEX = re.compile(
+ u'[\x00-\x08\x0b\x0c\x0e-\x1f\x7f-\x84\x86-\x9f\ud800-\udfff\ufffe\uffff]')
+
+# This is a set of module ids for the modules that disclaim key flags.
+# This module is explicitly added to this set so that we never consider it to
+# define key flag.
+disclaim_module_ids = set([id(sys.modules[__name__])])
+
+
+# Define special flags here so that help may be generated for them.
+# NOTE: Please do NOT use SPECIAL_FLAGS from outside flags module.
+# Initialized inside flagvalues.py.
+SPECIAL_FLAGS = None
+
+
+# This points to the flags module, initialized in flags/__init__.py.
+# This should only be used in adopt_module_key_flags to take SPECIAL_FLAGS into
+# account.
+FLAGS_MODULE = None
+
+
+class _ModuleObjectAndName(
+ collections.namedtuple('_ModuleObjectAndName', 'module module_name')):
+ """Module object and name.
+
+ Fields:
+ - module: object, module object.
+ - module_name: str, module name.
+ """
+
+
+def get_module_object_and_name(globals_dict):
+ """Returns the module that defines a global environment, and its name.
+
+ Args:
+ globals_dict: A dictionary that should correspond to an environment
+ providing the values of the globals.
+
+ Returns:
+ _ModuleObjectAndName - pair of module object & module name.
+ Returns (None, None) if the module could not be identified.
+ """
+ name = globals_dict.get('__name__', None)
+ module = sys.modules.get(name, None)
+ # Pick a more informative name for the main module.
+ return _ModuleObjectAndName(module,
+ (sys.argv[0] if name == '__main__' else name))
+
+
+def get_calling_module_object_and_name():
+ """Returns the module that's calling into this module.
+
+ We generally use this function to get the name of the module calling a
+ DEFINE_foo... function.
+
+ Returns:
+ The module object that called into this one.
+
+ Raises:
+ AssertionError: Raised when no calling module could be identified.
+ """
+ for depth in range(1, sys.getrecursionlimit()):
+ # sys._getframe is the right thing to use here, as it's the best
+ # way to walk up the call stack.
+ globals_for_frame = sys._getframe(depth).f_globals # pylint: disable=protected-access
+ module, module_name = get_module_object_and_name(globals_for_frame)
+ if id(module) not in disclaim_module_ids and module_name is not None:
+ return _ModuleObjectAndName(module, module_name)
+ raise AssertionError('No module was found')
+
+
+def get_calling_module():
+ """Returns the name of the module that's calling into this module."""
+ return get_calling_module_object_and_name().module_name
+
+
+def str_or_unicode(value):
+ """Converts a value to a python string.
+
+ Behavior of this function is intentionally different in Python2/3.
+
+ In Python2, the given value is attempted to convert to a str (byte string).
+ If it contains non-ASCII characters, it is converted to a unicode instead.
+
+ In Python3, the given value is always converted to a str (unicode string).
+
+ This behavior reflects the (bad) practice in Python2 to try to represent
+ a string as str as long as it contains ASCII characters only.
+
+ Args:
+ value: An object to be converted to a string.
+
+ Returns:
+ A string representation of the given value. See the description above
+ for its type.
+ """
+ try:
+ return str(value)
+ except UnicodeEncodeError:
+ return unicode(value) # Python3 should never come here
+
+
+def create_xml_dom_element(doc, name, value):
+ """Returns an XML DOM element with name and text value.
+
+ Args:
+ doc: minidom.Document, the DOM document it should create nodes from.
+ name: str, the tag of XML element.
+ value: object, whose string representation will be used
+ as the value of the XML element. Illegal or highly discouraged xml 1.0
+ characters are stripped.
+
+ Returns:
+ An instance of minidom.Element.
+ """
+ s = str_or_unicode(value)
+ if six.PY2 and not isinstance(s, unicode):
+ # Get a valid unicode string.
+ s = s.decode('utf-8', 'ignore')
+ if isinstance(value, bool):
+ # Display boolean values as the C++ flag library does: no caps.
+ s = s.lower()
+ # Remove illegal xml characters.
+ s = _ILLEGAL_XML_CHARS_REGEX.sub(u'', s)
+
+ e = doc.createElement(name)
+ e.appendChild(doc.createTextNode(s))
+ return e
+
+
+def get_help_width():
+ """Returns the integer width of help lines that is used in TextWrap."""
+ if not sys.stdout.isatty() or termios is None or fcntl is None:
+ return _DEFAULT_HELP_WIDTH
+ try:
+ data = fcntl.ioctl(sys.stdout, termios.TIOCGWINSZ, '1234')
+ columns = struct.unpack('hh', data)[1]
+ # Emacs mode returns 0.
+ # Here we assume that any value below 40 is unreasonable.
+ if columns >= _MIN_HELP_WIDTH:
+ return columns
+ # Returning an int as default is fine; int(int) just returns the int.
+ return int(os.getenv('COLUMNS', _DEFAULT_HELP_WIDTH))
+
+ except (TypeError, IOError, struct.error):
+ return _DEFAULT_HELP_WIDTH
+
+
+def get_flag_suggestions(attempt, longopt_list):
+ """Returns helpful similar matches for an invalid flag."""
+ # Don't suggest on very short strings, or if no longopts are specified.
+ if len(attempt) <= 2 or not longopt_list:
+ return []
+
+ option_names = [v.split('=')[0] for v in longopt_list]
+
+ # Find close approximations in flag prefixes.
+ # This also handles the case where the flag is spelled right but ambiguous.
+ distances = [(_damerau_levenshtein(attempt, option[0:len(attempt)]), option)
+ for option in option_names]
+ distances.sort(key=lambda t: t[0])
+
+ least_errors, _ = distances[0]
+ # Don't suggest excessively bad matches.
+ if least_errors >= _SUGGESTION_ERROR_RATE_THRESHOLD * len(attempt):
+ return []
+
+ suggestions = []
+ for errors, name in distances:
+ if errors == least_errors:
+ suggestions.append(name)
+ else:
+ break
+ return suggestions
+
+
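+# Worked example (illustrative): for an attempted flag 'verbos' and
+# longopt_list=['verbose', 'version', 'vmodule'], the prefix 'verbos' matches
+# 'verbose' with zero edits while the other prefixes need several, so
+# get_flag_suggestions('verbos', ['verbose', 'version', 'vmodule'])
+# returns ['verbose']; only the candidates tied for the fewest errors are
+# suggested, and nothing is suggested when even the best match is poor.
+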
+def _damerau_levenshtein(a, b):
+ """Returns Damerau-Levenshtein edit distance from a to b."""
+ memo = {}
+
+ def distance(x, y):
+ """Recursively defined string distance with memoization."""
+ if (x, y) in memo:
+ return memo[x, y]
+ if not x:
+ d = len(y)
+ elif not y:
+ d = len(x)
+ else:
+ d = min(
+ distance(x[1:], y) + 1, # correct an insertion error
+ distance(x, y[1:]) + 1, # correct a deletion error
+ distance(x[1:], y[1:]) + (x[0] != y[0])) # correct a wrong character
+ if len(x) >= 2 and len(y) >= 2 and x[0] == y[1] and x[1] == y[0]:
+ # Correct a transposition.
+ t = distance(x[2:], y[2:]) + 1
+ if d > t:
+ d = t
+
+ memo[x, y] = d
+ return d
+ return distance(a, b)
+
+
+def text_wrap(text, length=None, indent='', firstline_indent=None):
+ """Wraps a given text to a maximum line length and returns it.
+
+ It turns lines that only contain whitespace into empty lines, keeps new lines,
+ and expands tabs using 4 spaces.
+
+ Args:
+ text: str, text to wrap.
+ length: int, maximum length of a line, includes indentation.
+ If this is None then use get_help_width()
+ indent: str, indent for all but first line.
+ firstline_indent: str, indent for first line; if None, fall back to indent.
+
+ Returns:
+ str, the wrapped text.
+
+ Raises:
+ ValueError: Raised if indent or firstline_indent not shorter than length.
+ """
+ # Get defaults where callee used None
+ if length is None:
+ length = get_help_width()
+ if indent is None:
+ indent = ''
+ if firstline_indent is None:
+ firstline_indent = indent
+
+ if len(indent) >= length:
+ raise ValueError('Length of indent exceeds length')
+ if len(firstline_indent) >= length:
+ raise ValueError('Length of first line indent exceeds length')
+
+ text = text.expandtabs(4)
+
+ result = []
+ # Create one wrapper for the first paragraph and one for subsequent
+ # paragraphs that does not have the initial wrapping.
+ wrapper = textwrap.TextWrapper(
+ width=length, initial_indent=firstline_indent, subsequent_indent=indent)
+ subsequent_wrapper = textwrap.TextWrapper(
+ width=length, initial_indent=indent, subsequent_indent=indent)
+
+ # textwrap does not have any special treatment for newlines. From the docs:
+ # "...newlines may appear in the middle of a line and cause strange output.
+ # For this reason, text should be split into paragraphs (using
+ # str.splitlines() or similar) which are wrapped separately."
+ for paragraph in (p.strip() for p in text.splitlines()):
+ if paragraph:
+ result.extend(wrapper.wrap(paragraph))
+ else:
+ result.append('') # Keep empty lines.
+ # Replace initial wrapper with wrapper for subsequent paragraphs.
+ wrapper = subsequent_wrapper
+
+ return '\n'.join(result)
+
+
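+# Illustrative example (hypothetical values): callers such as
+# FlagValues._render_flag_list use text_wrap to format flag help, e.g.
+# text_wrap('a flag that does something useful', length=30,
+# indent=' ' * 4, firstline_indent=' ' * 2)
+# reflows the text so that no line exceeds 30 columns, indenting the first
+# line by two spaces and every continuation line by four.
+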
+def flag_dict_to_args(flag_map):
+ """Convert a dict of values into process call parameters.
+
+ This method is used to convert a dictionary into a sequence of parameters
+ for a binary that parses arguments using this module.
+
+ Args:
+ flag_map: dict, a mapping where the keys are flag names (strings).
+ Values are treated according to their type:
+ * If value is None, then only the name is emitted.
+ * If value is True, then only the name is emitted.
+ * If value is False, then only the name prepended with 'no' is emitted.
+ * If value is a string then --name=value is emitted.
+ * If value is a collection, this will emit --name=value1,value2,value3.
+ * Everything else is converted to a string and passed as such.
+ Yields:
+ sequence of strings suitable for subprocess execution.
+ """
+ for key, value in six.iteritems(flag_map):
+ if value is None:
+ yield '--%s' % key
+ elif isinstance(value, bool):
+ if value:
+ yield '--%s' % key
+ else:
+ yield '--no%s' % key
+ elif isinstance(value, (bytes, type(u''))):
+ # We don't want strings to be handled like python collections.
+ yield '--%s=%s' % (key, value)
+ else:
+ # Now we attempt to deal with collections.
+ try:
+ yield '--%s=%s' % (key, ','.join(str(item) for item in value))
+ except TypeError:
+ # Default case.
+ yield '--%s=%s' % (key, value)
+
+
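+# Worked example (illustrative; dict ordering is not guaranteed):
+# list(flag_dict_to_args({'verbose': True, 'dry_run': False,
+# 'name': 'alice', 'dims': [2, 3]}))
+# yields, in some order,
+# ['--verbose', '--nodry_run', '--name=alice', '--dims=2,3']
+# which can be appended directly to a subprocess command line.
+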
+def trim_docstring(docstring):
+ """Removes indentation from triple-quoted strings.
+
+ This is the function specified in PEP 257 to handle docstrings:
+ https://www.python.org/dev/peps/pep-0257/.
+
+ Args:
+ docstring: str, a python docstring.
+
+ Returns:
+ str, docstring with indentation removed.
+ """
+ if not docstring:
+ return ''
+
+ # If you've got a line longer than this you have other problems...
+ max_indent = 1 << 29
+
+ # Convert tabs to spaces (following the normal Python rules)
+ # and split into a list of lines:
+ lines = docstring.expandtabs().splitlines()
+
+ # Determine minimum indentation (first line doesn't count):
+ indent = max_indent
+ for line in lines[1:]:
+ stripped = line.lstrip()
+ if stripped:
+ indent = min(indent, len(line) - len(stripped))
+ # Remove indentation (first line is special):
+ trimmed = [lines[0].strip()]
+ if indent < max_indent:
+ for line in lines[1:]:
+ trimmed.append(line[indent:].rstrip())
+ # Strip off trailing and leading blank lines:
+ while trimmed and not trimmed[-1]:
+ trimmed.pop()
+ while trimmed and not trimmed[0]:
+ trimmed.pop(0)
+ # Return a single string:
+ return '\n'.join(trimmed)
+
+
+def doc_to_help(doc):
+ """Takes a __doc__ string and reformats it as help."""
+
+ # Get rid of starting and ending white space. Using lstrip() or even
+ # strip() could drop more than maximum of first line and right space
+ # of last line.
+ doc = doc.strip()
+
+ # Get rid of all empty lines.
+ whitespace_only_line = re.compile('^[ \t]+$', re.M)
+ doc = whitespace_only_line.sub('', doc)
+
+ # Cut out common space at line beginnings.
+ doc = trim_docstring(doc)
+
+ # Just like this module's comment, comments tend to be aligned somehow.
+ # In other words they all start with the same amount of white space.
+ # 1) keep double new lines;
+ # 2) keep ws after new lines if not empty line;
+ # 3) all other new lines shall be changed to a space;
+ # Solution: Match new lines between non white space and replace with space.
+ doc = re.sub(r'(?<=\S)\n(?=\S)', ' ', doc, flags=re.M)
+
+ return doc
+
+
+def is_bytes_or_string(maybe_string):
+ if str is bytes:
+ return isinstance(maybe_string, basestring)
+ else:
+ return isinstance(maybe_string, (str, bytes))
diff --git a/third_party/py/abseil/absl/flags/_validators.py b/third_party/py/abseil/absl/flags/_validators.py
new file mode 100644
index 0000000000..02b508e26f
--- /dev/null
+++ b/third_party/py/abseil/absl/flags/_validators.py
@@ -0,0 +1,424 @@
+# Copyright 2017 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module to enforce different constraints on flags.
+
+Flags validators can be registered using following functions / decorators:
+ flags.register_validator
+ @flags.validator
+ flags.register_multi_flags_validator
+ @flags.multi_flags_validator
+
+Three convenience functions are also provided for common flag constraints:
+ flags.mark_flag_as_required
+ flags.mark_flags_as_required
+ flags.mark_flags_as_mutual_exclusive
+
+See their docstring in this module for a usage manual.
+
+Do NOT import this module directly. Import the flags package and use the
+aliases defined at the package level instead.
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import warnings
+
+from absl.flags import _exceptions
+from absl.flags import _flagvalues
+
+
+class Validator(object):
+ """Base class for flags validators.
+
+ Users should NOT overload these classes, and use flags.Register...
+ methods instead.
+ """
+
+ # Used to assign each validator a unique insertion_index
+ validators_count = 0
+
+ def __init__(self, checker, message):
+ """Constructor to create all validators.
+
+ Args:
+ checker: function to verify the constraint.
+ Input of this method varies, see SingleFlagValidator and
+ multi_flags_validator for a detailed description.
+ message: str, error message to be shown to the user.
+ """
+ self.checker = checker
+ self.message = message
+ Validator.validators_count += 1
+ # Used to assert validators in the order they were registered.
+ self.insertion_index = Validator.validators_count
+
+ def verify(self, flag_values):
+ """Verifies that constraint is satisfied.
+
+ flags library calls this method to verify Validator's constraint.
+
+ Args:
+ flag_values: flags.FlagValues, the FlagValues instance to get flags from.
+ Raises:
+ Error: Raised if constraint is not satisfied.
+ """
+ param = self._get_input_to_checker_function(flag_values)
+ if not self.checker(param):
+ raise _exceptions.ValidationError(self.message)
+
+ def get_flags_names(self):
+ """Returns the names of the flags checked by this validator.
+
+ Returns:
+ [string], names of the flags.
+ """
+ raise NotImplementedError('This method should be overloaded')
+
+ def print_flags_with_values(self, flag_values):
+ raise NotImplementedError('This method should be overloaded')
+
+ def _get_input_to_checker_function(self, flag_values):
+ """Given flag values, returns the input to be given to checker.
+
+ Args:
+ flag_values: flags.FlagValues, containing all flags.
+ Returns:
+ The input to be given to checker. The return type depends on the specific
+ validator.
+ """
+ raise NotImplementedError('This method should be overloaded')
+
+
+class SingleFlagValidator(Validator):
+ """Validator behind register_validator() method.
+
+ Validates that a single flag passes its checker function. The checker function
+ takes the flag value and returns True (if value looks fine) or, if flag value
+ is not valid, either returns False or raises an Exception.
+ """
+
+ def __init__(self, flag_name, checker, message):
+ """Constructor.
+
+ Args:
+ flag_name: string, name of the flag.
+ checker: function to verify the validator.
+ input - value of the corresponding flag (string, boolean, etc).
+ output - bool, True if validator constraint is satisfied.
+ If constraint is not satisfied, it should either return False or
+ raise flags.ValidationError(desired_error_message).
+ message: str, error message to be shown to the user if validator's
+ condition is not satisfied.
+ """
+ super(SingleFlagValidator, self).__init__(checker, message)
+ self.flag_name = flag_name
+
+ def get_flags_names(self):
+ return [self.flag_name]
+
+ def print_flags_with_values(self, flag_values):
+ return 'flag --%s=%s' % (self.flag_name, flag_values[self.flag_name].value)
+
+ def _get_input_to_checker_function(self, flag_values):
+ """Given flag values, returns the input to be given to checker.
+
+ Args:
+ flag_values: flags.FlagValues, the FlagValues instance to get flags from.
+ Returns:
+ object, the input to be given to checker.
+ """
+ return flag_values[self.flag_name].value
+
+
+class MultiFlagsValidator(Validator):
+ """Validator behind register_multi_flags_validator method.
+
+ Validates that flag values pass their common checker function. The checker
+ function takes flag values and returns True (if values look fine) or,
+ if values are not valid, either returns False or raises an Exception.
+ """
+
+ def __init__(self, flag_names, checker, message):
+ """Constructor.
+
+ Args:
+ flag_names: [str], containing names of the flags used by checker.
+ checker: function to verify the validator.
+ input - dict, with keys() being flag_names, and value for each
+ key being the value of the corresponding flag (string, boolean,
+ etc).
+ output - bool, True if validator constraint is satisfied.
+ If constraint is not satisfied, it should either return False or
+ raise flags.ValidationError(desired_error_message).
+ message: str, error message to be shown to the user if validator's
+ condition is not satisfied
+ """
+ super(MultiFlagsValidator, self).__init__(checker, message)
+ self.flag_names = flag_names
+
+ def _get_input_to_checker_function(self, flag_values):
+ """Given flag values, returns the input to be given to checker.
+
+ Args:
+ flag_values: flags.FlagValues, the FlagValues instance to get flags from.
+ Returns:
+ dict, with keys() being self.flag_names, and value for each key
+ being the value of the corresponding flag (string, boolean, etc).
+ """
+ return dict([key, flag_values[key].value] for key in self.flag_names)
+
+ def print_flags_with_values(self, flag_values):
+ prefix = 'flags '
+ flags_with_values = []
+ for key in self.flag_names:
+ flags_with_values.append('%s=%s' % (key, flag_values[key].value))
+ return prefix + ', '.join(flags_with_values)
+
+ def get_flags_names(self):
+ return self.flag_names
+
+
+def register_validator(flag_name,
+ checker,
+ message='Flag validation failed',
+ flag_values=_flagvalues.FLAGS):
+ """Adds a constraint, which will be enforced during program execution.
+
+ The constraint is validated when flags are initially parsed, and after each
+ change of the corresponding flag's value.
+ Args:
+ flag_name: str, name of the flag to be checked.
+ checker: callable, a function to validate the flag.
+ input - A single positional argument: The value of the corresponding
+ flag (string, boolean, etc. This value will be passed to checker
+ by the library).
+ output - bool, True if validator constraint is satisfied.
+ If constraint is not satisfied, it should either return False or
+ raise flags.ValidationError(desired_error_message).
+ message: str, error text to be shown to the user if checker returns False.
+ If checker raises flags.ValidationError, message from the raised
+ error will be shown.
+ flag_values: flags.FlagValues, optional FlagValues instance to validate
+ against.
+ Raises:
+ AttributeError: Raised when flag_name is not registered as a valid flag
+ name.
+ """
+ v = SingleFlagValidator(flag_name, checker, message)
+ _add_validator(flag_values, v)
+
+
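+# Usage sketch (illustrative; the flag name and bounds are hypothetical):
+# flags.DEFINE_integer('port', 8080, 'Port to listen on.')
+# flags.register_validator('port',
+# lambda value: 1024 <= value <= 65535,
+# message='--port must be in [1024, 65535]')
+# After this, parsing a command line with --port=80 fails validation.
+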
+def validator(flag_name, message='Flag validation failed',
+ flag_values=_flagvalues.FLAGS):
+ """A function decorator for defining a flag validator.
+
+ Registers the decorated function as a validator for flag_name, e.g.
+
+ @flags.validator('foo')
+ def _CheckFoo(foo):
+ ...
+
+ See register_validator() for the specification of checker function.
+
+ Args:
+ flag_name: str, name of the flag to be checked.
+ message: str, error text to be shown to the user if checker returns False.
+ If checker raises flags.ValidationError, message from the raised
+ error will be shown.
+ flag_values: flags.FlagValues, optional FlagValues instance to validate
+ against.
+ Returns:
+ A function decorator that registers its function argument as a validator.
+ Raises:
+ AttributeError: Raised when flag_name is not registered as a valid flag
+ name.
+ """
+
+ def decorate(function):
+ register_validator(flag_name, function,
+ message=message,
+ flag_values=flag_values)
+ return function
+ return decorate
+
+
+def register_multi_flags_validator(flag_names,
+ multi_flags_checker,
+ message='Flags validation failed',
+ flag_values=_flagvalues.FLAGS):
+ """Adds a constraint to multiple flags.
+
+ The constraint is validated when flags are initially parsed, and after each
+ change of the corresponding flag's value.
+
+ Args:
+ flag_names: [str], a list of the flag names to be checked.
+ multi_flags_checker: callable, a function to validate the flag.
+ input - dict, with keys() being flag_names, and value for each key
+ being the value of the corresponding flag (string, boolean, etc).
+ output - bool, True if validator constraint is satisfied.
+ If constraint is not satisfied, it should either return False or
+ raise flags.ValidationError.
+ message: str, error text to be shown to the user if checker returns False.
+ If checker raises flags.ValidationError, message from the raised
+ error will be shown.
+ flag_values: flags.FlagValues, optional FlagValues instance to validate
+ against.
+
+ Raises:
+ AttributeError: Raised when a flag is not registered as a valid flag name.
+ """
+ v = MultiFlagsValidator(
+ flag_names, multi_flags_checker, message)
+ _add_validator(flag_values, v)
+
+
+def multi_flags_validator(flag_names,
+ message='Flag validation failed',
+ flag_values=_flagvalues.FLAGS):
+ """A function decorator for defining a multi-flag validator.
+
+ Registers the decorated function as a validator for flag_names, e.g.
+
+ @flags.multi_flags_validator(['foo', 'bar'])
+ def _CheckFooBar(flags_dict):
+ ...
+
+ See register_multi_flags_validator() for the specification of checker
+ function.
+
+ Args:
+ flag_names: [str], a list of the flag names to be checked.
+ message: str, error text to be shown to the user if checker returns False.
+ If checker raises flags.ValidationError, message from the raised
+ error will be shown.
+ flag_values: flags.FlagValues, optional FlagValues instance to validate
+ against.
+
+ Returns:
+ A function decorator that registers its function argument as a validator.
+
+ Raises:
+ AttributeError: Raised when a flag is not registered as a valid flag name.
+ """
+
+ def decorate(function):
+ register_multi_flags_validator(flag_names,
+ function,
+ message=message,
+ flag_values=flag_values)
+ return function
+
+ return decorate
+
+
+def mark_flag_as_required(flag_name, flag_values=_flagvalues.FLAGS):
+ """Ensures that flag is not None during program execution.
+
+ Registers a flag validator, which will follow usual validator rules.
+ Important note: validator will pass for any non-None value, such as False,
+ 0 (zero), '' (empty string) and so on.
+
+ It is recommended to call this method like this:
+
+ if __name__ == '__main__':
+ flags.mark_flag_as_required('your_flag_name')
+ app.run()
+
+ Because validation happens at app.run() we want to ensure required-ness
+ is enforced at that time. However, you generally do not want to force
+ users who import your code to have additional required flags for their
+ own binaries or tests.
+
+ Args:
+ flag_name: str, name of the flag
+ flag_values: flags.FlagValues, optional FlagValues instance where the flag
+ is defined.
+ Raises:
+ AttributeError: Raised when flag_name is not registered as a valid flag
+ name.
+ """
+ if flag_values[flag_name].default is not None:
+ warnings.warn(
+ 'Flag --%s has a non-None default value; therefore, '
+ 'mark_flag_as_required will pass even if flag is not specified in the '
+ 'command line!' % flag_name)
+ register_validator(flag_name,
+ lambda value: value is not None,
+ message='Flag --%s must be specified.' % flag_name,
+ flag_values=flag_values)
+
+
+def mark_flags_as_required(flag_names, flag_values=_flagvalues.FLAGS):
+ """Ensures that flags are not None during program execution.
+
+ Recommended usage:
+
+ if __name__ == '__main__':
+ flags.mark_flags_as_required(['flag1', 'flag2', 'flag3'])
+ app.run()
+
+ Args:
+ flag_names: Sequence[str], names of the flags.
+ flag_values: flags.FlagValues, optional FlagValues instance where the flags
+ are defined.
+ Raises:
+ AttributeError: Raised when any flag name has not already been defined as a flag.
+ """
+ for flag_name in flag_names:
+ mark_flag_as_required(flag_name, flag_values)
+
+
+def mark_flags_as_mutual_exclusive(flag_names, required=False,
+ flag_values=_flagvalues.FLAGS):
+ """Ensures that only one flag among flag_names is set.
+
+ Important note: validator will pass for any non-None value, such as False,
+ 0 (zero), '' (empty string) and so on. For multi flags, this means that the
+ default needs to be None not [].
+
+ Args:
+ flag_names: [str], names of the flags.
+ required: bool, if set, exactly one of the flags must be set.
+ Otherwise, it is also valid for none of the flags to be set.
+ flag_values: flags.FlagValues, optional FlagValues instance where the flags
+ are defined.
+ """
+
+ def validate_mutual_exclusion(flags_dict):
+ flag_count = sum(1 for val in flags_dict.values() if val is not None)
+ if flag_count == 1 or (not required and flag_count == 0):
+ return True
+ message = ('%s one of (%s) must be specified.' %
+ ('Exactly' if required else 'At most', ', '.join(flag_names)))
+ raise _exceptions.ValidationError(message)
+
+ register_multi_flags_validator(
+ flag_names, validate_mutual_exclusion, flag_values=flag_values)
+
+
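+# Usage sketch (illustrative; the flag names are hypothetical):
+# flags.DEFINE_string('input_path', None, 'Read input from this file.')
+# flags.DEFINE_string('input_url', None, 'Read input from this URL.')
+# flags.mark_flags_as_mutual_exclusive(['input_path', 'input_url'],
+# required=True)
+# With required=True, supplying neither flag or both flags fails validation;
+# with the default required=False, only supplying both is an error.
+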
+def _add_validator(fv, validator_instance):
+ """Register new flags validator to be checked.
+
+ Args:
+ fv: flags.FlagValues, the FlagValues instance to add the validator.
+ validator_instance: validators.Validator, the validator to add.
+ Raises:
+ KeyError: Raised when validators work with a non-existing flag.
+ """
+ for flag_name in validator_instance.get_flags_names():
+ fv[flag_name].validators.append(validator_instance)
diff --git a/third_party/py/abseil/absl/logging/__init__.py b/third_party/py/abseil/absl/logging/__init__.py
new file mode 100644
index 0000000000..551cf43aa3
--- /dev/null
+++ b/third_party/py/abseil/absl/logging/__init__.py
@@ -0,0 +1,978 @@
+# Copyright 2017 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Abseil Python logging module implemented on top of standard logging.
+
+Simple usage:
+
+ from absl import logging
+
+ logging.info('Interesting Stuff')
+ logging.info('Interesting Stuff with Arguments: %d', 42)
+
+ logging.set_verbosity(logging.INFO)
+ logging.log(logging.DEBUG, 'This will *not* be printed')
+ logging.set_verbosity(logging.DEBUG)
+ logging.log(logging.DEBUG, 'This will be printed')
+
+ logging.warn('Worrying Stuff')
+ logging.error('Alarming Stuff')
+ logging.fatal('AAAAHHHHH!!!!') # Process exits.
+
+Usage note: Do not pre-format the strings in your program code.
+Instead, let the logging module perform argument interpolation.
+This saves cycles because strings that don't need to be printed
+are never formatted. Note that this module does not attempt to
+interpolate arguments when no arguments are given. In other words
+
+ logging.info('Interesting Stuff: %s')
+
+does not raise an exception because logging.info() has only one
+argument, the message string.
+
+"Lazy" evaluation for debugging:
+
+If you do something like this:
+ logging.debug('Thing: %s', thing.ExpensiveOp())
+then the ExpensiveOp will be evaluated even if nothing
+is printed to the log. To avoid this, use the level_debug() function:
+ if logging.level_debug():
+ logging.debug('Thing: %s', thing.ExpensiveOp())
+
+Notes on Unicode:
+
+The log output is encoded as UTF-8. Don't pass data in other encodings in
+bytes() instances -- instead pass unicode string instances when you need to
+(for both the format string and arguments).
+
+Note on critical and fatal:
+The standard logging module defines fatal as an alias of critical, but it's not
+documented, and it does NOT actually terminate the program.
+This module only defines fatal but not critical, and it DOES terminate the
+program.
+
+The differences in behavior are historical and unfortunate.
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import getpass
+import io
+import itertools
+import logging
+import os
+import socket
+import struct
+import sys
+import time
+import traceback
+
+from absl import flags
+from absl.logging import converter
+import six
+
+if six.PY2:
+ import thread as _thread_lib # For .get_ident().
+else:
+ import threading as _thread_lib # For .get_ident().
+
+
+FLAGS = flags.FLAGS
+
+
+# Logging levels.
+FATAL = converter.ABSL_FATAL
+ERROR = converter.ABSL_ERROR
+WARN = converter.ABSL_WARN
+WARNING = converter.ABSL_WARN
+INFO = converter.ABSL_INFO
+DEBUG = converter.ABSL_DEBUG
+
+# Regex to match/parse log line prefixes.
+ABSL_LOGGING_PREFIX_REGEX = (
+ r'^(?P<severity>[IWEF])'
+ r'(?P<month>\d\d)(?P<day>\d\d) '
+ r'(?P<hour>\d\d):(?P<minute>\d\d):(?P<second>\d\d)'
+ r'\.(?P<microsecond>\d\d\d\d\d\d) +'
+ r'(?P<thread_id>-?\d+) '
+ r'(?P<filename>[a-zA-Z<][\w._<>-]+):(?P<line>\d+)')
+
+
+# Mask to convert integer thread ids to unsigned quantities for logging purposes
+_THREAD_ID_MASK = 2 ** (struct.calcsize('L') * 8) - 1
+
+# Extra property set on the LogRecord created by ABSLLogger when its level is
+# CRITICAL/FATAL.
+_ABSL_LOG_FATAL = '_absl_log_fatal'
+# Extra prefix added to the log message when a non-absl logger logs a
+# CRITICAL/FATAL message.
+_CRITICAL_PREFIX = 'CRITICAL - '
+
+# Used by findCaller to skip callers from */logging/__init__.py.
+_LOGGING_FILE_PREFIX = os.path.join('logging', '__init__.')
+
+# The ABSL logger instance, initialized in _initialize().
+_absl_logger = None
+# The ABSL handler instance, initialized in _initialize().
+_absl_handler = None
+
+
+
+_CPP_NAME_TO_LEVELS = {
+ 'debug': '0', # Abseil C++ has no DEBUG level, mapping it to INFO here.
+ 'info': '0',
+ 'warning': '1',
+ 'warn': '1',
+ 'error': '2',
+ 'fatal': '3'
+}
+
+_CPP_LEVEL_TO_NAMES = {
+ '0': 'info',
+ '1': 'warn',
+ '2': 'error',
+ '3': 'fatal',
+}
+
+
+class _VerbosityFlag(flags.Flag):
+ """Flag class for -v/--verbosity."""
+
+ def __init__(self, *args, **kwargs):
+ super(_VerbosityFlag, self).__init__(
+ flags.IntegerParser(),
+ flags.ArgumentSerializer(),
+ *args, **kwargs)
+
+ @property
+ def value(self):
+ return self._value
+
+ @value.setter
+ def value(self, v):
+ self._value = v
+ self._update_logging_levels()
+
+ def _update_logging_levels(self):
+ """Updates absl logging levels to the current verbosity."""
+ if not _absl_logger:
+ return
+
+ if self._value <= converter.ABSL_DEBUG:
+ standard_verbosity = converter.absl_to_standard(self._value)
+ else:
+ # --verbosity is set to higher than 1 for vlog.
+ standard_verbosity = logging.DEBUG - (self._value - 1)
+
+ # Also update root level when absl_handler is used.
+ if _absl_handler in logging.root.handlers:
+ logging.root.setLevel(standard_verbosity)
+
+
+class _StderrthresholdFlag(flags.Flag):
+ """Flag class for --stderrthreshold."""
+
+ def __init__(self, *args, **kwargs):
+ super(_StderrthresholdFlag, self).__init__(
+ flags.ArgumentParser(),
+ flags.ArgumentSerializer(),
+ *args, **kwargs)
+
+ @property
+ def value(self):
+ return self._value
+
+ @value.setter
+ def value(self, v):
+ if v in _CPP_LEVEL_TO_NAMES:
+ # --stderrthreshold also accepts numeric strings whose values are
+ # Abseil C++ log levels.
+ cpp_value = int(v)
+ v = _CPP_LEVEL_TO_NAMES[v] # Normalize to strings.
+ elif v.lower() in _CPP_NAME_TO_LEVELS:
+ v = v.lower()
+ cpp_value = int(_CPP_NAME_TO_LEVELS[v])
+ else:
+ raise ValueError(
+ '--stderrthreshold must be one of (case-insensitive) '
+ "'debug', 'info', 'warn', 'warning', 'error', 'fatal', "
+ "or '0', '1', '2', '3', not '%s'" % v)
+
+ self._value = v
+
+
+
+
+flags.DEFINE_boolean('logtostderr',
+ False,
+ 'Should only log to stderr?', allow_override_cpp=True)
+flags.DEFINE_boolean('alsologtostderr',
+ False,
+ 'also log to stderr?', allow_override_cpp=True)
+flags.DEFINE_string('log_dir',
+ os.getenv('TEST_TMPDIR', ''),
+ 'directory to write logfiles into',
+ allow_override_cpp=True)
+flags.DEFINE_flag(_VerbosityFlag(
+ 'verbosity', -1,
+ 'Logging verbosity level. Messages logged at this level or lower will '
+ 'be included. Set to 1 for debug logging. If the flag was not set or '
+ 'supplied, the value will be changed from the default of -1 (warning) to '
+ '0 (info) after flags are parsed.',
+ short_name='v', allow_hide_cpp=True))
+flags.DEFINE_flag(_StderrthresholdFlag(
+ 'stderrthreshold', 'fatal',
+ 'log messages at this level, or more severe, to stderr in'
+ ' addition to the logfile. Possible values are '
+ "'debug', 'info', 'warn', 'error', and 'fatal'. "
+ 'Obsoletes --alsologtostderr. Using --alsologtostderr '
+ 'cancels the effect of this flag. Please also note that '
+ 'this flag is subject to --verbosity and requires logfile'
+ ' not be stderr.', allow_hide_cpp=True))
+flags.DEFINE_boolean('showprefixforinfo', True,
+ 'If False, do not prepend prefix to info messages '
+ 'when it\'s logged to stderr, '
+ '--verbosity is set to INFO level, '
+ 'and python logging is used.')
+
+
+def get_verbosity():
+ """Returns the logging verbosity."""
+ return FLAGS['verbosity'].value
+
+
+def set_verbosity(v):
+ """Sets the logging verbosity.
+
+ Causes all messages of level <= v to be logged,
+ and all messages of level > v to be silently discarded.
+
+ Args:
+ v: int|str, the verbosity level as an integer or string. Legal string values
+ are those that can be coerced to an integer as well as case-insensitive
+ 'debug', 'info', 'warn', 'error', and 'fatal'.
+ """
+ try:
+ new_level = int(v)
+ except ValueError:
+ new_level = converter.ABSL_NAMES[v.upper()]
+ FLAGS.verbosity = new_level
+
+
+def set_stderrthreshold(s):
+ """Sets the stderr threshold to the value passed in.
+
+ Args:
+ s: str|int, valid strings values are case-insensitive 'debug',
+ 'info', 'warn', 'error', and 'fatal'; valid integer values are
+ logging.DEBUG|INFO|WARN|ERROR|FATAL.
+
+ Raises:
+ ValueError: Raised when s is an invalid value.
+ """
+ if s in converter.ABSL_LEVELS:
+ FLAGS.stderrthreshold = converter.ABSL_LEVELS[s]
+ elif isinstance(s, str) and s.upper() in converter.ABSL_NAMES:
+ FLAGS.stderrthreshold = s
+ else:
+ raise ValueError(
+ 'set_stderrthreshold only accepts integer absl logging level '
+ 'from -3 to 1, or case-insensitive string values '
+ "'debug', 'info', 'warn', 'error', and 'fatal'. "
+ 'But found "{}" ({}).'.format(s, type(s)))
+
+
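+# Caller-side sketch (illustrative, with `from absl import logging`): both
+# logging.set_stderrthreshold('error')
+# logging.set_stderrthreshold(logging.ERROR)
+# raise the threshold so that, unless --alsologtostderr or --logtostderr is
+# set, only ERROR and FATAL messages are also copied to stderr.
+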
+def fatal(msg, *args, **kwargs):
+ """Logs a fatal message."""
+ log(FATAL, msg, *args, **kwargs)
+
+
+def error(msg, *args, **kwargs):
+ """Logs an error message."""
+ log(ERROR, msg, *args, **kwargs)
+
+
+def warning(msg, *args, **kwargs):
+ """Logs a warning message."""
+ log(WARN, msg, *args, **kwargs)
+
+
+warn = warning
+
+
+def info(msg, *args, **kwargs):
+ """Logs an info message."""
+ log(INFO, msg, *args, **kwargs)
+
+
+def debug(msg, *args, **kwargs):
+ """Logs a debug message."""
+ log(DEBUG, msg, *args, **kwargs)
+
+
+def exception(msg, *args):
+ """Logs an exception, with traceback and message."""
+ error(msg, *args, exc_info=True)
+
+
+# Counter to keep track of number of log entries per token.
+_log_counter_per_token = {}
+
+
+def _get_next_log_count_per_token(token):
+ """Wrapper for _log_counter_per_token. Thread-safe.
+
+ Args:
+ token: The token for which to look up the count.
+
+ Returns:
+ The number of times this function has been called with
+ *token* as an argument (starting at 0).
+ """
+ # Can't use a defaultdict because defaultdict isn't atomic, whereas
+ # setdefault is.
+ return next(_log_counter_per_token.setdefault(token, itertools.count()))
+
+
+def log_every_n(level, msg, n, *args):
+ """Logs 'msg % args' at level 'level' once per 'n' times.
+
+ Logs the 1st call, (N+1)st call, (2N+1)st call, etc.
+ Not threadsafe.
+
+ Args:
+ level: int, the absl logging level at which to log.
+ msg: str, the message to be logged.
+ n: int, the number of times this should be called before it is logged.
+ *args: The args to be substituted into the msg.
+ """
+ count = _get_next_log_count_per_token(get_absl_logger().findCaller())
+ log_if(level, msg, not (count % n), *args)
+
+
+def log_first_n(level, msg, n, *args):
+ """Logs 'msg % args' at level 'level' only first 'n' times.
+
+ Not threadsafe.
+
+ Args:
+ level: int, the absl logging level at which to log.
+ msg: str, the message to be logged.
+ n: int, the maximal number of times the message is logged.
+ *args: The args to be substituted into the msg.
+ """
+ count = _get_next_log_count_per_token(get_absl_logger().findCaller())
+ log_if(level, msg, count < n, *args)
+
+
+def log_if(level, msg, condition, *args):
+ """Logs 'msg % args' at level 'level' only if condition is fulfilled."""
+ if condition:
+ log(level, msg, *args)
+
+
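+# Caller-side sketch (illustrative, with `from absl import logging`):
+# for i in range(100):
+# logging.log_every_n(logging.INFO, 'processed %d items', 10, i)
+# logs only the 1st, 11th, 21st, ... call at this call site (i = 0, 10, 20,
+# ...), which keeps hot loops from flooding the log.
+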
+def log(level, msg, *args, **kwargs):
+ """Logs 'msg % args' at absl logging level 'level'.
+
+ If no args are given just print msg, ignoring any interpolation specifiers.
+
+ Args:
+ level: int, the absl logging level at which to log the message
+ (logging.DEBUG|INFO|WARN|ERROR|FATAL). While some C++ verbose logging
+ level constants are also supported, callers should prefer explicit
+ logging.vlog() calls for such purpose.
+
+ msg: str, the message to be logged.
+ *args: The args to be substituted into the msg.
+ **kwargs: May contain exc_info to add exception traceback to message.
+ """
+ if level > converter.ABSL_DEBUG:
+ # Even though this function supports levels greater than 1, users
+ # should use logging.vlog instead for such cases.
+ # Treat this as vlog, 1 is equivalent to DEBUG.
+ standard_level = converter.STANDARD_DEBUG - (level - 1)
+ else:
+ if level < converter.ABSL_FATAL:
+ level = converter.ABSL_FATAL
+ standard_level = converter.absl_to_standard(level)
+
+ _absl_logger.log(standard_level, msg, *args, **kwargs)
+
+
+def vlog(level, msg, *args, **kwargs):
+ """Log 'msg % args' at C++ vlog level 'level'.
+
+ Args:
+ level: int, the C++ verbose logging level at which to log the message,
+ e.g. 1, 2, 3, 4... While absl level constants are also supported,
+ callers should prefer logging.log|debug|info|... calls for such purpose.
+ msg: str, the message to be logged.
+ *args: The args to be substituted into the msg.
+ **kwargs: May contain exc_info to add exception traceback to message.
+ """
+ log(level, msg, *args, **kwargs)
+
+
+def flush():
+ """Flushes all log files."""
+ get_absl_handler().flush()
+
+
+def level_debug():
+ """Returns True if debug logging is turned on."""
+ return get_verbosity() >= DEBUG
+
+
+def level_info():
+ """Returns True if info logging is turned on."""
+ return get_verbosity() >= INFO
+
+
+def level_warn():
+ """Returns True if warning logging is turned on."""
+ return get_verbosity() >= WARN
+
+
+def level_error():
+ """Returns True if error logging is turned on."""
+ return get_verbosity() >= ERROR
+
+
+def find_log_dir_and_names(program_name=None, log_dir=None):
+ """Computes the directory and filename prefix for log file.
+
+ Args:
+ program_name: str|None, the filename part of the path to the program that
+ is running, without its extension. E.g., if your program is called
+ 'usr/bin/foobar.py', this method should probably be called with
+ program_name='foobar'. However, this is just a convention; you can
+ pass in any string you want, and it will be used as part of the
+ log filename. If you don't pass in anything, the default behavior
+ is as described in the example. In python standard logging mode,
+ 'py_' is prepended to the program_name when the program_name
+ argument is omitted.
+ log_dir: str|None, the desired log directory.
+
+ Returns:
+ (log_dir, file_prefix, symlink_prefix)
+ """
+ if not program_name:
+ # Strip the extension (foobar.par becomes foobar, and
+ # fubar.py becomes fubar). We do this so that the log
+ # file names are similar to C++ log file names.
+ program_name = os.path.splitext(os.path.basename(sys.argv[0]))[0]
+
+ # Prepend py_ to files so that python code gets a unique file, and
+ # so that C++ libraries do not try to write to the same log files as us.
+ program_name = 'py_%s' % program_name
+
+ actual_log_dir = find_log_dir(log_dir=log_dir)
+
+ username = getpass.getuser()
+ hostname = socket.gethostname()
+ file_prefix = '%s.%s.%s.log' % (program_name, hostname, username)
+
+ return actual_log_dir, file_prefix, program_name
+
+
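+# Illustrative example (the program, host, and user names are made up): for a
+# program run as 'foobar.py' by user 'alice' on host 'worker1', and with no
+# --log_dir set, find_log_dir_and_names() returns something like
+# ('/tmp/', 'py_foobar.worker1.alice.log', 'py_foobar')
+# and PythonHandler.start_logging_to_file() then appends
+# '.INFO.<timestamp>.<pid>' to the file prefix to form the log file name.
+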
+def find_log_dir(log_dir=None):
+ """Returns the most suitable directory to put log files into.
+
+ Args:
+ log_dir: str|None, if specified, the logfile(s) will be created in that
+ directory. Otherwise if the --log_dir command-line flag is provided,
+ the logfile will be created in that directory. Otherwise the logfile
+ will be created in a standard location.
+ """
+ # Get a list of possible log dirs (will try to use them in order).
+ if log_dir:
+ # log_dir was explicitly specified as an arg, so use it and it alone.
+ dirs = [log_dir]
+ elif FLAGS['log_dir'].value:
+ # log_dir flag was provided, so use it and it alone (this mimics the
+ # behavior of the same flag in logging.cc).
+ dirs = [FLAGS['log_dir'].value]
+ else:
+ dirs = ['/tmp/', './']
+
+ # Find the first usable log dir.
+ for d in dirs:
+ if os.path.isdir(d) and os.access(d, os.W_OK):
+ return d
+ _absl_logger.fatal("Can't find a writable directory for logs, tried %s", dirs)
+
+
+def get_absl_log_prefix(record):
+ """Returns the absl log prefix for the log record.
+
+ Args:
+ record: logging.LogRecord, the record to get prefix for.
+ """
+ created_tuple = time.localtime(record.created)
+ created_microsecond = int(record.created % 1.0 * 1e6)
+
+ critical_prefix = ''
+ level = record.levelno
+ if _is_non_absl_fatal_record(record):
+ # When the level is FATAL, but not logged from absl, lower the level so
+ # it's treated as ERROR.
+ level = logging.ERROR
+ critical_prefix = _CRITICAL_PREFIX
+ severity = converter.get_initial_for_level(level)
+
+ return '%c%02d%02d %02d:%02d:%02d.%06d %5d %s:%d] %s' % (
+ severity,
+ created_tuple.tm_mon,
+ created_tuple.tm_mday,
+ created_tuple.tm_hour,
+ created_tuple.tm_min,
+ created_tuple.tm_sec,
+ created_microsecond,
+ _get_thread_id(),
+ record.filename,
+ record.lineno,
+ critical_prefix)
+
+
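+# Format illustration (hypothetical record values): an INFO record logged from
+# foo.py line 42 by thread 12345 on Dec 01 at 10:10:08.123456 gets the prefix
+# 'I1201 10:10:08.123456 12345 foo.py:42] '
+# which is exactly what ABSL_LOGGING_PREFIX_REGEX above is written to parse.
+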
+def _is_non_absl_fatal_record(log_record):
+ return (log_record.levelno >= logging.FATAL and
+ not log_record.__dict__.get(_ABSL_LOG_FATAL, False))
+
+
+def _is_absl_fatal_record(log_record):
+ return (log_record.levelno >= logging.FATAL and
+ log_record.__dict__.get(_ABSL_LOG_FATAL, False))
+
+
+# Indicates if we still need to warn about pre-init logs going to stderr.
+_warn_preinit_stderr = True
+
+
+class PythonHandler(logging.StreamHandler):
+ """The handler class used by Abseil Python logging implementation."""
+
+ def __init__(self, stream=None, formatter=None):
+ super(PythonHandler, self).__init__(stream)
+ self.setFormatter(formatter or PythonFormatter())
+
+ def start_logging_to_file(self, program_name=None, log_dir=None):
+ """Starts logging messages to files instead of standard error."""
+ FLAGS.logtostderr = 0
+
+ actual_log_dir, file_prefix, symlink_prefix = find_log_dir_and_names(
+ program_name=program_name, log_dir=log_dir)
+
+ basename = '%s.INFO.%s.%d' % (
+ file_prefix,
+ time.strftime('%Y%m%d-%H%M%S', time.localtime(time.time())),
+ os.getpid())
+ filename = os.path.join(actual_log_dir, basename)
+
+ if six.PY2:
+ self.stream = open(filename, 'a')
+ else:
+ self.stream = open(filename, 'a', encoding='utf-8')
+
+ # os.symlink is not available on Windows Python 2.
+ if getattr(os, 'symlink', None):
+ # Create a symlink to the log file with a canonical name.
+ symlink = os.path.join(actual_log_dir, symlink_prefix + '.INFO')
+ try:
+ if os.path.islink(symlink):
+ os.unlink(symlink)
+ os.symlink(os.path.basename(filename), symlink)
+ except EnvironmentError:
+ # If it fails, we're sad but it's no error. Commonly, this
+ # fails because the symlink was created by another user and so
+ # we can't modify it
+ pass
+
+ def use_absl_log_file(self, program_name=None, log_dir=None):
+ """Conditionally logs to files, based on --logtostderr."""
+ if FLAGS['logtostderr'].value:
+ self.stream = sys.stderr
+ else:
+ self.start_logging_to_file(program_name=program_name, log_dir=log_dir)
+
+ def flush(self):
+ """Flushes all log files."""
+ self.acquire()
+ try:
+ self.stream.flush()
+ except (EnvironmentError, ValueError):
+ # A ValueError is thrown if we try to flush a closed file.
+ pass
+ finally:
+ self.release()
+
+ def _log_to_stderr(self, record):
+ """Emits the record to stderr.
+
+ This temporarily sets the handler stream to stderr, calls
+ StreamHandler.emit, then reverts the stream back.
+
+ Args:
+ record: logging.LogRecord, the record to log.
+ """
+ # emit() is protected by a lock in logging.Handler, so we don't need to
+ # protect here again.
+ old_stream = self.stream
+ self.stream = sys.stderr
+ try:
+ super(PythonHandler, self).emit(record)
+ finally:
+ self.stream = old_stream
+
+ def emit(self, record):
+    """Emits a record to the appropriate streams.
+
+    If FLAGS.logtostderr is set, the record is printed to sys.stderr ONLY.
+    Otherwise it is emitted to this handler's stream, and also copied to
+    sys.stderr if FLAGS.alsologtostderr is set or the record's level is at
+    or above --stderrthreshold.
+
+ Args:
+ record: logging.LogRecord, the record to emit.
+ """
+    # People occasionally call logging functions at import time, before
+    # our flags have even been defined, let alone parsed, as we rely on
+    # the C++ side to define some flags for us and on app init to deal
+    # with parsing. Match the C++ library behavior: notify the user and
+    # emit such messages to stderr. It encourages people to clean up such
+    # calls and does not hide the message.
+ level = record.levelno
+    if not FLAGS.is_parsed():  # Also implies "before flags have been defined".
+ global _warn_preinit_stderr
+ if _warn_preinit_stderr:
+ sys.stderr.write(
+ 'WARNING: Logging before flag parsing goes to stderr.\n')
+ _warn_preinit_stderr = False
+ self._log_to_stderr(record)
+ elif FLAGS['logtostderr'].value:
+ self._log_to_stderr(record)
+ else:
+ super(PythonHandler, self).emit(record)
+ stderr_threshold = converter.string_to_standard(
+ FLAGS['stderrthreshold'].value)
+ if ((FLAGS['alsologtostderr'].value or level >= stderr_threshold) and
+ self.stream != sys.stderr):
+ self._log_to_stderr(record)
+ # Die when the record is created from ABSLLogger and level is FATAL.
+ if _is_absl_fatal_record(record):
+ self.flush() # Flush the log before dying.
+
+ # In threaded python, sys.exit() from a non-main thread only
+ # exits the thread in question.
+ os.abort()
+
+ def close(self):
+ """Closes the stream to which we are writing."""
+ self.acquire()
+ try:
+ self.flush()
+ try:
+ # Do not close the stream if it's sys.stderr|stdout. They may be
+ # redirected or overridden to files, which should be managed by users
+ # explicitly.
+ if self.stream not in (sys.stderr, sys.stdout) and (
+ not hasattr(self.stream, 'isatty') or not self.stream.isatty()):
+ self.stream.close()
+ except ValueError:
+ # A ValueError is thrown if we try to run isatty() on a closed file.
+ pass
+ super(PythonHandler, self).close()
+ finally:
+ self.release()
+
+
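+# Illustrative sketch, not part of the absl-py 0.1.1 sources: once flags are
+# parsed (e.g. inside app.run()), a program could switch its logs to files via
+# the handler; 'myprogram' and log_dir are assumed example values.
+def _example_log_to_files(log_dir):
+  get_absl_handler().use_absl_log_file(program_name='myprogram',
+                                       log_dir=log_dir)
+
+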
+class ABSLHandler(logging.Handler):
+ """Abseil Python logging module's log handler."""
+
+ def __init__(self, python_logging_formatter):
+ super(ABSLHandler, self).__init__()
+
+ self._python_handler = PythonHandler(formatter=python_logging_formatter)
+ self.activate_python_handler()
+
+ def format(self, record):
+ return self._current_handler.format(record)
+
+ def setFormatter(self, fmt):
+ self._current_handler.setFormatter(fmt)
+
+ def emit(self, record):
+ self._current_handler.emit(record)
+
+ def flush(self):
+ self._current_handler.flush()
+
+ def close(self):
+ super(ABSLHandler, self).close()
+ self._current_handler.close()
+
+ @property
+ def python_handler(self):
+ return self._python_handler
+
+ def activate_python_handler(self):
+ """Uses the Python logging handler as the current logging handler."""
+ self._current_handler = self._python_handler
+
+ def use_absl_log_file(self, program_name=None, log_dir=None):
+ self._current_handler.use_absl_log_file(program_name, log_dir)
+
+ def start_logging_to_file(self, program_name=None, log_dir=None):
+ self._current_handler.start_logging_to_file(program_name, log_dir)
+
+
+class PythonFormatter(logging.Formatter):
+ """Formatter class used by PythonHandler."""
+
+ def format(self, record):
+    """Prepends the absl log prefix to the record's formatted message.
+
+ Args:
+ record: logging.LogRecord, the record to be formatted.
+
+ Returns:
+ The formatted string representing the record.
+ """
+ if (not FLAGS['showprefixforinfo'].value and
+ FLAGS['verbosity'].value == converter.ABSL_INFO and
+ record.levelno == logging.INFO and
+ _absl_handler.python_handler.stream == sys.stderr):
+ prefix = ''
+ else:
+ prefix = get_absl_log_prefix(record)
+ return prefix + super(PythonFormatter, self).format(record)
+
+
+class ABSLLogger(logging.getLoggerClass()):
+ """A logger that will create LogRecords while skipping some stack frames.
+
+ This class maintains an internal list of filenames and method names
+  for use when determining who called the currently executing stack
+ frame. Any method names from specific source files are skipped when
+ walking backwards through the stack.
+
+ Client code should use the register_frame_to_skip method to let the
+ ABSLLogger know which method from which file should be
+ excluded from the walk backwards through the stack.
+ """
+ _frames_to_skip = set()
+
+ def findCaller(self, stack_info=False):
+ """Finds the frame of the calling method on the stack.
+
+ This method skips any frames registered with the
+ ABSLLogger and any methods from this file, and whatever
+ method is currently being used to generate the prefix for the log
+    line. Then it returns the file name, line number, and method name
+ of the calling method.
+
+ Args:
+ stack_info: bool, when using Python 3 and True, include the stack trace as
+ the fourth item returned instead of None.
+
+ Returns:
+ (filename, lineno, methodname[, sinfo]) of the calling method.
+ """
+ f_to_skip = ABSLLogger._frames_to_skip
+ frame = logging.currentframe()
+
+ while frame:
+ code = frame.f_code
+ if (_LOGGING_FILE_PREFIX not in code.co_filename and
+ (code.co_filename, code.co_name) not in f_to_skip):
+ if six.PY2:
+ return (code.co_filename, frame.f_lineno, code.co_name)
+ else:
+ sinfo = None
+ if stack_info:
+ out = io.StringIO()
+ out.write('Stack (most recent call last):\n')
+ traceback.print_stack(frame, file=out)
+ sinfo = out.getvalue().rstrip('\n')
+ out.close()
+ return (code.co_filename, frame.f_lineno, code.co_name, sinfo)
+ frame = frame.f_back
+
+ def critical(self, msg, *args, **kwargs):
+ """Logs 'msg % args' with severity 'CRITICAL'."""
+ self.log(logging.CRITICAL, msg, *args, **kwargs)
+
+ def fatal(self, msg, *args, **kwargs):
+ """Logs 'msg % args' with severity 'FATAL'."""
+ self.log(logging.FATAL, msg, *args, **kwargs)
+
+ def error(self, msg, *args, **kwargs):
+ """Logs 'msg % args' with severity 'ERROR'."""
+ self.log(logging.ERROR, msg, *args, **kwargs)
+
+ def warn(self, msg, *args, **kwargs):
+ """Logs 'msg % args' with severity 'WARN'."""
+ self.log(logging.WARN, msg, *args, **kwargs)
+
+ def warning(self, msg, *args, **kwargs):
+ """Logs 'msg % args' with severity 'WARNING'."""
+ self.log(logging.WARNING, msg, *args, **kwargs)
+
+ def info(self, msg, *args, **kwargs):
+ """Logs 'msg % args' with severity 'INFO'."""
+ self.log(logging.INFO, msg, *args, **kwargs)
+
+ def debug(self, msg, *args, **kwargs):
+ """Logs 'msg % args' with severity 'DEBUG'."""
+ self.log(logging.DEBUG, msg, *args, **kwargs)
+
+ def log(self, level, msg, *args, **kwargs):
+    """Logs a message at a certain level substituting in the supplied arguments.
+
+ This method behaves differently in python and c++ modes.
+
+ Args:
+ level: int, the standard logging level at which to log the message.
+ msg: str, the text of the message to log.
+ *args: The arguments to substitute in the message.
+ **kwargs: The keyword arguments to substitute in the message.
+ """
+ if level >= logging.FATAL:
+ # Add property to the LogRecord created by this logger.
+ # This will be used by the ABSLHandler to determine whether it should
+ # treat CRITICAL/FATAL logs as really FATAL.
+ extra = kwargs.setdefault('extra', {})
+ extra[_ABSL_LOG_FATAL] = True
+ super(ABSLLogger, self).log(level, msg, *args, **kwargs)
+
+ def handle(self, record):
+ """Calls handlers without checking Logger.disabled.
+
+    Non-root loggers are disabled after a logging.config setup unless they
+    are explicitly listed in the configuration. Historically, absl logging
+    has not been disabled by that. To maintain this behavior, this method
+    skips checking the Logger.disabled bit.
+
+ This logger can still be disabled by adding a filter that filters out
+ everything.
+
+ Args:
+ record: logging.LogRecord, the record to handle.
+ """
+ if self.filter(record):
+ self.callHandlers(record)
+
+ @classmethod
+ def register_frame_to_skip(cls, file_name, function_name):
+ """Registers a function name to skip when walking the stack.
+
+ The ABSLLogger sometimes skips method calls on the stack
+ to make the log messages meaningful in their appropriate context.
+    This method registers a function from a particular file as one
+ which should be skipped.
+
+ Args:
+ file_name: str, the name of the file that contains the function.
+ function_name: str, the name of the function to skip.
+ """
+ cls._frames_to_skip.add((file_name, function_name))
+
+
+def _get_thread_id():
+ """Gets id of current thread, suitable for logging as an unsigned quantity.
+
+  The numeric thread id from the threading library is masked with
+  _THREAD_ID_MASK to make the value unsigned, keeping it consistent with
+  the thread id printed by the C++ logging library.
+
+ Returns:
+ Thread ID unique to this process (unsigned)
+ """
+ thread_id = _thread_lib.get_ident()
+ return thread_id & _THREAD_ID_MASK
+
+
+def get_absl_logger():
+ """Returns the absl logger instance."""
+ return _absl_logger
+
+
+def get_absl_handler():
+ """Returns the absl handler instance."""
+ return _absl_handler
+
+
+def use_python_logging(quiet=False):
+ """Uses the python implementation of the logging code.
+
+ Args:
+    quiet: bool, if True, suppresses the message about switching logging type.
+ """
+ get_absl_handler().activate_python_handler()
+ if not quiet:
+ info('Restoring pure python logging')
+
+
+def use_absl_handler():
+  """Uses the absl logging handler for logging if not yet configured.
+
+  The absl handler is attached to root at import time if no other handlers
+  are attached at that point.
+
+  Otherwise, app.run() calls this method so that the absl handler is used.
+  """
+ absl_handler = get_absl_handler()
+ if absl_handler not in logging.root.handlers:
+ logging.root.addHandler(absl_handler)
+ FLAGS['verbosity']._update_logging_levels() # pylint: disable=protected-access
+
+
+def _initialize():
+ """Initializes loggers and handlers."""
+ global _absl_logger, _absl_handler
+
+ if _absl_logger:
+ return
+
+ original_logger_class = logging.getLoggerClass()
+ logging.setLoggerClass(ABSLLogger)
+ _absl_logger = logging.getLogger('absl')
+ logging.setLoggerClass(original_logger_class)
+
+ python_logging_formatter = PythonFormatter()
+ _absl_handler = ABSLHandler(python_logging_formatter)
+
+ # The absl handler logs to stderr by default. To prevent double logging to
+ # stderr, the following code tries its best to remove other handlers that emit
+ # to stderr. Those handlers are most commonly added when logging.info/debug is
+ # called before importing this module.
+ handlers = [
+ h for h in logging.root.handlers
+ if isinstance(h, logging.StreamHandler) and h.stream == sys.stderr]
+ for h in handlers:
+ logging.root.removeHandler(h)
+
+ # The absl handler will always be attached to root, not the absl logger.
+ if not logging.root.handlers:
+ # Attach the absl handler at import time when there are no other handlers.
+ # Otherwise it means users have explicitly configured logging, and the absl
+ # handler will only be attached later in app.run(). For App Engine apps,
+ # the absl handler is not used.
+ logging.root.addHandler(_absl_handler)
+
+
+# Initialize absl logger.
+# Must be called after logging flags in this module are defined.
+_initialize()
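+
+
+# Illustrative sketch, not part of the absl-py 0.1.1 sources: a hypothetical
+# main() showing how a program logs through this module once absl.app has
+# parsed flags and installed the absl handler.
+def _example_main(argv):
+  del argv  # Unused.
+  use_absl_handler()
+  info('Logging through the absl handler now.')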
diff --git a/third_party/py/abseil/absl/logging/converter.py b/third_party/py/abseil/absl/logging/converter.py
new file mode 100644
index 0000000000..6292547ee5
--- /dev/null
+++ b/third_party/py/abseil/absl/logging/converter.py
@@ -0,0 +1,212 @@
+# Copyright 2017 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module to convert log levels between Abseil Python, C++, and Python standard.
+
+This converter has to convert (best effort) between three different
+logging level schemes:
+ cpp = The C++ logging level scheme used in Abseil C++.
+ absl = The absl.logging level scheme used in Abseil Python.
+ standard = The python standard library logging level scheme.
+
+Here is a handy ascii chart for easy mental mapping.
+
+ LEVEL | cpp | absl | standard |
+ ----------+-----+--------+----------+
+ DEBUG | 0 | 1 | 10 |
+ INFO | 0 | 0 | 20 |
+ WARN(ING) | 1 | -1 | 30 |
+ ERROR | 2 | -2 | 40 |
+ CRITICAL | 3 | -3 | 50 |
+ FATAL | 3 | -3 | 50 |
+
+Note: standard logging CRITICAL is mapped to absl/cpp FATAL.
+However, only CRITICAL logs from the absl logger (or absl.logging.fatal) will
+terminate the program. CRITICAL logs from non-absl loggers are treated as
+error logs with a message prefix "CRITICAL - ".
+
+Converting from standard to absl or cpp is a lossy conversion.
+Converting back to standard will lose granularity. For this reason,
+users should always try to convert to standard, the richest
+representation, before manipulating the levels, and then only to cpp
+or absl if those level schemes are absolutely necessary.
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import logging
+
+STANDARD_CRITICAL = logging.CRITICAL
+STANDARD_ERROR = logging.ERROR
+STANDARD_WARNING = logging.WARNING
+STANDARD_INFO = logging.INFO
+STANDARD_DEBUG = logging.DEBUG
+
+# These levels are also used to define the constants
+# FATAL, ERROR, WARN, WARNING, INFO, and DEBUG in the
+# absl.logging module.
+ABSL_FATAL = -3
+ABSL_ERROR = -2
+ABSL_WARN = -1
+ABSL_INFO = 0
+ABSL_DEBUG = 1
+
+ABSL_LEVELS = {ABSL_FATAL: 'FATAL',
+ ABSL_ERROR: 'ERROR',
+ ABSL_WARN: 'WARN',
+ ABSL_INFO: 'INFO',
+ ABSL_DEBUG: 'DEBUG'}
+
+# Inverts the ABSL_LEVELS dictionary
+ABSL_NAMES = dict((v, k) for (k, v) in ABSL_LEVELS.items())
+
+ABSL_TO_STANDARD = {ABSL_FATAL: STANDARD_CRITICAL,
+ ABSL_ERROR: STANDARD_ERROR,
+ ABSL_WARN: STANDARD_WARNING,
+ ABSL_INFO: STANDARD_INFO,
+ ABSL_DEBUG: STANDARD_DEBUG}
+
+# Inverts the ABSL_TO_STANDARD dictionary.
+STANDARD_TO_ABSL = dict((v, k) for (k, v) in ABSL_TO_STANDARD.items())
+
+
+def get_initial_for_level(level):
+ """Gets the initial that should start the log line for the given level.
+
+ It returns:
+ - 'I' when: level < STANDARD_WARNING.
+ - 'W' when: STANDARD_WARNING <= level < STANDARD_ERROR.
+ - 'E' when: STANDARD_ERROR <= level < STANDARD_CRITICAL.
+ - 'F' when: level >= STANDARD_CRITICAL.
+
+ Args:
+ level: int, a Python standard logging level.
+
+ Returns:
+ The first initial as it would be logged by the C++ logging module.
+ """
+ if level < STANDARD_WARNING:
+ return 'I'
+ elif level < STANDARD_ERROR:
+ return 'W'
+ elif level < STANDARD_CRITICAL:
+ return 'E'
+ else:
+ return 'F'
+
+
+def absl_to_cpp(level):
+ """Converts an absl log level to a cpp log level.
+
+ Args:
+ level: int, an absl.logging level.
+
+ Raises:
+ TypeError: Raised when level is not an integer.
+
+ Returns:
+ The corresponding integer level for use in Abseil C++.
+ """
+ if not isinstance(level, int):
+ raise TypeError('Expect an int level, found {}'.format(type(level)))
+ if level >= 0:
+ # C++ log levels must be >= 0
+ return 0
+ else:
+ return -level
+
+
+def absl_to_standard(level):
+ """Converts an integer level from the absl value to the standard value.
+
+ Args:
+ level: int, an absl.logging level.
+
+ Raises:
+ TypeError: Raised when level is not an integer.
+
+ Returns:
+ The corresponding integer level for use in standard logging.
+ """
+ if not isinstance(level, int):
+ raise TypeError('Expect an int level, found {}'.format(type(level)))
+ if level < ABSL_FATAL:
+ level = ABSL_FATAL
+ if level <= ABSL_DEBUG:
+ return ABSL_TO_STANDARD[level]
+ # Maps to vlog levels.
+ return STANDARD_DEBUG - level + 1
+
+
+def string_to_standard(level):
+ """Converts a string level to standard logging level value.
+
+ Args:
+ level: str, case-insensitive 'debug', 'info', 'warn', 'error', 'fatal'.
+
+ Returns:
+ The corresponding integer level for use in standard logging.
+ """
+ # Also support warning as an alias to warn.
+ if level.upper() == 'WARNING':
+ level = 'WARN'
+ return absl_to_standard(ABSL_NAMES.get(level.upper()))
+
+
+def standard_to_absl(level):
+ """Converts an integer level from the standard value to the absl value.
+
+ Args:
+ level: int, a Python standard logging level.
+
+ Raises:
+ TypeError: Raised when level is not an integer.
+
+ Returns:
+ The corresponding integer level for use in absl logging.
+ """
+ if not isinstance(level, int):
+ raise TypeError('Expect an int level, found {}'.format(type(level)))
+ if level < 0:
+ level = 0
+ if level < STANDARD_DEBUG:
+ # Maps to vlog levels.
+ return STANDARD_DEBUG - level + 1
+ elif level < STANDARD_INFO:
+ return ABSL_DEBUG
+ elif level < STANDARD_WARNING:
+ return ABSL_INFO
+ elif level < STANDARD_ERROR:
+ return ABSL_WARN
+ elif level < STANDARD_CRITICAL:
+ return ABSL_ERROR
+ else:
+ return ABSL_FATAL
+
+
+def standard_to_cpp(level):
+ """Converts an integer level from the standard value to the cpp value.
+
+ Args:
+ level: int, a Python standard logging level.
+
+ Raises:
+ TypeError: Raised when level is not an integer.
+
+ Returns:
+ The corresponding integer level for use in cpp logging.
+ """
+ return absl_to_cpp(standard_to_absl(level))
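+
+
+# Illustrative sketch, not part of the absl-py 0.1.1 sources: a few sample
+# conversions exercising the helpers above; the expected values follow the
+# chart in the module docstring.
+def _example_conversions():
+  assert get_initial_for_level(STANDARD_WARNING) == 'W'
+  assert absl_to_standard(ABSL_WARN) == STANDARD_WARNING
+  assert standard_to_absl(STANDARD_DEBUG) == ABSL_DEBUG
+  assert standard_to_cpp(STANDARD_ERROR) == 2
+  # Standard levels below DEBUG map to absl vlog levels: 9 -> 2, 8 -> 3, ...
+  assert standard_to_absl(9) == 2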
diff --git a/third_party/py/abseil/absl/testing/__init__.py b/third_party/py/abseil/absl/testing/__init__.py
new file mode 100644
index 0000000000..a3bd1cd518
--- /dev/null
+++ b/third_party/py/abseil/absl/testing/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/third_party/py/abseil/absl/testing/_bazelize_command.py b/third_party/py/abseil/absl/testing/_bazelize_command.py
new file mode 100644
index 0000000000..93c135caa6
--- /dev/null
+++ b/third_party/py/abseil/absl/testing/_bazelize_command.py
@@ -0,0 +1,49 @@
+# Copyright 2017 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Internal helper for running tests on Windows Bazel."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import os
+
+
+def get_executable_path(py_binary_path):
+ """Returns the executable path of a py_binary.
+
+ This returns the executable path of a py_binary that is in another Bazel
+ target's data dependencies.
+
+ On Linux/macOS, it's the same as the py_binary_path.
+ On Windows, the py_binary_path points to a zip file, and Bazel 0.5.3+
+ generates a .cmd file that can be used to execute the py_binary.
+
+ Args:
+ py_binary_path: string, the path of a py_binary that is in another Bazel
+ target's data dependencies.
+ """
+ if os.name == 'nt':
+ executable_path = py_binary_path + '.cmd'
+ if executable_path.startswith('\\\\?\\'):
+      # In Bazel 0.5.3 and Python 3, the path starts with "\\?\".
+      # However, Python's subprocess module doesn't handle such paths well.
+      # Strip the prefix since we don't need it.
+      # See this page for more information about "\\?\":
+ # https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247.
+ executable_path = executable_path[4:]
+ return executable_path
+ else:
+ return py_binary_path
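+
+
+# Hypothetical usage sketch, not part of the absl-py 0.1.1 sources: resolving
+# a py_binary from a data dependency before invoking it; the path argument is
+# an assumed example supplied by the caller (e.g. under TEST_SRCDIR).
+def _example_invoke_py_binary(py_binary_path):
+  import subprocess
+  executable_path = get_executable_path(py_binary_path)
+  return subprocess.call([executable_path, '--help'])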
diff --git a/third_party/py/abseil/absl/testing/absltest.py b/third_party/py/abseil/absl/testing/absltest.py
new file mode 100644
index 0000000000..8702bfd9d7
--- /dev/null
+++ b/third_party/py/abseil/absl/testing/absltest.py
@@ -0,0 +1,1715 @@
+# Copyright 2017 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Base functionality for Abseil Python tests.
+
+This module contains base classes and high-level functions for Abseil-style
+tests.
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import collections
+import difflib
+import errno
+import getpass
+import inspect
+import itertools
+import json
+import os
+import random
+import re
+import shlex
+import signal
+import subprocess
+import sys
+import tempfile
+import textwrap
+import unittest
+
+try:
+ import faulthandler
+except ImportError:
+ # We use faulthandler if it is available.
+ faulthandler = None
+
+from absl import app
+from absl import flags
+from absl import logging
+from absl.testing import xml_reporter
+import six
+from six.moves import urllib
+from six.moves import xrange # pylint: disable=redefined-builtin
+
+
+FLAGS = flags.FLAGS
+
+_TEXT_OR_BINARY_TYPES = (six.text_type, six.binary_type)
+
+
+# Many of the methods in this module have names like assertSameElements.
+# This kind of name does not comply with PEP8 style,
+# but it is consistent with the naming of methods in unittest.py.
+# pylint: disable=invalid-name
+
+
+def _get_default_test_random_seed():
+ random_seed = 301
+ value = os.environ.get('TEST_RANDOM_SEED', '')
+ try:
+ random_seed = int(value)
+ except ValueError:
+ pass
+ return random_seed
+
+
+def get_default_test_srcdir():
+ """Returns default test source dir."""
+ return os.environ.get('TEST_SRCDIR', '')
+
+
+def get_default_test_tmpdir():
+ """Returns default test temp dir."""
+ tmpdir = os.environ.get('TEST_TMPDIR', '')
+ if not tmpdir:
+ tmpdir = os.path.join(tempfile.gettempdir(), 'absl_testing')
+
+ return tmpdir
+
+
+def _get_default_randomize_ordering_seed():
+ """Returns default seed to use for randomizing test order.
+
+ This function first checks the --test_randomize_ordering_seed flag, and then
+ the TEST_RANDOMIZE_ORDERING_SEED environment variable. If the first value
+ we find is:
+ * (not set): disable test randomization
+ * 0: disable test randomization
+ * 'random': choose a random seed in [1, 4294967295] for test order
+ randomization
+ * positive integer: use this seed for test order randomization
+
+ (The values used are patterned after
+ https://docs.python.org/3/using/cmdline.html#envvar-PYTHONHASHSEED).
+
+ In principle, it would be simpler to return None if no override is provided;
+ however, the python random module has no `get_seed()`, only `getstate()`,
+ which returns far more data than we want to pass via an environment variable
+ or flag.
+
+ Returns:
+ A default value for test case randomization (int). 0 means do not randomize.
+
+ Raises:
+ ValueError: Raised when the flag or env value is not one of the options
+ above.
+ """
+ if FLAGS.test_randomize_ordering_seed is not None:
+ randomize = FLAGS.test_randomize_ordering_seed
+ else:
+ randomize = os.environ.get('TEST_RANDOMIZE_ORDERING_SEED')
+ if randomize is None:
+ return 0
+ if randomize == 'random':
+ return random.Random().randint(1, 4294967295)
+ if randomize == '0':
+ return 0
+ try:
+ seed = int(randomize)
+ if seed > 0:
+ return seed
+ except ValueError:
+ pass
+ raise ValueError(
+ 'Unknown test randomization seed value: {}'.format(randomize))
+
+
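+# Illustrative sketch, not part of the absl-py 0.1.1 sources: shuffling a
+# hypothetical list of tests with the resolved seed, once flags are parsed.
+def _example_shuffle_tests(tests):
+  seed = _get_default_randomize_ordering_seed()
+  if seed:
+    random.Random(seed).shuffle(tests)
+  return tests
+
+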
+flags.DEFINE_integer('test_random_seed', _get_default_test_random_seed(),
+ 'Random seed for testing. Some test frameworks may '
+ 'change the default value of this flag between runs, so '
+ 'it is not appropriate for seeding probabilistic tests.',
+ allow_override_cpp=True)
+flags.DEFINE_string('test_srcdir',
+ get_default_test_srcdir(),
+ 'Root of directory tree where source files live',
+ allow_override_cpp=True)
+flags.DEFINE_string('test_tmpdir', get_default_test_tmpdir(),
+ 'Directory for temporary testing files',
+ allow_override_cpp=True)
+flags.DEFINE_string('test_randomize_ordering_seed', None,
+ 'If positive, use this as a seed to randomize the '
+ 'execution order for test cases. If "random", pick a '
+ 'random seed to use. If 0 or not set, do not randomize '
+ 'test case execution order. This flag also overrides '
+ 'the TEST_RANDOMIZE_ORDERING_SEED environment variable.')
+flags.DEFINE_string('xml_output_file', '',
+ 'File to store XML test results')
+
+
+# We might need to monkey-patch TestResult so that it stops considering an
+# unexpected pass as a "successful result". For details, see
+# http://bugs.python.org/issue20165
+def _monkey_patch_test_result_for_unexpected_passes():
+ """Workaround for <http://bugs.python.org/issue20165>."""
+
+ def wasSuccessful(self):
+ """Tells whether or not this result was a success.
+
+ Any unexpected pass is to be counted as a non-success.
+
+ Args:
+ self: The TestResult instance.
+
+ Returns:
+ Whether or not this result was a success.
+ """
+ return (len(self.failures) == len(self.errors) ==
+ len(self.unexpectedSuccesses) == 0)
+
+ test_result = unittest.result.TestResult()
+ test_result.addUnexpectedSuccess('test')
+ if test_result.wasSuccessful(): # The bug is present.
+ unittest.result.TestResult.wasSuccessful = wasSuccessful
+ if test_result.wasSuccessful(): # Warn the user if our hot-fix failed.
+ sys.stderr.write('unittest.result.TestResult monkey patch to report'
+ ' unexpected passes as failures did not work.\n')
+
+
+_monkey_patch_test_result_for_unexpected_passes()
+
+
+class TestCase(unittest.TestCase):
+ """Extension of unittest.TestCase providing more powerful assertions."""
+
+ maxDiff = 80 * 20
+
+ def shortDescription(self):
+ """Formats both the test method name and the first line of its docstring.
+
+ If no docstring is given, only returns the method name.
+
+ This method overrides unittest.TestCase.shortDescription(), which
+ only returns the first line of the docstring, obscuring the name
+ of the test upon failure.
+
+ Returns:
+ desc: A short description of a test method.
+ """
+ desc = str(self)
+ # NOTE: super() is used here instead of directly invoking
+ # unittest.TestCase.shortDescription(self), because of the
+ # following line that occurs later on:
+ # unittest.TestCase = TestCase
+ # Because of this, direct invocation of what we think is the
+ # superclass will actually cause infinite recursion.
+ doc_first_line = super(TestCase, self).shortDescription()
+ if doc_first_line is not None:
+ desc = '\n'.join((desc, doc_first_line))
+ return desc
+
+ def assertStartsWith(self, actual, expected_start, msg=None):
+ """Asserts that actual.startswith(expected_start) is True.
+
+ Args:
+ actual: str
+ expected_start: str
+ msg: Optional message to report on failure.
+ """
+ if not actual.startswith(expected_start):
+ self.fail('%r does not start with %r' % (actual, expected_start), msg)
+
+ def assertNotStartsWith(self, actual, unexpected_start, msg=None):
+ """Asserts that actual.startswith(unexpected_start) is False.
+
+ Args:
+ actual: str
+ unexpected_start: str
+ msg: Optional message to report on failure.
+ """
+ if actual.startswith(unexpected_start):
+ self.fail('%r does start with %r' % (actual, unexpected_start), msg)
+
+ def assertEndsWith(self, actual, expected_end, msg=None):
+ """Asserts that actual.endswith(expected_end) is True.
+
+ Args:
+ actual: str
+ expected_end: str
+ msg: Optional message to report on failure.
+ """
+ if not actual.endswith(expected_end):
+ self.fail('%r does not end with %r' % (actual, expected_end), msg)
+
+ def assertNotEndsWith(self, actual, unexpected_end, msg=None):
+ """Asserts that actual.endswith(unexpected_end) is False.
+
+ Args:
+ actual: str
+ unexpected_end: str
+ msg: Optional message to report on failure.
+ """
+ if actual.endswith(unexpected_end):
+ self.fail('%r does end with %r' % (actual, unexpected_end), msg)
+
+ def assertSequenceStartsWith(self, prefix, whole, msg=None):
+ """An equality assertion for the beginning of ordered sequences.
+
+ If prefix is an empty sequence, it will raise an error unless whole is also
+ an empty sequence.
+
+ If prefix is not a sequence, it will raise an error if the first element of
+ whole does not match.
+
+ Args:
+ prefix: A sequence expected at the beginning of the whole parameter.
+ whole: The sequence in which to look for prefix.
+ msg: Optional message to report on failure.
+ """
+ try:
+ prefix_len = len(prefix)
+ except (TypeError, NotImplementedError):
+ prefix = [prefix]
+ prefix_len = 1
+
+ try:
+ whole_len = len(whole)
+ except (TypeError, NotImplementedError):
+ self.fail('For whole: len(%s) is not supported, it appears to be type: '
+ '%s' % (whole, type(whole)), msg)
+
+ assert prefix_len <= whole_len, self._formatMessage(
+ msg,
+ 'Prefix length (%d) is longer than whole length (%d).' %
+ (prefix_len, whole_len)
+ )
+
+ if not prefix_len and whole_len:
+ self.fail('Prefix length is 0 but whole length is %d: %s' %
+ (len(whole), whole), msg)
+
+ try:
+ self.assertSequenceEqual(prefix, whole[:prefix_len], msg)
+ except AssertionError:
+ self.fail('prefix: %s not found at start of whole: %s.' %
+ (prefix, whole), msg)
+
+ def assertEmpty(self, container, msg=None):
+ """Asserts that an object has zero length.
+
+ Args:
+ container: Anything that implements the collections.Sized interface.
+ msg: Optional message to report on failure.
+ """
+ if not isinstance(container, collections.Sized):
+ self.fail('Expected a Sized object, got: '
+ '{!r}'.format(type(container).__name__), msg)
+
+ # explicitly check the length since some Sized objects (e.g. numpy.ndarray)
+ # have strange __nonzero__/__bool__ behavior.
+ if len(container): # pylint: disable=g-explicit-length-test
+ self.fail('{!r} has length of {}.'.format(container, len(container)), msg)
+
+ def assertNotEmpty(self, container, msg=None):
+ """Asserts that an object has non-zero length.
+
+ Args:
+ container: Anything that implements the collections.Sized interface.
+ msg: Optional message to report on failure.
+ """
+ if not isinstance(container, collections.Sized):
+ self.fail('Expected a Sized object, got: '
+ '{!r}'.format(type(container).__name__), msg)
+
+ # explicitly check the length since some Sized objects (e.g. numpy.ndarray)
+ # have strange __nonzero__/__bool__ behavior.
+ if not len(container): # pylint: disable=g-explicit-length-test
+ self.fail('{!r} has length of 0.'.format(container), msg)
+
+ def assertLen(self, container, expected_len, msg=None):
+ """Asserts that an object has the expected length.
+
+ Args:
+ container: Anything that implements the collections.Sized interface.
+ expected_len: The expected length of the container.
+ msg: Optional message to report on failure.
+ """
+ if not isinstance(container, collections.Sized):
+ self.fail('Expected a Sized object, got: '
+ '{!r}'.format(type(container).__name__), msg)
+ if len(container) != expected_len:
+ container_repr = unittest.util.safe_repr(container)
+ self.fail('{} has length of {}, expected {}.'.format(
+ container_repr, len(container), expected_len), msg)
+
+ def assertSequenceAlmostEqual(self, expected_seq, actual_seq, places=None,
+ msg=None, delta=None):
+ """An approximate equality assertion for ordered sequences.
+
+ Fail if the two sequences are unequal as determined by their value
+ differences rounded to the given number of decimal places (default 7) and
+ comparing to zero, or by comparing that the difference between each value
+ in the two sequences is more than the given delta.
+
+ Note that decimal places (from zero) are usually not the same as significant
+    digits (measured from the most significant digit).
+
+ If the two sequences compare equal then they will automatically compare
+ almost equal.
+
+ Args:
+ expected_seq: A sequence containing elements we are expecting.
+ actual_seq: The sequence that we are testing.
+ places: The number of decimal places to compare.
+ msg: The message to be printed if the test fails.
+ delta: The OK difference between compared values.
+ """
+ if len(expected_seq) != len(actual_seq):
+ self.fail('Sequence size mismatch: {} vs {}'.format(
+ len(expected_seq), len(actual_seq)), msg)
+
+ err_list = []
+ for idx, (exp_elem, act_elem) in enumerate(zip(expected_seq, actual_seq)):
+ try:
+ self.assertAlmostEqual(exp_elem, act_elem, places=places, msg=msg,
+ delta=delta)
+ except self.failureException as err:
+ err_list.append('At index {}: {}'.format(idx, err))
+
+ if err_list:
+ if len(err_list) > 30:
+ err_list = err_list[:30] + ['...']
+ msg = self._formatMessage(msg, '\n'.join(err_list))
+ self.fail(msg)
+
+ def assertContainsSubset(self, expected_subset, actual_set, msg=None):
+ """Checks whether actual iterable is a superset of expected iterable."""
+ missing = set(expected_subset) - set(actual_set)
+ if not missing:
+ return
+
+ self.fail('Missing elements %s\nExpected: %s\nActual: %s' % (
+ missing, expected_subset, actual_set), msg)
+
+ def assertNoCommonElements(self, expected_seq, actual_seq, msg=None):
+ """Checks whether actual iterable and expected iterable are disjoint."""
+ common = set(expected_seq) & set(actual_seq)
+ if not common:
+ return
+
+ self.fail('Common elements %s\nExpected: %s\nActual: %s' % (
+ common, expected_seq, actual_seq), msg)
+
+ def assertItemsEqual(self, expected_seq, actual_seq, msg=None):
+ """An unordered sequence specific comparison.
+
+ Equivalent to assertCountEqual(). This method is a compatibility layer
+ for Python 3k, since 2to3 does not convert assertItemsEqual() calls into
+ assertCountEqual() calls.
+
+ Args:
+ expected_seq: A sequence containing elements we are expecting.
+ actual_seq: The sequence that we are testing.
+ msg: The message to be printed if the test fails.
+ """
+
+ if not hasattr(super(TestCase, self), 'assertItemsEqual'):
+ # The assertItemsEqual method was renamed assertCountEqual in Python 3.2
+ super(TestCase, self).assertCountEqual(expected_seq, actual_seq, msg)
+ return
+
+ super(TestCase, self).assertItemsEqual(expected_seq, actual_seq, msg)
+
+ def assertCountEqual(self, expected_seq, actual_seq, msg=None):
+ """An unordered sequence specific comparison.
+
+ It asserts that actual_seq and expected_seq have the same element counts.
+ Equivalent to::
+
+ self.assertEqual(Counter(iter(actual_seq)),
+ Counter(iter(expected_seq)))
+
+ Asserts that each element has the same count in both sequences.
+ Example:
+ - [0, 1, 1] and [1, 0, 1] compare equal.
+ - [0, 0, 1] and [0, 1] compare unequal.
+
+ Args:
+ expected_seq: A sequence containing elements we are expecting.
+ actual_seq: The sequence that we are testing.
+ msg: The message to be printed if the test fails.
+
+ """
+ self.assertItemsEqual(expected_seq, actual_seq, msg)
+
+ def assertSameElements(self, expected_seq, actual_seq, msg=None):
+ """Asserts that two sequences have the same elements (in any order).
+
+ This method, unlike assertCountEqual, doesn't care about any
+ duplicates in the expected and actual sequences.
+
+ >> assertSameElements([1, 1, 1, 0, 0, 0], [0, 1])
+ # Doesn't raise an AssertionError
+
+ If possible, you should use assertCountEqual instead of
+ assertSameElements.
+
+ Args:
+ expected_seq: A sequence containing elements we are expecting.
+ actual_seq: The sequence that we are testing.
+ msg: The message to be printed if the test fails.
+ """
+ # `unittest2.TestCase` used to have assertSameElements, but it was
+ # removed in favor of assertItemsEqual. As there's a unit test
+ # that explicitly checks this behavior, I am leaving this method
+ # alone.
+ # Fail on strings: empirically, passing strings to this test method
+ # is almost always a bug. If comparing the character sets of two strings
+ # is desired, cast the inputs to sets or lists explicitly.
+ if (isinstance(expected_seq, _TEXT_OR_BINARY_TYPES) or
+ isinstance(actual_seq, _TEXT_OR_BINARY_TYPES)):
+ self.fail('Passing string/bytes to assertSameElements is usually a bug. '
+ 'Did you mean to use assertEqual?\n'
+ 'Expected: %s\nActual: %s' % (expected_seq, actual_seq))
+ try:
+ expected = dict([(element, None) for element in expected_seq])
+ actual = dict([(element, None) for element in actual_seq])
+ missing = [element for element in expected if element not in actual]
+ unexpected = [element for element in actual if element not in expected]
+ missing.sort()
+ unexpected.sort()
+ except TypeError:
+ # Fall back to slower list-compare if any of the objects are
+ # not hashable.
+ expected = list(expected_seq)
+ actual = list(actual_seq)
+ expected.sort()
+ actual.sort()
+ missing, unexpected = _sorted_list_difference(expected, actual)
+ errors = []
+ if msg:
+ errors.extend((msg, ':\n'))
+ if missing:
+ errors.append('Expected, but missing:\n %r\n' % missing)
+ if unexpected:
+ errors.append('Unexpected, but present:\n %r\n' % unexpected)
+ if missing or unexpected:
+ self.fail(''.join(errors))
+
+ # unittest.TestCase.assertMultiLineEqual works very similarly, but it
+ # has a different error format. However, I find this slightly more readable.
+ def assertMultiLineEqual(self, first, second, msg=None, **kwargs):
+ """Asserts that two multi-line strings are equal."""
+ assert isinstance(first, six.string_types), (
+ 'First argument is not a string: %r' % (first,))
+ assert isinstance(second, six.string_types), (
+ 'Second argument is not a string: %r' % (second,))
+ line_limit = kwargs.pop('line_limit', 0)
+ if kwargs:
+ raise TypeError('Unexpected keyword args {}'.format(tuple(kwargs)))
+
+ if first == second:
+ return
+ if msg:
+ failure_message = [msg + ':\n']
+ else:
+ failure_message = ['\n']
+ if line_limit:
+ line_limit += len(failure_message)
+ for line in difflib.ndiff(first.splitlines(True), second.splitlines(True)):
+ failure_message.append(line)
+ if not line.endswith('\n'):
+ failure_message.append('\n')
+ if line_limit and len(failure_message) > line_limit:
+ n_omitted = len(failure_message) - line_limit
+ failure_message = failure_message[:line_limit]
+ failure_message.append(
+ '(... and {} more delta lines omitted for brevity.)\n'.format(
+ n_omitted))
+
+ raise self.failureException(''.join(failure_message))
+
+ def assertBetween(self, value, minv, maxv, msg=None):
+ """Asserts that value is between minv and maxv (inclusive)."""
+ msg = self._formatMessage(msg,
+ '"%r" unexpectedly not between "%r" and "%r"' %
+ (value, minv, maxv))
+ self.assertTrue(minv <= value, msg)
+ self.assertTrue(maxv >= value, msg)
+
+ def assertRegexMatch(self, actual_str, regexes, message=None):
+ r"""Asserts that at least one regex in regexes matches str.
+
+ If possible you should use assertRegexpMatches, which is a simpler
+ version of this method. assertRegexpMatches takes a single regular
+ expression (a string or re compiled object) instead of a list.
+
+ Notes:
+ 1. This function uses substring matching, i.e. the matching
+ succeeds if *any* substring of the error message matches *any*
+ regex in the list. This is more convenient for the user than
+ full-string matching.
+
+ 2. If regexes is the empty list, the matching will always fail.
+
+ 3. Use regexes=[''] for a regex that will always pass.
+
+ 4. '.' matches any single character *except* the newline. To
+ match any character, use '(.|\n)'.
+
+ 5. '^' matches the beginning of each line, not just the beginning
+ of the string. Similarly, '$' matches the end of each line.
+
+ 6. An exception will be thrown if regexes contains an invalid
+ regex.
+
+ Args:
+ actual_str: The string we try to match with the items in regexes.
+ regexes: The regular expressions we want to match against str.
+ See "Notes" above for detailed notes on how this is interpreted.
+ message: The message to be printed if the test fails.
+ """
+ if isinstance(regexes, _TEXT_OR_BINARY_TYPES):
+ self.fail('regexes is string or bytes; use assertRegexpMatches instead.',
+ message)
+ if not regexes:
+ self.fail('No regexes specified.', message)
+
+ regex_type = type(regexes[0])
+ for regex in regexes[1:]:
+ if type(regex) is not regex_type: # pylint: disable=unidiomatic-typecheck
+ self.fail('regexes list must all be the same type.', message)
+
+ if regex_type is bytes and isinstance(actual_str, six.text_type):
+ regexes = [regex.decode('utf-8') for regex in regexes]
+ regex_type = six.text_type
+ elif regex_type is six.text_type and isinstance(actual_str, bytes):
+ regexes = [regex.encode('utf-8') for regex in regexes]
+ regex_type = bytes
+
+ if regex_type is six.text_type:
+ regex = u'(?:%s)' % u')|(?:'.join(regexes)
+ elif regex_type is bytes:
+ regex = b'(?:' + (b')|(?:'.join(regexes)) + b')'
+ else:
+ self.fail('Only know how to deal with unicode str or bytes regexes.',
+ message)
+
+ if not re.search(regex, actual_str, re.MULTILINE):
+ self.fail('"%s" does not contain any of these regexes: %s.' %
+ (actual_str, regexes), message)
+
+ def assertCommandSucceeds(self, command, regexes=(b'',), env=None,
+ close_fds=True, msg=None):
+ """Asserts that a shell command succeeds (i.e. exits with code 0).
+
+ Args:
+ command: List or string representing the command to run.
+ regexes: List of regular expression byte strings that match success.
+ env: Dictionary of environment variable settings. If None, no environment
+ variables will be set for the child process. This is to make tests
+ more hermetic. NOTE: this behavior is different than the standard
+ subprocess module.
+ close_fds: Whether or not to close all open fd's in the child after
+ forking.
+ msg: Optional message to report on failure.
+ """
+ (ret_code, err) = get_command_stderr(command, env, close_fds)
+
+ # We need bytes regexes here because `err` is bytes.
+ # Accommodate code which listed their output regexes w/o the b'' prefix by
+ # converting them to bytes for the user.
+ if isinstance(regexes[0], six.text_type):
+ regexes = [regex.encode('utf-8') for regex in regexes]
+
+ command_string = get_command_string(command)
+ self.assertEqual(
+ ret_code, 0,
+ self._formatMessage(msg,
+ 'Running command\n'
+ '%s failed with error code %s and message\n'
+ '%s' % (_quote_long_string(command_string),
+ ret_code,
+ _quote_long_string(err)))
+ )
+ self.assertRegexMatch(
+ err,
+ regexes,
+ message=self._formatMessage(
+ msg,
+ 'Running command\n'
+ '%s failed with error code %s and message\n'
+ '%s which matches no regex in %s' % (
+ _quote_long_string(command_string),
+ ret_code,
+ _quote_long_string(err),
+ regexes)))
+
+ def assertCommandFails(self, command, regexes, env=None, close_fds=True,
+ msg=None):
+ """Asserts a shell command fails and the error matches a regex in a list.
+
+ Args:
+ command: List or string representing the command to run.
+ regexes: the list of regular expression strings.
+ env: Dictionary of environment variable settings. If None, no environment
+ variables will be set for the child process. This is to make tests
+ more hermetic. NOTE: this behavior is different than the standard
+ subprocess module.
+ close_fds: Whether or not to close all open fd's in the child after
+ forking.
+ msg: Optional message to report on failure.
+ """
+ (ret_code, err) = get_command_stderr(command, env, close_fds)
+
+ # We need bytes regexes here because `err` is bytes.
+ # Accommodate code which listed their output regexes w/o the b'' prefix by
+ # converting them to bytes for the user.
+ if isinstance(regexes[0], six.text_type):
+ regexes = [regex.encode('utf-8') for regex in regexes]
+
+ command_string = get_command_string(command)
+ self.assertNotEqual(
+ ret_code, 0,
+ self._formatMessage(msg, 'The following command succeeded '
+ 'while expected to fail:\n%s' %
+ _quote_long_string(command_string)))
+ self.assertRegexMatch(
+ err,
+ regexes,
+ message=self._formatMessage(
+ msg,
+ 'Running command\n'
+ '%s failed with error code %s and message\n'
+ '%s which matches no regex in %s' % (
+ _quote_long_string(command_string),
+ ret_code,
+ _quote_long_string(err),
+ regexes)))
+
+ class _AssertRaisesContext(object):
+
+ def __init__(self, expected_exception, test_case, test_func, msg=None):
+ self.expected_exception = expected_exception
+ self.test_case = test_case
+ self.test_func = test_func
+ self.msg = msg
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_value, tb):
+ if exc_type is None:
+ self.test_case.fail(self.expected_exception.__name__ + ' not raised',
+ self.msg)
+ if not issubclass(exc_type, self.expected_exception):
+ return False
+ self.test_func(exc_value)
+ return True
+
+ def assertRaisesWithPredicateMatch(self, expected_exception, predicate,
+ callable_obj=None, *args, **kwargs):
+ """Asserts that exception is thrown and predicate(exception) is true.
+
+ Args:
+ expected_exception: Exception class expected to be raised.
+ predicate: Function of one argument that inspects the passed-in exception
+ and returns True (success) or False (please fail the test).
+ callable_obj: Function to be called.
+ *args: Extra args.
+ **kwargs: Extra keyword args.
+
+ Returns:
+ A context manager if callable_obj is None. Otherwise, None.
+
+ Raises:
+ self.failureException if callable_obj does not raise a matching exception.
+ """
+ def Check(err):
+ self.assertTrue(predicate(err),
+ '%r does not match predicate %r' % (err, predicate))
+
+ context = self._AssertRaisesContext(expected_exception, self, Check)
+ if callable_obj is None:
+ return context
+ with context:
+ callable_obj(*args, **kwargs)
+
+ def assertRaisesWithLiteralMatch(self, expected_exception,
+ expected_exception_message,
+ callable_obj=None, *args, **kwargs):
+ """Asserts that the message in a raised exception equals the given string.
+
+ Unlike assertRaisesRegexp, this method takes a literal string, not
+ a regular expression.
+
+ with self.assertRaisesWithLiteralMatch(ExType, 'message'):
+ DoSomething()
+
+ Args:
+ expected_exception: Exception class expected to be raised.
+ expected_exception_message: String message expected in the raised
+        exception. For a raised exception e, expected_exception_message must
+ equal str(e).
+ callable_obj: Function to be called, or None to return a context.
+ *args: Extra args.
+ **kwargs: Extra kwargs.
+
+ Returns:
+ A context manager if callable_obj is None. Otherwise, None.
+
+ Raises:
+ self.failureException if callable_obj does not raise a matching exception.
+ """
+ def Check(err):
+ actual_exception_message = str(err)
+ self.assertTrue(expected_exception_message == actual_exception_message,
+ 'Exception message does not match.\n'
+ 'Expected: %r\n'
+ 'Actual: %r' % (expected_exception_message,
+ actual_exception_message))
+
+ context = self._AssertRaisesContext(expected_exception, self, Check)
+ if callable_obj is None:
+ return context
+ with context:
+ callable_obj(*args, **kwargs)
+
+ def assertContainsInOrder(self, strings, target, msg=None):
+ """Asserts that the strings provided are found in the target in order.
+
+ This may be useful for checking HTML output.
+
+ Args:
+ strings: A list of strings, such as [ 'fox', 'dog' ]
+ target: A target string in which to look for the strings, such as
+ 'The quick brown fox jumped over the lazy dog'.
+ msg: Optional message to report on failure.
+ """
+ if isinstance(strings, (bytes, unicode if str is bytes else str)):
+ strings = (strings,)
+
+ current_index = 0
+ last_string = None
+ for string in strings:
+ index = target.find(str(string), current_index)
+ if index == -1 and current_index == 0:
+ self.fail("Did not find '%s' in '%s'" %
+ (string, target), msg)
+ elif index == -1:
+ self.fail("Did not find '%s' after '%s' in '%s'" %
+ (string, last_string, target), msg)
+ last_string = string
+ current_index = index
+
+ def assertContainsSubsequence(self, container, subsequence, msg=None):
+ """Asserts that "container" contains "subsequence" as a subsequence.
+
+ Asserts that "container" contains all the elements of "subsequence", in
+ order, but possibly with other elements interspersed. For example, [1, 2, 3]
+ is a subsequence of [0, 0, 1, 2, 0, 3, 0] but not of [0, 0, 1, 3, 0, 2, 0].
+
+ Args:
+ container: the list we're testing for subsequence inclusion.
+ subsequence: the list we hope will be a subsequence of container.
+ msg: Optional message to report on failure.
+ """
+ first_nonmatching = None
+ reversed_container = list(reversed(container))
+ subsequence = list(subsequence)
+
+ for e in subsequence:
+ if e not in reversed_container:
+ first_nonmatching = e
+ break
+ while e != reversed_container.pop():
+ pass
+
+ if first_nonmatching is not None:
+ self.fail('%s not a subsequence of %s. First non-matching element: %s' %
+ (subsequence, container, first_nonmatching), msg)
+
+ def assertContainsExactSubsequence(self, container, subsequence, msg=None):
+ """Asserts that "container" contains "subsequence" as an exact subsequence.
+
+ Asserts that "container" contains all the elements of "subsequence", in
+ order, and without other elements interspersed. For example, [1, 2, 3] is an
+ exact subsequence of [0, 0, 1, 2, 3, 0] but not of [0, 0, 1, 2, 0, 3, 0].
+
+ Args:
+ container: the list we're testing for subsequence inclusion.
+ subsequence: the list we hope will be an exact subsequence of container.
+ msg: Optional message to report on failure.
+ """
+ container = list(container)
+ subsequence = list(subsequence)
+ longest_match = 0
+
+ for start in xrange(1 + len(container) - len(subsequence)):
+ if longest_match == len(subsequence):
+ break
+ index = 0
+ while (index < len(subsequence) and
+ subsequence[index] == container[start + index]):
+ index += 1
+ longest_match = max(longest_match, index)
+
+ if longest_match < len(subsequence):
+ self.fail('%s not an exact subsequence of %s. '
+ 'Longest matching prefix: %s' %
+ (subsequence, container, subsequence[:longest_match]), msg)
+
+ def assertTotallyOrdered(self, *groups, **kwargs):
+ """Asserts that total ordering has been implemented correctly.
+
+ For example, say you have a class A that compares only on its attribute x.
+ Comparators other than __lt__ are omitted for brevity.
+
+ class A(object):
+ def __init__(self, x, y):
+ self.x = x
+ self.y = y
+
+ def __hash__(self):
+ return hash(self.x)
+
+ def __lt__(self, other):
+ try:
+ return self.x < other.x
+ except AttributeError:
+ return NotImplemented
+
+ assertTotallyOrdered will check that instances can be ordered correctly.
+ For example,
+
+ self.assertTotallyOrdered(
+ [None], # None should come before everything else.
+ [1], # Integers sort earlier.
+ [A(1, 'a')],
+ [A(2, 'b')], # 2 is after 1.
+ [A(3, 'c'), A(3, 'd')], # The second argument is irrelevant.
+ [A(4, 'z')],
+ ['foo']) # Strings sort last.
+
+ Args:
+ *groups: A list of groups of elements. Each group of elements is a list
+ of objects that are equal. The elements in each group must be less
+ than the elements in the group after it. For example, these groups are
+ totally ordered: [None], [1], [2, 2], [3].
+ **kwargs: optional msg keyword argument can be passed.
+ """
+
+ def CheckOrder(small, big):
+ """Ensures small is ordered before big."""
+ self.assertFalse(small == big,
+ self._formatMessage(msg, '%r unexpectedly equals %r' %
+ (small, big)))
+ self.assertTrue(small != big,
+ self._formatMessage(msg, '%r unexpectedly equals %r' %
+ (small, big)))
+ self.assertLess(small, big, msg)
+ self.assertFalse(big < small,
+ self._formatMessage(msg,
+ '%r unexpectedly less than %r' %
+ (big, small)))
+ self.assertLessEqual(small, big, msg)
+      self.assertFalse(big <= small, self._formatMessage(
+          msg,
+          '%r unexpectedly less than or equal to %r' % (big, small)))
+ self.assertGreater(big, small, msg)
+ self.assertFalse(small > big,
+ self._formatMessage(msg,
+ '%r unexpectedly greater than %r' %
+ (small, big)))
+      self.assertGreaterEqual(big, small, msg)
+ self.assertFalse(small >= big, self._formatMessage(
+ msg,
+ '%r unexpectedly greater than or equal to %r' % (small, big)))
+
+ def CheckEqual(a, b):
+ """Ensures that a and b are equal."""
+ self.assertEqual(a, b, msg)
+ self.assertFalse(a != b,
+ self._formatMessage(msg, '%r unexpectedly unequals %r' %
+ (a, b)))
+ self.assertEqual(hash(a), hash(b), self._formatMessage(
+ msg,
+ 'hash %d of %r unexpectedly not equal to hash %d of %r' %
+ (hash(a), a, hash(b), b)))
+ self.assertFalse(a < b,
+ self._formatMessage(msg,
+ '%r unexpectedly less than %r' %
+ (a, b)))
+ self.assertFalse(b < a,
+ self._formatMessage(msg,
+ '%r unexpectedly less than %r' %
+ (b, a)))
+ self.assertLessEqual(a, b, msg)
+ self.assertLessEqual(b, a, msg)
+ self.assertFalse(a > b,
+ self._formatMessage(msg,
+ '%r unexpectedly greater than %r' %
+ (a, b)))
+ self.assertFalse(b > a,
+ self._formatMessage(msg,
+ '%r unexpectedly greater than %r' %
+ (b, a)))
+ self.assertGreaterEqual(a, b, msg)
+ self.assertGreaterEqual(b, a, msg)
+
+ msg = kwargs.get('msg')
+
+ # For every combination of elements, check the order of every pair of
+ # elements.
+ for elements in itertools.product(*groups):
+ elements = list(elements)
+ for index, small in enumerate(elements[:-1]):
+ for big in elements[index + 1:]:
+ CheckOrder(small, big)
+
+ # Check that every element in each group is equal.
+ for group in groups:
+ for a in group:
+ CheckEqual(a, a)
+ for a, b in itertools.product(group, group):
+ CheckEqual(a, b)
+
+ def assertDictEqual(self, a, b, msg=None):
+ """Raises AssertionError if a and b are not equal dictionaries.
+
+ Args:
+ a: A dict, the expected value.
+ b: A dict, the actual value.
+ msg: An optional str, the associated message.
+
+ Raises:
+ AssertionError: if the dictionaries are not equal.
+ """
+ self.assertIsInstance(a, dict, self._formatMessage(
+ msg,
+ 'First argument is not a dictionary'
+ ))
+ self.assertIsInstance(b, dict, self._formatMessage(
+ msg,
+ 'Second argument is not a dictionary'
+ ))
+
+ def Sorted(list_of_items):
+ try:
+        return sorted(list_of_items)  # In Python 3, items may be unorderable.
+ except TypeError:
+ return list_of_items
+
+ if a == b:
+ return
+ a_items = Sorted(list(six.iteritems(a)))
+ b_items = Sorted(list(six.iteritems(b)))
+
+ unexpected = []
+ missing = []
+ different = []
+
+ safe_repr = unittest.util.safe_repr
+
+ def Repr(dikt):
+ """Deterministic repr for dict."""
+ # Sort the entries based on their repr, not based on their sort order,
+ # which will be non-deterministic across executions, for many types.
+ entries = sorted((safe_repr(k), safe_repr(v))
+ for k, v in six.iteritems(dikt))
+ return '{%s}' % (', '.join('%s: %s' % pair for pair in entries))
+
+ message = ['%s != %s%s' % (Repr(a), Repr(b), ' (%s)' % msg if msg else '')]
+
+ # The standard library default output confounds lexical difference with
+ # value difference; treat them separately.
+ for a_key, a_value in a_items:
+ if a_key not in b:
+ missing.append((a_key, a_value))
+ elif a_value != b[a_key]:
+ different.append((a_key, a_value, b[a_key]))
+
+ for b_key, b_value in b_items:
+ if b_key not in a:
+ unexpected.append((b_key, b_value))
+
+ if unexpected:
+ message.append(
+ 'Unexpected, but present entries:\n%s' % ''.join(
+ '%s: %s\n' % (safe_repr(k), safe_repr(v)) for k, v in unexpected))
+
+ if different:
+ message.append(
+ 'repr() of differing entries:\n%s' % ''.join(
+ '%s: %s != %s\n' % (safe_repr(k), safe_repr(a_value),
+ safe_repr(b_value))
+ for k, a_value, b_value in different))
+
+ if missing:
+ message.append(
+ 'Missing entries:\n%s' % ''.join(
+ ('%s: %s\n' % (safe_repr(k), safe_repr(v)) for k, v in missing)))
+
+ raise self.failureException('\n'.join(message))
+
+ def assertUrlEqual(self, a, b, msg=None):
+ """Asserts that urls are equal, ignoring ordering of query params."""
+ parsed_a = urllib.parse.urlparse(a)
+ parsed_b = urllib.parse.urlparse(b)
+ self.assertEqual(parsed_a.scheme, parsed_b.scheme, msg)
+ self.assertEqual(parsed_a.netloc, parsed_b.netloc, msg)
+ self.assertEqual(parsed_a.path, parsed_b.path, msg)
+ self.assertEqual(parsed_a.fragment, parsed_b.fragment, msg)
+ self.assertEqual(sorted(parsed_a.params.split(';')),
+ sorted(parsed_b.params.split(';')), msg)
+ self.assertDictEqual(
+ urllib.parse.parse_qs(parsed_a.query, keep_blank_values=True),
+ urllib.parse.parse_qs(parsed_b.query, keep_blank_values=True), msg)
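+ # For example (a usage sketch with made-up URLs): the following two URLs
+ # compare equal because only the ordering of their query parameters differs:
+ #   self.assertUrlEqual('http://a/path?x=1&y=2', 'http://a/path?y=2&x=1')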
+
+ def assertSameStructure(self, a, b, aname='a', bname='b', msg=None):
+ """Asserts that two values contain the same structural content.
+
+ The two arguments should be data trees consisting of trees of dicts and
+ lists. They will be deeply compared by walking into the contents of dicts
+ and lists; other items will be compared using the == operator.
+ If the two structures differ in content, the failure message will indicate
+ the location within the structures where the first difference is found.
+ This may be helpful when comparing large structures.
+
+ Args:
+ a: The first structure to compare.
+ b: The second structure to compare.
+ aname: Variable name to use for the first structure in assertion messages.
+ bname: Variable name to use for the second structure.
+ msg: Additional text to include in the failure message.
+ """
+
+ # Accumulate all the problems found so we can report all of them at once
+ # rather than just stopping at the first
+ problems = []
+
+ _walk_structure_for_problems(a, b, aname, bname, problems)
+
+ # Avoid spamming the user too much.
+ if self.maxDiff is not None:
+ max_problems_to_show = self.maxDiff // 80
+ if len(problems) > max_problems_to_show:
+ problems = problems[0:max_problems_to_show-1] + ['...']
+
+ if problems:
+ self.fail('; '.join(problems), msg)
+
+ def assertJsonEqual(self, first, second, msg=None):
+ """Asserts that the JSON objects defined in two strings are equal.
+
+ A summary of the differences will be included in the failure message
+ using assertSameStructure.
+
+ Args:
+ first: A string containing JSON to decode and compare to second.
+ second: A string containing JSON to decode and compare to first.
+ msg: Additional text to include in the failure message.
+ """
+ try:
+ first_structured = json.loads(first)
+ except ValueError as e:
+ raise ValueError(self._formatMessage(
+ msg,
+ 'could not decode first JSON value %s: %s' % (first, e)))
+
+ try:
+ second_structured = json.loads(second)
+ except ValueError as e:
+ raise ValueError(self._formatMessage(
+ msg,
+ 'could not decode second JSON value %s: %s' % (second, e)))
+
+ self.assertSameStructure(first_structured, second_structured,
+ aname='first', bname='second', msg=msg)
+
+ def _getAssertEqualityFunc(self, first, second):
+ try:
+ return super(TestCase, self)._getAssertEqualityFunc(first, second)
+ except AttributeError:
+ # This is a workaround if unittest.TestCase.__init__ was never run.
+ # It usually means that somebody created a subclass just for the
+ # assertions and has overridden __init__. "assertTrue" is a safe
+ # value that will not make __init__ raise a ValueError.
+ test_method = getattr(self, '_testMethodName', 'assertTrue')
+ super(TestCase, self).__init__(test_method)
+
+ return super(TestCase, self)._getAssertEqualityFunc(first, second)
+
+ def fail(self, msg=None, prefix=None):
+ """Fail immediately with the given message, optionally prefixed."""
+ return super(TestCase, self).fail(self._formatMessage(prefix, msg))
+
+
+def _sorted_list_difference(expected, actual):
+ """Finds elements in only one or the other of two, sorted input lists.
+
+ Returns a two-element tuple of lists. The first list contains those
+ elements in the "expected" list but not in the "actual" list, and the
+ second contains those elements in the "actual" list but not in the
+ "expected" list. Duplicate elements in either input list are ignored.
+
+ Args:
+ expected: The list we expected.
+ actual: The list we actually got.
+ Returns:
+ (missing, unexpected)
+ missing: items in expected that are not in actual.
+ unexpected: items in actual that are not in expected.
+ """
+ i = j = 0
+ missing = []
+ unexpected = []
+ while True:
+ try:
+ e = expected[i]
+ a = actual[j]
+ if e < a:
+ missing.append(e)
+ i += 1
+ while expected[i] == e:
+ i += 1
+ elif e > a:
+ unexpected.append(a)
+ j += 1
+ while actual[j] == a:
+ j += 1
+ else:
+ i += 1
+ try:
+ while expected[i] == e:
+ i += 1
+ finally:
+ j += 1
+ while actual[j] == a:
+ j += 1
+ except IndexError:
+ missing.extend(expected[i:])
+ unexpected.extend(actual[j:])
+ break
+ return missing, unexpected
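+ # Worked example (a sketch with made-up inputs): for sorted inputs
+ # expected=[1, 2, 4, 4] and actual=[2, 3, 4], the walk above yields
+ # missing=[1] (in expected only) and unexpected=[3] (in actual only);
+ # the duplicate 4 in expected is ignored.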
+
+
+def _walk_structure_for_problems(a, b, aname, bname, problem_list):
+ """The recursive comparison behind assertSameStructure."""
+ if type(a) != type(b) and not ( # pylint: disable=unidiomatic-typecheck
+ isinstance(a, six.integer_types) and isinstance(b, six.integer_types)):
+ # We do not distinguish between int and long types as 99.99% of Python 2
+ # code should never care. They collapse into a single type in Python 3.
+ problem_list.append('%s is a %r but %s is a %r' %
+ (aname, type(a), bname, type(b)))
+ # If they have different types there's no point continuing
+ return
+
+ if isinstance(a, collections.Mapping):
+ for k in a:
+ if k in b:
+ _walk_structure_for_problems(
+ a[k], b[k], '%s[%r]' % (aname, k), '%s[%r]' % (bname, k),
+ problem_list)
+ else:
+ problem_list.append(
+ "%s has [%r] with value %r but it's missing in %s" %
+ (aname, k, a[k], bname))
+ for k in b:
+ if k not in a:
+ problem_list.append(
+ '%s lacks [%r] but %s has it with value %r' %
+ (aname, k, bname, b[k]))
+
+ # Strings/bytes are Sequences but we'll just do those with regular !=
+ elif (isinstance(a, collections.Sequence) and
+ not isinstance(a, _TEXT_OR_BINARY_TYPES)):
+ minlen = min(len(a), len(b))
+ for i in xrange(minlen):
+ _walk_structure_for_problems(
+ a[i], b[i], '%s[%d]' % (aname, i), '%s[%d]' % (bname, i),
+ problem_list)
+ for i in xrange(minlen, len(a)):
+ problem_list.append('%s has [%i] with value %r but %s does not' %
+ (aname, i, a[i], bname))
+ for i in xrange(minlen, len(b)):
+ problem_list.append('%s lacks [%i] but %s has it with value %r' %
+ (aname, i, bname, b[i]))
+
+ else:
+ if a != b:
+ problem_list.append('%s is %r but %s is %r' % (aname, a, bname, b))
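+ # Worked example (a sketch with made-up values): comparing
+ # a = {'x': [1, 2]} and b = {'x': [1, 3], 'y': 0} appends two problems:
+ #   "a['x'][1] is 2 but b['x'][1] is 3"
+ #   "a lacks ['y'] but b has it with value 0"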
+
+
+def get_command_string(command):
+ """Returns an escaped string that can be used as a shell command.
+
+ Args:
+ command: List or string representing the command to run.
+ Returns:
+ A string suitable for use as a shell command.
+ """
+ if isinstance(command, six.string_types):
+ return command
+ else:
+ if os.name == 'nt':
+ return ' '.join(command)
+ else:
+ # The following is identical to Python 3's shlex.quote function.
+ command_string = ''
+ for word in command:
+ # Single quote word, and replace each ' in word with '"'"'
+ command_string += "'" + word.replace("'", "'\"'\"'") + "' "
+ return command_string[:-1]
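+ # For example (a sketch, POSIX case): get_command_string(['echo', "it's"])
+ # returns the string
+ #   'echo' 'it'"'"'s'
+ # i.e. each word is single-quoted and embedded single quotes are rewritten.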
+
+
+def get_command_stderr(command, env=None, close_fds=True):
+ """Runs the given shell command and returns a tuple.
+
+ Args:
+ command: List or string representing the command to run.
+ env: Dictionary of environment variable settings. If None, no environment
+ variables will be set for the child process. This is to make tests
+ more hermetic. NOTE: this behavior is different from that of the standard
+ subprocess module.
+ close_fds: Whether or not to close all open fd's in the child after forking.
+ On Windows, this is ignored and close_fds is always False.
+
+ Returns:
+ Tuple of (exit status, text printed to stdout and stderr by the command).
+ """
+ if env is None: env = {}
+ if os.name == 'nt':
+ # Windows does not support setting close_fds to True while also redirecting
+ # standard handles.
+ close_fds = False
+
+ use_shell = isinstance(command, six.string_types)
+ process = subprocess.Popen(
+ command,
+ close_fds=close_fds,
+ env=env,
+ shell=use_shell,
+ stderr=subprocess.STDOUT,
+ stdout=subprocess.PIPE)
+ output = process.communicate()[0]
+ exit_status = process.wait()
+ return (exit_status, output)
+
+
+def _quote_long_string(s):
+ """Quotes a potentially multi-line string to make the start and end obvious.
+
+ Args:
+ s: A string.
+
+ Returns:
+ The quoted string.
+ """
+ if isinstance(s, (bytes, bytearray)):
+ try:
+ s = s.decode('utf-8')
+ except UnicodeDecodeError:
+ s = str(s)
+ return ('8<-----------\n' +
+ s + '\n' +
+ '----------->8\n')
+
+
+class _TestProgramManualRun(unittest.TestProgram):
+ """A TestProgram which runs the tests manually."""
+
+ def runTests(self, do_run=False):
+ """Runs the tests."""
+ if do_run:
+ unittest.TestProgram.runTests(self)
+
+
+def print_python_version():
+ # Having this in the test output logs by default helps debugging when all
+ # you've got is the log and no other idea of which Python was used.
+ sys.stderr.write('Running tests under Python {0[0]}.{0[1]}.{0[2]}: '
+ '{1}\n'.format(
+ sys.version_info,
+ sys.executable if sys.executable else 'embedded.'))
+
+
+def main(*args, **kwargs):
+ """Executes a set of Python unit tests.
+
+ Usually this function is called without arguments, so the
+ unittest.TestProgram instance is created with the default settings and
+ runs all test methods of all TestCase classes in the __main__ module.
+
+ Args:
+ *args: Positional arguments passed through to unittest.TestProgram.__init__.
+ **kwargs: Keyword arguments passed through to unittest.TestProgram.__init__.
+ """
+ print_python_version()
+ _run_in_app(run_tests, args, kwargs)
+
+
+def _is_in_app_main():
+ """Returns True iff app.run is active."""
+ f = sys._getframe().f_back # pylint: disable=protected-access
+ while f:
+ if f.f_code == six.get_function_code(app.run):
+ return True
+ f = f.f_back
+ return False
+
+
+class _SavedFlag(object):
+ """Helper class for saving and restoring a flag value."""
+
+ def __init__(self, flag):
+ self.flag = flag
+ self.value = flag.value
+ self.present = flag.present
+
+ def restore_flag(self):
+ self.flag.value = self.value
+ self.flag.present = self.present
+
+
+def _register_sigterm_with_faulthandler():
+ """Have faulthandler dump stacks on SIGTERM. Useful to diagnose timeouts."""
+ if faulthandler and getattr(faulthandler, 'register', None):
+ # faulthandler.register is not available on Windows.
+ # faulthandler.enable() is already called by app.run.
+ try:
+ faulthandler.register(signal.SIGTERM, chain=True)
+ except Exception as e: # pylint: disable=broad-except
+ sys.stderr.write('faulthandler.register(SIGTERM) failed '
+ '%r; ignoring.\n' % e)
+
+
+def _run_in_app(function, args, kwargs):
+ """Executes a set of Python unit tests, ensuring app.run.
+
+ This is a private function, users should call absltest.main().
+
+ _run_in_app calculates argv to be the command-line arguments of this program
+ (without the flags), sets the default of FLAGS.alsologtostderr to True,
+ then it calls function(argv, args, kwargs), making sure that `function'
+ will get called within app.run(). _run_in_app does this by checking whether
+ it is called by app.run(), or by calling app.run() explicitly.
+
+ The reason why app.run has to be ensured is to make sure that
+ flags are parsed and stripped properly, and other initializations done by
+ the app module are also carried out, no matter whether absltest.main() is
+ called from within or outside app.run().
+
+ If _run_in_app is called from within app.run(), then it will reparse
+ sys.argv and pass the result without command-line flags into the argv
+ argument of `function'. The reason why this parsing is needed is that
+ __main__.main() calls absltest.main() without passing its argv. So the
+ only way _run_in_app could get to know the argv without the flags is that
+ it reparses sys.argv.
+
+ _run_in_app changes the default of FLAGS.alsologtostderr to True so that the
+ test program's stderr will contain all the log messages unless otherwise
+ specified on the command-line. This overrides any explicit assignment to
+ FLAGS.alsologtostderr by the test program prior to the call to _run_in_app()
+ (e.g. in __main__.main).
+
+ Please note that _run_in_app (and the function it calls) is allowed to make
+ changes to kwargs.
+
+ Args:
+ function: absltest.run_tests or a similar function. It will be called as
+ function(argv, args, kwargs) where argv is a list containing the
+ elements of sys.argv without the command-line flags.
+ args: Positional arguments passed through to unittest.TestProgram.__init__.
+ kwargs: Keyword arguments passed through to unittest.TestProgram.__init__.
+ """
+ if _is_in_app_main():
+ _register_sigterm_with_faulthandler()
+
+ # Save command-line flags so the side effects of FLAGS(sys.argv) can be
+ # undone.
+ flag_objects = (FLAGS[name] for name in FLAGS)
+ saved_flags = dict((f.name, _SavedFlag(f)) for f in flag_objects)
+
+ # Change the default of alsologtostderr from False to True, so the test
+ # program's stderr will contain all the log messages.
+ # If --alsologtostderr=false is specified in the command-line, or user
+ # has called FLAGS.alsologtostderr = False before, then the value is kept
+ # False.
+ FLAGS.set_default('alsologtostderr', True)
+ # Remove it from saved flags so it doesn't get restored later.
+ del saved_flags['alsologtostderr']
+
+ # The call FLAGS(sys.argv) parses sys.argv, returns the arguments
+ # without the flags, and -- as a side effect -- modifies flag values in
+ # FLAGS. We don't want the side effect, because we don't want to
+ # override flag changes the program did (e.g. in __main__.main)
+ # after the command-line has been parsed. So we have the for loop below
+ # to change back flags to their old values.
+ argv = FLAGS(sys.argv)
+ for saved_flag in six.itervalues(saved_flags):
+ saved_flag.restore_flag()
+
+
+ function(argv, args, kwargs)
+ else:
+ # Send logging to stderr. Use --alsologtostderr instead of --logtostderr
+ # in case tests are reading their own logs.
+ FLAGS.set_default('alsologtostderr', True)
+
+ def main_function(argv):
+ _register_sigterm_with_faulthandler()
+ function(argv, args, kwargs)
+
+ app.run(main=main_function)
+
+
+def _is_suspicious_attribute(testCaseClass, name):
+ """Returns True if an attribute is a method named like a test method."""
+ if name.startswith('Test') and len(name) > 4 and name[4].isupper():
+ attr = getattr(testCaseClass, name)
+ if inspect.isfunction(attr) or inspect.ismethod(attr):
+ args = inspect.getargspec(attr)
+ return (len(args.args) == 1 and args.args[0] == 'self'
+ and args.varargs is None and args.keywords is None)
+ return False
+
+
+class TestLoader(unittest.TestLoader):
+ """A test loader which supports common test features.
+
+ Supported features include:
+ * Banning untested methods with test-like names: methods attached to this
+ testCase with names starting with `Test` are ignored by the test runner,
+ and often represent mistakenly-omitted test cases. This loader will raise
+ a TypeError when attempting to load a TestCase with such methods.
+ * Randomization of test case execution order (optional).
+ """
+
+ _ERROR_MSG = textwrap.dedent("""Method '%s' is named like a test case but
+ is not one. This is often a bug. If you want it to be a test method,
+ name it with 'test' in lowercase. If not, rename the method to not begin
+ with 'Test'.""")
+
+ def __init__(self, *args, **kwds):
+ super(TestLoader, self).__init__(*args, **kwds)
+ seed = _get_default_randomize_ordering_seed()
+ if seed:
+ self._seed = seed
+ self._random = random.Random(self._seed)
+ else:
+ self._seed = None
+ self._random = None
+
+ def getTestCaseNames(self, testCaseClass): # pylint:disable=invalid-name
+ """Validates and returns a (possibly randomized) list of test case names."""
+ for name in dir(testCaseClass):
+ if _is_suspicious_attribute(testCaseClass, name):
+ raise TypeError(TestLoader._ERROR_MSG % name)
+ names = super(TestLoader, self).getTestCaseNames(testCaseClass)
+ if self._seed is not None:
+ logging.info('Randomizing test order with seed: %d', self._seed)
+ logging.info('To reproduce this order, re-run with '
+ '--test_randomize_ordering_seed=%d', self._seed)
+ self._random.shuffle(names)
+ return names
+
+
+def get_default_xml_output_filename():
+ if os.environ.get('XML_OUTPUT_FILE'):
+ return os.environ['XML_OUTPUT_FILE']
+ elif os.environ.get('RUNNING_UNDER_TEST_DAEMON'):
+ return os.path.join(os.path.dirname(FLAGS.test_tmpdir), 'test_detail.xml')
+ elif os.environ.get('TEST_XMLOUTPUTDIR'):
+ return os.path.join(
+ os.environ['TEST_XMLOUTPUTDIR'],
+ os.path.splitext(os.path.basename(sys.argv[0]))[0] + '.xml')
+
+
+def _setup_filtering(argv):
+ """Implements the bazel test filtering protocol.
+
+ The following environment variable is used in this method:
+
+ TESTBRIDGE_TEST_ONLY: string, if set, is forwarded to the unittest
+ framework to use as a test filter. Its value is split with shlex
+ before being passed as positional arguments on argv.
+
+ Args:
+ argv: the argv to mutate in-place.
+ """
+ test_filter = os.environ.get('TESTBRIDGE_TEST_ONLY')
+ if argv is None or not test_filter:
+ return
+
+ argv[1:1] = shlex.split(test_filter)
+
+
+def _setup_sharding(custom_loader=None):
+ """Implements the bazel sharding protocol.
+
+ The following environment variables are used in this method:
+
+ TEST_SHARD_STATUS_FILE: string, if set, points to a file. We write a blank
+ file to tell the test runner that this test implements the test sharding
+ protocol.
+
+ TEST_TOTAL_SHARDS: int, if set, sharding is requested.
+
+ TEST_SHARD_INDEX: int, must be set if TEST_TOTAL_SHARDS is set. Specifies
+ the shard index for this instance of the test process. Must satisfy:
+ 0 <= TEST_SHARD_INDEX < TEST_TOTAL_SHARDS.
+
+ Args:
+ custom_loader: A TestLoader to be made sharded.
+
+ Returns:
+ The test loader for shard-filtering or the standard test loader, depending
+ on the sharding environment variables.
+ """
+
+ # It may be useful to write the shard file even if the other sharding
+ # environment variables are not set. Test runners may use this functionality
+ # to query whether a test binary implements the test sharding protocol.
+ if 'TEST_SHARD_STATUS_FILE' in os.environ:
+ try:
+ f = None
+ try:
+ f = open(os.environ['TEST_SHARD_STATUS_FILE'], 'w')
+ f.write('')
+ except IOError:
+ sys.stderr.write('Error opening TEST_SHARD_STATUS_FILE (%s). Exiting.'
+ % os.environ['TEST_SHARD_STATUS_FILE'])
+ sys.exit(1)
+ finally:
+ if f is not None: f.close()
+
+ base_loader = custom_loader or TestLoader()
+ if 'TEST_TOTAL_SHARDS' not in os.environ:
+ # Not using sharding, use the expected test loader.
+ return base_loader
+
+ total_shards = int(os.environ['TEST_TOTAL_SHARDS'])
+ shard_index = int(os.environ['TEST_SHARD_INDEX'])
+
+ if shard_index < 0 or shard_index >= total_shards:
+ sys.stderr.write('ERROR: Bad sharding values. index=%d, total=%d\n' %
+ (shard_index, total_shards))
+ sys.exit(1)
+
+ # Replace the original getTestCaseNames with one that returns
+ # the test case names for this shard.
+ delegate_get_names = base_loader.getTestCaseNames
+
+ bucket_iterator = itertools.cycle(xrange(total_shards))
+
+ def getShardedTestCaseNames(testCaseClass):
+ filtered_names = []
+ for testcase in sorted(delegate_get_names(testCaseClass)):
+ bucket = next(bucket_iterator)
+ if bucket == shard_index:
+ filtered_names.append(testcase)
+ return filtered_names
+
+ base_loader.getTestCaseNames = getShardedTestCaseNames
+ return base_loader
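+ # For example (a sketch): with TEST_TOTAL_SHARDS=3 and TEST_SHARD_INDEX=1,
+ # the sorted test names are dealt round-robin to shards 0, 1, 2, 0, 1, ...
+ # as classes are loaded, and this loader keeps only the names landing on
+ # shard 1.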
+
+
+def _run_and_get_tests_result(argv, args, kwargs, xml_test_runner_class):
+ """Executes a set of Python unit tests and returns the result."""
+
+ # Set up test filtering if requested in environment.
+ _setup_filtering(argv)
+
+ # Shard the (default or custom) loader if sharding is turned on.
+ kwargs['testLoader'] = _setup_sharding(kwargs.get('testLoader', None))
+
+ # XML file name is based upon (sorted by priority):
+ # --xml_output_file flag, XML_OUTPUT_FILE variable,
+ # TEST_XMLOUTPUTDIR variable or RUNNING_UNDER_TEST_DAEMON variable.
+ if not FLAGS.xml_output_file:
+ FLAGS.xml_output_file = get_default_xml_output_filename()
+ xml_output_file = FLAGS.xml_output_file
+
+ xml_output = None
+ if xml_output_file:
+ xml_output_dir = os.path.dirname(xml_output_file)
+ if xml_output_dir and not os.path.isdir(xml_output_dir):
+ try:
+ os.makedirs(xml_output_dir)
+ except OSError as e:
+ # File exists error can occur with concurrent tests
+ if e.errno != errno.EEXIST:
+ raise
+ if sys.version_info.major == 2:
+ xml_output = open(xml_output_file, 'w')
+ else:
+ xml_output = open(xml_output_file, 'w', encoding='utf-8')
+ # We can reuse testRunner if it supports XML output (e.g. by inheriting
+ # from xml_reporter.TextAndXMLTestRunner). Otherwise we need to use
+ # xml_reporter.TextAndXMLTestRunner.
+ if (kwargs.get('testRunner') is not None
+ and not hasattr(kwargs['testRunner'], 'set_default_xml_stream')):
+ sys.stderr.write('WARNING: XML_OUTPUT_FILE or --xml_output_file setting '
+ 'overrides testRunner=%r setting (possibly from --pdb)'
+ % (kwargs['testRunner']))
+ # Passing a class object here allows TestProgram to initialize
+ # instances based on its kwargs and/or parsed command-line args.
+ kwargs['testRunner'] = xml_test_runner_class
+ if kwargs.get('testRunner') is None:
+ kwargs['testRunner'] = xml_test_runner_class
+ kwargs['testRunner'].set_default_xml_stream(xml_output)
+
+ # Make sure tmpdir exists.
+ if not os.path.isdir(FLAGS.test_tmpdir):
+ try:
+ os.makedirs(FLAGS.test_tmpdir)
+ except OSError as e:
+ # Concurrent test might have created the directory.
+ if e.errno != errno.EEXIST:
+ raise
+
+ # Let unittest.TestProgram.__init__ do its own argv parsing, e.g. for '-v',
+ # on argv, which is sys.argv without the command-line flags.
+ kwargs.setdefault('argv', argv)
+
+ try:
+ test_program = unittest.TestProgram(*args, **kwargs)
+ return test_program.result
+ finally:
+ if xml_output:
+ xml_output.close()
+
+
+def run_tests(argv, args, kwargs):
+ """Executes a set of Python unit tests.
+
+ Most users should call absltest.main() instead of run_tests.
+
+ Please note that run_tests should be called from app.run.
+ Calling absltest.main() would ensure that.
+
+ Please note that run_tests is allowed to make changes to kwargs.
+
+ Args:
+ argv: sys.argv with the command-line flags removed from the front, i.e. the
+ argv with which app.run() has called __main__.main.
+ args: Positional arguments passed through to unittest.TestProgram.__init__.
+ kwargs: Keyword arguments passed through to unittest.TestProgram.__init__.
+ """
+ result = _run_and_get_tests_result(
+ argv, args, kwargs, xml_reporter.TextAndXMLTestRunner)
+ sys.exit(not result.wasSuccessful())
diff --git a/third_party/py/abseil/absl/testing/flagsaver.py b/third_party/py/abseil/absl/testing/flagsaver.py
new file mode 100755
index 0000000000..a95b742ba8
--- /dev/null
+++ b/third_party/py/abseil/absl/testing/flagsaver.py
@@ -0,0 +1,183 @@
+# Copyright 2017 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Decorator and context manager for saving and restoring flag values.
+
+There are many ways to save and restore. Always use the most convenient method
+for a given use case.
+
+Here are examples of each method. They all call do_stuff() while FLAGS.someflag
+is temporarily set to 'foo'.
+
+ # Use a decorator which can optionally override flags via arguments.
+ @flagsaver.flagsaver(someflag='foo')
+ def some_func():
+ do_stuff()
+
+ # Use a decorator which does not override flags itself.
+ @flagsaver.flagsaver
+ def some_func():
+ FLAGS.someflag = 'foo'
+ do_stuff()
+
+ # Use a context manager which can optionally override flags via arguments.
+ with flagsaver.flagsaver(someflag='foo'):
+ do_stuff()
+
+ # Save and restore the flag values yourself.
+ saved_flag_values = flagsaver.save_flag_values()
+ try:
+ FLAGS.someflag = 'foo'
+ do_stuff()
+ finally:
+ flagsaver.restore_flag_values(saved_flag_values)
+
+We save and restore a shallow copy of each Flag object's __dict__ attribute.
+This preserves all attributes of the flag, such as whether or not it was
+overridden from its default value.
+
+WARNING: Currently a flag that is saved and then deleted cannot be restored. An
+exception will be raised. However if you *add* a flag after saving flag values,
+and then restore flag values, the added flag will be deleted with no errors.
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import functools
+import inspect
+
+from absl import flags
+import six
+
+FLAGS = flags.FLAGS
+
+
+def flagsaver(*args, **kwargs):
+ """The main flagsaver interface. See module doc for usage."""
+ if not args:
+ return _FlagOverrider(**kwargs)
+ elif len(args) == 1:
+ if kwargs:
+ raise ValueError(
+ "It's invalid to specify both positional and keyword parameters.")
+ func = args[0]
+ if inspect.isclass(func):
+ raise TypeError('@flagsaver.flagsaver cannot be applied to a class.')
+ return _wrap(func, {})
+ else:
+ raise ValueError(
+ "It's invalid to specify more than one positional parameters.")
+
+
+def save_flag_values(flag_values=FLAGS):
+ """Returns copy of flag values as a dict.
+
+ Args:
+ flag_values: FlagValues, the FlagValues instance with which the flag will
+ be saved. This should almost never need to be overridden.
+ Returns:
+ Dictionary mapping keys to values. Keys are flag names, values are
+ corresponding __dict__ members. E.g. {'key': value_dict, ...}.
+ """
+ return {name: _copy_flag_dict(flag_values[name]) for name in flag_values}
+
+
+def restore_flag_values(saved_flag_values, flag_values=FLAGS):
+ """Restores flag values based on the dictionary of flag values.
+
+ Args:
+ saved_flag_values: {'flag_name': value_dict, ...}
+ flag_values: FlagValues, the FlagValues instance from which the flag will
+ be restored. This should almost never need to be overridden.
+ """
+ new_flag_names = list(flag_values)
+ for name in new_flag_names:
+ saved = saved_flag_values.get(name)
+ if saved is None:
+ # If __dict__ was not saved, delete the "new" flag.
+ delattr(flag_values, name)
+ else:
+ if flag_values[name].value != saved['_value']:
+ flag_values[name].value = saved['_value'] # Ensure C++ value is set.
+ flag_values[name].__dict__ = saved
+
+
+def _wrap(func, overrides):
+ """Creates a wrapper function that saves/restores flag values.
+
+ Args:
+ func: function object - This will be called between saving flags and
+ restoring flags.
+ overrides: {str: object} - Flag names mapped to their values. These flags
+ will be set after saving the original flag state.
+
+ Returns:
+ return value from func()
+ """
+ @functools.wraps(func)
+ def _flagsaver_wrapper(*args, **kwargs):
+ """Wrapper function that saves and restores flags."""
+ with _FlagOverrider(**overrides):
+ return func(*args, **kwargs)
+ return _flagsaver_wrapper
+
+
+class _FlagOverrider(object):
+ """Overrides flags for the duration of the decorated function call.
+
+ It also restores all original values of flags after decorated method
+ completes.
+ """
+
+ def __init__(self, **overrides):
+ self._overrides = overrides
+ self._saved_flag_values = None
+
+ def __call__(self, func):
+ if inspect.isclass(func):
+ raise TypeError('flagsaver cannot be applied to a class.')
+ return _wrap(func, self._overrides)
+
+ def __enter__(self):
+ self._saved_flag_values = save_flag_values(FLAGS)
+ try:
+ for name, value in six.iteritems(self._overrides):
+ setattr(FLAGS, name, value)
+ except:
+ # It may fail because of flag validators.
+ restore_flag_values(self._saved_flag_values, FLAGS)
+ raise
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ restore_flag_values(self._saved_flag_values, FLAGS)
+
+
+def _copy_flag_dict(flag):
+ """Returns a copy of the flag object's __dict__.
+
+ It's mostly a shallow copy of the __dict__, except it also does a shallow
+ copy of the validator list.
+
+ Args:
+ flag: flags.Flag, the flag to copy.
+
+ Returns:
+ A copy of the flag object's __dict__.
+ """
+ copy = flag.__dict__.copy()
+ copy['_value'] = flag.value # Ensure correct restore for C++ flags.
+ copy['validators'] = list(flag.validators)
+ return copy
diff --git a/third_party/py/abseil/absl/testing/parameterized.py b/third_party/py/abseil/absl/testing/parameterized.py
new file mode 100755
index 0000000000..d9301baad7
--- /dev/null
+++ b/third_party/py/abseil/absl/testing/parameterized.py
@@ -0,0 +1,520 @@
+# Copyright 2017 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Adds support for parameterized tests to Python's unittest TestCase class.
+
+A parameterized test is a method in a test case that is invoked with different
+argument tuples.
+
+A simple example:
+
+ class AdditionExample(parameterized.TestCase):
+ @parameterized.parameters(
+ (1, 2, 3),
+ (4, 5, 9),
+ (1, 1, 3))
+ def testAddition(self, op1, op2, result):
+ self.assertEqual(result, op1 + op2)
+
+
+Each invocation is a separate test case and properly isolated just
+like a normal test method, with its own setUp/tearDown cycle. In the
+example above, there are three separate testcases, one of which will
+fail due to an assertion error (1 + 1 != 3).
+
+Parameters for individual test cases can be tuples (with positional parameters)
+or dictionaries (with named parameters):
+
+ class AdditionExample(parameterized.TestCase):
+ @parameterized.parameters(
+ {'op1': 1, 'op2': 2, 'result': 3},
+ {'op1': 4, 'op2': 5, 'result': 9},
+ )
+ def testAddition(self, op1, op2, result):
+ self.assertEqual(result, op1 + op2)
+
+If a parameterized test fails, the error message will show the
+original test name (which is modified internally) and the arguments
+for the specific invocation, which are part of the string returned by
+the shortDescription() method on test cases.
+
+The id method of the test, used internally by the unittest framework,
+is also modified to show the arguments. To make sure that test names
+stay the same across several invocations, object representations like
+
+ >>> class Foo(object):
+ ... pass
+ >>> repr(Foo())
+ '<__main__.Foo object at 0x23d8610>'
+
+are turned into '<__main__.Foo>'. For even more descriptive names,
+especially in test logs, you can use the named_parameters decorator. In
+this case, only tuples or dicts are supported. For tuples, the first parameter
+has to be a string (or an object that returns an apt name when converted via
+str()). For dicts, a value for the key 'testcase_name' must be present and must
+be a string (or an object that returns an apt name when converted via str()):
+
+ class NamedExample(parameterized.TestCase):
+ @parameterized.named_parameters(
+ ('Normal', 'aa', 'aaa', True),
+ ('EmptyPrefix', '', 'abc', True),
+ ('BothEmpty', '', '', True))
+ def testStartsWith(self, prefix, string, result):
+ self.assertEqual(result, string.startswith(prefix))
+
+ class NamedExample(parameterized.TestCase):
+ @parameterized.named_parameters(
+ {'testcase_name': 'Normal',
+ 'result': True, 'string': 'aaa', 'prefix': 'aa'},
+ {'testcase_name': 'EmptyPrefix',
+ 'result': True, 'string': 'abc', 'prefix': ''},
+ {'testcase_name': 'BothEmpty',
+ 'result': True, 'string': '', 'prefix': ''})
+ def testStartsWith(self, prefix, string, result):
+ self.assertEqual(result, string.startswith(prefix))
+
+Named tests also have the benefit that they can be run individually
+from the command line:
+
+ $ testmodule.py NamedExample.testStartsWithNormal
+ .
+ --------------------------------------------------------------------
+ Ran 1 test in 0.000s
+
+ OK
+
+Parameterized Classes
+=====================
+If invocation arguments are shared across test methods in a single
+TestCase class, instead of decorating all test methods
+individually, the class itself can be decorated:
+
+ @parameterized.parameters(
+ (1, 2, 3),
+ (4, 5, 9))
+ class ArithmeticTest(parameterized.TestCase):
+ def testAdd(self, arg1, arg2, result):
+ self.assertEqual(arg1 + arg2, result)
+
+ def testSubtract(self, arg1, arg2, result):
+ self.assertEqual(result - arg1, arg2)
+
+Inputs from Iterables
+=====================
+If parameters should be shared across several test cases, or are dynamically
+created from other sources, a single non-tuple iterable can be passed into
+the decorator. This iterable will be used to obtain the test cases:
+
+ class AdditionExample(parameterized.TestCase):
+ @parameterized.parameters(
+ (c.op1, c.op2, c.result) for c in testcases
+ )
+ def testAddition(self, op1, op2, result):
+ self.assertEqual(result, op1 + op2)
+
+
+Single-Argument Test Methods
+============================
+If a test method takes only one argument, the single argument must not be
+wrapped into a tuple:
+
+ class NegativeNumberExample(parameterized.TestCase):
+ @parameterized.parameters(
+ -1, -3, -4, -5
+ )
+ def testIsNegative(self, arg):
+ self.assertTrue(IsNegative(arg))
+
+
+List/tuple as a Single Argument
+===============================
+If a test method takes a single argument that is a list or tuple, it must be
+wrapped inside a tuple:
+
+ class ZeroSumExample(parameterized.TestCase):
+ @parameterized.parameters(
+ ([-1, 0, 1], ),
+ ([-2, 0, 2], ),
+ )
+ def testSumIsZero(self, arg):
+ self.assertEqual(0, sum(arg))
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import collections
+import functools
+import re
+import types
+import unittest
+
+from absl.testing import absltest
+import six
+
+_ADDR_RE = re.compile(r'\<([a-zA-Z0-9_\-\.]+) object at 0x[a-fA-F0-9]+\>')
+_NAMED = object()
+_ARGUMENT_REPR = object()
+_NAMED_DICT_KEY = 'testcase_name'
+
+
+class NoTestsError(Exception):
+ """Raised when parameterized decorators do not generate any tests."""
+
+
+def _clean_repr(obj):
+ return _ADDR_RE.sub(r'<\1>', repr(obj))
+
+
+def _non_string_or_bytes_iterable(obj):
+ return (isinstance(obj, collections.Iterable) and
+ not isinstance(obj, six.text_type) and
+ not isinstance(obj, six.binary_type))
+
+
+def _format_parameter_list(testcase_params):
+ if isinstance(testcase_params, collections.Mapping):
+ return ', '.join('%s=%s' % (argname, _clean_repr(value))
+ for argname, value in six.iteritems(testcase_params))
+ elif _non_string_or_bytes_iterable(testcase_params):
+ return ', '.join(map(_clean_repr, testcase_params))
+ else:
+ return _format_parameter_list((testcase_params,))
+
+
+class _ParameterizedTestIter(object):
+ """Callable and iterable class for producing new test cases."""
+
+ def __init__(self, test_method, testcases, naming_type):
+ """Returns concrete test functions for a test and a list of parameters.
+
+ The naming_type is used to determine the name of the concrete
+ functions as reported by the unittest framework. If naming_type is
+ _NAMED, the testcases must be tuples whose first element (or dicts whose
+ 'testcase_name' value) is a string that yields a descriptive test name.
+
+ Args:
+ test_method: The decorated test method.
+ testcases: (list of tuple/dict) A list of parameter
+ tuples/dicts for individual test invocations.
+ naming_type: The test naming type, either _NAMED or _ARGUMENT_REPR.
+ """
+ self._test_method = test_method
+ self.testcases = testcases
+ self._naming_type = naming_type
+ self.__name__ = _ParameterizedTestIter.__name__
+
+ def __call__(self, *args, **kwargs):
+ raise RuntimeError('You appear to be running a parameterized test case '
+ 'without having inherited from parameterized.'
+ 'TestCase. This is bad because none of '
+ 'your test cases are actually being run. You may also '
+ 'be using a mock annotation before the parameterized '
+ 'one, in which case you should reverse the order.')
+
+ def __iter__(self):
+ test_method = self._test_method
+ naming_type = self._naming_type
+ extra_ids = collections.defaultdict(int)
+
+ def make_bound_param_test(testcase_params):
+ @functools.wraps(test_method)
+ def bound_param_test(self):
+ if isinstance(testcase_params, collections.Mapping):
+ test_method(self, **testcase_params)
+ elif _non_string_or_bytes_iterable(testcase_params):
+ test_method(self, *testcase_params)
+ else:
+ test_method(self, testcase_params)
+
+ if naming_type is _NAMED:
+ # Signal the metaclass that the name of the test function is unique
+ # and descriptive.
+ bound_param_test.__x_use_name__ = True
+
+ testcase_name = None
+ if isinstance(testcase_params, collections.Mapping):
+ if _NAMED_DICT_KEY not in testcase_params:
+ raise RuntimeError(
+ 'Dict for named tests must contain key "%s"' % _NAMED_DICT_KEY)
+ # Create a new dict to avoid modifying the supplied testcase_params.
+ testcase_name = testcase_params[_NAMED_DICT_KEY]
+ testcase_params = {k: v for k, v in six.iteritems(testcase_params)
+ if k != _NAMED_DICT_KEY}
+ elif _non_string_or_bytes_iterable(testcase_params):
+ testcase_name = testcase_params[0]
+ testcase_params = testcase_params[1:]
+ else:
+ raise RuntimeError(
+ 'Named tests must be passed a dict or non-string iterable.')
+
+ # Support PEP-8 underscore style for test naming if used.
+ if (bound_param_test.__name__.startswith('test_')
+ and testcase_name
+ and not testcase_name.startswith('_')):
+ bound_param_test.__name__ += '_'
+
+ bound_param_test.__name__ += str(testcase_name)
+ elif naming_type is _ARGUMENT_REPR:
+ # If it's a generator, convert it to a tuple and treat them as
+ # parameters.
+ if isinstance(testcase_params, types.GeneratorType):
+ testcase_params = tuple(testcase_params)
+ # The metaclass creates a unique, but non-descriptive method name for
+ # _ARGUMENT_REPR tests using an indexed suffix.
+ # To keep test names descriptive, only the original method name is used.
+ # To make sure test names are unique, we add a unique descriptive suffix
+ # __x_extra_id__ for every test.
+ extra_id = '(%s)' % (_format_parameter_list(testcase_params),)
+ extra_ids[extra_id] += 1
+ while extra_ids[extra_id] > 1:
+ extra_id = '%s (%d)' % (extra_id, extra_ids[extra_id])
+ extra_ids[extra_id] += 1
+ bound_param_test.__x_extra_id__ = extra_id
+ else:
+ raise RuntimeError('%s is not a valid naming type.' % (naming_type,))
+
+ bound_param_test.__doc__ = '%s(%s)' % (
+ bound_param_test.__name__, _format_parameter_list(testcase_params))
+ if test_method.__doc__:
+ bound_param_test.__doc__ += '\n%s' % (test_method.__doc__,)
+ return bound_param_test
+
+ return (make_bound_param_test(c) for c in self.testcases)
+
+
+def _modify_class(class_object, testcases, naming_type):
+ assert not getattr(class_object, '_test_method_ids', None), (
+ 'Cannot add parameters to %s. Either it already has parameterized '
+ 'methods, or its super class is also a parameterized class.' % (
+ class_object,))
+ class_object._test_method_ids = test_method_ids = {}
+ for name, obj in six.iteritems(class_object.__dict__.copy()):
+ if (name.startswith(unittest.TestLoader.testMethodPrefix)
+ and isinstance(obj, types.FunctionType)):
+ delattr(class_object, name)
+ methods = {}
+ _update_class_dict_for_param_test_case(
+ methods, test_method_ids, name,
+ _ParameterizedTestIter(obj, testcases, naming_type))
+ for name, meth in six.iteritems(methods):
+ setattr(class_object, name, meth)
+
+
+def _parameter_decorator(naming_type, testcases):
+ """Implementation of the parameterization decorators.
+
+ Args:
+ naming_type: The naming type.
+ testcases: Testcase parameters.
+
+ Raises:
+ NoTestsError: Raised when the decorator generates no tests.
+
+ Returns:
+ A function for modifying the decorated object.
+ """
+ def _apply(obj):
+ if isinstance(obj, type):
+ _modify_class(obj, testcases, naming_type)
+ return obj
+ else:
+ return _ParameterizedTestIter(obj, testcases, naming_type)
+
+ if (len(testcases) == 1 and
+ not isinstance(testcases[0], tuple) and
+ not (naming_type == _NAMED and
+ isinstance(testcases[0], collections.Mapping))):
+ # Support using a single non-tuple parameter as a list of test cases.
+ # Note that in the named parameters case, the single non-tuple parameter
+ # can't be a Mapping either, since that would be a single named test case.
+ assert _non_string_or_bytes_iterable(testcases[0]), (
+ 'Single parameter argument must be a non-string iterable')
+ testcases = testcases[0]
+
+ if not isinstance(testcases, collections.Sequence):
+ testcases = list(testcases)
+ if not testcases:
+ raise NoTestsError(
+ 'parameterized test decorators did not generate any tests. '
+ 'Make sure you specify non-empty parameters, '
+ 'and do not reuse generators more than once.')
+
+ return _apply
+
+
+def parameters(*testcases):
+ """A decorator for creating parameterized tests.
+
+ See the module docstring for a usage example.
+
+ Args:
+ *testcases: Parameters for the decorated method, either a single
+ iterable, or a list of tuples/dicts/objects (for tests with only one
+ argument).
+
+ Raises:
+ NoTestsError: Raised when the decorator generates no tests.
+
+ Returns:
+ A test generator to be handled by TestGeneratorMetaclass.
+ """
+ return _parameter_decorator(_ARGUMENT_REPR, testcases)
+
+
+def named_parameters(*testcases):
+ """A decorator for creating parameterized tests.
+
+ See the module docstring for a usage example. For every parameter tuple
+ passed, the first element of the tuple should be a string and will be appended
+ to the name of the test method. Each parameter dict passed must have a value
+ for the key "testcase_name", the string representation of that value will be
+ appended to the name of the test method.
+
+ Args:
+ *testcases: Parameters for the decorated method, either a single iterable,
+ or a list of tuples or dicts.
+
+ Raises:
+ NoTestsError: Raised when the decorator generates no tests.
+
+ Returns:
+ A test generator to be handled by TestGeneratorMetaclass.
+ """
+ return _parameter_decorator(_NAMED, testcases)
+
+
+class TestGeneratorMetaclass(type):
+ """Metaclass for test cases with test generators.
+
+ A test generator is an iterable in a testcase that produces callables. These
+ callables must be single-argument methods. These methods are injected into
+ the class namespace and the original iterable is removed. If the name of the
+ iterable conforms to the test pattern, the injected methods will be picked
+ up as tests by the unittest framework.
+
+ In general, it is supposed to be used in conjunction with the
+ parameters decorator.
+ """
+
+ def __new__(mcs, class_name, bases, dct):
+ test_method_ids = dct.setdefault('_test_method_ids', {})
+ for name, obj in six.iteritems(dct.copy()):
+ if (name.startswith(unittest.TestLoader.testMethodPrefix) and
+ _non_string_or_bytes_iterable(obj)):
+ iterator = iter(obj)
+ dct.pop(name)
+ _update_class_dict_for_param_test_case(
+ dct, test_method_ids, name, iterator)
+ # If the base class is a subclass of parameterized.TestCase, inherit its
+ # _test_method_ids too.
+ for base in bases:
+ # Check if the base has _test_method_ids first, then check if it's a
+ # subclass of parameterized.TestCase. Otherwise when this is called for
+ # the parameterized.TestCase definition itself, this raises because
+ # itself is not defined yet. This works as long as absltest.TestCase does
+ # not define _test_method_ids.
+ if getattr(base, '_test_method_ids', None) and issubclass(base, TestCase):
+ for test_method, test_method_id in six.iteritems(base._test_method_ids):
+ # test_method may exist in both the base and this class.
+ # This class's method overrides base class's.
+ # That's why it should only inherit it if it does not exist.
+ test_method_ids.setdefault(test_method, test_method_id)
+
+ return type.__new__(mcs, class_name, bases, dct)
+
+
+def _update_class_dict_for_param_test_case(
+ dct, test_method_ids, name, iterator):
+ """Adds individual test cases to a dictionary.
+
+ Args:
+ dct: The target dictionary.
+ test_method_ids: The dictionary for mapping names to test IDs.
+ name: The original name of the test case.
+ iterator: The iterator generating the individual test cases.
+ """
+ for idx, func in enumerate(iterator):
+ assert callable(func), 'Test generators must yield callables, got %r' % (
+ func,)
+ if getattr(func, '__x_use_name__', False):
+ original_name = func.__name__
+ new_name = original_name
+ else:
+ original_name = name
+ new_name = '%s%d' % (original_name, idx)
+ assert new_name not in dct, (
+ 'Name of parameterized test case "%s" not unique' % (new_name,))
+ dct[new_name] = func
+ test_method_id = original_name + getattr(func, '__x_extra_id__', '')
+ assert test_method_id not in test_method_ids.values(), (
+ 'Id of parameterized test case "%s" not unique' % (test_method_id,))
+ test_method_ids[new_name] = test_method_id
+
+
+class TestCase(six.with_metaclass(TestGeneratorMetaclass, absltest.TestCase)):
+ """Base class for test cases using the parameters decorator."""
+
+ def __str__(self):
+ return '%s (%s)' % (
+ self._test_method_ids.get(self._testMethodName, self._testMethodName),
+ unittest.util.strclass(self.__class__))
+
+ def id(self):
+ """Returns the descriptive ID of the test.
+
+ This is used internally by the unittesting framework to get a name
+ for the test to be used in reports.
+
+ Returns:
+ The test id.
+ """
+ return '%s.%s' % (
+ unittest.util.strclass(self.__class__),
+ # When a test method is NOT decorated, it doesn't exist in
+ # _test_method_ids. Use the _testMethodName directly.
+ self._test_method_ids.get(self._testMethodName, self._testMethodName))
+
+
+# This function is kept CamelCase because it's used as a class's base class.
+def CoopTestCase(other_base_class): # pylint: disable=invalid-name
+ """Returns a new base class with a cooperative metaclass base.
+
+ This enables the TestCase to be used in combination
+ with other base classes that have custom metaclasses, such as
+ mox.MoxTestBase.
+
+ Only works with metaclasses that do not override type.__new__.
+
+ Example:
+
+ from absl.testing import parameterized
+
+ class ExampleTest(parameterized.CoopTestCase(OtherTestCase)):
+ ...
+
+ Args:
+ other_base_class: (class) A test case base class.
+
+ Returns:
+ A new class object.
+ """
+ metaclass = type(
+ 'CoopMetaclass',
+ (other_base_class.__metaclass__,
+ TestGeneratorMetaclass), {})
+ return metaclass(
+ 'CoopTestCase',
+ (other_base_class, TestCase), {})
diff --git a/third_party/py/abseil/absl/testing/xml_reporter.py b/third_party/py/abseil/absl/testing/xml_reporter.py
new file mode 100755
index 0000000000..9c095e780f
--- /dev/null
+++ b/third_party/py/abseil/absl/testing/xml_reporter.py
@@ -0,0 +1,445 @@
+# Copyright 2017 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""A Python test reporter that generates test reports in JUnit XML format."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import re
+import sys
+import threading
+import time
+import traceback
+import unittest
+from xml.sax import saxutils
+
+import six
+
+
+# See http://www.w3.org/TR/REC-xml/#NT-Char
+_bad_control_character_codes = set(range(0, 0x20)) - {0x9, 0xA, 0xD}
+
+
+_control_character_conversions = {
+ chr(i): '\\x{:02x}'.format(i) for i in _bad_control_character_codes}
+
+
+_escape_xml_attr_conversions = {
+ '"': '&quot;',
+ "'": '&apos;',
+ '\n': '&#xA;',
+ '\t': '&#x9;',
+ '\r': '&#xD;',
+ ' ': '&#x20;'}
+_escape_xml_attr_conversions.update(_control_character_conversions)
+
+
+# When class or module level function fails, unittest/suite.py adds a
+# _ErrorHolder instance instead of a real TestCase, and it has a description
+# like "setUpClass (__main__.MyTestCase)".
+_CLASS_OR_MODULE_LEVEL_TEST_DESC_REGEX = re.compile(r'^(\w+) \((\S+)\)$')
+
+
+# NOTE: while saxutils.quoteattr() theoretically does the same thing, it
+# often ends up being too smart for its own good and does not escape properly.
+# This function is much more reliable.
+def _escape_xml_attr(content):
+ """Escapes xml attributes."""
+ # Note: saxutils doesn't escape the quotes.
+ return saxutils.escape(content, _escape_xml_attr_conversions)
+
+
+def _escape_cdata(s):
+ """Escapes a string to be used as XML CDATA.
+
+ CDATA characters are treated strictly as character data, not as XML markup,
+ but there are still certain restrictions on them.
+
+ Args:
+ s: the string to be escaped.
+ Returns:
+ An escaped version of the input string.
+ """
+ for char, escaped in six.iteritems(_control_character_conversions):
+ s = s.replace(char, escaped)
+ return s.replace(']]>', ']] >')
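+ # For example (a sketch): _escape_cdata('a]]>b\x00c') returns 'a]] >b\\x00c',
+ # i.e. the CDATA terminator is broken up and the NUL byte becomes the four
+ # characters backslash, x, 0, 0.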
+
+
+# Copy time.time which ensures the real time is used internally.
+# This prevents bad interactions with tests that stub out time.
+_time_copy = time.time
+
+if hasattr(traceback, '_some_str'):
+ # Use the traceback module str function to format safely.
+ _safe_str = traceback._some_str
+else:
+ _safe_str = str # pylint: disable=invalid-name
+
+
+class _TestCaseResult(object):
+ """Private helper for _TextAndXMLTestResult that represents a test result.
+
+ Attributes:
+ test: A TestCase instance of an individual test method.
+ name: The name of the individual test method.
+ full_class_name: The full name of the test class.
+ run_time: The duration (in seconds) it took to run the test.
+ errors: A list of error 4-tuples. Error tuple entries are
+ 1) a string identifier of either "failure" or "error"
+ 2) an exception_type
+ 3) an exception_message
+ 4) a string version of a sys.exc_info()-style tuple of values
+ ('error', err[0], err[1], self._exc_info_to_string(err))
+ If the length of errors is 0, then the test is either passed or
+ skipped.
+ skip_reason: A string explaining why the test was skipped.
+ """
+
+ def __init__(self, test):
+ self.run_time = -1
+ self.skip_reason = None
+ self.errors = []
+ self.test = test
+
+ # Parse the test id to get its test name and full class path.
+ # Unfortunately there is no better way of knowing the test and class.
+ # Worse, unittest uses _ErrorHolder instances to represent class / module
+ # level failures.
+ test_desc = test.id() or str(test)
+ # Check if it's something like "setUpClass (__main__.TestCase)".
+ match = _CLASS_OR_MODULE_LEVEL_TEST_DESC_REGEX.match(test_desc)
+ if match:
+ name = match.group(1)
+ full_class_name = match.group(2)
+ else:
+ class_name = unittest.util.strclass(test.__class__)
+ if test_desc.startswith(class_name + '.'):
+ # In a typical unittest.TestCase scenario, test.id() returns with
+ # a class name formatted using unittest.util.strclass.
+ name = test_desc[len(class_name)+1:]
+ full_class_name = class_name
+ else:
+ # Otherwise make a best effort to guess the test name and full class
+ # path.
+ parts = test_desc.rsplit('.', 1)
+ name = parts[-1]
+ full_class_name = parts[0] if len(parts) == 2 else ''
+ self.name = _escape_xml_attr(name)
+ self.full_class_name = _escape_xml_attr(full_class_name)
+
+ def set_run_time(self, time_in_secs):
+ self.run_time = time_in_secs
+
+ def print_xml_summary(self, stream):
+ """Prints an XML Summary of a TestCase.
+
+ Status and result are populated as per JUnit XML test result reporter.
+ A test that has been skipped will always have a skip reason,
+ as every skip method in Python's unittest requires the reason arg to be
+ passed.
+
+ Args:
+ stream: output stream to write test report XML to
+ """
+
+ if self.skip_reason is None:
+ status = 'run'
+ result = 'completed'
+ else:
+ status = 'notrun'
+ result = 'suppressed'
+
+ stream.write(
+ ' <testcase name="%s" status="%s" result="%s" time="%.1f" '
+ 'classname="%s">\n' % (
+ self.name, status, result, self.run_time, self.full_class_name))
+ self._print_testcase_details(stream)
+ stream.write(' </testcase>\n')
+
+ def _print_testcase_details(self, stream):
+ for error in self.errors:
+ outcome, exception_type, message, error_msg = error # pylint: disable=unpacking-non-sequence
+ message = _escape_xml_attr(_safe_str(message))
+ exception_type = _escape_xml_attr(str(exception_type))
+ error_msg = _escape_cdata(error_msg)
+ stream.write(' <%s message="%s" type="%s"><![CDATA[%s]]></%s>\n'
+ % (outcome, message, exception_type, error_msg, outcome))
+
+
+class _TestSuiteResult(object):
+ """Private helper for _TextAndXMLTestResult."""
+
+ def __init__(self):
+ self.suites = {}
+ self.failure_counts = {}
+ self.error_counts = {}
+
+ def add_test_case_result(self, test_case_result):
+ suite_name = type(test_case_result.test).__name__
+ if suite_name == '_ErrorHolder':
+ # _ErrorHolder is a special case created by unittest for class / module
+ # level functions.
+ suite_name = test_case_result.full_class_name.rsplit('.')[-1]
+ self._setup_test_suite(suite_name)
+ self.suites[suite_name].append(test_case_result)
+ for error in test_case_result.errors:
+ # Only count the first failure or error so that the sum is equal to the
+ # total number of *testcases* that have failures or errors.
+ if error[0] == 'failure':
+ self.failure_counts[suite_name] += 1
+ break
+ elif error[0] == 'error':
+ self.error_counts[suite_name] += 1
+ break
+
+ def print_xml_summary(self, stream):
+ overall_test_count = sum([len(x) for x in self.suites.values()])
+ overall_failures = sum(self.failure_counts.values())
+ overall_errors = sum(self.error_counts.values())
+ overall_time = 0
+ for tests in self.suites.values():
+ overall_time += sum([x.run_time for x in tests])
+ overall_args = (overall_test_count, overall_failures, overall_errors,
+ overall_time)
+ stream.write('<testsuites name="" tests="%d" failures="%d" '
+ 'errors="%d" time="%.1f">\n' % overall_args)
+ for suite_name in self.suites:
+ suite = self.suites[suite_name]
+ suite_time = sum([x.run_time for x in suite])
+ failures = self.failure_counts[suite_name]
+ errors = self.error_counts[suite_name]
+ args = (suite_name, len(suite), failures, errors, suite_time)
+ stream.write('<testsuite name="%s" tests="%d" failures="%d" '
+ 'errors="%d" time="%.1f">\n' % args)
+ for test_case_result in suite:
+ test_case_result.print_xml_summary(stream)
+ stream.write('</testsuite>\n')
+ stream.write('</testsuites>\n')
+
+ def _setup_test_suite(self, suite_name):
+ """Adds a test suite to the set of suites tracked by this test run.
+
+ Args:
+ suite_name: string, The name of the test suite being initialized.
+ """
+ if suite_name in self.suites:
+ return
+ self.suites[suite_name] = []
+ self.failure_counts[suite_name] = 0
+ self.error_counts[suite_name] = 0
+
+
+class _TextAndXMLTestResult(unittest.TextTestResult):
+ """Private TestResult class that produces both formatted text results and XML.
+
+ Used by TextAndXMLTestRunner.
+ """
+
+ _TEST_SUITE_RESULT_CLASS = _TestSuiteResult
+ _TEST_CASE_RESULT_CLASS = _TestCaseResult
+
+ def __init__(self, xml_stream, stream, descriptions, verbosity,
+ time_getter=_time_copy):
+ super(_TextAndXMLTestResult, self).__init__(stream, descriptions, verbosity)
+ self.xml_stream = xml_stream
+ self.pending_test_case_results = {}
+ self.suite = self._TEST_SUITE_RESULT_CLASS()
+ self.time_getter = time_getter
+
+ # This lock guards any mutations on pending_test_case_results.
+ self._pending_test_case_results_lock = threading.Lock()
+
+ def startTest(self, test):
+ self.start_time = self.time_getter()
+ super(_TextAndXMLTestResult, self).startTest(test)
+
+ def stopTest(self, test):
+ # Grabbing the write lock to avoid conflicting with stopTestRun.
+ with self._pending_test_case_results_lock:
+ super(_TextAndXMLTestResult, self).stopTest(test)
+ result = self.get_pending_test_case_result(test)
+ if not result:
+ test_name = test.id() or str(test)
+ sys.stderr.write('No pending test case: %s\n' % test_name)
+ return
+ test_id = id(test)
+ run_time = self.time_getter() - self.start_time
+ result.set_run_time(run_time)
+ self.suite.add_test_case_result(result)
+ del self.pending_test_case_results[test_id]
+
+ def stopTestRun(self):
+ # All pending_test_case_results will be added to the suite and removed
+ # from the pending_test_case_results dictionary. Grabbing the write lock
+ # prevents results from being added during this process, which could
+ # otherwise duplicate adds or accidentally erase newly appended pending
+ # results.
+ with self._pending_test_case_results_lock:
+ # Errors in the test fixture (setUpModule, tearDownModule,
+ # setUpClass, tearDownClass) can leave a pending result which
+ # never gets added to the suite. The runner calls stopTestRun
+ # which gives us an opportunity to add these errors for
+ # reporting here.
+ for test_id in self.pending_test_case_results:
+ result = self.pending_test_case_results[test_id]
+ if hasattr(self, 'start_time'):
+ run_time = self.time_getter() - self.start_time
+ result.set_run_time(run_time)
+ self.suite.add_test_case_result(result)
+ self.pending_test_case_results.clear()
+
+ def _exc_info_to_string(self, err, test=None):
+ """Converts a sys.exc_info()-style tuple of values into a string.
+
+ This method must be overridden because the method signature in
+ unittest.TestResult changed between Python 2.2 and 2.4.
+
+ Args:
+ err: A sys.exc_info() tuple of values for an error.
+ test: The test method.
+
+ Returns:
+ A formatted exception string.
+ """
+ if test:
+ return super(_TextAndXMLTestResult, self)._exc_info_to_string(err, test)
+ return ''.join(traceback.format_exception(*err))
+
+ def add_pending_test_case_result(self, test, error_summary=None,
+ skip_reason=None):
+ """Adds result information to a test case result which may still be running.
+
+ If a result entry for the test already exists, add_pending_test_case_result
+ will add error summary tuples and/or overwrite skip_reason for the result.
+ If it does not yet exist, a result entry will be created.
+ Note that a test result is considered to have been run and passed
+ only if there are no errors or skip_reason.
+
+ Args:
+ test: A test method as defined by unittest
+ error_summary: A 4-tuple with the following entries:
+ 1) a string identifier of either "failure" or "error"
+ 2) an exception_type
+ 3) an exception_message
+ 4) a string version of a sys.exc_info()-style tuple of values
+ ('error', err[0], err[1], self._exc_info_to_string(err))
+ If the errors list is empty, the test either passed or was
+ skipped.
+ skip_reason: a string explaining why the test was skipped
+ """
+ with self._pending_test_case_results_lock:
+ test_id = id(test)
+ if test_id not in self.pending_test_case_results:
+ self.pending_test_case_results[test_id] = self._TEST_CASE_RESULT_CLASS(
+ test)
+ if error_summary:
+ self.pending_test_case_results[test_id].errors.append(error_summary)
+ if skip_reason:
+ self.pending_test_case_results[test_id].skip_reason = skip_reason
+
+ def delete_pending_test_case_result(self, test):
+ with self._pending_test_case_results_lock:
+ test_id = id(test)
+ del self.pending_test_case_results[test_id]
+
+ def get_pending_test_case_result(self, test):
+ test_id = id(test)
+ return self.pending_test_case_results.get(test_id, None)
+
+ def addSuccess(self, test):
+ super(_TextAndXMLTestResult, self).addSuccess(test)
+ self.add_pending_test_case_result(test)
+
+ def addError(self, test, err):
+ super(_TextAndXMLTestResult, self).addError(test, err)
+ error_summary = ('error', err[0], err[1], self._exc_info_to_string(err))
+ self.add_pending_test_case_result(test, error_summary=error_summary)
+
+ def addFailure(self, test, err):
+ super(_TextAndXMLTestResult, self).addFailure(test, err)
+ error_summary = ('failure', err[0], err[1], self._exc_info_to_string(err))
+ self.add_pending_test_case_result(test, error_summary=error_summary)
+
+ def addSkip(self, test, reason):
+ super(_TextAndXMLTestResult, self).addSkip(test, reason)
+ self.add_pending_test_case_result(test, skip_reason=reason)
+
+ def addExpectedFailure(self, test, err):
+ super(_TextAndXMLTestResult, self).addExpectedFailure(test, err)
+ if callable(getattr(test, 'recordProperty', None)):
+ test.recordProperty('EXPECTED_FAILURE', self._exc_info_to_string(err))
+ self.add_pending_test_case_result(test)
+
+ def addUnexpectedSuccess(self, test):
+ super(_TextAndXMLTestResult, self).addUnexpectedSuccess(test)
+ test_name = test.id() or str(test)
+ error_summary = ('error', '', '',
+ 'Test case %s should have failed, but passed.'
+ % (test_name))
+ self.add_pending_test_case_result(test, error_summary=error_summary)
+
+ def printErrors(self):
+ super(_TextAndXMLTestResult, self).printErrors()
+ self.xml_stream.write('<?xml version="1.0"?>\n')
+ self.suite.print_xml_summary(self.xml_stream)
+
+
+class TextAndXMLTestRunner(unittest.TextTestRunner):
+ """A test runner that produces both formatted text results and XML.
+
+ It prints out the names of tests as they are run, errors as they
+ occur, and a summary of the results at the end of the test run.
+ """
+
+ _TEST_RESULT_CLASS = _TextAndXMLTestResult
+
+ _xml_stream = None
+
+ def __init__(self, xml_stream=None, *args, **kwargs):
+ """Initialize a TextAndXMLTestRunner.
+
+ Args:
+ xml_stream: file-like or None; XML-formatted test results are output
+ via this object's write() method. If None (the default), the
+ new instance behaves as described in the set_default_xml_stream method
+ documentation below.
+ *args: passed unmodified to unittest.TextTestRunner.__init__.
+ **kwargs: passed unmodified to unittest.TextTestRunner.__init__.
+ """
+ super(TextAndXMLTestRunner, self).__init__(*args, **kwargs)
+ if xml_stream is not None:
+ self._xml_stream = xml_stream
+ # else, do not set self._xml_stream to None -- this allows implicit fallback
+ # to the class attribute's value.
+
+ @classmethod
+ def set_default_xml_stream(cls, xml_stream):
+ """Sets the default XML stream for the class.
+
+ Args:
+ xml_stream: file-like or None; used for instances when xml_stream is None
+ or not passed to their constructors. If None is passed, instances
+ created with xml_stream=None will act as ordinary TextTestRunner
+ instances; this is the default state before any calls to this method
+ have been made.
+ """
+ cls._xml_stream = xml_stream
+
+ def _makeResult(self):
+ if self._xml_stream is None:
+ return super(TextAndXMLTestRunner, self)._makeResult()
+ else:
+ return self._TEST_RESULT_CLASS(self._xml_stream, self.stream,
+ self.descriptions, self.verbosity)
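For orientation, here is a minimal usage sketch of the runner defined above. It assumes the vendored package is importable as absl.testing.xml_reporter (the module path listed in SOURCES.txt further down); the sample test case and the output file name are illustrative only and are not part of this change.

import unittest

from absl.testing import xml_reporter


class SampleTest(unittest.TestCase):
  """Illustrative test case, not part of the vendored sources."""

  def test_addition(self):
    self.assertEqual(2 + 2, 4)


if __name__ == '__main__':
  with open('test.xml', 'w') as xml_out:
    # Pass the XML stream per instance; the human-readable text output still
    # goes to the usual unittest stream.
    runner = xml_reporter.TextAndXMLTestRunner(xml_stream=xml_out, verbosity=2)
    # Alternatively, set a class-wide default that later runners created with
    # xml_stream=None will fall back to:
    #   xml_reporter.TextAndXMLTestRunner.set_default_xml_stream(xml_out)
    suite = unittest.TestLoader().loadTestsFromTestCase(SampleTest)
    runner.run(suite)

When the run finishes, printErrors writes the XML declaration and print_xml_summary emits the <testsuites> and <testsuite> elements carrying the tests, failures, errors, and time attributes assembled above.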
diff --git a/third_party/py/abseil/absl_py.egg-info/PKG-INFO b/third_party/py/abseil/absl_py.egg-info/PKG-INFO
new file mode 100644
index 0000000000..c9b961612a
--- /dev/null
+++ b/third_party/py/abseil/absl_py.egg-info/PKG-INFO
@@ -0,0 +1,21 @@
+Metadata-Version: 1.1
+Name: absl-py
+Version: 0.1.1
+Summary: Abseil Python Common Libraries
+Home-page: https://github.com/abseil/abseil-py
+Author: The Abseil Authors
+Author-email: UNKNOWN
+License: Apache 2.0
+Description: UNKNOWN
+Platform: UNKNOWN
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Intended Audience :: Developers
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Operating System :: OS Independent
diff --git a/third_party/py/abseil/absl_py.egg-info/SOURCES.txt b/third_party/py/abseil/absl_py.egg-info/SOURCES.txt
new file mode 100644
index 0000000000..4e12af5403
--- /dev/null
+++ b/third_party/py/abseil/absl_py.egg-info/SOURCES.txt
@@ -0,0 +1,25 @@
+setup.py
+absl/__init__.py
+absl/app.py
+absl/command_name.py
+absl/flags/__init__.py
+absl/flags/_argument_parser.py
+absl/flags/_defines.py
+absl/flags/_exceptions.py
+absl/flags/_flag.py
+absl/flags/_flagvalues.py
+absl/flags/_helpers.py
+absl/flags/_validators.py
+absl/logging/__init__.py
+absl/logging/converter.py
+absl/testing/__init__.py
+absl/testing/_bazelize_command.py
+absl/testing/absltest.py
+absl/testing/flagsaver.py
+absl/testing/parameterized.py
+absl/testing/xml_reporter.py
+absl_py.egg-info/PKG-INFO
+absl_py.egg-info/SOURCES.txt
+absl_py.egg-info/dependency_links.txt
+absl_py.egg-info/requires.txt
+absl_py.egg-info/top_level.txt
\ No newline at end of file
diff --git a/third_party/py/abseil/absl_py.egg-info/dependency_links.txt b/third_party/py/abseil/absl_py.egg-info/dependency_links.txt
new file mode 100644
index 0000000000..8b13789179
--- /dev/null
+++ b/third_party/py/abseil/absl_py.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/third_party/py/abseil/absl_py.egg-info/requires.txt b/third_party/py/abseil/absl_py.egg-info/requires.txt
new file mode 100644
index 0000000000..64c56a3640
--- /dev/null
+++ b/third_party/py/abseil/absl_py.egg-info/requires.txt
@@ -0,0 +1 @@
+six
\ No newline at end of file
diff --git a/third_party/py/abseil/absl_py.egg-info/top_level.txt b/third_party/py/abseil/absl_py.egg-info/top_level.txt
new file mode 100644
index 0000000000..46022f6ff2
--- /dev/null
+++ b/third_party/py/abseil/absl_py.egg-info/top_level.txt
@@ -0,0 +1 @@
+absl
diff --git a/third_party/py/abseil/setup.cfg b/third_party/py/abseil/setup.cfg
new file mode 100644
index 0000000000..861a9f5542
--- /dev/null
+++ b/third_party/py/abseil/setup.cfg
@@ -0,0 +1,5 @@
+[egg_info]
+tag_build =
+tag_date = 0
+tag_svn_revision = 0
+
diff --git a/third_party/py/abseil/setup.py b/third_party/py/abseil/setup.py
new file mode 100644
index 0000000000..cc596671cb
--- /dev/null
+++ b/third_party/py/abseil/setup.py
@@ -0,0 +1,61 @@
+# Copyright 2017 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Abseil setup configuration."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import platform
+
+try:
+ import setuptools
+except ImportError:
+ from ez_setup import use_setuptools
+ use_setuptools()
+ import setuptools
+
+py_version = platform.python_version_tuple()
+if py_version < ('2', '7') or py_version[0] == '3' and py_version < ('3', '4'):
+ raise RuntimeError('Python version 2.7 or 3.4+ is required.')
+
+
+setuptools.setup(
+ name='absl-py',
+ version='0.1.1',
+ description='Abseil Python Common Libraries',
+ author='The Abseil Authors',
+ url='https://github.com/abseil/abseil-py',
+ packages=setuptools.find_packages(exclude=[
+ '*.tests', '*.tests.*', 'tests.*', 'tests',
+ ]),
+ install_requires=[
+ 'six',
+ ],
+ license='Apache 2.0',
+ classifiers=[
+ 'Programming Language :: Python',
+ 'Programming Language :: Python :: 2',
+ 'Programming Language :: Python :: 2.7',
+ 'Programming Language :: Python :: 3',
+ 'Programming Language :: Python :: 3.4',
+ 'Programming Language :: Python :: 3.5',
+ 'Programming Language :: Python :: 3.6',
+ 'Intended Audience :: Developers',
+ 'Topic :: Software Development :: Libraries :: Python Modules',
+ 'License :: OSI Approved :: Apache Software License',
+ 'Operating System :: OS Independent',
+ ],
+)
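A note on the version gate above: platform.python_version_tuple() returns a tuple of strings, and the check relies on 'and' binding more tightly than 'or', so it rejects interpreters older than 2.7 and 3.x interpreters older than 3.4. The comparison is lexicographic on the string components, which is sufficient for the single-digit minor versions targeted here. A small sketch with hypothetical version tuples (the helper name and the sample values are illustrative, not from the sources above):

# Hypothetical inputs to the same comparison used in setup.py above.
def too_old(py_version):
  return (py_version < ('2', '7') or
          py_version[0] == '3' and py_version < ('3', '4'))

assert too_old(('2', '6', '9'))       # 2.6 is rejected
assert not too_old(('2', '7', '14'))  # 2.7.x is accepted
assert too_old(('3', '3', '0'))       # 3.3 is rejected
assert not too_old(('3', '5', '2'))   # 3.5 is accepted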