author     epoger <epoger@google.com>            2014-07-17 12:54:16 -0700
committer  Commit bot <commit-bot@chromium.org>  2014-07-17 12:54:16 -0700
commit     66ed8dc4bfd63e4552a213cb17909f9fbbf59abd (patch)
tree       4d48cfa8ba43750c051f542f23b48b5f953e58ea /tools/tests/base_unittest.py
parent     733418f91e4d90f4ec250d9a97120618a5a89b49 (diff)
combine base_unittest.py modules from gm and tools
general cleanup, which will also help with http://skbug.com/2752 ('split
existing "gpu" GM results into "gl" and "gles"')

R=rmistry@google.com
Author: epoger@google.com

Review URL: https://codereview.chromium.org/397103003
Diffstat (limited to 'tools/tests/base_unittest.py')
-rwxr-xr-x  tools/tests/base_unittest.py  127
1 file changed, 92 insertions(+), 35 deletions(-)
diff --git a/tools/tests/base_unittest.py b/tools/tests/base_unittest.py
index f7ee570a24..9ff710f4a1 100755
--- a/tools/tests/base_unittest.py
+++ b/tools/tests/base_unittest.py
@@ -8,22 +8,77 @@ found in the LICENSE file.
A wrapper around the standard Python unittest library, adding features we need
for various unittests within this directory.
+
+TODO(epoger): Move this into the common repo for broader use? Or at least in
+a more common place within the Skia repo?
"""
import errno
+import filecmp
import os
import shutil
-import sys
+import tempfile
import unittest
-# Set the PYTHONPATH to include the tools directory.
-sys.path.append(
- os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
-import find_run_binary
+TRUNK_DIR = os.path.abspath(os.path.join(
+ os.path.dirname(__file__), os.pardir, os.pardir))
class TestCase(unittest.TestCase):
+ def __init__(self, *args, **kwargs):
+ super(TestCase, self).__init__(*args, **kwargs)
+ # Subclasses should override this default value if they want their output
+ # to be automatically compared against expectations (see setUp and tearDown)
+ self._testdata_dir = None
+
+ def setUp(self):
+ """Called before each test."""
+ # Get the name of this test, in such a way that it will be consistent
+ # regardless of the directory it is run from (throw away package names,
+ # if any).
+ self._test_name = '.'.join(self.id().split('.')[-3:])
+
+ self._temp_dir = tempfile.mkdtemp()
+ if self._testdata_dir:
+ self.create_empty_dir(self.output_dir_actual)
+
+ def tearDown(self):
+ """Called after each test."""
+ shutil.rmtree(self._temp_dir)
+ if self._testdata_dir and os.path.exists(self.output_dir_expected):
+ different_files = _find_different_files(self.output_dir_actual,
+ self.output_dir_expected)
+ # Don't add any cleanup code below this assert; if the test fails, nothing
+ # after it will run, so the artifacts will not be cleaned up.
+ assert (not different_files), \
+ ('found differing files:\n' +
+ '\n'.join(['tkdiff %s %s &' % (
+ os.path.join(self.output_dir_actual, basename),
+ os.path.join(self.output_dir_expected, basename))
+ for basename in different_files]))
+
+ @property
+ def temp_dir(self):
+ return self._temp_dir
+
+ @property
+ def input_dir(self):
+ assert self._testdata_dir, 'self._testdata_dir must be set'
+ return os.path.join(self._testdata_dir, 'inputs')
+
+ @property
+ def output_dir_actual(self):
+ assert self._testdata_dir, 'self._testdata_dir must be set'
+ return os.path.join(
+ self._testdata_dir, 'outputs', 'actual', self._test_name)
+
+ @property
+ def output_dir_expected(self):
+ assert self._testdata_dir, 'self._testdata_dir must be set'
+ return os.path.join(
+ self._testdata_dir, 'outputs', 'expected', self._test_name)
+
def shortDescription(self):
"""Tell unittest framework to not print docstrings for test cases."""
return None
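
For context, a minimal sketch (not part of this change) of how a test module
might opt into the automatic output comparison by setting self._testdata_dir
in a subclass; the module name, class name, and testdata path below are
hypothetical.

# Hypothetical subclass, illustrative only.
import os

import base_unittest

class MyToolTest(base_unittest.TestCase):

  def __init__(self, *args, **kwargs):
    super(MyToolTest, self).__init__(*args, **kwargs)
    # Assumed layout: <testdata_dir>/inputs plus <testdata_dir>/outputs/expected
    self._testdata_dir = os.path.join(
        base_unittest.TRUNK_DIR, 'tools', 'tests', 'mytool')

  def test_generates_expected_output(self):
    # setUp() has already created an empty self.output_dir_actual.
    # Whatever the test writes there is diffed against
    # self.output_dir_expected in tearDown(); the test fails if any files
    # differ.
    with open(os.path.join(self.output_dir_actual, 'result.txt'), 'w') as f:
      f.write('contents to compare against expectations\n')
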
@@ -34,42 +89,48 @@ class TestCase(unittest.TestCase):
Args:
path: path on local disk
"""
- shutil.rmtree(path=path, ignore_errors=True)
+ # Delete the old one, if any.
+ if os.path.isdir(path):
+ shutil.rmtree(path=path, ignore_errors=True)
+ elif os.path.lexists(path):
+ os.remove(path)
+
+ # Create the new one.
try:
os.makedirs(path)
except OSError as exc:
+ # Guard against race condition (somebody else is creating the same dir)
if exc.errno != errno.EEXIST:
raise
return path
- def run_command(self, args):
- """Runs a program from the command line and returns stdout.
- Args:
- args: Command line to run, as a list of string parameters. args[0] is the
- binary to run.
-
- Returns:
- stdout from the program, as a single string.
-
- Raises:
- Exception: the program exited with a nonzero return code.
- """
- return find_run_binary.run_command(args)
+def _find_different_files(dir1, dir2, ignore_subtree_names=None):
+ """Returns a list of any files that differ between the directory trees rooted
+ at dir1 and dir2.
- def find_path_to_program(self, program):
- """Returns path to an existing program binary.
+ Args:
+ dir1: root of a directory tree; if nonexistent, will raise OSError
+ dir2: root of another directory tree; if nonexistent, will raise OSError
+ ignore_subtree_names: list of subtree directory names to ignore;
+ defaults to ['.svn'], so all SVN files are ignored
- Args:
- program: Basename of the program to find (e.g., 'render_pictures').
-
- Returns:
- Absolute path to the program binary, as a string.
-
- Raises:
- Exception: unable to find the program binary.
- """
- return find_run_binary.find_path_to_program(program)
+ TODO(epoger): include the dirname within each filename (not just the
+ basename), to make it easier to locate any differences
+ """
+ differing_files = []
+ if ignore_subtree_names is None:
+ ignore_subtree_names = ['.svn']
+ dircmp = filecmp.dircmp(dir1, dir2, ignore=ignore_subtree_names)
+ differing_files.extend(dircmp.left_only)
+ differing_files.extend(dircmp.right_only)
+ differing_files.extend(dircmp.common_funny)
+ differing_files.extend(dircmp.diff_files)
+ differing_files.extend(dircmp.funny_files)
+ for common_dir in dircmp.common_dirs:
+ differing_files.extend(_find_different_files(
+ os.path.join(dir1, common_dir), os.path.join(dir2, common_dir)))
+ return differing_files
def main(test_case_class):
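
For illustration, a rough sketch (hypothetical scratch directories, not part
of this change) of the kind of result _find_different_files produces: the
basenames of files that exist on only one side or whose contents differ.

# Illustrative sketch only.
import os
import shutil
import tempfile

dir1 = tempfile.mkdtemp()
dir2 = tempfile.mkdtemp()

def write_file(path, contents):
  with open(path, 'w') as f:
    f.write(contents)

write_file(os.path.join(dir1, 'same.txt'), 'identical contents')
write_file(os.path.join(dir2, 'same.txt'), 'identical contents')
write_file(os.path.join(dir1, 'left_only.txt'), 'only in dir1')
write_file(os.path.join(dir1, 'changed.txt'), 'short')
write_file(os.path.join(dir2, 'changed.txt'), 'much longer contents')

# Expected (order not guaranteed): ['left_only.txt', 'changed.txt']
# (From outside this module, the call would be
# base_unittest._find_different_files.)
print(_find_different_files(dir1, dir2))

shutil.rmtree(dir1)
shutil.rmtree(dir2)
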
@@ -77,10 +138,6 @@ def main(test_case_class):
Raises an Exception if any of those tests fail (in case we are running in the
context of run_all.py, which depends on that Exception to signal failures).
-
- TODO(epoger): Make all of our unit tests use the Python unittest framework,
- so we can leverage its ability to run *all* the tests and report failures at
- the end.
"""
suite = unittest.TestLoader().loadTestsFromTestCase(test_case_class)
results = unittest.TextTestRunner(verbosity=2).run(suite)
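
A test module built on this base class would typically end with a thin main()
wrapper so it can be run directly or picked up by run_all.py; a sketch,
continuing the hypothetical MyToolTest above:

def main():
  base_unittest.main(MyToolTest)

if __name__ == '__main__':
  main()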