Diffstat (limited to 'tools')
-rwxr-xr-x  tools/tests/base_unittest.py          127
-rwxr-xr-x  tools/tests/fix_pythonpath.py           20
-rwxr-xr-x  tools/tests/render_pictures_test.py     74
3 files changed, 158 insertions, 63 deletions
diff --git a/tools/tests/base_unittest.py b/tools/tests/base_unittest.py
index f7ee570a24..9ff710f4a1 100755
--- a/tools/tests/base_unittest.py
+++ b/tools/tests/base_unittest.py
@@ -8,22 +8,77 @@ found in the LICENSE file.
 
 A wrapper around the standard Python unittest library, adding features we need
 for various unittests within this directory.
+
+TODO(epoger): Move this into the common repo for broader use? Or at least in
+a more common place within the Skia repo?
 """
 
 import errno
+import filecmp
 import os
 import shutil
-import sys
+import tempfile
 import unittest
 
-# Set the PYTHONPATH to include the tools directory.
-sys.path.append(
-    os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
-import find_run_binary
+TRUNK_DIR = os.path.abspath(os.path.join(
+    os.path.dirname(__file__), os.pardir, os.pardir))
 
 
 class TestCase(unittest.TestCase):
 
+  def __init__(self, *args, **kwargs):
+    super(TestCase, self).__init__(*args, **kwargs)
+    # Subclasses should override this default value if they want their output
+    # to be automatically compared against expectations (see setUp and tearDown)
+    self._testdata_dir = None
+
+  def setUp(self):
+    """Called before each test."""
+    # Get the name of this test, in such a way that it will be consistent
+    # regardless of the directory it is run from (throw away package names,
+    # if any).
+    self._test_name = '.'.join(self.id().split('.')[-3:])
+
+    self._temp_dir = tempfile.mkdtemp()
+    if self._testdata_dir:
+      self.create_empty_dir(self.output_dir_actual)
+
+  def tearDown(self):
+    """Called after each test."""
+    shutil.rmtree(self._temp_dir)
+    if self._testdata_dir and os.path.exists(self.output_dir_expected):
+      different_files = _find_different_files(self.output_dir_actual,
+                                               self.output_dir_expected)
+      # Don't add any cleanup code below this assert!
+      # Then if tests fail, the artifacts will not be cleaned up.
+      assert (not different_files), \
+          ('found differing files:\n' +
+           '\n'.join(['tkdiff %s %s &' % (
+               os.path.join(self.output_dir_actual, basename),
+               os.path.join(self.output_dir_expected, basename))
+                      for basename in different_files]))
+
+  @property
+  def temp_dir(self):
+    return self._temp_dir
+
+  @property
+  def input_dir(self):
+    assert self._testdata_dir, 'self._testdata_dir must be set'
+    return os.path.join(self._testdata_dir, 'inputs')
+
+  @property
+  def output_dir_actual(self):
+    assert self._testdata_dir, 'self._testdata_dir must be set'
+    return os.path.join(
+        self._testdata_dir, 'outputs', 'actual', self._test_name)
+
+  @property
+  def output_dir_expected(self):
+    assert self._testdata_dir, 'self._testdata_dir must be set'
+    return os.path.join(
+        self._testdata_dir, 'outputs', 'expected', self._test_name)
+
   def shortDescription(self):
     """Tell unittest framework to not print docstrings for test cases."""
     return None
@@ -34,42 +89,48 @@ class TestCase(unittest.TestCase):
     Args:
       path: path on local disk
     """
-    shutil.rmtree(path=path, ignore_errors=True)
+    # Delete the old one, if any.
+    if os.path.isdir(path):
+      shutil.rmtree(path=path, ignore_errors=True)
+    elif os.path.lexists(path):
+      os.remove(path)
+
+    # Create the new one.
     try:
       os.makedirs(path)
     except OSError as exc:
+      # Guard against race condition (somebody else is creating the same dir)
       if exc.errno != errno.EEXIST:
        raise
     return path
 
-  def run_command(self, args):
-    """Runs a program from the command line and returns stdout.
-
-    Args:
-      args: Command line to run, as a list of string parameters. args[0] is the
-          binary to run.
-
-    Returns:
-      stdout from the program, as a single string.
-
-    Raises:
-      Exception: the program exited with a nonzero return code.
-    """
-    return find_run_binary.run_command(args)
+
+def _find_different_files(dir1, dir2, ignore_subtree_names=None):
+  """Returns a list of any files that differ between the directory trees rooted
+  at dir1 and dir2.
 
-  def find_path_to_program(self, program):
-    """Returns path to an existing program binary.
+  Args:
+    dir1: root of a directory tree; if nonexistent, will raise OSError
+    dir2: root of another directory tree; if nonexistent, will raise OSError
+    ignore_subtree_names: list of subtree directory names to ignore;
+        defaults to ['.svn'], so all SVN files are ignores
 
-    Args:
-      program: Basename of the program to find (e.g., 'render_pictures').
-
-    Returns:
-      Absolute path to the program binary, as a string.
-
-    Raises:
-      Exception: unable to find the program binary.
-    """
-    return find_run_binary.find_path_to_program(program)
+  TODO(epoger): include the dirname within each filename (not just the
+  basename), to make it easier to locate any differences
+  """
+  differing_files = []
+  if ignore_subtree_names is None:
+    ignore_subtree_names = ['.svn']
+  dircmp = filecmp.dircmp(dir1, dir2, ignore=ignore_subtree_names)
+  differing_files.extend(dircmp.left_only)
+  differing_files.extend(dircmp.right_only)
+  differing_files.extend(dircmp.common_funny)
+  differing_files.extend(dircmp.diff_files)
+  differing_files.extend(dircmp.funny_files)
+  for common_dir in dircmp.common_dirs:
+    differing_files.extend(_find_different_files(
        os.path.join(dir1, common_dir), os.path.join(dir2, common_dir)))
+  return differing_files
 
 
 def main(test_case_class):
@@ -77,10 +138,6 @@ def main(test_case_class):
 
   Raises an Exception if any of those tests fail (in case we are running in the
   context of run_all.py, which depends on that Exception to signal failures).
-
-  TODO(epoger): Make all of our unit tests use the Python unittest framework,
-  so we can leverage its ability to run *all* the tests and report failures at
-  the end.
   """
   suite = unittest.TestLoader().loadTestsFromTestCase(test_case_class)
   results = unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/tools/tests/fix_pythonpath.py b/tools/tests/fix_pythonpath.py
new file mode 100755
index 0000000000..a746a18937
--- /dev/null
+++ b/tools/tests/fix_pythonpath.py
@@ -0,0 +1,20 @@
+#!/usr/bin/python
+
+"""
+Copyright 2014 Google Inc.
+
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+
+Adds possibly-needed directories to PYTHONPATH, if they aren't already there.
+"""
+
+import os
+import sys
+
+TRUNK_DIRECTORY = os.path.abspath(os.path.join(
+    os.path.dirname(__file__), os.pardir, os.pardir))
+for subdir in ['tools']:
+  fullpath = os.path.join(TRUNK_DIRECTORY, subdir)
+  if fullpath not in sys.path:
+    sys.path.append(fullpath)
diff --git a/tools/tests/render_pictures_test.py b/tools/tests/render_pictures_test.py
index 8feef67318..a7636dc943 100755
--- a/tools/tests/render_pictures_test.py
+++ b/tools/tests/render_pictures_test.py
@@ -16,8 +16,12 @@ import os
 import shutil
 import tempfile
 
+# Must fix up PYTHONPATH before importing from within Skia
+import fix_pythonpath  # pylint: disable=W0611
+
 # Imports from within Skia
 import base_unittest
+import find_run_binary
 
 # Maximum length of text diffs to show when tests fail
 MAX_DIFF_LENGTH = 30000
@@ -292,7 +296,7 @@ class RenderPicturesTest(base_unittest.TestCase):
   def test_untiled_empty_expectations_file(self):
     """Same as test_untiled, but with an empty expectations file."""
     expectations_path = os.path.join(self._expectations_dir, 'empty')
-    with open(expectations_path, 'w') as fh:
+    with open(expectations_path, 'w'):
       pass
     expected_summary_dict = {
       "header" : EXPECTED_HEADER_CONTENTS,
@@ -329,7 +333,8 @@ class RenderPicturesTest(base_unittest.TestCase):
           "checksumAlgorithm" : "bitmap-64bitMD5",
           "checksumValue" : 11092453015575919668,
           "comparisonResult" : "no-comparison",
-          "filepath" : "red_skp/bitmap-64bitMD5_11092453015575919668.png",
+          "filepath" :
+              "red_skp/bitmap-64bitMD5_11092453015575919668.png",
         },
       },
       "green.skp": {
@@ -338,7 +343,8 @@ class RenderPicturesTest(base_unittest.TestCase):
           "checksumAlgorithm" : "bitmap-64bitMD5",
           "checksumValue" : 8891695120562235492,
           "comparisonResult" : "no-comparison",
-          "filepath" : "green_skp/bitmap-64bitMD5_8891695120562235492.png",
+          "filepath" :
+              "green_skp/bitmap-64bitMD5_8891695120562235492.png",
         },
       }
     }
@@ -467,32 +473,38 @@ class RenderPicturesTest(base_unittest.TestCase):
           "checksumAlgorithm" : "bitmap-64bitMD5",
           "checksumValue" : 5815827069051002745,
           "comparisonResult" : "no-comparison",
-          "filepath" : "red_skp/bitmap-64bitMD5_5815827069051002745.png",
+          "filepath" :
+              "red_skp/bitmap-64bitMD5_5815827069051002745.png",
         }, {
           "checksumAlgorithm" : "bitmap-64bitMD5",
           "checksumValue" : 9323613075234140270,
           "comparisonResult" : "no-comparison",
-          "filepath" : "red_skp/bitmap-64bitMD5_9323613075234140270.png",
+          "filepath" :
+              "red_skp/bitmap-64bitMD5_9323613075234140270.png",
         }, {
           "checksumAlgorithm" : "bitmap-64bitMD5",
           "checksumValue" : 16670399404877552232,
           "comparisonResult" : "no-comparison",
-          "filepath" : "red_skp/bitmap-64bitMD5_16670399404877552232.png",
+          "filepath" :
+              "red_skp/bitmap-64bitMD5_16670399404877552232.png",
         }, {
           "checksumAlgorithm" : "bitmap-64bitMD5",
           "checksumValue" : 2507897274083364964,
           "comparisonResult" : "no-comparison",
-          "filepath" : "red_skp/bitmap-64bitMD5_2507897274083364964.png",
+          "filepath" :
+              "red_skp/bitmap-64bitMD5_2507897274083364964.png",
         }, {
           "checksumAlgorithm" : "bitmap-64bitMD5",
           "checksumValue" : 7325267995523877959,
           "comparisonResult" : "no-comparison",
-          "filepath" : "red_skp/bitmap-64bitMD5_7325267995523877959.png",
+          "filepath" :
+              "red_skp/bitmap-64bitMD5_7325267995523877959.png",
         }, {
           "checksumAlgorithm" : "bitmap-64bitMD5",
           "checksumValue" : 2181381724594493116,
           "comparisonResult" : "no-comparison",
-          "filepath" : "red_skp/bitmap-64bitMD5_2181381724594493116.png",
+          "filepath" :
+              "red_skp/bitmap-64bitMD5_2181381724594493116.png",
         }],
       },
       "green.skp": {
@@ -503,32 +515,38 @@ class RenderPicturesTest(base_unittest.TestCase):
           "checksumAlgorithm" : "bitmap-64bitMD5",
           "checksumValue" : 12587324416545178013,
           "comparisonResult" : "no-comparison",
-          "filepath" : "green_skp/bitmap-64bitMD5_12587324416545178013.png",
+          "filepath" :
+              "green_skp/bitmap-64bitMD5_12587324416545178013.png",
        }, {
           "checksumAlgorithm" : "bitmap-64bitMD5",
           "checksumValue" : 7624374914829746293,
           "comparisonResult" : "no-comparison",
-          "filepath" : "green_skp/bitmap-64bitMD5_7624374914829746293.png",
+          "filepath" :
+              "green_skp/bitmap-64bitMD5_7624374914829746293.png",
        }, {
           "checksumAlgorithm" : "bitmap-64bitMD5",
           "checksumValue" : 5686489729535631913,
           "comparisonResult" : "no-comparison",
-          "filepath" : "green_skp/bitmap-64bitMD5_5686489729535631913.png",
+          "filepath" :
+              "green_skp/bitmap-64bitMD5_5686489729535631913.png",
        }, {
           "checksumAlgorithm" : "bitmap-64bitMD5",
           "checksumValue" : 7980646035555096146,
           "comparisonResult" : "no-comparison",
-          "filepath" : "green_skp/bitmap-64bitMD5_7980646035555096146.png",
+          "filepath" :
+              "green_skp/bitmap-64bitMD5_7980646035555096146.png",
        }, {
           "checksumAlgorithm" : "bitmap-64bitMD5",
           "checksumValue" : 17817086664365875131,
           "comparisonResult" : "no-comparison",
-          "filepath" : "green_skp/bitmap-64bitMD5_17817086664365875131.png",
+          "filepath" :
+              "green_skp/bitmap-64bitMD5_17817086664365875131.png",
        }, {
           "checksumAlgorithm" : "bitmap-64bitMD5",
           "checksumValue" : 10673669813016809363,
           "comparisonResult" : "no-comparison",
-          "filepath" : "green_skp/bitmap-64bitMD5_10673669813016809363.png",
+          "filepath" :
+              "green_skp/bitmap-64bitMD5_10673669813016809363.png",
        }],
      }
    }
@@ -553,10 +571,9 @@ class RenderPicturesTest(base_unittest.TestCase):
                                       'bitmap-64bitMD5_10673669813016809363.png'])
 
   def _run_render_pictures(self, args):
-    binary = self.find_path_to_program('render_pictures')
-    return self.run_command([binary,
-                             '--config', '8888',
-                             ] + args)
+    binary = find_run_binary.find_path_to_program('render_pictures')
+    return find_run_binary.run_command(
+        [binary, '--config', '8888'] + args)
 
   def _create_expectations(self, missing_some_images=False,
                            rel_path='expectations.json'):
@@ -614,15 +631,16 @@ class RenderPicturesTest(base_unittest.TestCase):
       width: Width of canvas to create.
       height: Height of canvas to create.
     """
-    binary = self.find_path_to_program('skpmaker')
-    return self.run_command([binary,
-                             '--red', str(red),
-                             '--green', str(green),
-                             '--blue', str(blue),
-                             '--width', str(width),
-                             '--height', str(height),
-                             '--writePath', str(output_path),
-                             ])
+    binary = find_run_binary.find_path_to_program('skpmaker')
+    return find_run_binary.run_command([
+        binary,
+        '--red', str(red),
+        '--green', str(green),
+        '--blue', str(blue),
+        '--width', str(width),
+        '--height', str(height),
+        '--writePath', str(output_path),
+    ])
 
   def _assert_directory_contents(self, dir_path, expected_filenames):
     """Asserts that files found in a dir are identical to expected_filenames.
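For orientation, here is a minimal sketch (not part of the commit) of how a new test module might use the pieces this change introduces: fix_pythonpath to set up sys.path before any Skia imports, find_run_binary to locate and run a tool binary, and the automatic actual-vs-expected output comparison that base_unittest.TestCase now performs in setUp/tearDown whenever self._testdata_dir is set. The ExampleTest class, its testdata path, and the particular skpmaker argument values are hypothetical; only the helper names and flags shown in the diff above are taken from the source.

#!/usr/bin/python

"""Hypothetical example_test.py, illustrating the new test fixture pattern."""

import os

# Must fix up PYTHONPATH before importing from within Skia
import fix_pythonpath  # pylint: disable=W0611

# Imports from within Skia
import base_unittest
import find_run_binary


class ExampleTest(base_unittest.TestCase):

  def __init__(self, *args, **kwargs):
    super(ExampleTest, self).__init__(*args, **kwargs)
    # Opt in to automatic output comparison: point _testdata_dir at a
    # directory holding 'inputs' and 'outputs/expected' subdirectories.
    # This particular path is illustrative only.
    self._testdata_dir = os.path.join(
        base_unittest.TRUNK_DIR, 'tools', 'tests', 'example_testdata')

  def test_skpmaker_output(self):
    # Anything written under output_dir_actual is diffed against
    # output_dir_expected by tearDown(); any difference fails the test.
    binary = find_run_binary.find_path_to_program('skpmaker')
    find_run_binary.run_command([
        binary,
        '--red', '0',
        '--green', '0',
        '--blue', '255',
        '--width', '640',
        '--height', '480',
        '--writePath', os.path.join(self.output_dir_actual, 'blue.skp'),
    ])


def main():
  base_unittest.main(ExampleTest)


if __name__ == '__main__':
  main()

Such a module can be run directly or via run_all.py; base_unittest.main() raises an Exception on any failure, which is how run_all.py detects failing test modules.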