-rwxr-xr-x  gm/rebaseline_server/base_unittest.py                    131
-rwxr-xr-x  gm/rebaseline_server/compare_configs_test.py              17
-rwxr-xr-x  gm/rebaseline_server/compare_rendered_pictures_test.py    26
-rwxr-xr-x  gm/rebaseline_server/compare_to_expectations_test.py      12
-rwxr-xr-x  gm/rebaseline_server/download_actuals_test.py             10
-rw-r--r--  gm/rebaseline_server/imagediffdb.py                       11
-rwxr-xr-x  gm/rebaseline_server/imagediffdb_test.py                  10
-rwxr-xr-x  gm/rebaseline_server/imagepair_test.py                     6
-rwxr-xr-x  tools/tests/base_unittest.py                              127
-rwxr-xr-x  tools/tests/fix_pythonpath.py                              20
-rwxr-xr-x  tools/tests/render_pictures_test.py                        74
11 files changed, 224 insertions(+), 220 deletions(-)
diff --git a/gm/rebaseline_server/base_unittest.py b/gm/rebaseline_server/base_unittest.py
index 32b4bb9d74..f8fdff19c9 100755
--- a/gm/rebaseline_server/base_unittest.py
+++ b/gm/rebaseline_server/base_unittest.py
@@ -10,124 +10,27 @@ A wrapper around the standard Python unittest library, adding features we need
for various unittests within this directory.
"""
-import filecmp
+# System-level imports.
import os
-import shutil
-import tempfile
-import unittest
+import sys
-PARENT_DIR = os.path.dirname(os.path.realpath(__file__))
-TRUNK_DIR = os.path.dirname(os.path.dirname(PARENT_DIR))
-TESTDATA_DIR = os.path.join(PARENT_DIR, 'testdata')
-OUTPUT_DIR_ACTUAL = os.path.join(TESTDATA_DIR, 'outputs', 'actual')
-OUTPUT_DIR_EXPECTED = os.path.join(TESTDATA_DIR, 'outputs', 'expected')
+PARENT_DIR = os.path.abspath(os.path.dirname(__file__))
+TRUNK_DIR = os.path.abspath(os.path.join(PARENT_DIR, os.pardir, os.pardir))
+# Import the superclass base_unittest module from the tools dir.
+TOOLS_DIR = os.path.join(TRUNK_DIR, 'tools')
+if TOOLS_DIR not in sys.path:
+ sys.path.append(TOOLS_DIR)
+import tests.base_unittest as superclass_module
-class TestCase(unittest.TestCase):
- def setUp(self):
- # Get the name of this test, in such a way that it will be consistent
- # regardless of the directory it is run from (throw away package names,
- # if any).
- test_name = '.'.join(self.id().split('.')[-3:])
+class TestCase(superclass_module.TestCase):
- self._input_dir = os.path.join(TESTDATA_DIR, 'inputs')
- self._output_dir_actual = os.path.join(OUTPUT_DIR_ACTUAL, test_name)
- self._output_dir_expected = os.path.join(OUTPUT_DIR_EXPECTED, test_name)
- create_empty_dir(self._output_dir_actual)
- self._temp_dir = tempfile.mkdtemp()
+ def __init__(self, *args, **kwargs):
+ super(TestCase, self).__init__(*args, **kwargs)
+ # Some of the tests within this package want their output validated,
+ # so we declare where the expected and actual output will be.
+ self._testdata_dir = os.path.join(PARENT_DIR, 'testdata')
- def tearDown(self):
- shutil.rmtree(self._temp_dir)
- if os.path.exists(self._output_dir_expected):
- different_files = find_different_files(self._output_dir_actual,
- self._output_dir_expected)
- # Maybe we should move this assert elsewhere? It's unusual to see an
- # assert within tearDown(), but my thinking was:
- # 1. Every test case will have some collection of output files that need
- # to be validated.
- # 2. So put that validation within tearDown(), which will be called after
- # every test case!
- #
- # I have confirmed that the test really does fail if this assert is
- # triggered.
- #
- # Ravi notes: if somebody later comes along and adds cleanup code below
- # this assert, then if tests fail, the artifacts will not be cleaned up.
- assert (not different_files), \
- ('found differing files:\n' +
- '\n'.join(['tkdiff %s %s &' % (
- os.path.join(self._output_dir_actual, basename),
- os.path.join(self._output_dir_expected, basename))
- for basename in different_files]))
-
- def shortDescription(self):
- """Tell unittest framework to not print docstrings for test cases."""
- return None
-
- def find_path_to_program(self, program):
- """Returns path to an existing program binary.
-
- Args:
- program: Basename of the program to find (e.g., 'render_pictures').
-
- Returns:
- Absolute path to the program binary, as a string.
-
- Raises:
- Exception: unable to find the program binary.
- """
- possible_paths = [os.path.join(TRUNK_DIR, 'out', 'Release', program),
- os.path.join(TRUNK_DIR, 'out', 'Debug', program),
- os.path.join(TRUNK_DIR, 'out', 'Release',
- program + '.exe'),
- os.path.join(TRUNK_DIR, 'out', 'Debug',
- program + '.exe')]
- for try_path in possible_paths:
- if os.path.isfile(try_path):
- return try_path
- raise Exception('cannot find %s in paths %s; maybe you need to '
- 'build %s?' % (program, possible_paths, program))
-
-
-def create_empty_dir(path):
- """Create an empty directory at the given path."""
- if os.path.isdir(path):
- shutil.rmtree(path)
- elif os.path.lexists(path):
- os.remove(path)
- os.makedirs(path)
-
-
-def find_different_files(dir1, dir2, ignore_subtree_names=None):
- """Returns a list of any files that differ between the directory trees rooted
- at dir1 and dir2.
-
- Args:
- dir1: root of a directory tree; if nonexistent, will raise OSError
- dir2: root of another directory tree; if nonexistent, will raise OSError
- ignore_subtree_names: list of subtree directory names to ignore;
- defaults to ['.svn'], so all SVN files are ignores
-
- TODO(epoger): include the dirname within each filename (not just the
- basename), to make it easier to locate any differences
- """
- differing_files = []
- if ignore_subtree_names is None:
- ignore_subtree_names = ['.svn']
- dircmp = filecmp.dircmp(dir1, dir2, ignore=ignore_subtree_names)
- differing_files.extend(dircmp.left_only)
- differing_files.extend(dircmp.right_only)
- differing_files.extend(dircmp.common_funny)
- differing_files.extend(dircmp.diff_files)
- differing_files.extend(dircmp.funny_files)
- for common_dir in dircmp.common_dirs:
- differing_files.extend(find_different_files(
- os.path.join(dir1, common_dir), os.path.join(dir2, common_dir)))
- return differing_files
-
-
-def main(test_case_class):
- """Run the unit tests within the given class."""
- suite = unittest.TestLoader().loadTestsFromTestCase(test_case_class)
- unittest.TextTestRunner(verbosity=2).run(suite)
+def main(*args, **kwargs):
+ superclass_module.main(*args, **kwargs)
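The rewritten gm/rebaseline_server/base_unittest.py now does nothing but bootstrap sys.path and subclass the shared implementation in tools/tests. A minimal sketch of that bootstrap-and-subclass pattern follows; the directory and module names here (SHARED_DIR, base_testcase) are placeholders for illustration, not the actual Skia layout:

# Sketch of the sys.path-bootstrap-and-subclass pattern used above.
# SHARED_DIR and base_testcase are hypothetical names, not Skia's.
import os
import sys

PARENT_DIR = os.path.abspath(os.path.dirname(__file__))
SHARED_DIR = os.path.join(PARENT_DIR, os.pardir, 'shared')  # hypothetical
if SHARED_DIR not in sys.path:
  sys.path.append(SHARED_DIR)
import base_testcase as superclass_module  # hypothetical shared module


class TestCase(superclass_module.TestCase):
  def __init__(self, *args, **kwargs):
    super(TestCase, self).__init__(*args, **kwargs)
    # Point the superclass at this package's testdata so its setUp/tearDown
    # machinery knows where expected and actual outputs live.
    self._testdata_dir = os.path.join(PARENT_DIR, 'testdata')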
diff --git a/gm/rebaseline_server/compare_configs_test.py b/gm/rebaseline_server/compare_configs_test.py
index 756af66156..0c701e44bf 100755
--- a/gm/rebaseline_server/compare_configs_test.py
+++ b/gm/rebaseline_server/compare_configs_test.py
@@ -10,22 +10,25 @@ Test compare_configs.py
TODO(epoger): Create a command to update the expected results (in
self._output_dir_expected) when appropriate. For now, you should:
-1. examine the results in self._output_dir_actual and make sure they are ok
+1. examine the results in self.output_dir_actual and make sure they are ok
2. rm -rf self._output_dir_expected
-3. mv self._output_dir_actual self._output_dir_expected
+3. mv self.output_dir_actual self._output_dir_expected
Although, if you're using an SVN checkout, this will blow away .svn directories
within self._output_dir_expected, which wouldn't be good...
"""
+# System-level imports
import os
-import sys
+
+# Must fix up PYTHONPATH before importing from within Skia
+import fix_pythonpath # pylint: disable=W0611
# Imports from within Skia
import base_unittest
import compare_configs
+import gm_json
import results
-import gm_json # must import results first, so that gm_json will be in sys.path
class CompareConfigsTest(base_unittest.TestCase):
@@ -34,14 +37,14 @@ class CompareConfigsTest(base_unittest.TestCase):
"""Process results of a GM run with the ConfigComparisons object."""
results_obj = compare_configs.ConfigComparisons(
configs=('8888', 'gpu'),
- actuals_root=os.path.join(self._input_dir, 'gm-actuals'),
- generated_images_root=self._temp_dir,
+ actuals_root=os.path.join(self.input_dir, 'gm-actuals'),
+ generated_images_root=self.temp_dir,
diff_base_url='/static/generated-images')
results_obj.get_timestamp = mock_get_timestamp
gm_json.WriteToFile(
results_obj.get_packaged_results_of_type(
results.KEY__HEADER__RESULTS_ALL),
- os.path.join(self._output_dir_actual, 'gm.json'))
+ os.path.join(self.output_dir_actual, 'gm.json'))
def mock_get_timestamp():
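These tests pin results_obj.get_timestamp to mock_get_timestamp so the JSON they write is byte-for-byte reproducible and can be diffed against checked-in expectations. A hedged sketch of that instance-level monkey-patching idiom; FakeResults and the constant are illustrative, not the real results classes:

import time

class FakeResults(object):
  """Stand-in for a results object; illustrative only."""
  def get_timestamp(self):
    return time.time()  # nondeterministic in production

def mock_get_timestamp():
  """Return a fixed timestamp so output JSON is stable across runs."""
  return 12345678

results_obj = FakeResults()
# Assigning to the instance shadows the bound method, so subsequent
# calls to results_obj.get_timestamp() hit the deterministic mock.
results_obj.get_timestamp = mock_get_timestamp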
diff --git a/gm/rebaseline_server/compare_rendered_pictures_test.py b/gm/rebaseline_server/compare_rendered_pictures_test.py
index 7656d11d10..a8041ec802 100755
--- a/gm/rebaseline_server/compare_rendered_pictures_test.py
+++ b/gm/rebaseline_server/compare_rendered_pictures_test.py
@@ -10,23 +10,27 @@ Test compare_rendered_pictures.py
TODO(epoger): Create a command to update the expected results (in
self._output_dir_expected) when appropriate. For now, you should:
-1. examine the results in self._output_dir_actual and make sure they are ok
+1. examine the results in self.output_dir_actual and make sure they are ok
2. rm -rf self._output_dir_expected
-3. mv self._output_dir_actual self._output_dir_expected
+3. mv self.output_dir_actual self._output_dir_expected
Although, if you're using an SVN checkout, this will blow away .svn directories
within self._output_dir_expected, which wouldn't be good...
"""
+# System-level imports
import os
import subprocess
-import sys
+
+# Must fix up PYTHONPATH before importing from within Skia
+import fix_pythonpath # pylint: disable=W0611
# Imports from within Skia
import base_unittest
import compare_rendered_pictures
+import find_run_binary
+import gm_json
import results
-import gm_json # must import results first, so that gm_json will be in sys.path
class CompareRenderedPicturesTest(base_unittest.TestCase):
@@ -48,25 +52,25 @@ class CompareRenderedPicturesTest(base_unittest.TestCase):
})
results_obj = compare_rendered_pictures.RenderedPicturesComparisons(
- actuals_root=self._temp_dir,
+ actuals_root=self.temp_dir,
subdirs=('before_patch', 'after_patch'),
- generated_images_root=self._temp_dir,
+ generated_images_root=self.temp_dir,
diff_base_url='/static/generated-images')
results_obj.get_timestamp = mock_get_timestamp
gm_json.WriteToFile(
results_obj.get_packaged_results_of_type(
results.KEY__HEADER__RESULTS_ALL),
- os.path.join(self._output_dir_actual, 'compare_rendered_pictures.json'))
+ os.path.join(self.output_dir_actual, 'compare_rendered_pictures.json'))
def _generate_skps_and_run_render_pictures(self, subdir, skpdict):
"""Generate SKPs and run render_pictures on them.
Args:
- subdir: subdirectory (within self._temp_dir) to write all files into
+ subdir: subdirectory (within self.temp_dir) to write all files into
skpdict: {skpname: redvalue} dictionary describing the SKP files to render
"""
- out_path = os.path.join(self._temp_dir, subdir)
+ out_path = os.path.join(self.temp_dir, subdir)
os.makedirs(out_path)
for skpname, redvalue in skpdict.iteritems():
self._run_skpmaker(
@@ -75,7 +79,7 @@ class CompareRenderedPicturesTest(base_unittest.TestCase):
# TODO(epoger): Add --mode tile 256 256 --writeWholeImage to the unittest,
# and fix its result! (imageURLs within whole-image entries are wrong when
# I tried adding that)
- binary = self.find_path_to_program('render_pictures')
+ binary = find_run_binary.find_path_to_program('render_pictures')
return subprocess.check_output([
binary,
'--config', '8888',
@@ -96,7 +100,7 @@ class CompareRenderedPicturesTest(base_unittest.TestCase):
width: Width of canvas to create.
height: Height of canvas to create.
"""
- binary = self.find_path_to_program('skpmaker')
+ binary = find_run_binary.find_path_to_program('skpmaker')
return subprocess.check_output([
binary,
'--red', str(red),
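find_run_binary.find_path_to_program replaces the per-TestCase helper deleted from gm/rebaseline_server/base_unittest.py above. Judging by that removed helper, it probes the out/Release and out/Debug build directories (with .exe variants for Windows). A sketch along those lines; the search list is copied from the deleted helper, and the real find_run_binary module may differ:

import os

TRUNK_DIR = os.path.abspath(os.path.join(
    os.path.dirname(__file__), os.pardir, os.pardir))

def find_path_to_program(program):
  """Return the path to a built binary, trying Release then Debug.

  Mirrors the helper deleted above; the real find_run_binary
  implementation may differ.
  """
  possible_paths = [os.path.join(TRUNK_DIR, 'out', 'Release', program),
                    os.path.join(TRUNK_DIR, 'out', 'Debug', program),
                    os.path.join(TRUNK_DIR, 'out', 'Release',
                                 program + '.exe'),
                    os.path.join(TRUNK_DIR, 'out', 'Debug',
                                 program + '.exe')]
  for try_path in possible_paths:
    if os.path.isfile(try_path):
      return try_path
  raise Exception('cannot find %s in paths %s; maybe you need to '
                  'build %s?' % (program, possible_paths, program))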
diff --git a/gm/rebaseline_server/compare_to_expectations_test.py b/gm/rebaseline_server/compare_to_expectations_test.py
index d2b2dd60ee..2997cde67f 100755
--- a/gm/rebaseline_server/compare_to_expectations_test.py
+++ b/gm/rebaseline_server/compare_to_expectations_test.py
@@ -10,9 +10,9 @@ Test compare_to_expectations.py
TODO(epoger): Create a command to update the expected results (in
self._output_dir_expected) when appropriate. For now, you should:
-1. examine the results in self._output_dir_actual and make sure they are ok
+1. examine the results in self.output_dir_actual and make sure they are ok
2. rm -rf self._output_dir_expected
-3. mv self._output_dir_actual self._output_dir_expected
+3. mv self.output_dir_actual self._output_dir_expected
Although, if you're using an SVN checkout, this will blow away .svn directories
within self._output_dir_expected, which wouldn't be good...
@@ -32,17 +32,17 @@ class CompareToExpectationsTest(base_unittest.TestCase):
def test_gm(self):
"""Process results of a GM run with the ExpectationComparisons object."""
- image_diff_db = imagediffdb.ImageDiffDB(storage_root=self._temp_dir)
+ image_diff_db = imagediffdb.ImageDiffDB(storage_root=self.temp_dir)
results_obj = compare_to_expectations.ExpectationComparisons(
image_diff_db=image_diff_db,
- actuals_root=os.path.join(self._input_dir, 'gm-actuals'),
- expected_root=os.path.join(self._input_dir, 'gm-expectations'),
+ actuals_root=os.path.join(self.input_dir, 'gm-actuals'),
+ expected_root=os.path.join(self.input_dir, 'gm-expectations'),
diff_base_url='/static/generated-images')
results_obj.get_timestamp = mock_get_timestamp
gm_json.WriteToFile(
results_obj.get_packaged_results_of_type(
results.KEY__HEADER__RESULTS_ALL),
- os.path.join(self._output_dir_actual, 'gm.json'))
+ os.path.join(self.output_dir_actual, 'gm.json'))
def mock_get_timestamp():
diff --git a/gm/rebaseline_server/download_actuals_test.py b/gm/rebaseline_server/download_actuals_test.py
index a74389f459..c6e7dea8e5 100755
--- a/gm/rebaseline_server/download_actuals_test.py
+++ b/gm/rebaseline_server/download_actuals_test.py
@@ -10,9 +10,9 @@ Test download.py
TODO(epoger): Create a command to update the expected results (in
self._output_dir_expected) when appropriate. For now, you should:
-1. examine the results in self._output_dir_actual and make sure they are ok
+1. examine the results in self.output_dir_actual and make sure they are ok
2. rm -rf self._output_dir_expected
-3. mv self._output_dir_actual self._output_dir_expected
+3. mv self.output_dir_actual self._output_dir_expected
Although, if you're using an SVN checkout, this will blow away .svn directories
within self._output_dir_expected, which wouldn't be good...
@@ -36,12 +36,12 @@ class DownloadTest(base_unittest.TestCase):
"""Tests fetch() of GM results from actual-results.json ."""
downloader = download_actuals.Download(
actuals_base_url=url_utils.create_filepath_url(
- os.path.join(self._input_dir, 'gm-actuals')),
+ os.path.join(self.input_dir, 'gm-actuals')),
gm_actuals_root_url=url_utils.create_filepath_url(
- os.path.join(self._input_dir, 'fake-gm-imagefiles')))
+ os.path.join(self.input_dir, 'fake-gm-imagefiles')))
downloader.fetch(
builder_name='Test-Android-GalaxyNexus-SGX540-Arm7-Release',
- dest_dir=self._output_dir_actual)
+ dest_dir=self.output_dir_actual)
def main():
diff --git a/gm/rebaseline_server/imagediffdb.py b/gm/rebaseline_server/imagediffdb.py
index f6071f9700..89f9fef319 100644
--- a/gm/rebaseline_server/imagediffdb.py
+++ b/gm/rebaseline_server/imagediffdb.py
@@ -9,21 +9,20 @@ found in the LICENSE file.
Calculate differences between image pairs, and store them in a database.
"""
+# System-level imports
import contextlib
import json
import logging
import os
import re
import shutil
-import sys
import tempfile
import urllib
-# Set the PYTHONPATH to include the tools directory.
-sys.path.append(
- os.path.join(
- os.path.dirname(os.path.realpath(__file__)), os.pardir, os.pardir,
- 'tools'))
+# Must fix up PYTHONPATH before importing from within Skia
+import fix_pythonpath # pylint: disable=W0611
+
+# Imports from within Skia
import find_run_binary
SKPDIFF_BINARY = find_run_binary.find_path_to_program('skpdiff')
diff --git a/gm/rebaseline_server/imagediffdb_test.py b/gm/rebaseline_server/imagediffdb_test.py
index 4fc8c66f2b..186b2f1324 100755
--- a/gm/rebaseline_server/imagediffdb_test.py
+++ b/gm/rebaseline_server/imagediffdb_test.py
@@ -10,7 +10,6 @@ Test imagediffdb.py
"""
# System-level imports
-import logging
import shutil
import tempfile
import unittest
@@ -26,11 +25,11 @@ IMG_URL_BASE = ('http://chromium-skia-gm.commondatastorage.googleapis.com/gm/'
class ImageDiffDbTest(unittest.TestCase):
def setUp(self):
- self._temp_dir = tempfile.mkdtemp()
+ self.temp_dir = tempfile.mkdtemp()
self.maxDiff = None
def tearDown(self):
- shutil.rmtree(self._temp_dir)
+ shutil.rmtree(self.temp_dir)
def shortDescription(self):
"""Tell unittest framework to not print docstrings for test cases."""
@@ -38,6 +37,7 @@ class ImageDiffDbTest(unittest.TestCase):
def test_sanitize_locator(self):
"""Test _sanitize_locator()."""
+ # pylint: disable=W0212
self.assertEqual(imagediffdb._sanitize_locator('simple'), 'simple')
self.assertEqual(imagediffdb._sanitize_locator(1234), '1234')
self.assertEqual(imagediffdb._sanitize_locator('one/two'), 'one_two')
@@ -76,9 +76,9 @@ class ImageDiffDbTest(unittest.TestCase):
]
# Add all image pairs to the database
- db = imagediffdb.ImageDiffDB(self._temp_dir)
+ db = imagediffdb.ImageDiffDB(self.temp_dir)
for selftest in selftests:
- retval = db.add_image_pair(
+ db.add_image_pair(
expected_image_locator=selftest[0], expected_image_url=selftest[1],
actual_image_locator=selftest[2], actual_image_url=selftest[3])
diff --git a/gm/rebaseline_server/imagepair_test.py b/gm/rebaseline_server/imagepair_test.py
index 14fe4c1143..ef7695acac 100755
--- a/gm/rebaseline_server/imagepair_test.py
+++ b/gm/rebaseline_server/imagepair_test.py
@@ -26,11 +26,11 @@ IMG_URL_BASE = ('http://chromium-skia-gm.commondatastorage.googleapis.com/'
class ImagePairTest(unittest.TestCase):
def setUp(self):
- self._temp_dir = tempfile.mkdtemp()
+ self.temp_dir = tempfile.mkdtemp()
self.maxDiff = None
def tearDown(self):
- shutil.rmtree(self._temp_dir)
+ shutil.rmtree(self.temp_dir)
def shortDescription(self):
"""Tells unittest framework to not print docstrings for test cases."""
@@ -163,7 +163,7 @@ class ImagePairTest(unittest.TestCase):
],
]
- db = imagediffdb.ImageDiffDB(self._temp_dir)
+ db = imagediffdb.ImageDiffDB(self.temp_dir)
for selftest in selftests:
image_pair = imagepair.ImagePair(
image_diff_db=db,
diff --git a/tools/tests/base_unittest.py b/tools/tests/base_unittest.py
index f7ee570a24..9ff710f4a1 100755
--- a/tools/tests/base_unittest.py
+++ b/tools/tests/base_unittest.py
@@ -8,22 +8,77 @@ found in the LICENSE file.
A wrapper around the standard Python unittest library, adding features we need
for various unittests within this directory.
+
+TODO(epoger): Move this into the common repo for broader use? Or at least in
+a more common place within the Skia repo?
"""
import errno
+import filecmp
import os
import shutil
-import sys
+import tempfile
import unittest
-# Set the PYTHONPATH to include the tools directory.
-sys.path.append(
- os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
-import find_run_binary
+TRUNK_DIR = os.path.abspath(os.path.join(
+ os.path.dirname(__file__), os.pardir, os.pardir))
class TestCase(unittest.TestCase):
+ def __init__(self, *args, **kwargs):
+ super(TestCase, self).__init__(*args, **kwargs)
+ # Subclasses should override this default value if they want their output
+ # to be automatically compared against expectations (see setUp and tearDown)
+ self._testdata_dir = None
+
+ def setUp(self):
+ """Called before each test."""
+ # Get the name of this test, in such a way that it will be consistent
+ # regardless of the directory it is run from (throw away package names,
+ # if any).
+ self._test_name = '.'.join(self.id().split('.')[-3:])
+
+ self._temp_dir = tempfile.mkdtemp()
+ if self._testdata_dir:
+ self.create_empty_dir(self.output_dir_actual)
+
+ def tearDown(self):
+ """Called after each test."""
+ shutil.rmtree(self._temp_dir)
+ if self._testdata_dir and os.path.exists(self.output_dir_expected):
+ different_files = _find_different_files(self.output_dir_actual,
+ self.output_dir_expected)
+ # Don't add any cleanup code below this assert!
+ # Then if tests fail, the artifacts will not be cleaned up.
+ assert (not different_files), \
+ ('found differing files:\n' +
+ '\n'.join(['tkdiff %s %s &' % (
+ os.path.join(self.output_dir_actual, basename),
+ os.path.join(self.output_dir_expected, basename))
+ for basename in different_files]))
+
+ @property
+ def temp_dir(self):
+ return self._temp_dir
+
+ @property
+ def input_dir(self):
+ assert self._testdata_dir, 'self._testdata_dir must be set'
+ return os.path.join(self._testdata_dir, 'inputs')
+
+ @property
+ def output_dir_actual(self):
+ assert self._testdata_dir, 'self._testdata_dir must be set'
+ return os.path.join(
+ self._testdata_dir, 'outputs', 'actual', self._test_name)
+
+ @property
+ def output_dir_expected(self):
+ assert self._testdata_dir, 'self._testdata_dir must be set'
+ return os.path.join(
+ self._testdata_dir, 'outputs', 'expected', self._test_name)
+
def shortDescription(self):
"""Tell unittest framework to not print docstrings for test cases."""
return None
@@ -34,42 +89,48 @@ class TestCase(unittest.TestCase):
Args:
path: path on local disk
"""
- shutil.rmtree(path=path, ignore_errors=True)
+ # Delete the old one, if any.
+ if os.path.isdir(path):
+ shutil.rmtree(path=path, ignore_errors=True)
+ elif os.path.lexists(path):
+ os.remove(path)
+
+ # Create the new one.
try:
os.makedirs(path)
except OSError as exc:
+ # Guard against race condition (somebody else is creating the same dir)
if exc.errno != errno.EEXIST:
raise
return path
- def run_command(self, args):
- """Runs a program from the command line and returns stdout.
- Args:
- args: Command line to run, as a list of string parameters. args[0] is the
- binary to run.
-
- Returns:
- stdout from the program, as a single string.
-
- Raises:
- Exception: the program exited with a nonzero return code.
- """
- return find_run_binary.run_command(args)
+def _find_different_files(dir1, dir2, ignore_subtree_names=None):
+ """Returns a list of any files that differ between the directory trees rooted
+ at dir1 and dir2.
- def find_path_to_program(self, program):
- """Returns path to an existing program binary.
+ Args:
+ dir1: root of a directory tree; if nonexistent, will raise OSError
+ dir2: root of another directory tree; if nonexistent, will raise OSError
+ ignore_subtree_names: list of subtree directory names to ignore;
+    defaults to ['.svn'], so all SVN files are ignored
- Args:
- program: Basename of the program to find (e.g., 'render_pictures').
-
- Returns:
- Absolute path to the program binary, as a string.
-
- Raises:
- Exception: unable to find the program binary.
- """
- return find_run_binary.find_path_to_program(program)
+ TODO(epoger): include the dirname within each filename (not just the
+ basename), to make it easier to locate any differences
+ """
+ differing_files = []
+ if ignore_subtree_names is None:
+ ignore_subtree_names = ['.svn']
+ dircmp = filecmp.dircmp(dir1, dir2, ignore=ignore_subtree_names)
+ differing_files.extend(dircmp.left_only)
+ differing_files.extend(dircmp.right_only)
+ differing_files.extend(dircmp.common_funny)
+ differing_files.extend(dircmp.diff_files)
+ differing_files.extend(dircmp.funny_files)
+ for common_dir in dircmp.common_dirs:
+ differing_files.extend(_find_different_files(
+ os.path.join(dir1, common_dir), os.path.join(dir2, common_dir)))
+ return differing_files
def main(test_case_class):
@@ -77,10 +138,6 @@ def main(test_case_class):
Raises an Exception if any of those tests fail (in case we are running in the
context of run_all.py, which depends on that Exception to signal failures).
-
- TODO(epoger): Make all of our unit tests use the Python unittest framework,
- so we can leverage its ability to run *all* the tests and report failures at
- the end.
"""
suite = unittest.TestLoader().loadTestsFromTestCase(test_case_class)
results = unittest.TextTestRunner(verbosity=2).run(suite)
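To see how the new superclass is meant to be consumed: a subclass that sets self._testdata_dir opts in to the automatic expected-vs-actual comparison in tearDown, while one that leaves it None just gets the managed temp dir. A brief usage sketch, with an illustrative test name and file contents (assumes it lives alongside tools/tests/base_unittest.py):

import os
import base_unittest  # the tools/tests module defined above

class MyTest(base_unittest.TestCase):
  def __init__(self, *args, **kwargs):
    super(MyTest, self).__init__(*args, **kwargs)
    # Opting in: tearDown will diff output_dir_actual against
    # output_dir_expected and assert that they match.
    self._testdata_dir = os.path.join(
        os.path.dirname(__file__), 'testdata')

  def test_write_output(self):
    # setUp created output_dir_actual; anything written here gets
    # validated against output_dir_expected in tearDown.
    with open(os.path.join(self.output_dir_actual, 'out.txt'), 'w') as fh:
      fh.write('hello\n')

if __name__ == '__main__':
  base_unittest.main(MyTest)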
diff --git a/tools/tests/fix_pythonpath.py b/tools/tests/fix_pythonpath.py
new file mode 100755
index 0000000000..a746a18937
--- /dev/null
+++ b/tools/tests/fix_pythonpath.py
@@ -0,0 +1,20 @@
+#!/usr/bin/python
+
+"""
+Copyright 2014 Google Inc.
+
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+
+Adds possibly-needed directories to PYTHONPATH, if they aren't already there.
+"""
+
+import os
+import sys
+
+TRUNK_DIRECTORY = os.path.abspath(os.path.join(
+ os.path.dirname(__file__), os.pardir, os.pardir))
+for subdir in ['tools']:
+ fullpath = os.path.join(TRUNK_DIRECTORY, subdir)
+ if fullpath not in sys.path:
+ sys.path.append(fullpath)
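Importing fix_pythonpath is done purely for its side effect: the module body runs once, appending the tools directory to sys.path, after which sibling imports like find_run_binary resolve normally. The pylint: disable=W0611 (unused-import) annotation in the callers acknowledges that the name itself is never used. For example, at the top of a test file:

# Must fix up PYTHONPATH before importing from within Skia.
import fix_pythonpath  # pylint: disable=W0611

# Now modules that live under trunk/tools can be imported directly.
import find_run_binary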
diff --git a/tools/tests/render_pictures_test.py b/tools/tests/render_pictures_test.py
index 8feef67318..a7636dc943 100755
--- a/tools/tests/render_pictures_test.py
+++ b/tools/tests/render_pictures_test.py
@@ -16,8 +16,12 @@ import os
import shutil
import tempfile
+# Must fix up PYTHONPATH before importing from within Skia
+import fix_pythonpath # pylint: disable=W0611
+
# Imports from within Skia
import base_unittest
+import find_run_binary
# Maximum length of text diffs to show when tests fail
MAX_DIFF_LENGTH = 30000
@@ -292,7 +296,7 @@ class RenderPicturesTest(base_unittest.TestCase):
def test_untiled_empty_expectations_file(self):
"""Same as test_untiled, but with an empty expectations file."""
expectations_path = os.path.join(self._expectations_dir, 'empty')
- with open(expectations_path, 'w') as fh:
+ with open(expectations_path, 'w'):
pass
expected_summary_dict = {
"header" : EXPECTED_HEADER_CONTENTS,
@@ -329,7 +333,8 @@ class RenderPicturesTest(base_unittest.TestCase):
"checksumAlgorithm" : "bitmap-64bitMD5",
"checksumValue" : 11092453015575919668,
"comparisonResult" : "no-comparison",
- "filepath" : "red_skp/bitmap-64bitMD5_11092453015575919668.png",
+ "filepath" :
+ "red_skp/bitmap-64bitMD5_11092453015575919668.png",
},
},
"green.skp": {
@@ -338,7 +343,8 @@ class RenderPicturesTest(base_unittest.TestCase):
"checksumAlgorithm" : "bitmap-64bitMD5",
"checksumValue" : 8891695120562235492,
"comparisonResult" : "no-comparison",
- "filepath" : "green_skp/bitmap-64bitMD5_8891695120562235492.png",
+ "filepath" :
+ "green_skp/bitmap-64bitMD5_8891695120562235492.png",
},
}
}
@@ -467,32 +473,38 @@ class RenderPicturesTest(base_unittest.TestCase):
"checksumAlgorithm" : "bitmap-64bitMD5",
"checksumValue" : 5815827069051002745,
"comparisonResult" : "no-comparison",
- "filepath" : "red_skp/bitmap-64bitMD5_5815827069051002745.png",
+ "filepath" :
+ "red_skp/bitmap-64bitMD5_5815827069051002745.png",
}, {
"checksumAlgorithm" : "bitmap-64bitMD5",
"checksumValue" : 9323613075234140270,
"comparisonResult" : "no-comparison",
- "filepath" : "red_skp/bitmap-64bitMD5_9323613075234140270.png",
+ "filepath" :
+ "red_skp/bitmap-64bitMD5_9323613075234140270.png",
}, {
"checksumAlgorithm" : "bitmap-64bitMD5",
"checksumValue" : 16670399404877552232,
"comparisonResult" : "no-comparison",
- "filepath" : "red_skp/bitmap-64bitMD5_16670399404877552232.png",
+ "filepath" :
+ "red_skp/bitmap-64bitMD5_16670399404877552232.png",
}, {
"checksumAlgorithm" : "bitmap-64bitMD5",
"checksumValue" : 2507897274083364964,
"comparisonResult" : "no-comparison",
- "filepath" : "red_skp/bitmap-64bitMD5_2507897274083364964.png",
+ "filepath" :
+ "red_skp/bitmap-64bitMD5_2507897274083364964.png",
}, {
"checksumAlgorithm" : "bitmap-64bitMD5",
"checksumValue" : 7325267995523877959,
"comparisonResult" : "no-comparison",
- "filepath" : "red_skp/bitmap-64bitMD5_7325267995523877959.png",
+ "filepath" :
+ "red_skp/bitmap-64bitMD5_7325267995523877959.png",
}, {
"checksumAlgorithm" : "bitmap-64bitMD5",
"checksumValue" : 2181381724594493116,
"comparisonResult" : "no-comparison",
- "filepath" : "red_skp/bitmap-64bitMD5_2181381724594493116.png",
+ "filepath" :
+ "red_skp/bitmap-64bitMD5_2181381724594493116.png",
}],
},
"green.skp": {
@@ -503,32 +515,38 @@ class RenderPicturesTest(base_unittest.TestCase):
"checksumAlgorithm" : "bitmap-64bitMD5",
"checksumValue" : 12587324416545178013,
"comparisonResult" : "no-comparison",
- "filepath" : "green_skp/bitmap-64bitMD5_12587324416545178013.png",
+ "filepath" :
+ "green_skp/bitmap-64bitMD5_12587324416545178013.png",
}, {
"checksumAlgorithm" : "bitmap-64bitMD5",
"checksumValue" : 7624374914829746293,
"comparisonResult" : "no-comparison",
- "filepath" : "green_skp/bitmap-64bitMD5_7624374914829746293.png",
+ "filepath" :
+ "green_skp/bitmap-64bitMD5_7624374914829746293.png",
}, {
"checksumAlgorithm" : "bitmap-64bitMD5",
"checksumValue" : 5686489729535631913,
"comparisonResult" : "no-comparison",
- "filepath" : "green_skp/bitmap-64bitMD5_5686489729535631913.png",
+ "filepath" :
+ "green_skp/bitmap-64bitMD5_5686489729535631913.png",
}, {
"checksumAlgorithm" : "bitmap-64bitMD5",
"checksumValue" : 7980646035555096146,
"comparisonResult" : "no-comparison",
- "filepath" : "green_skp/bitmap-64bitMD5_7980646035555096146.png",
+ "filepath" :
+ "green_skp/bitmap-64bitMD5_7980646035555096146.png",
}, {
"checksumAlgorithm" : "bitmap-64bitMD5",
"checksumValue" : 17817086664365875131,
"comparisonResult" : "no-comparison",
- "filepath" : "green_skp/bitmap-64bitMD5_17817086664365875131.png",
+ "filepath" :
+ "green_skp/bitmap-64bitMD5_17817086664365875131.png",
}, {
"checksumAlgorithm" : "bitmap-64bitMD5",
"checksumValue" : 10673669813016809363,
"comparisonResult" : "no-comparison",
- "filepath" : "green_skp/bitmap-64bitMD5_10673669813016809363.png",
+ "filepath" :
+ "green_skp/bitmap-64bitMD5_10673669813016809363.png",
}],
}
}
@@ -553,10 +571,9 @@ class RenderPicturesTest(base_unittest.TestCase):
'bitmap-64bitMD5_10673669813016809363.png'])
def _run_render_pictures(self, args):
- binary = self.find_path_to_program('render_pictures')
- return self.run_command([binary,
- '--config', '8888',
- ] + args)
+ binary = find_run_binary.find_path_to_program('render_pictures')
+ return find_run_binary.run_command(
+ [binary, '--config', '8888'] + args)
def _create_expectations(self, missing_some_images=False,
rel_path='expectations.json'):
@@ -614,15 +631,16 @@ class RenderPicturesTest(base_unittest.TestCase):
width: Width of canvas to create.
height: Height of canvas to create.
"""
- binary = self.find_path_to_program('skpmaker')
- return self.run_command([binary,
- '--red', str(red),
- '--green', str(green),
- '--blue', str(blue),
- '--width', str(width),
- '--height', str(height),
- '--writePath', str(output_path),
- ])
+ binary = find_run_binary.find_path_to_program('skpmaker')
+ return find_run_binary.run_command([
+ binary,
+ '--red', str(red),
+ '--green', str(green),
+ '--blue', str(blue),
+ '--width', str(width),
+ '--height', str(height),
+ '--writePath', str(output_path),
+ ])
def _assert_directory_contents(self, dir_path, expected_filenames):
"""Asserts that files found in a dir are identical to expected_filenames.