about summary refs log tree commit diff homepage
path: root/tools/run_tests
diff options
context:
space:
mode:
author Craig Tiller <ctiller@google.com> 2015-01-30 14:08:39 -0800
committer Craig Tiller <ctiller@google.com> 2015-01-30 14:08:39 -0800
commit 547db2b3eff96b9382cc96d5ea890d595575934b (patch)
tree b563ffc18dcf140d62b8736af627124574788e1b /tools/run_tests
parent eb272bc6ca8dea18fc4f20c9a12bfd5507293ae8 (diff)
Allow specifying environment variables.
Refactor the code a little to make this easier to munge around.
Diffstat (limited to 'tools/run_tests')
-rwxr-xr-x tools/run_tests/jobset.py    | 67 ++++++++++++++++++++++++++++++++-----------
-rwxr-xr-x tools/run_tests/run_tests.py | 91 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 97 insertions(+), 61 deletions(-)
diff --git a/tools/run_tests/jobset.py b/tools/run_tests/jobset.py
index 8f16a4ff2c..19ae52ef3b 100755
--- a/tools/run_tests/jobset.py
+++ b/tools/run_tests/jobset.py
@@ -86,19 +86,49 @@ def which(filename):
raise Exception('%s not found' % filename)
+class JobSpec(object):
+ """Specifies what to run for a job."""
+
+ def __init__(self, cmdline, shortname=None, environ={}, hash_targets=[]):
+ """
+ Arguments:
+ cmdline: a list of arguments to pass as the command line
+ environ: a dictionary of environment variables to set in the child process
+ hash_targets: which files to include in the hash representing the jobs version
+ (or empty, indicating the job should not be hashed)
+ """
+ self.cmdline = cmdline
+ self.environ = environ
+ self.shortname = cmdline[0] if shortname is None else shortname
+ self.hash_targets = hash_targets or []
+
+ def identity(self):
+ return '%r %r %r' % (self.cmdline, self.environ, self.hash_targets)
+
+ def __hash__(self):
+ return hash(self.identity())
+
+ def __cmp__(self, other):
+ return self.identity() == other.identity()
+
+
class Job(object):
"""Manages one job."""
- def __init__(self, cmdline, bin_hash, newline_on_success):
- self._cmdline = cmdline
+ def __init__(self, spec, bin_hash, newline_on_success):
+ self._spec = spec
self._bin_hash = bin_hash
self._tempfile = tempfile.TemporaryFile()
- self._process = subprocess.Popen(args=cmdline,
+ env = os.environ.copy()
+ for k, v in spec.environ.iteritems():
+ env[k] = v
+ self._process = subprocess.Popen(args=spec.cmdline,
stderr=subprocess.STDOUT,
- stdout=self._tempfile)
+ stdout=self._tempfile,
+ env=env)
self._state = _RUNNING
self._newline_on_success = newline_on_success
- message('START', ' '.join(self._cmdline))
+ message('START', spec.shortname)
def state(self, update_cache):
"""Poll current state of the job. Prints messages at completion."""
@@ -108,12 +138,13 @@ class Job(object):
self._tempfile.seek(0)
stdout = self._tempfile.read()
message('FAILED', '%s [ret=%d]' % (
- ' '.join(self._cmdline), self._process.returncode), stdout)
+ self._spec.shortname, self._process.returncode), stdout)
else:
self._state = _SUCCESS
- message('PASSED', '%s' % ' '.join(self._cmdline),
+ message('PASSED', self._spec.shortname,
do_newline=self._newline_on_success)
- update_cache.finished(self._cmdline, self._bin_hash)
+ if self._bin_hash:
+ update_cache.finished(self._spec.identity(), self._bin_hash)
return self._state
def kill(self):
@@ -135,16 +166,26 @@ class Jobset(object):
self._newline_on_success = newline_on_success
self._cache = cache
- def start(self, cmdline):
+ def start(self, spec):
"""Start a job. Return True on success, False on failure."""
while len(self._running) >= self._maxjobs:
if self.cancelled(): return False
self.reap()
if self.cancelled(): return False
- with open(which(cmdline[0])) as f:
- bin_hash = hashlib.sha1(f.read()).hexdigest()
- if self._cache.should_run(cmdline, bin_hash):
- self._running.add(Job(cmdline, bin_hash, self._newline_on_success))
+ if spec.hash_targets:
+ bin_hash = hashlib.sha1()
+ for fn in spec.hash_targets:
+ with open(which(fn)) as f:
+ bin_hash.update(f.read())
+ bin_hash = bin_hash.hexdigest()
+ should_run = self._cache.should_run(spec.identity(), bin_hash)
+ else:
+ bin_hash = None
+ should_run = True
+ if should_run:
+ self._running.add(Job(spec,
+ bin_hash,
+ self._newline_on_success))
return True
def reap(self):
diff --git a/tools/run_tests/run_tests.py b/tools/run_tests/run_tests.py
index a4cb08fa87..8cc029e3cc 100755
--- a/tools/run_tests/run_tests.py
+++ b/tools/run_tests/run_tests.py
@@ -17,13 +17,17 @@ import watch_dirs
# SimpleConfig: just compile with CONFIG=config, and run the binary to test
class SimpleConfig(object):
- def __init__(self, config):
+ def __init__(self, config, environ={}):
self.build_config = config
self.maxjobs = 2 * multiprocessing.cpu_count()
self.allow_hashing = (config != 'gcov')
+ self.environ = environ
- def run_command(self, binary):
- return [binary]
+ def job_spec(self, binary, hash_targets):
+ return jobset.JobSpec(cmdline=[binary],
+ environ=self.environ,
+ hash_targets=hash_targets
+ if self.allow_hashing else None)
# ValgrindConfig: compile with some CONFIG=config, but use valgrind to run
@@ -35,14 +39,14 @@ class ValgrindConfig(object):
self.maxjobs = 2 * multiprocessing.cpu_count()
self.allow_hashing = False
- def run_command(self, binary):
- return ['valgrind', '--tool=%s' % self.tool, binary]
+ def job_spec(self, binary, hash_targets):
+ return JobSpec(cmdline=['valgrind', '--tool=%s' % self.tool, binary],
+ hash_targets=None)
class CLanguage(object):
def __init__(self, make_target, test_lang):
- self.allow_hashing = True
self.make_target = make_target
with open('tools/run_tests/tests.json') as f:
js = json.load(f)
@@ -50,8 +54,12 @@ class CLanguage(object):
for tgt in js
if tgt['language'] == test_lang]
- def test_binaries(self, config):
- return ['bins/%s/%s' % (config, binary) for binary in self.binaries]
+ def test_specs(self, config):
+ out = []
+ for name in self.binaries:
+ binary = 'bins/%s/%s' % (config.build_config, name)
+ out.append(config.job_spec(binary, [binary]))
+ return out
def make_targets(self):
return ['buildtests_%s' % self.make_target]
@@ -62,11 +70,8 @@ class CLanguage(object):
class NodeLanguage(object):
- def __init__(self):
- self.allow_hashing = False
-
- def test_binaries(self, config):
- return ['tools/run_tests/run_node.sh']
+ def test_specs(self, config):
+ return [config.job_spec('tools/run_tests/run_node.sh', None)]
def make_targets(self):
return ['static_c']
@@ -77,11 +82,8 @@ class NodeLanguage(object):
class PhpLanguage(object):
- def __init__(self):
- self.allow_hashing = False
-
- def test_binaries(self, config):
- return ['src/php/bin/run_tests.sh']
+ def test_specs(self, config):
+ return [config.job_spec('src/php/bin/run_tests.sh', None)]
def make_targets(self):
return ['static_c']
@@ -92,11 +94,8 @@ class PhpLanguage(object):
class PythonLanguage(object):
- def __init__(self):
- self.allow_hashing = False
-
- def test_binaries(self, config):
- return ['tools/run_tests/run_python.sh']
+ def test_specs(self, config):
+ return [config.job_spec('tools/run_tests/run_python.sh', None)]
def make_targets(self):
return[]
@@ -111,7 +110,8 @@ _CONFIGS = {
'opt': SimpleConfig('opt'),
'tsan': SimpleConfig('tsan'),
'msan': SimpleConfig('msan'),
- 'asan': SimpleConfig('asan'),
+ 'asan': SimpleConfig('asan', environ={
+ 'ASAN_OPTIONS': 'detect_leaks=1:color=always'}),
'gcov': SimpleConfig('gcov'),
'memcheck': ValgrindConfig('valgrind', 'memcheck'),
'helgrind': ValgrindConfig('dbg', 'helgrind')
@@ -157,14 +157,20 @@ build_configs = set(cfg.build_config for cfg in run_configs)
make_targets = []
languages = set(_LANGUAGES[l] for l in args.language)
-build_steps = [['make',
- '-j', '%d' % (multiprocessing.cpu_count() + 1),
- 'CONFIG=%s' % cfg] + list(set(
- itertools.chain.from_iterable(l.make_targets()
- for l in languages)))
- for cfg in build_configs] + list(
- itertools.chain.from_iterable(l.build_steps()
- for l in languages))
+build_steps = [jobset.JobSpec(['make',
+ '-j', '%d' % (multiprocessing.cpu_count() + 1),
+ 'CONFIG=%s' % cfg] + list(set(
+ itertools.chain.from_iterable(
+ l.make_targets() for l in languages))))
+ for cfg in build_configs] + list(set(
+ jobset.JobSpec(cmdline)
+ for l in languages
+ for cmdline in l.build_steps()))
+one_run = set(
+ spec
+ for config in run_configs
+ for language in args.language
+ for spec in _LANGUAGES[language].test_specs(config))
runs_per_test = args.runs_per_test
forever = args.forever
@@ -177,7 +183,6 @@ class TestCache(object):
self._last_successful_run = {}
def should_run(self, cmdline, bin_hash):
- cmdline = ' '.join(cmdline)
if cmdline not in self._last_successful_run:
return True
if self._last_successful_run[cmdline] != bin_hash:
@@ -185,7 +190,7 @@ class TestCache(object):
return False
def finished(self, cmdline, bin_hash):
- self._last_successful_run[' '.join(cmdline)] = bin_hash
+ self._last_successful_run[cmdline] = bin_hash
def dump(self):
return [{'cmdline': k, 'hash': v}
@@ -211,12 +216,6 @@ def _build_and_run(check_cancelled, newline_on_success, cache):
return 1
# run all the tests
- one_run = dict(
- (' '.join(config.run_command(x)), config.run_command(x))
- for config in run_configs
- for language in args.language
- for x in _LANGUAGES[language].test_binaries(config.build_config)
- ).values()
all_runs = itertools.chain.from_iterable(
itertools.repeat(one_run, runs_per_test))
if not jobset.run(all_runs, check_cancelled,
@@ -228,12 +227,8 @@ def _build_and_run(check_cancelled, newline_on_success, cache):
return 0
-test_cache = (None
- if not all(x.allow_hashing
- for x in itertools.chain(languages, run_configs))
- else TestCache())
-if test_cache:
- test_cache.maybe_load()
+test_cache = TestCache()
+test_cache.maybe_load()
if forever:
success = True
@@ -250,7 +245,7 @@ if forever:
'All tests are now passing properly',
do_newline=True)
jobset.message('IDLE', 'No change detected')
- if test_cache: test_cache.save()
+ test_cache.save()
while not have_files_changed():
time.sleep(1)
else:
@@ -261,5 +256,5 @@ else:
jobset.message('SUCCESS', 'All tests passed', do_newline=True)
else:
jobset.message('FAILED', 'Some tests failed', do_newline=True)
- if test_cache: test_cache.save()
+ test_cache.save()
sys.exit(result)