aboutsummaryrefslogtreecommitdiffhomepage
path: root/tools
diff options
context:
space:
mode:
authorGravatar Googler <noreply@google.com>2015-08-25 17:57:13 +0000
committerGravatar Lukacs Berki <lberki@google.com>2015-08-26 07:40:24 +0000
commit9c2989d009294f8ffe9de15b09ceab15012552d6 (patch)
tree861534234cd080be993afbd5f22d26731ce69aa6 /tools
parent000494327d4efab63362f3a940db74ffa8b9d712 (diff)
Use sorted lists instead of sets for generated file list comparison
-- MOS_MIGRATED_REVID=101482632
Diffstat (limited to 'tools')
-rw-r--r--tools/build_rules/test_rules.bzl72
1 file changed, 37 insertions(+), 35 deletions(-)
diff --git a/tools/build_rules/test_rules.bzl b/tools/build_rules/test_rules.bzl
index acaf13cef8..a594ed328e 100644
--- a/tools/build_rules/test_rules.bzl
+++ b/tools/build_rules/test_rules.bzl
@@ -17,7 +17,6 @@
### First, trivial tests that either always pass, always fail,
### or sometimes pass depending on a trivial computation.
-
def success_target(ctx, msg):
"""Return a success for an analysis test.
@@ -42,15 +41,15 @@ def success_target(ctx, msg):
executable=True)
return struct(runfiles=ctx.runfiles([exe, dat]))
-
def _successful_test_impl(ctx):
return success_target(ctx, ctx.attr.msg)
successful_test = rule(
- implementation=_successful_test_impl,
- attrs={"msg": attr.string(mandatory=True)},
- test=True, executable=True)
-
+ attrs = {"msg": attr.string(mandatory = True)},
+ executable = True,
+ test = True,
+ implementation = _successful_test_impl,
+)
def failure_target(ctx, msg):
"""Return a failure for an analysis test.
@@ -77,19 +76,18 @@ def failure_target(ctx, msg):
executable=True)
return struct(runfiles=ctx.runfiles([exe, dat]))
-
def _failed_test_impl(ctx):
return failure_target(ctx, ctx.attr.msg)
failed_test = rule(
- implementation=_failed_test_impl,
- attrs={"msg": attr.string(mandatory=True)},
- test=True, executable=True)
-
+ attrs = {"msg": attr.string(mandatory = True)},
+ executable = True,
+ test = True,
+ implementation = _failed_test_impl,
+)
### Second, general purpose utilities
-
def assert_(condition, string="assertion failed", *args):
"""Trivial assertion mechanism.
@@ -108,13 +106,11 @@ def assert_(condition, string="assertion failed", *args):
if not condition:
fail(string % args)
-
def strip_prefix(prefix, string):
assert_(string.startswith(prefix),
"%s does not start with %s", string, prefix)
return string[len(prefix):len(string)]
-
def expectation_description(expect=None, expect_failure=None):
"""Turn expectation of result or error into a string."""
if expect_failure:
@@ -122,7 +118,6 @@ def expectation_description(expect=None, expect_failure=None):
else:
return "result " + repr(expect)
-
def check_results(result, failure, expect, expect_failure):
"""See if actual computation results match expectations.
@@ -142,7 +137,6 @@ def check_results(result, failure, expect, expect_failure):
else:
return (False, "expect " + wanted + " but found " + found)
-
def load_results(name, result=None, failure=None,
expect=None, expect_failure=None):
"""issue load-time results of a test.
@@ -162,7 +156,6 @@ def load_results(name, result=None, failure=None,
this_test = successful_test if is_success else failed_test
return this_test(name=name, msg=msg)
-
def analysis_results(ctx, result=None, failure=None,
expect=None, expect_failure=None):
"""issue analysis-time results of a test.
@@ -183,10 +176,8 @@ def analysis_results(ctx, result=None, failure=None,
this_test = success_target if is_success else failure_target
return this_test(ctx, msg)
-
### Simple tests
-
def _rule_test_impl(ctx):
"""check that a rule generates the desired outputs and providers."""
rule_ = ctx.attr.rule
@@ -194,8 +185,11 @@ def _rule_test_impl(ctx):
exe = ctx.outputs.executable
if ctx.attr.generates:
prefix = rule_.label.package + "/"
- generates = set(ctx.attr.generates)
- generated = set([strip_prefix(prefix, f.short_path) for f in rule_.files])
+ # TODO(bazel-team): Use set() instead of sorted() once
+ # set comparison is implemented.
+ generates = sorted(ctx.attr.generates)
+ generated = sorted([strip_prefix(prefix, f.short_path)
+ for f in rule_.files])
if generates != generated:
fail("rule %s generates %s not %s"
% (rule_name, repr(generated), repr(generates)))
@@ -223,15 +217,16 @@ def _rule_test_impl(ctx):
else:
return success_target(ctx, "success")
-
rule_test = rule(
- implementation=_rule_test_impl,
- attrs={
- "rule": attr.label(mandatory=True),
+ attrs = {
+ "rule": attr.label(mandatory = True),
"generates": attr.string_list(),
- "provides": attr.string_dict()},
- test=True, executable=True)
-
+ "provides": attr.string_dict(),
+ },
+ executable = True,
+ test = True,
+ implementation = _rule_test_impl,
+)
def _file_test_impl(ctx):
"""check that a file has a given content."""
@@ -265,10 +260,17 @@ def _file_test_impl(ctx):
return struct(runfiles=ctx.runfiles([exe, file_]))
file_test = rule(
- implementation=_file_test_impl,
- attrs={
- "file": attr.label(mandatory=True, allow_files=True, single_file=True),
- "content": attr.string(default=""),
- "regexp": attr.string(default=""),
- "matches": attr.int(default=-1)},
- test=True, executable=True)
+ attrs = {
+ "file": attr.label(
+ mandatory = True,
+ allow_files = True,
+ single_file = True,
+ ),
+ "content": attr.string(default = ""),
+ "regexp": attr.string(default = ""),
+ "matches": attr.int(default = -1),
+ },
+ executable = True,
+ test = True,
+ implementation = _file_test_impl,
+)