 distdir.bzl | 23
 scripts/docs/jekyll.bzl | 121
 scripts/packages/self_extract_binary.bzl | 81
 src/embedded_tools.bzl | 57
 src/java_tools/buildjar/java/com/google/devtools/build/java/bazel/build_defs.bzl | 32
 src/java_tools/import_deps_checker/javatests/com/google/devtools/build/importdeps/tests.bzl | 120
 src/test/shell/bazel/list_source_repository.bzl | 30
 src/tools/launcher/win_rules.bzl | 43
 tools/android/android_sdk_repository_template.bzl | 587
 tools/build_defs/apple/shared.bzl | 73
 tools/build_defs/docker/build.bzl | 727
 tools/build_defs/docker/bundle.bzl | 131
 tools/build_defs/docker/label.bzl | 14
 tools/build_defs/docker/layers.bzl | 112
 tools/build_defs/docker/list.bzl | 9
 tools/build_defs/docker/path.bzl | 84
 tools/build_defs/docker/serialize.bzl | 5
 tools/build_defs/hash/hash.bzl | 29
 tools/build_defs/pkg/path.bzl | 70
 tools/build_defs/pkg/pkg.bzl | 390
 tools/build_defs/pkg/rpm.bzl | 197
 tools/build_defs/repo/git.bzl | 153
 tools/build_defs/repo/git_repositories.bzl | 16
 tools/build_defs/repo/http.bzl | 94
 tools/build_defs/repo/java.bzl | 10
 tools/build_defs/repo/jvm.bzl | 207
 tools/build_defs/repo/maven_rules.bzl | 334
 tools/build_defs/repo/utils.bzl | 89
 tools/build_rules/genproto.bzl | 44
 tools/build_rules/java_rules_skylark.bzl | 392
 tools/build_rules/test_rules.bzl | 444
 tools/build_rules/utilities.bzl | 33
 tools/cpp/alias_rules.bzl | 9
 tools/cpp/cc_configure.bzl | 71
 tools/cpp/compiler_flag.bzl | 8
 tools/cpp/crosstool_lib.bzl | 782
 tools/cpp/crosstool_utils.bzl | 389
 tools/cpp/dummy_toolchain.bzl | 8
 tools/cpp/lib_cc_configure.bzl | 310
 tools/cpp/osx_cc_configure.bzl | 229
 tools/cpp/toolchain_utils.bzl | 26
 tools/cpp/unix_cc_configure.bzl | 823
 tools/cpp/windows_cc_configure.bzl | 763
 tools/jdk/alias_rules.bzl | 8
 tools/jdk/default_java_toolchain.bzl | 14
 tools/osx/alias_rules.bzl | 9
 tools/osx/xcode_configure.bzl | 410
 tools/osx/xcode_version_flag.bzl | 151
 tools/sh/sh_configure.bzl | 64
 tools/sh/sh_toolchain.bzl | 4
 50 files changed, 4582 insertions(+), 4247 deletions(-)
diff --git a/distdir.bzl b/distdir.bzl
index f5a130de27..fb237d7e85 100644
--- a/distdir.bzl
+++ b/distdir.bzl
@@ -13,7 +13,7 @@
# limitations under the License.
"""Defines a repository rule that generates an archive consisting of the specified files to fetch"""
-_BUILD="""
+_BUILD = """
load("@bazel_tools//tools/build_defs/pkg:pkg.bzl", "pkg_tar")
pkg_tar(
@@ -26,20 +26,21 @@ pkg_tar(
"""
def _distdir_tar_impl(ctx):
- for name in ctx.attr.archives:
- ctx.download(ctx.attr.urls[name], name, ctx.attr.sha256[name], False)
- ctx.file("WORKSPACE", "")
- ctx.file("BUILD",
- _BUILD.format(srcs=ctx.attr.archives, dirname=ctx.attr.dirname))
+ for name in ctx.attr.archives:
+ ctx.download(ctx.attr.urls[name], name, ctx.attr.sha256[name], False)
+ ctx.file("WORKSPACE", "")
+ ctx.file(
+ "BUILD",
+ _BUILD.format(srcs = ctx.attr.archives, dirname = ctx.attr.dirname),
+ )
_distdir_tar_attrs = {
- "archives" : attr.string_list(),
- "sha256" : attr.string_dict(),
- "urls" : attr.string_list_dict(),
- "dirname" : attr.string(default="distdir"),
+ "archives": attr.string_list(),
+ "sha256": attr.string_dict(),
+ "urls": attr.string_list_dict(),
+ "dirname": attr.string(default = "distdir"),
}
-
distdir_tar = repository_rule(
implementation = _distdir_tar_impl,
attrs = _distdir_tar_attrs,
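
For reference, a minimal sketch of how the reformatted distdir_tar rule might be invoked from a WORKSPACE file; the archive name, URL, and checksum below are illustrative placeholders, not values from this change:

    load("//:distdir.bzl", "distdir_tar")

    distdir_tar(
        name = "test_distdir",
        archives = ["zlib-1.2.11.tar.gz"],
        dirname = "distdir",
        sha256 = {
            # Placeholder checksum; use the real sha256 of the archive.
            "zlib-1.2.11.tar.gz": "0000000000000000000000000000000000000000000000000000000000000000",
        },
        urls = {
            "zlib-1.2.11.tar.gz": ["https://example.com/zlib-1.2.11.tar.gz"],
        },
    )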
diff --git a/scripts/docs/jekyll.bzl b/scripts/docs/jekyll.bzl
index 3552f4a23b..0ca0c3eee9 100644
--- a/scripts/docs/jekyll.bzl
+++ b/scripts/docs/jekyll.bzl
@@ -14,70 +14,77 @@
"""Quick rule to build a Jekyll site."""
def _bucket_from_workspace_name(wname):
- """Try to assert the bucket name from the workspace name.
+ """Try to assert the bucket name from the workspace name.
- E.g. it will answer www.bazel.build if the workspace name is build_bazel_www.
+ E.g. it will answer www.bazel.build if the workspace name is build_bazel_www.
- Args:
- wname: workspace name
+ Args:
+ wname: workspace name
- Returns:
- the guessed name of the bucket for this workspace.
- """
- revlist = []
- for part in wname.split("_"):
- revlist.insert(0, part)
- return ".".join(revlist)
+ Returns:
+ the guessed name of the bucket for this workspace.
+ """
+ revlist = []
+ for part in wname.split("_"):
+ revlist.insert(0, part)
+ return ".".join(revlist)
def _impl(ctx):
- """Quick and non-hermetic rule to build a Jekyll site."""
- source = ctx.actions.declare_directory(ctx.attr.name + "-srcs")
- output = ctx.actions.declare_directory(ctx.attr.name + "-build")
+ """Quick and non-hermetic rule to build a Jekyll site."""
+ source = ctx.actions.declare_directory(ctx.attr.name + "-srcs")
+ output = ctx.actions.declare_directory(ctx.attr.name + "-build")
- ctx.actions.run_shell(inputs = ctx.files.srcs,
- outputs = [source],
- command = ("mkdir -p %s\n" % (source.path)) +
- "\n".join([
- "tar xf %s -C %s" % (src.path, source.path) for src in ctx.files.srcs])
- )
- ctx.actions.run(
- inputs = [source],
- outputs = [output],
- executable = "jekyll",
- use_default_shell_env = True,
- arguments = ["build", "-q", "-s", source.path, "-d", output.path]
- )
- ctx.actions.run(
- inputs = [output],
- outputs = [ctx.outputs.out],
- executable = "tar",
- arguments = ["cf", ctx.outputs.out.path, "-C", output.path, "."]
- )
+ ctx.actions.run_shell(
+ inputs = ctx.files.srcs,
+ outputs = [source],
+ command = ("mkdir -p %s\n" % (source.path)) +
+ "\n".join([
+ "tar xf %s -C %s" % (src.path, source.path)
+ for src in ctx.files.srcs
+ ]),
+ )
+ ctx.actions.run(
+ inputs = [source],
+ outputs = [output],
+ executable = "jekyll",
+ use_default_shell_env = True,
+ arguments = ["build", "-q", "-s", source.path, "-d", output.path],
+ )
+ ctx.actions.run(
+ inputs = [output],
+ outputs = [ctx.outputs.out],
+ executable = "tar",
+ arguments = ["cf", ctx.outputs.out.path, "-C", output.path, "."],
+ )
- # Create a shell script to serve the site locally or push with the --push
- # flag.
- bucket = ctx.attr.bucket if ctx.attr.bucket else _bucket_from_workspace_name(ctx.workspace_name)
+ # Create a shell script to serve the site locally or push with the --push
+ # flag.
+ bucket = ctx.attr.bucket if ctx.attr.bucket else _bucket_from_workspace_name(ctx.workspace_name)
- ctx.actions.expand_template(
- template=ctx.file._jekyll_build_tpl,
- output=ctx.outputs.executable,
- substitutions={
- "%{workspace_name}": ctx.workspace_name,
- "%{source_dir}": source.short_path,
- "%{prod_dir}": output.short_path,
- "%{bucket}": bucket,
- },
- is_executable=True)
- return [DefaultInfo(runfiles=ctx.runfiles(files=[source, output]))]
+ ctx.actions.expand_template(
+ template = ctx.file._jekyll_build_tpl,
+ output = ctx.outputs.executable,
+ substitutions = {
+ "%{workspace_name}": ctx.workspace_name,
+ "%{source_dir}": source.short_path,
+ "%{prod_dir}": output.short_path,
+ "%{bucket}": bucket,
+ },
+ is_executable = True,
+ )
+ return [DefaultInfo(runfiles = ctx.runfiles(files = [source, output]))]
jekyll_build = rule(
- implementation = _impl,
- executable = True,
- attrs = {
- "srcs": attr.label_list(allow_empty=False),
- "bucket": attr.string(),
- "_jekyll_build_tpl": attr.label(
- default=":jekyll_build.sh.tpl",
- allow_files=True,
- single_file=True)},
- outputs = {"out": "%{name}.tar"})
+ implementation = _impl,
+ executable = True,
+ attrs = {
+ "srcs": attr.label_list(allow_empty = False),
+ "bucket": attr.string(),
+ "_jekyll_build_tpl": attr.label(
+ default = ":jekyll_build.sh.tpl",
+ allow_files = True,
+ single_file = True,
+ ),
+ },
+ outputs = {"out": "%{name}.tar"},
+)
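
To illustrate the rule above, a hedged sketch of a jekyll_build target; the implementation untars each entry in srcs into a staging directory, so srcs should be tar archives of the site sources (labels here are hypothetical):

    load("//scripts/docs:jekyll.bzl", "jekyll_build")

    jekyll_build(
        name = "site",
        # Tar archives that are extracted into the Jekyll source directory.
        srcs = ["//site:site-srcs.tar"],
        # Optional; when omitted it is guessed from the workspace name.
        bucket = "docs.example.com",
    )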
diff --git a/scripts/packages/self_extract_binary.bzl b/scripts/packages/self_extract_binary.bzl
index 75f19279c3..24e3ddf2b5 100644
--- a/scripts/packages/self_extract_binary.bzl
+++ b/scripts/packages/self_extract_binary.bzl
@@ -33,45 +33,48 @@ and a ZIP footer with the following entries:
"""
def _self_extract_binary(ctx):
- """Implementation for the self_extract_binary rule."""
- # This is a bit complex for stripping out timestamps
- zip_artifact = ctx.new_file(ctx.label.name + ".zip")
- touch_empty_files = [
- "mkdir -p $(dirname ${tmpdir}/%s); touch ${tmpdir}/%s" % (f, f)
- for f in ctx.attr.empty_files
- ]
- cp_resources = [
- ("mkdir -p $(dirname ${tmpdir}/%s)\n" % r.short_path +
- "cp %s ${tmpdir}/%s" % (r.path, r.short_path))
- for r in ctx.files.resources
- ]
- cp_flatten_resources = [
- "cp %s ${tmpdir}/%s" % (r.path, r.basename)
- for r in ctx.files.flatten_resources
- ]
- ctx.action(
- inputs = ctx.files.resources + ctx.files.flatten_resources,
- outputs = [zip_artifact],
- command = "\n".join([
- "tmpdir=$(mktemp -d ${TMPDIR:-/tmp}/tmp.XXXXXXXX)",
- "trap \"rm -fr ${tmpdir}\" EXIT"
- ] + touch_empty_files + cp_resources + cp_flatten_resources + [
- "find ${tmpdir} -exec touch -t 198001010000.00 '{}' ';'",
- "(d=${PWD}; cd ${tmpdir}; zip -rq ${d}/%s *)" % zip_artifact.path,
- ]),
- mnemonic = "ZipBin",
- )
- ctx.action(
- inputs = [ctx.file.launcher, zip_artifact],
- outputs = [ctx.outputs.executable],
- command = "\n".join([
- "cat %s %s > %s" % (ctx.file.launcher.path,
- zip_artifact.path,
- ctx.outputs.executable.path),
- "zip -qA %s" % ctx.outputs.executable.path
- ]),
- mnemonic = "BuildSelfExtractable",
- )
+ """Implementation for the self_extract_binary rule."""
+
+ # This is a bit complex for stripping out timestamps
+ zip_artifact = ctx.new_file(ctx.label.name + ".zip")
+ touch_empty_files = [
+ "mkdir -p $(dirname ${tmpdir}/%s); touch ${tmpdir}/%s" % (f, f)
+ for f in ctx.attr.empty_files
+ ]
+ cp_resources = [
+ ("mkdir -p $(dirname ${tmpdir}/%s)\n" % r.short_path +
+ "cp %s ${tmpdir}/%s" % (r.path, r.short_path))
+ for r in ctx.files.resources
+ ]
+ cp_flatten_resources = [
+ "cp %s ${tmpdir}/%s" % (r.path, r.basename)
+ for r in ctx.files.flatten_resources
+ ]
+ ctx.action(
+ inputs = ctx.files.resources + ctx.files.flatten_resources,
+ outputs = [zip_artifact],
+ command = "\n".join([
+ "tmpdir=$(mktemp -d ${TMPDIR:-/tmp}/tmp.XXXXXXXX)",
+ "trap \"rm -fr ${tmpdir}\" EXIT",
+ ] + touch_empty_files + cp_resources + cp_flatten_resources + [
+ "find ${tmpdir} -exec touch -t 198001010000.00 '{}' ';'",
+ "(d=${PWD}; cd ${tmpdir}; zip -rq ${d}/%s *)" % zip_artifact.path,
+ ]),
+ mnemonic = "ZipBin",
+ )
+ ctx.action(
+ inputs = [ctx.file.launcher, zip_artifact],
+ outputs = [ctx.outputs.executable],
+ command = "\n".join([
+ "cat %s %s > %s" % (
+ ctx.file.launcher.path,
+ zip_artifact.path,
+ ctx.outputs.executable.path,
+ ),
+ "zip -qA %s" % ctx.outputs.executable.path,
+ ]),
+ mnemonic = "BuildSelfExtractable",
+ )
self_extract_binary = rule(
_self_extract_binary,
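
A hedged usage sketch for self_extract_binary; the attribute names (launcher, resources, flatten_resources, empty_files) are taken from the implementation above, while the labels are hypothetical:

    load("//scripts/packages:self_extract_binary.bzl", "self_extract_binary")

    self_extract_binary(
        name = "install.sh",
        # Shell stub that is concatenated in front of the zip payload.
        launcher = ":launcher_stub.sh",
        # Kept in the archive under their short_path.
        resources = ["//example:payload"],
        # Copied to the root of the archive by basename.
        flatten_resources = ["//example:tool"],
        # Created as empty entries with normalized timestamps.
        empty_files = ["var/empty"],
    )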
diff --git a/src/embedded_tools.bzl b/src/embedded_tools.bzl
index b36cc142cf..097fb426e5 100644
--- a/src/embedded_tools.bzl
+++ b/src/embedded_tools.bzl
@@ -15,36 +15,43 @@
"""Contains Skylark rules used to build the embedded_tools.zip."""
def _embedded_tools(ctx):
- # The list of arguments we pass to the script.
- args_file = ctx.new_file(ctx.label.name + ".params")
- ctx.file_action(output=args_file, content="\n".join([f.path for f in ctx.files.srcs]))
- # Action to call the script.
- ctx.action(
- inputs=ctx.files.srcs,
- outputs=[ctx.outputs.out],
- arguments=[ctx.outputs.out.path, args_file.path],
- progress_message="Creating embedded tools: %s" % ctx.outputs.out.short_path,
- executable=ctx.executable.tool)
+ # The list of arguments we pass to the script.
+ args_file = ctx.new_file(ctx.label.name + ".params")
+ ctx.file_action(output = args_file, content = "\n".join([f.path for f in ctx.files.srcs]))
+
+ # Action to call the script.
+ ctx.action(
+ inputs = ctx.files.srcs,
+ outputs = [ctx.outputs.out],
+ arguments = [ctx.outputs.out.path, args_file.path],
+ progress_message = "Creating embedded tools: %s" % ctx.outputs.out.short_path,
+ executable = ctx.executable.tool,
+ )
embedded_tools = rule(
- implementation=_embedded_tools,
- attrs={
- "srcs": attr.label_list(allow_files=True),
- "out": attr.output(mandatory=True),
- "tool": attr.label(executable=True, cfg="host", allow_files=True,
- default=Label("//src:create_embedded_tools_sh"))
- }
+ implementation = _embedded_tools,
+ attrs = {
+ "srcs": attr.label_list(allow_files = True),
+ "out": attr.output(mandatory = True),
+ "tool": attr.label(
+ executable = True,
+ cfg = "host",
+ allow_files = True,
+ default = Label("//src:create_embedded_tools_sh"),
+ ),
+ },
)
def _srcsfile(ctx):
- ctx.file_action(
- output=ctx.outputs.out,
- content="\n".join([f.path for f in ctx.files.srcs]))
+ ctx.file_action(
+ output = ctx.outputs.out,
+ content = "\n".join([f.path for f in ctx.files.srcs]),
+ )
srcsfile = rule(
- implementation=_srcsfile,
- attrs={
- "srcs": attr.label_list(allow_files=True),
- "out": attr.output(mandatory=True),
- }
+ implementation = _srcsfile,
+ attrs = {
+ "srcs": attr.label_list(allow_files = True),
+ "out": attr.output(mandatory = True),
+ },
)
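
As a usage sketch for the two rules above (labels are hypothetical): embedded_tools zips the given sources via the tool script, and srcsfile just writes the list of input paths, one per line:

    load("//src:embedded_tools.bzl", "embedded_tools", "srcsfile")

    embedded_tools(
        name = "embedded_tools_zip",
        srcs = ["//example:tool_srcs"],
        out = "embedded_tools.zip",
    )

    srcsfile(
        name = "embedded_tools_srcs_list",
        srcs = ["//example:tool_srcs"],
        out = "embedded_tools_srcs.txt",
    )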
diff --git a/src/java_tools/buildjar/java/com/google/devtools/build/java/bazel/build_defs.bzl b/src/java_tools/buildjar/java/com/google/devtools/build/java/bazel/build_defs.bzl
index 2bf8c78538..6f1016704f 100644
--- a/src/java_tools/buildjar/java/com/google/devtools/build/java/bazel/build_defs.bzl
+++ b/src/java_tools/buildjar/java/com/google/devtools/build/java/bazel/build_defs.bzl
@@ -15,25 +15,27 @@
"""Rules to make the default javacopts available as a Java API."""
def _default_javacopts(ctx):
- javacopts = java_common.default_javac_opts(
- ctx, java_toolchain_attr = "_java_toolchain")
- ctx.template_action(
- template = ctx.file.template,
- output = ctx.outputs.out,
- substitutions = {
- "%javacopts%": '"%s"' % '", "'.join(javacopts),
- }
- )
+ javacopts = java_common.default_javac_opts(
+ ctx,
+ java_toolchain_attr = "_java_toolchain",
+ )
+ ctx.template_action(
+ template = ctx.file.template,
+ output = ctx.outputs.out,
+ substitutions = {
+ "%javacopts%": '"%s"' % '", "'.join(javacopts),
+ },
+ )
default_javacopts = rule(
- implementation=_default_javacopts,
- attrs={
+ implementation = _default_javacopts,
+ attrs = {
"template": attr.label(
- mandatory=True,
- allow_files=True,
- single_file=True,
+ mandatory = True,
+ allow_files = True,
+ single_file = True,
),
- "out": attr.output(mandatory=True),
+ "out": attr.output(mandatory = True),
"_java_toolchain": attr.label(
default = Label("//tools/jdk:current_java_toolchain"),
),
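
For context, a hedged sketch of a default_javacopts target; it expands a template whose %javacopts% placeholder is replaced with the quoted, comma-separated default javac options (file names here are hypothetical):

    load(":build_defs.bzl", "default_javacopts")

    default_javacopts(
        name = "gen_javac_opts",
        # Source template containing the %javacopts% placeholder.
        template = "JavacOpts.java.in",
        out = "JavacOpts.java",
    )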
diff --git a/src/java_tools/import_deps_checker/javatests/com/google/devtools/build/importdeps/tests.bzl b/src/java_tools/import_deps_checker/javatests/com/google/devtools/build/importdeps/tests.bzl
index e05e3bafea..452b138796 100644
--- a/src/java_tools/import_deps_checker/javatests/com/google/devtools/build/importdeps/tests.bzl
+++ b/src/java_tools/import_deps_checker/javatests/com/google/devtools/build/importdeps/tests.bzl
@@ -11,66 +11,76 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-'''Helpers to create golden tests, to minimize code duplication.'''
+"""Helpers to create golden tests, to minimize code duplication."""
-def create_golden_test(name, golden_output_file, golden_stderr_file, expect_errors, checking_mode,
- has_bootclasspath, testdata_pkg, import_deps_checker, rt_jar,
- missing_jar = None, replacing_jar = None, direct_jars = []):
- '''Create a golden test for the dependency checker.'''
- all_dep_jars = [
- "testdata_client",
- "testdata_lib_Library",
- "testdata_lib_LibraryAnnotations",
- "testdata_lib_LibraryException",
- "testdata_lib_LibraryInterface",
- ]
- client_jar = testdata_pkg + ":testdata_client"
- data = [
- golden_output_file,
- golden_stderr_file,
- import_deps_checker,
- rt_jar,
- ] + [testdata_pkg + ":" + x for x in all_dep_jars]
- if (replacing_jar):
- data.append(testdata_pkg + ":" + replacing_jar)
+def create_golden_test(
+ name,
+ golden_output_file,
+ golden_stderr_file,
+ expect_errors,
+ checking_mode,
+ has_bootclasspath,
+ testdata_pkg,
+ import_deps_checker,
+ rt_jar,
+ missing_jar = None,
+ replacing_jar = None,
+ direct_jars = []):
+ """Create a golden test for the dependency checker."""
+ all_dep_jars = [
+ "testdata_client",
+ "testdata_lib_Library",
+ "testdata_lib_LibraryAnnotations",
+ "testdata_lib_LibraryException",
+ "testdata_lib_LibraryInterface",
+ ]
+ client_jar = testdata_pkg + ":testdata_client"
+ data = [
+ golden_output_file,
+ golden_stderr_file,
+ import_deps_checker,
+ rt_jar,
+ ] + [testdata_pkg + ":" + x for x in all_dep_jars]
+ if (replacing_jar):
+ data.append(testdata_pkg + ":" + replacing_jar)
- args = [
- "$(location %s)" % golden_output_file,
- "$(location %s)" % golden_stderr_file,
- # The exit code 199 means the checker emits errors on dependency issues.
- "199" if expect_errors else "0",
- "$(location %s)" % import_deps_checker,
- "--checking_mode=%s" % checking_mode,
- ]
- args.append("--bootclasspath_entry")
- if has_bootclasspath:
- args.append("$(location %s)" % rt_jar)
- else:
- args.append("$(location %s)" % client_jar) # Fake bootclasspath.
+ args = [
+ "$(location %s)" % golden_output_file,
+ "$(location %s)" % golden_stderr_file,
+ # The exit code 199 means the checker emits errors on dependency issues.
+ "199" if expect_errors else "0",
+ "$(location %s)" % import_deps_checker,
+ "--checking_mode=%s" % checking_mode,
+ ]
+ args.append("--bootclasspath_entry")
+ if has_bootclasspath:
+ args.append("$(location %s)" % rt_jar)
+ else:
+ args.append("$(location %s)" % client_jar) # Fake bootclasspath.
- for dep in all_dep_jars:
- if dep == missing_jar:
- if replacing_jar:
+ for dep in all_dep_jars:
+ if dep == missing_jar:
+ if replacing_jar:
+ args.append("--classpath_entry")
+ args.append("$(location %s:%s)" % (testdata_pkg, replacing_jar))
+ continue
args.append("--classpath_entry")
- args.append("$(location %s:%s)" % (testdata_pkg, replacing_jar))
- continue
- args.append("--classpath_entry")
- args.append("$(location %s:%s)" % (testdata_pkg, dep))
+ args.append("$(location %s:%s)" % (testdata_pkg, dep))
- for dep in direct_jars:
- args.append("--directdep")
- args.append("$(location %s:%s)" % (testdata_pkg, dep))
+ for dep in direct_jars:
+ args.append("--directdep")
+ args.append("$(location %s:%s)" % (testdata_pkg, dep))
- args = args + [
- "--input",
- "$(location %s:testdata_client)" % testdata_pkg,
- ]
+ args = args + [
+ "--input",
+ "$(location %s:testdata_client)" % testdata_pkg,
+ ]
- args.append("--rule_label=:%s" % name)
+ args.append("--rule_label=:%s" % name)
- native.sh_test(
- name=name,
- srcs = ["golden_test.sh"],
- args = args,
- data = data,
- )
+ native.sh_test(
+ name = name,
+ srcs = ["golden_test.sh"],
+ args = args,
+ data = data,
+ )
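
A hedged sketch of one create_golden_test call; all labels and file names are hypothetical, and the checking_mode value is an assumption mirroring the --checking_mode flag passed through to the checker:

    load(":tests.bzl", "create_golden_test")

    create_golden_test(
        name = "error_mode_golden_test",
        golden_output_file = "testdata/error.output.golden",
        golden_stderr_file = "testdata/error.stderr.golden",
        expect_errors = True,  # the test then expects exit code 199
        checking_mode = "error",
        has_bootclasspath = True,
        testdata_pkg = "//example/importdeps/testdata",
        import_deps_checker = "//example:import_deps_checker",
        rt_jar = "//example:rt.jar",
    )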
diff --git a/src/test/shell/bazel/list_source_repository.bzl b/src/test/shell/bazel/list_source_repository.bzl
index bb19a6011d..d114dae4fe 100644
--- a/src/test/shell/bazel/list_source_repository.bzl
+++ b/src/test/shell/bazel/list_source_repository.bzl
@@ -19,20 +19,21 @@
"""A repository definition to fetch all sources in Bazel."""
def _impl(rctx):
- workspace = rctx.path(Label("//:BUILD")).dirname
- srcs_excludes = "XXXXXXXXXXXXXX1268778dfsdf4"
- # Depending in ~/.git/logs/HEAD is a trick to depends on something that
- # change everytime the workspace content change.
- r = rctx.execute(["test", "-f", "%s/.git/logs/HEAD" % workspace])
- if r.return_code == 0:
- # We only add the dependency if it exists.
- unused_var = rctx.path(Label("//:.git/logs/HEAD")) # pylint: disable=unused-variable
+ workspace = rctx.path(Label("//:BUILD")).dirname
+ srcs_excludes = "XXXXXXXXXXXXXX1268778dfsdf4"
- if "SRCS_EXCLUDES" in rctx.os.environ:
- srcs_excludes = rctx.os.environ["SRCS_EXCLUDES"]
- r = rctx.execute(["find", str(workspace), "-type", "f"])
- rctx.file("find.result.raw", r.stdout.replace(str(workspace) + "/", ""))
- rctx.file("BUILD", """
+    # Depending on ~/.git/logs/HEAD is a trick to depend on something that
+    # changes every time the workspace content changes.
+ r = rctx.execute(["test", "-f", "%s/.git/logs/HEAD" % workspace])
+ if r.return_code == 0:
+ # We only add the dependency if it exists.
+ unused_var = rctx.path(Label("//:.git/logs/HEAD")) # pylint: disable=unused-variable
+
+ if "SRCS_EXCLUDES" in rctx.os.environ:
+ srcs_excludes = rctx.os.environ["SRCS_EXCLUDES"]
+ r = rctx.execute(["find", str(workspace), "-type", "f"])
+ rctx.file("find.result.raw", r.stdout.replace(str(workspace) + "/", ""))
+ rctx.file("BUILD", """
genrule(
name = "sources",
outs = ["sources.txt"],
@@ -49,7 +50,8 @@ genrule(
list_source_repository = repository_rule(
implementation = _impl,
- environ = ["SRCS_EXCLUDES"])
+ environ = ["SRCS_EXCLUDES"],
+)
"""Create a //:sources target containing the list of sources of Bazel.
SRCS_EXCLUDES gives a regex of files to exclude from the list."""
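
As a usage sketch, the repository rule above is instantiated from a WORKSPACE file and then queried for the generated //:sources target (the repository name is illustrative):

    load("//src/test/shell/bazel:list_source_repository.bzl", "list_source_repository")

    list_source_repository(name = "local_bazel_source_list")

    # Then, e.g.: bazel build @local_bazel_source_list//:sources
    # Setting SRCS_EXCLUDES in the environment filters the resulting list.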
diff --git a/src/tools/launcher/win_rules.bzl b/src/tools/launcher/win_rules.bzl
index 093d8448eb..869b747342 100644
--- a/src/tools/launcher/win_rules.bzl
+++ b/src/tools/launcher/win_rules.bzl
@@ -15,33 +15,36 @@
# This is a quick and dirty rule to make Bazel compile itself. It
# only supports Java.
-def cc_library(srcs=[], hdrs=[], **kwargs):
- """Replace srcs and hdrs with a dummy.cc on non-Windows platforms."""
- native.cc_library(
- srcs = select({
+def cc_library(srcs = [], hdrs = [], **kwargs):
+ """Replace srcs and hdrs with a dummy.cc on non-Windows platforms."""
+ native.cc_library(
+ srcs = select({
"//conditions:default": ["dummy.cc"],
"//src/conditions:windows": srcs,
- }),
- hdrs = select({
+ }),
+ hdrs = select({
"//conditions:default": [],
"//src/conditions:windows": hdrs,
- }),
- **kwargs)
+ }),
+ **kwargs
+ )
-def cc_binary(srcs=[], **kwargs):
- """Replace srcs with a dummy.cc on non-Windows platforms."""
- native.cc_binary(
- srcs = select({
+def cc_binary(srcs = [], **kwargs):
+ """Replace srcs with a dummy.cc on non-Windows platforms."""
+ native.cc_binary(
+ srcs = select({
"//conditions:default": ["dummy.cc"],
"//src/conditions:windows": srcs,
- }),
- **kwargs)
+ }),
+ **kwargs
+ )
-def cc_test(srcs=[], **kwargs):
- """Replace srcs with a dummy.cc on non-Windows platforms."""
- native.cc_test(
- srcs = select({
+def cc_test(srcs = [], **kwargs):
+ """Replace srcs with a dummy.cc on non-Windows platforms."""
+ native.cc_test(
+ srcs = select({
"//conditions:default": ["dummy.cc"],
"//src/conditions:windows": srcs,
- }),
- **kwargs)
+ }),
+ **kwargs
+ )
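
To show the effect of these wrappers, a hedged example; on non-Windows platforms the select() substitutes dummy.cc, so the target still parses and builds even though the real sources are Windows-only (the file name is hypothetical):

    load("//src/tools/launcher:win_rules.bzl", "cc_binary")

    cc_binary(
        name = "launcher",
        # Compiled only when //src/conditions:windows matches;
        # otherwise replaced by dummy.cc.
        srcs = ["launcher_main.cc"],
    )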
diff --git a/tools/android/android_sdk_repository_template.bzl b/tools/android/android_sdk_repository_template.bzl
index f9b618d1e7..e41adfb0df 100644
--- a/tools/android/android_sdk_repository_template.bzl
+++ b/tools/android/android_sdk_repository_template.bzl
@@ -14,205 +14,206 @@
# limitations under the License.
def create_config_setting_rule():
- """Create config_setting rule for windows.
-
- These represent the matching --host_cpu values.
- """
- name = "windows"
- if not native.existing_rule(name):
- native.config_setting(
- name = name,
- values = {"host_cpu": "x64_" + name},
- )
+ """Create config_setting rule for windows.
+
+ These represent the matching --host_cpu values.
+ """
+ name = "windows"
+ if not native.existing_rule(name):
+ native.config_setting(
+ name = name,
+ values = {"host_cpu": "x64_" + name},
+ )
def create_android_sdk_rules(
- name,
- build_tools_version,
- build_tools_directory,
- api_levels,
- default_api_level):
- """Generate android_sdk rules for the API levels in the Android SDK.
-
- Args:
- name: string, the name of the repository being generated.
- build_tools_version: string, the version of Android's build tools to use.
- build_tools_directory: string, the directory name of the build tools in
- sdk's build-tools directory.
- api_levels: list of ints, the API levels from which to get android.jar
- et al. and create android_sdk rules.
- default_api_level: int, the API level to alias the default sdk to if
- --android_sdk is not specified on the command line.
- """
-
- create_config_setting_rule()
-
- windows_only_files = [
- "build-tools/%s/aapt.exe" % build_tools_directory,
- "build-tools/%s/aidl.exe" % build_tools_directory,
- "build-tools/%s/zipalign.exe" % build_tools_directory,
- "platform-tools/adb.exe",
- ] + native.glob(["build-tools/%s/aapt2.exe" % build_tools_directory])
-
- linux_only_files = [
- "build-tools/%s/aapt" % build_tools_directory,
- "build-tools/%s/aidl" % build_tools_directory,
- "build-tools/%s/zipalign" % build_tools_directory,
- "platform-tools/adb",
- ] + native.glob(
- ["extras", "build-tools/%s/aapt2" % build_tools_directory],
- exclude_directories = 0,
- )
-
- # This filegroup is used to pass the minimal contents of the SDK to the
- # Android integration tests. Note that in order to work on Windows, we cannot
- # include directories and must keep the size small.
- native.filegroup(
- name = "files",
- srcs = [
- "build-tools/%s/lib/apksigner.jar" % build_tools_directory,
- "build-tools/%s/lib/dx.jar" % build_tools_directory,
- "build-tools/%s/mainDexClasses.rules" % build_tools_directory,
- ] + [
- "platforms/android-%d/%s" % (api_level, filename)
- for api_level in api_levels
- for filename in ["android.jar", "framework.aidl"]
- ] + select({
- ":windows": windows_only_files,
- "//conditions:default": linux_only_files,
- }),
- )
-
- for api_level in api_levels:
- if api_level >= 23:
- # Android 23 removed most of org.apache.http from android.jar and moved it
- # to a separate jar.
- native.java_import(
- name = "org_apache_http_legacy-%d" % api_level,
- jars = ["platforms/android-%d/optional/org.apache.http.legacy.jar" % api_level]
- )
-
- native.android_sdk(
- name = "sdk-%d" % api_level,
- build_tools_version = build_tools_version,
- proguard = "@bazel_tools//third_party/java/proguard",
- aapt = select({
- ":windows": "build-tools/%s/aapt.exe" % build_tools_directory,
- "//conditions:default": ":aapt_binary",
- }),
- aapt2 = select({
- ":windows": "build-tools/%s/aapt2.exe" % build_tools_directory,
- "//conditions:default": ":aapt2_binary",
- }),
- dx = ":dx_binary",
- main_dex_list_creator = ":main_dex_list_creator",
- adb = select({
- ":windows": "platform-tools/adb.exe",
- "//conditions:default": "platform-tools/adb",
- }),
- framework_aidl = "platforms/android-%d/framework.aidl" % api_level,
- aidl = select({
- ":windows": "build-tools/%s/aidl.exe" % build_tools_directory,
- "//conditions:default": ":aidl_binary",
+ name,
+ build_tools_version,
+ build_tools_directory,
+ api_levels,
+ default_api_level):
+ """Generate android_sdk rules for the API levels in the Android SDK.
+
+ Args:
+ name: string, the name of the repository being generated.
+ build_tools_version: string, the version of Android's build tools to use.
+ build_tools_directory: string, the directory name of the build tools in
+ sdk's build-tools directory.
+ api_levels: list of ints, the API levels from which to get android.jar
+ et al. and create android_sdk rules.
+ default_api_level: int, the API level to alias the default sdk to if
+ --android_sdk is not specified on the command line.
+ """
+
+ create_config_setting_rule()
+
+ windows_only_files = [
+ "build-tools/%s/aapt.exe" % build_tools_directory,
+ "build-tools/%s/aidl.exe" % build_tools_directory,
+ "build-tools/%s/zipalign.exe" % build_tools_directory,
+ "platform-tools/adb.exe",
+ ] + native.glob(["build-tools/%s/aapt2.exe" % build_tools_directory])
+
+ linux_only_files = [
+ "build-tools/%s/aapt" % build_tools_directory,
+ "build-tools/%s/aidl" % build_tools_directory,
+ "build-tools/%s/zipalign" % build_tools_directory,
+ "platform-tools/adb",
+ ] + native.glob(
+ ["extras", "build-tools/%s/aapt2" % build_tools_directory],
+ exclude_directories = 0,
+ )
+
+ # This filegroup is used to pass the minimal contents of the SDK to the
+ # Android integration tests. Note that in order to work on Windows, we cannot
+ # include directories and must keep the size small.
+ native.filegroup(
+ name = "files",
+ srcs = [
+ "build-tools/%s/lib/apksigner.jar" % build_tools_directory,
+ "build-tools/%s/lib/dx.jar" % build_tools_directory,
+ "build-tools/%s/mainDexClasses.rules" % build_tools_directory,
+ ] + [
+ "platforms/android-%d/%s" % (api_level, filename)
+ for api_level in api_levels
+ for filename in ["android.jar", "framework.aidl"]
+ ] + select({
+ ":windows": windows_only_files,
+ "//conditions:default": linux_only_files,
}),
- android_jar = "platforms/android-%d/android.jar" % api_level,
- shrinked_android_jar = "platforms/android-%d/android.jar" % api_level,
- main_dex_classes = "build-tools/%s/mainDexClasses.rules" % build_tools_directory,
- apksigner = ":apksigner",
- zipalign = select({
- ":windows": "build-tools/%s/zipalign.exe" % build_tools_directory,
- "//conditions:default": ":zipalign_binary",
+ )
+
+ for api_level in api_levels:
+ if api_level >= 23:
+ # Android 23 removed most of org.apache.http from android.jar and moved it
+ # to a separate jar.
+ native.java_import(
+ name = "org_apache_http_legacy-%d" % api_level,
+ jars = ["platforms/android-%d/optional/org.apache.http.legacy.jar" % api_level],
+ )
+
+ native.android_sdk(
+ name = "sdk-%d" % api_level,
+ build_tools_version = build_tools_version,
+ proguard = "@bazel_tools//third_party/java/proguard",
+ aapt = select({
+ ":windows": "build-tools/%s/aapt.exe" % build_tools_directory,
+ "//conditions:default": ":aapt_binary",
+ }),
+ aapt2 = select({
+ ":windows": "build-tools/%s/aapt2.exe" % build_tools_directory,
+ "//conditions:default": ":aapt2_binary",
+ }),
+ dx = ":dx_binary",
+ main_dex_list_creator = ":main_dex_list_creator",
+ adb = select({
+ ":windows": "platform-tools/adb.exe",
+ "//conditions:default": "platform-tools/adb",
+ }),
+ framework_aidl = "platforms/android-%d/framework.aidl" % api_level,
+ aidl = select({
+ ":windows": "build-tools/%s/aidl.exe" % build_tools_directory,
+ "//conditions:default": ":aidl_binary",
+ }),
+ android_jar = "platforms/android-%d/android.jar" % api_level,
+ shrinked_android_jar = "platforms/android-%d/android.jar" % api_level,
+ main_dex_classes = "build-tools/%s/mainDexClasses.rules" % build_tools_directory,
+ apksigner = ":apksigner",
+ zipalign = select({
+ ":windows": "build-tools/%s/zipalign.exe" % build_tools_directory,
+ "//conditions:default": ":zipalign_binary",
+ }),
+ )
+
+ native.alias(
+ name = "org_apache_http_legacy",
+ actual = ":org_apache_http_legacy-%d" % default_api_level,
+ )
+
+ native.alias(
+ name = "sdk",
+ actual = ":sdk-%d" % default_api_level,
+ )
+
+ native.java_binary(
+ name = "apksigner",
+ main_class = "com.android.apksigner.ApkSignerTool",
+ runtime_deps = ["build-tools/%s/lib/apksigner.jar" % build_tools_directory],
+ )
+
+ native.filegroup(
+ name = "build_tools_libs",
+ srcs = native.glob([
+ "build-tools/%s/lib/**" % build_tools_directory,
+ # Build tools version 24.0.0 added a lib64 folder.
+ "build-tools/%s/lib64/**" % build_tools_directory,
+ ]),
+ )
+
+ for tool in ["aapt", "aapt2", "aidl", "zipalign"]:
+ native.genrule(
+ name = tool + "_runner",
+ outs = [tool + "_runner.sh"],
+ srcs = [],
+ cmd = "\n".join([
+ "cat > $@ << 'EOF'",
+ "#!/bin/bash",
+ "set -eu",
+ # The tools under build-tools/VERSION require the libraries under
+ # build-tools/VERSION/lib, so we can't simply depend on them as a
+ # file like we do with aapt.
+ # On Windows however we can use these binaries directly because
+ # there's no runfiles support so Bazel just creates a junction to
+ # {SDK}/build-tools.
+ "SDK=$${0}.runfiles/%s" % name,
+ # If $${SDK} is not a directory, it means that this tool is running
+ # from a runfiles directory, in the case of
+ # android_instrumentation_test. Hence, use the androidsdk
+ # that's already present in the runfiles of the current context.
+ "if [[ ! -d $${SDK} ]] ; then",
+ " SDK=$$(pwd)/../%s" % name,
+ "fi",
+ "exec $${SDK}/build-tools/%s/%s $$*" % (build_tools_directory, tool),
+ "EOF\n",
+ ]),
+ )
+
+ native.sh_binary(
+ name = tool + "_binary",
+ srcs = [tool + "_runner.sh"],
+ data = [
+ ":build_tools_libs",
+ "build-tools/%s/%s" % (build_tools_directory, tool),
+ ],
+ )
+
+ native.sh_binary(
+ name = "fail",
+ srcs = select({
+ ":windows": [":generate_fail_cmd"],
+ "//conditions:default": [":generate_fail_sh"],
}),
)
- native.alias(
- name = "org_apache_http_legacy",
- actual = ":org_apache_http_legacy-%d" % default_api_level,
- )
-
- native.alias(
- name = "sdk",
- actual = ":sdk-%d" % default_api_level,
- )
-
- native.java_binary(
- name = "apksigner",
- main_class = "com.android.apksigner.ApkSignerTool",
- runtime_deps = ["build-tools/%s/lib/apksigner.jar" % build_tools_directory],
- )
-
- native.filegroup(
- name = "build_tools_libs",
- srcs = native.glob([
- "build-tools/%s/lib/**" % build_tools_directory,
- # Build tools version 24.0.0 added a lib64 folder.
- "build-tools/%s/lib64/**" % build_tools_directory,
- ])
- )
-
- for tool in ["aapt", "aapt2", "aidl", "zipalign"]:
native.genrule(
- name = tool + "_runner",
- outs = [tool + "_runner.sh"],
- srcs = [],
- cmd = "\n".join([
- "cat > $@ << 'EOF'",
- "#!/bin/bash",
- "set -eu",
- # The tools under build-tools/VERSION require the libraries under
- # build-tools/VERSION/lib, so we can't simply depend on them as a
- # file like we do with aapt.
- # On Windows however we can use these binaries directly because
- # there's no runfiles support so Bazel just creates a junction to
- # {SDK}/build-tools.
- "SDK=$${0}.runfiles/%s" % name,
- # If $${SDK} is not a directory, it means that this tool is running
- # from a runfiles directory, in the case of
- # android_instrumentation_test. Hence, use the androidsdk
- # that's already present in the runfiles of the current context.
- "if [[ ! -d $${SDK} ]] ; then",
- " SDK=$$(pwd)/../%s" % name,
- "fi",
- "exec $${SDK}/build-tools/%s/%s $$*" % (build_tools_directory, tool),
- "EOF\n"]),
+ name = "generate_fail_sh",
+ executable = 1,
+ outs = ["fail.sh"],
+ cmd = "echo -e '#!/bin/bash\\nexit 1' >> $@; chmod +x $@",
)
- native.sh_binary(
- name = tool + "_binary",
- srcs = [tool + "_runner.sh"],
- data = [
- ":build_tools_libs",
- "build-tools/%s/%s" % (build_tools_directory, tool)
- ],
+ native.genrule(
+ name = "generate_fail_cmd",
+ executable = 1,
+ outs = ["fail.cmd"],
+ cmd = "echo @exit /b 1 > $@",
)
- native.sh_binary(
- name = "fail",
- srcs = select({
- ":windows": [":generate_fail_cmd"],
- "//conditions:default": [":generate_fail_sh"],
- }),
- )
-
- native.genrule(
- name = "generate_fail_sh",
- executable = 1,
- outs = ["fail.sh"],
- cmd = "echo -e '#!/bin/bash\\nexit 1' >> $@; chmod +x $@",
- )
-
- native.genrule(
- name = "generate_fail_cmd",
- executable = 1,
- outs = ["fail.cmd"],
- cmd = "echo @exit /b 1 > $@",
- )
-
-
- native.genrule(
- name = "main_dex_list_creator_source",
- srcs = [],
- outs = ["main_dex_list_creator.sh"],
- cmd = "\n".join(["cat > $@ <<'EOF'",
+ native.genrule(
+ name = "main_dex_list_creator_source",
+ srcs = [],
+ outs = ["main_dex_list_creator.sh"],
+ cmd = "\n".join([
+ "cat > $@ <<'EOF'",
"#!/bin/bash",
"",
"MAIN_DEX_LIST=$$1",
@@ -223,32 +224,32 @@ def create_android_sdk_rules(
"$$JAVA_BINARY $$STRIPPED_JAR $$JAR > $$MAIN_DEX_LIST",
"exit $$?",
"",
- "EOF\n"]),
- )
-
- native.sh_binary(
- name = "main_dex_list_creator",
- srcs = ["main_dex_list_creator.sh"],
- data = [":main_dex_list_creator_java"],
- )
+ "EOF\n",
+ ]),
+ )
- native.java_binary(
- name = "main_dex_list_creator_java",
- main_class = "com.android.multidex.ClassReferenceListBuilder",
- runtime_deps = [":dx_jar_import"],
- )
+ native.sh_binary(
+ name = "main_dex_list_creator",
+ srcs = ["main_dex_list_creator.sh"],
+ data = [":main_dex_list_creator_java"],
+ )
- native.java_binary(
- name = "dx_binary",
- main_class = "com.android.dx.command.Main",
- runtime_deps = [":dx_jar_import"],
- )
+ native.java_binary(
+ name = "main_dex_list_creator_java",
+ main_class = "com.android.multidex.ClassReferenceListBuilder",
+ runtime_deps = [":dx_jar_import"],
+ )
- native.java_import(
- name = "dx_jar_import",
- jars = ["build-tools/%s/lib/dx.jar" % build_tools_directory],
- )
+ native.java_binary(
+ name = "dx_binary",
+ main_class = "com.android.dx.command.Main",
+ runtime_deps = [":dx_jar_import"],
+ )
+ native.java_import(
+ name = "dx_jar_import",
+ jars = ["build-tools/%s/lib/dx.jar" % build_tools_directory],
+ )
TAGDIR_TO_TAG_MAP = {
"google_apis": "google",
@@ -257,93 +258,101 @@ TAGDIR_TO_TAG_MAP = {
"android-wear": "wear",
}
-
ARCHDIR_TO_ARCH_MAP = {
"x86": "x86",
"armeabi-v7a": "arm",
}
-
def create_system_images_filegroups(system_image_dirs):
- """Generate filegroups for the system images in the Android SDK.
-
- Args:
- system_image_dirs: list of strings, the directories containing system image
- files to be used to create android_device rules.
- """
-
- # These images will need to be updated as Android releases new system images.
- # We are intentionally not adding future releases because there is no
- # guarantee that they will work out of the box. Supported system images should
- # be added here once they have been confirmed to work with the Bazel Android
- # testing infrastructure.
- system_images = [(tag, str(api), arch)
- for tag in ["android", "google"]
- for api in [10] + range(15, 20) + range(21, 27)
- for arch in ("x86", "arm")]
- tv_images = [("tv", str(api), arch)
- for api in range(21, 25) for arch in ("x86", "arm")]
- wear_images = [("wear", str(api), "x86")
- for api in range(20, 26)] + [("wear", str(api), "arm")
- for api in range(24, 26)]
- supported_system_images = system_images + tv_images + wear_images
-
- installed_system_images_dirs = {}
- for system_image_dir in system_image_dirs:
- apidir, tagdir, archdir = system_image_dir.split("/")[1:]
- if "-" not in apidir:
- continue
- api = apidir.split("-")[1] # "android-24" --> "24", "android-O" --> "O"
- if tagdir not in TAGDIR_TO_TAG_MAP:
- continue
- tag = TAGDIR_TO_TAG_MAP[tagdir]
- if archdir not in ARCHDIR_TO_ARCH_MAP:
- continue
- arch = ARCHDIR_TO_ARCH_MAP[archdir]
- if (tag, api, arch) in supported_system_images:
- name = "emulator_images_%s_%s_%s" % (tag, api, arch)
- installed_system_images_dirs[name] = system_image_dir
- else:
- # TODO(bazel-team): If the user has an unsupported system image installed,
- # should we print a warning? This includes all 64-bit system-images.
- pass
-
- for (tag, api, arch) in supported_system_images:
- name = "emulator_images_%s_%s_%s" % (tag, api, arch)
- if name in installed_system_images_dirs:
- system_image_dir = installed_system_images_dirs[name]
- # For supported system images that exist in /sdk/system-images/, we
- # create a filegroup with their contents.
- native.filegroup(
- name = name,
- srcs = native.glob([
- "%s/**" % system_image_dir,
- ]),
- )
- native.filegroup(
- name = "%s_qemu2_extra" % name,
- srcs = native.glob(["%s/kernel-ranchu" % system_image_dir]),
- )
- else:
- # For supported system images that are not installed in the SDK, we
- # create a "poison pill" genrule to display a helpful error message to
- # a user who attempts to run a test against an android_device that
- # they don't have the system image for installed.
- native.genrule(
- name = name,
- outs = [
- # Necessary so that the build doesn't fail in analysis because
- # android_device expects a file named source.properties.
- "poison_pill_for_%s/source.properties" % name,
- ],
- cmd = """echo \
+ """Generate filegroups for the system images in the Android SDK.
+
+ Args:
+ system_image_dirs: list of strings, the directories containing system image
+ files to be used to create android_device rules.
+ """
+
+ # These images will need to be updated as Android releases new system images.
+ # We are intentionally not adding future releases because there is no
+ # guarantee that they will work out of the box. Supported system images should
+ # be added here once they have been confirmed to work with the Bazel Android
+ # testing infrastructure.
+ system_images = [
+ (tag, str(api), arch)
+ for tag in ["android", "google"]
+ for api in [10] + range(15, 20) + range(21, 27)
+ for arch in ("x86", "arm")
+ ]
+ tv_images = [
+ ("tv", str(api), arch)
+ for api in range(21, 25)
+ for arch in ("x86", "arm")
+ ]
+ wear_images = [
+ ("wear", str(api), "x86")
+ for api in range(20, 26)
+ ] + [
+ ("wear", str(api), "arm")
+ for api in range(24, 26)
+ ]
+ supported_system_images = system_images + tv_images + wear_images
+
+ installed_system_images_dirs = {}
+ for system_image_dir in system_image_dirs:
+ apidir, tagdir, archdir = system_image_dir.split("/")[1:]
+ if "-" not in apidir:
+ continue
+ api = apidir.split("-")[1] # "android-24" --> "24", "android-O" --> "O"
+ if tagdir not in TAGDIR_TO_TAG_MAP:
+ continue
+ tag = TAGDIR_TO_TAG_MAP[tagdir]
+ if archdir not in ARCHDIR_TO_ARCH_MAP:
+ continue
+ arch = ARCHDIR_TO_ARCH_MAP[archdir]
+ if (tag, api, arch) in supported_system_images:
+ name = "emulator_images_%s_%s_%s" % (tag, api, arch)
+ installed_system_images_dirs[name] = system_image_dir
+ else:
+ # TODO(bazel-team): If the user has an unsupported system image installed,
+ # should we print a warning? This includes all 64-bit system-images.
+ pass
+
+ for (tag, api, arch) in supported_system_images:
+ name = "emulator_images_%s_%s_%s" % (tag, api, arch)
+ if name in installed_system_images_dirs:
+ system_image_dir = installed_system_images_dirs[name]
+
+ # For supported system images that exist in /sdk/system-images/, we
+ # create a filegroup with their contents.
+ native.filegroup(
+ name = name,
+ srcs = native.glob([
+ "%s/**" % system_image_dir,
+ ]),
+ )
+ native.filegroup(
+ name = "%s_qemu2_extra" % name,
+ srcs = native.glob(["%s/kernel-ranchu" % system_image_dir]),
+ )
+ else:
+ # For supported system images that are not installed in the SDK, we
+ # create a "poison pill" genrule to display a helpful error message to
+ # a user who attempts to run a test against an android_device that
+ # they don't have the system image for installed.
+ native.genrule(
+ name = name,
+ outs = [
+ # Necessary so that the build doesn't fail in analysis because
+ # android_device expects a file named source.properties.
+ "poison_pill_for_%s/source.properties" % name,
+ ],
+ cmd = """echo \
This rule requires that the Android SDK used by Bazel has the \
following system image installed: %s. Please install this system \
image through the Android SDK Manager and try again. ; \
exit 1
""" % name,
- )
- native.filegroup(
- name = "%s_qemu2_extra" % name,
- srcs = [],
- )
+ )
+ native.filegroup(
+ name = "%s_qemu2_extra" % name,
+ srcs = [],
+ )
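
For orientation, a hedged sketch of how the template's entry points might be invoked from the generated BUILD file of an android_sdk_repository; the version strings, API levels, and system image directory are illustrative only:

    load(
        ":android_sdk_repository_template.bzl",
        "create_android_sdk_rules",
        "create_system_images_filegroups",
    )

    create_android_sdk_rules(
        name = "androidsdk",
        build_tools_version = "26.0.1",
        build_tools_directory = "26.0.1",
        api_levels = [23, 24, 25],
        default_api_level = 25,
    )

    create_system_images_filegroups(
        system_image_dirs = ["system-images/android-25/google_apis/x86"],
    )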
diff --git a/tools/build_defs/apple/shared.bzl b/tools/build_defs/apple/shared.bzl
index 0e3721f26f..440b1798f1 100644
--- a/tools/build_defs/apple/shared.bzl
+++ b/tools/build_defs/apple/shared.bzl
@@ -41,60 +41,55 @@ See :func:`apple_action`."""
XCRUNWRAPPER_LABEL = "//external:xcrunwrapper"
"""The label for xcrunwrapper tool."""
-
def label_scoped_path(ctx, path):
- """Return the path scoped to target's label."""
- return ctx.label.name + "/" + path.lstrip("/")
-
+ """Return the path scoped to target's label."""
+ return ctx.label.name + "/" + path.lstrip("/")
def module_cache_path(ctx):
- """Returns the Clang module cache path to use for this rule."""
- return ctx.genfiles_dir.path + "/_objc_module_cache"
-
+ """Returns the Clang module cache path to use for this rule."""
+ return ctx.genfiles_dir.path + "/_objc_module_cache"
def apple_action(ctx, **kw):
- """Creates an action that only runs on MacOS/Darwin.
+ """Creates an action that only runs on MacOS/Darwin.
- Call it similar to how you would call ctx.action:
- apple_action(ctx, outputs=[...], inputs=[...],...)
- """
- execution_requirements = dict(kw.get("execution_requirements", {}))
- execution_requirements.update(DARWIN_EXECUTION_REQUIREMENTS)
+    Call it similarly to how you would call ctx.action:
+ apple_action(ctx, outputs=[...], inputs=[...],...)
+ """
+ execution_requirements = dict(kw.get("execution_requirements", {}))
+ execution_requirements.update(DARWIN_EXECUTION_REQUIREMENTS)
- no_sandbox = kw.pop("no_sandbox", False)
- if no_sandbox:
- execution_requirements["nosandbox"] = "1"
+ no_sandbox = kw.pop("no_sandbox", False)
+ if no_sandbox:
+ execution_requirements["nosandbox"] = "1"
- kw["execution_requirements"] = execution_requirements
-
- ctx.action(**kw)
+ kw["execution_requirements"] = execution_requirements
+ ctx.action(**kw)
def xcrun_env(ctx):
- """Returns the environment dictionary necessary to use xcrunwrapper."""
- platform = ctx.fragments.apple.single_arch_platform
-
- if hasattr(apple_common, "apple_host_system_env"):
- xcode_config = ctx.attr._xcode_config[apple_common.XcodeVersionConfig]
- env = apple_common.target_apple_env(xcode_config, platform)
- env.update(apple_common.apple_host_system_env(xcode_config))
- else:
- env = ctx.fragments.apple.target_apple_env(platform)
- env.update(ctx.fragments.apple.apple_host_system_env())
+ """Returns the environment dictionary necessary to use xcrunwrapper."""
+ platform = ctx.fragments.apple.single_arch_platform
- return env
+ if hasattr(apple_common, "apple_host_system_env"):
+ xcode_config = ctx.attr._xcode_config[apple_common.XcodeVersionConfig]
+ env = apple_common.target_apple_env(xcode_config, platform)
+ env.update(apple_common.apple_host_system_env(xcode_config))
+ else:
+ env = ctx.fragments.apple.target_apple_env(platform)
+ env.update(ctx.fragments.apple.apple_host_system_env())
+ return env
def xcrun_action(ctx, **kw):
- """Creates an apple action that executes xcrunwrapper.
+ """Creates an apple action that executes xcrunwrapper.
- args:
- ctx: The context of the rule that owns this action.
+ args:
+ ctx: The context of the rule that owns this action.
- This method takes the same keyword arguments as ctx.action, however you don't
- need to specify the executable.
- """
- kw["env"] = dict(kw.get("env", {}))
- kw["env"].update(xcrun_env(ctx))
+ This method takes the same keyword arguments as ctx.action, however you don't
+ need to specify the executable.
+ """
+ kw["env"] = dict(kw.get("env", {}))
+ kw["env"].update(xcrun_env(ctx))
- apple_action(ctx, executable=ctx.executable._xcrunwrapper, **kw)
+ apple_action(ctx, executable = ctx.executable._xcrunwrapper, **kw)
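
A hedged sketch of xcrun_action inside a rule implementation; it forwards to apple_action with the xcrunwrapper executable and the Xcode environment, so the rule is assumed to carry the usual _xcrunwrapper and _xcode_config attributes (names and arguments below are illustrative):

    def _compile_m_impl(ctx):
        out = ctx.new_file(ctx.label.name + ".o")
        xcrun_action(
            ctx,
            inputs = ctx.files.srcs,
            outputs = [out],
            arguments = [
                "clang",
                "-c", ctx.files.srcs[0].path,
                "-o", out.path,
            ],
        )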
diff --git a/tools/build_defs/docker/build.bzl b/tools/build_defs/docker/build.bzl
index bd9da71848..9399b0d9e6 100644
--- a/tools/build_defs/docker/build.bzl
+++ b/tools/build_defs/docker/build.bzl
@@ -13,317 +13,344 @@
# limitations under the License.
"""Rule for building a Docker image."""
-load(":filetype.bzl",
- tar_filetype="tar",
- deb_filetype="deb",
- docker_filetype="docker")
-load("//tools/build_defs/hash:hash.bzl",
- _hash_tools="tools", _sha256="sha256")
-load(":label.bzl", _string_to_label="string_to_label")
-load(":layers.bzl",
- _assemble_image="assemble",
- _get_layers="get_from_target",
- _incr_load="incremental_load",
- _layer_tools="tools")
+load(
+ ":filetype.bzl",
+ deb_filetype = "deb",
+ docker_filetype = "docker",
+ tar_filetype = "tar",
+)
+load(
+ "//tools/build_defs/hash:hash.bzl",
+ _hash_tools = "tools",
+ _sha256 = "sha256",
+)
+load(":label.bzl", _string_to_label = "string_to_label")
+load(
+ ":layers.bzl",
+ _assemble_image = "assemble",
+ _get_layers = "get_from_target",
+ _incr_load = "incremental_load",
+ _layer_tools = "tools",
+)
load(":list.bzl", "reverse")
-load(":path.bzl",
- "dirname", "strip_prefix",
- _join_path="join",
- _canonicalize_path="canonicalize")
-load(":serialize.bzl", _serialize_dict="dict_to_associative_list")
-
+load(
+ ":path.bzl",
+ "dirname",
+ "strip_prefix",
+ _canonicalize_path = "canonicalize",
+ _join_path = "join",
+)
+load(":serialize.bzl", _serialize_dict = "dict_to_associative_list")
def _build_layer(ctx):
- """Build the current layer for appending it the base layer."""
-
- layer = ctx.new_file(ctx.label.name + ".layer")
- build_layer = ctx.executable.build_layer
- args = [
- "--output=" + layer.path,
- "--directory=" + ctx.attr.directory,
- "--mode=" + ctx.attr.mode,
- ]
-
- if ctx.attr.data_path:
- # If data_prefix is specified, then add files relative to that.
- data_path = _join_path(
- dirname(ctx.outputs.out.short_path),
- _canonicalize_path(ctx.attr.data_path))
- args += ["--file=%s=%s" % (f.path, strip_prefix(f.short_path, data_path))
- for f in ctx.files.files]
- else:
- # Otherwise, files are added without a directory prefix at all.
- args += ["--file=%s=%s" % (f.path, f.basename)
- for f in ctx.files.files]
-
- args += ["--tar=" + f.path for f in ctx.files.tars]
- args += ["--deb=" + f.path for f in ctx.files.debs if f.path.endswith(".deb")]
- args += ["--link=%s:%s" % (k, ctx.attr.symlinks[k])
- for k in ctx.attr.symlinks]
- arg_file = ctx.new_file(ctx.label.name + ".layer.args")
- ctx.file_action(arg_file, "\n".join(args))
-
- ctx.action(
- executable = build_layer,
- arguments = ["--flagfile=" + arg_file.path],
- inputs = ctx.files.files + ctx.files.tars + ctx.files.debs + [arg_file],
- outputs = [layer],
- use_default_shell_env=True,
- mnemonic="DockerLayer"
- )
- return layer
-
+ """Build the current layer for appending it the base layer."""
+
+ layer = ctx.new_file(ctx.label.name + ".layer")
+ build_layer = ctx.executable.build_layer
+ args = [
+ "--output=" + layer.path,
+ "--directory=" + ctx.attr.directory,
+ "--mode=" + ctx.attr.mode,
+ ]
+
+ if ctx.attr.data_path:
+ # If data_prefix is specified, then add files relative to that.
+ data_path = _join_path(
+ dirname(ctx.outputs.out.short_path),
+ _canonicalize_path(ctx.attr.data_path),
+ )
+ args += [
+ "--file=%s=%s" % (f.path, strip_prefix(f.short_path, data_path))
+ for f in ctx.files.files
+ ]
+ else:
+ # Otherwise, files are added without a directory prefix at all.
+ args += [
+ "--file=%s=%s" % (f.path, f.basename)
+ for f in ctx.files.files
+ ]
+
+ args += ["--tar=" + f.path for f in ctx.files.tars]
+ args += ["--deb=" + f.path for f in ctx.files.debs if f.path.endswith(".deb")]
+ args += [
+ "--link=%s:%s" % (k, ctx.attr.symlinks[k])
+ for k in ctx.attr.symlinks
+ ]
+ arg_file = ctx.new_file(ctx.label.name + ".layer.args")
+ ctx.file_action(arg_file, "\n".join(args))
+
+ ctx.action(
+ executable = build_layer,
+ arguments = ["--flagfile=" + arg_file.path],
+ inputs = ctx.files.files + ctx.files.tars + ctx.files.debs + [arg_file],
+ outputs = [layer],
+ use_default_shell_env = True,
+ mnemonic = "DockerLayer",
+ )
+ return layer
# TODO(mattmoor): In a future change, we should establish the invariant that
# base must expose "docker_layers", possibly by hoisting a "docker_load" rule
# from a tarball "base".
def _get_base_artifact(ctx):
- if ctx.files.base:
- if hasattr(ctx.attr.base, "docker_layers"):
- # The base is the first layer in docker_layers if provided.
- return _get_layers(ctx, ctx.attr.base)[0]["layer"]
- if len(ctx.files.base) != 1:
- fail("base attribute should be a single tar file.")
- return ctx.files.base[0]
-
+ if ctx.files.base:
+ if hasattr(ctx.attr.base, "docker_layers"):
+ # The base is the first layer in docker_layers if provided.
+ return _get_layers(ctx, ctx.attr.base)[0]["layer"]
+ if len(ctx.files.base) != 1:
+ fail("base attribute should be a single tar file.")
+ return ctx.files.base[0]
def _image_config(ctx, layer_names):
- """Create the configuration for a new docker image."""
- config = ctx.new_file(ctx.label.name + ".config")
-
- label_file_dict = _string_to_label(
- ctx.files.label_files, ctx.attr.label_file_strings)
-
- labels = dict()
- for l in ctx.attr.labels:
- fname = ctx.attr.labels[l]
- if fname[0] == "@":
- labels[l] = "@" + label_file_dict[fname[1:]].path
- else:
- labels[l] = fname
-
- args = [
- "--output=%s" % config.path,
- "--entrypoint=%s" % ",".join(ctx.attr.entrypoint),
- "--command=%s" % ",".join(ctx.attr.cmd),
- "--labels=%s" % _serialize_dict(labels),
- "--env=%s" % _serialize_dict(ctx.attr.env),
- "--ports=%s" % ",".join(ctx.attr.ports),
- "--volumes=%s" % ",".join(ctx.attr.volumes)
- ]
- if ctx.attr.user:
- args += ["--user=" + ctx.attr.user]
- if ctx.attr.workdir:
- args += ["--workdir=" + ctx.attr.workdir]
-
- inputs = layer_names
- args += ["--layer=@" + l.path for l in layer_names]
-
- if ctx.attr.label_files:
- inputs += ctx.files.label_files
-
- base = _get_base_artifact(ctx)
- if base:
- args += ["--base=%s" % base.path]
- inputs += [base]
-
- ctx.action(
- executable = ctx.executable.create_image_config,
- arguments = args,
- inputs = inputs,
- outputs = [config],
- use_default_shell_env=True,
- mnemonic = "ImageConfig")
- return config
-
+ """Create the configuration for a new docker image."""
+ config = ctx.new_file(ctx.label.name + ".config")
+
+ label_file_dict = _string_to_label(
+ ctx.files.label_files,
+ ctx.attr.label_file_strings,
+ )
+
+ labels = dict()
+ for l in ctx.attr.labels:
+ fname = ctx.attr.labels[l]
+ if fname[0] == "@":
+ labels[l] = "@" + label_file_dict[fname[1:]].path
+ else:
+ labels[l] = fname
+
+ args = [
+ "--output=%s" % config.path,
+ "--entrypoint=%s" % ",".join(ctx.attr.entrypoint),
+ "--command=%s" % ",".join(ctx.attr.cmd),
+ "--labels=%s" % _serialize_dict(labels),
+ "--env=%s" % _serialize_dict(ctx.attr.env),
+ "--ports=%s" % ",".join(ctx.attr.ports),
+ "--volumes=%s" % ",".join(ctx.attr.volumes),
+ ]
+ if ctx.attr.user:
+ args += ["--user=" + ctx.attr.user]
+ if ctx.attr.workdir:
+ args += ["--workdir=" + ctx.attr.workdir]
+
+ inputs = layer_names
+ args += ["--layer=@" + l.path for l in layer_names]
+
+ if ctx.attr.label_files:
+ inputs += ctx.files.label_files
+
+ base = _get_base_artifact(ctx)
+ if base:
+ args += ["--base=%s" % base.path]
+ inputs += [base]
+
+ ctx.action(
+ executable = ctx.executable.create_image_config,
+ arguments = args,
+ inputs = inputs,
+ outputs = [config],
+ use_default_shell_env = True,
+ mnemonic = "ImageConfig",
+ )
+ return config
def _metadata_action(ctx, layer, name, output):
- """Generate the action to create the JSON metadata for the layer."""
- rewrite_tool = ctx.executable.rewrite_tool
-
- label_file_dict = _string_to_label(
- ctx.files.label_files, ctx.attr.label_file_strings)
-
- labels = dict()
- for l in ctx.attr.labels:
- fname = ctx.attr.labels[l]
- if fname[0] == "@":
- labels[l] = "@" + label_file_dict[fname[1:]].path
- else:
- labels[l] = fname
-
- args = [
- "--output=%s" % output.path,
- "--layer=%s" % layer.path,
- "--name=@%s" % name.path,
- "--entrypoint=%s" % ",".join(ctx.attr.entrypoint),
- "--command=%s" % ",".join(ctx.attr.cmd),
- "--labels=%s" % _serialize_dict(labels),
- "--env=%s" % _serialize_dict(ctx.attr.env),
- "--ports=%s" % ",".join(ctx.attr.ports),
- "--volumes=%s" % ",".join(ctx.attr.volumes)
- ]
- if ctx.attr.workdir:
- args += ["--workdir=" + ctx.attr.workdir]
- inputs = [layer, rewrite_tool, name]
- if ctx.attr.label_files:
- inputs += ctx.files.label_files
-
- # TODO(mattmoor): Does this properly handle naked tarballs?
- base = _get_base_artifact(ctx)
- if base:
- args += ["--base=%s" % base.path]
- inputs += [base]
- if ctx.attr.user:
- args += ["--user=" + ctx.attr.user]
-
- ctx.action(
- executable = rewrite_tool,
- arguments = args,
- inputs = inputs,
- outputs = [output],
- use_default_shell_env=True,
- mnemonic = "RewriteJSON")
-
+ """Generate the action to create the JSON metadata for the layer."""
+ rewrite_tool = ctx.executable.rewrite_tool
+
+ label_file_dict = _string_to_label(
+ ctx.files.label_files,
+ ctx.attr.label_file_strings,
+ )
+
+ labels = dict()
+ for l in ctx.attr.labels:
+ fname = ctx.attr.labels[l]
+ if fname[0] == "@":
+ labels[l] = "@" + label_file_dict[fname[1:]].path
+ else:
+ labels[l] = fname
+
+ args = [
+ "--output=%s" % output.path,
+ "--layer=%s" % layer.path,
+ "--name=@%s" % name.path,
+ "--entrypoint=%s" % ",".join(ctx.attr.entrypoint),
+ "--command=%s" % ",".join(ctx.attr.cmd),
+ "--labels=%s" % _serialize_dict(labels),
+ "--env=%s" % _serialize_dict(ctx.attr.env),
+ "--ports=%s" % ",".join(ctx.attr.ports),
+ "--volumes=%s" % ",".join(ctx.attr.volumes),
+ ]
+ if ctx.attr.workdir:
+ args += ["--workdir=" + ctx.attr.workdir]
+ inputs = [layer, rewrite_tool, name]
+ if ctx.attr.label_files:
+ inputs += ctx.files.label_files
+
+ # TODO(mattmoor): Does this properly handle naked tarballs?
+ base = _get_base_artifact(ctx)
+ if base:
+ args += ["--base=%s" % base.path]
+ inputs += [base]
+ if ctx.attr.user:
+ args += ["--user=" + ctx.attr.user]
+
+ ctx.action(
+ executable = rewrite_tool,
+ arguments = args,
+ inputs = inputs,
+ outputs = [output],
+ use_default_shell_env = True,
+ mnemonic = "RewriteJSON",
+ )
def _metadata(ctx, layer, name):
- """Create the metadata for the new docker image."""
- metadata = ctx.new_file(ctx.label.name + ".metadata")
- _metadata_action(ctx, layer, name, metadata)
- return metadata
-
+ """Create the metadata for the new docker image."""
+ metadata = ctx.new_file(ctx.label.name + ".metadata")
+ _metadata_action(ctx, layer, name, metadata)
+ return metadata
def _compute_layer_name(ctx, layer):
- """Compute the layer's name.
-
- This function synthesize a version of its metadata where in place
- of its final name, we use the SHA256 of the layer blob.
-
- This makes the name of the layer a function of:
- - Its layer's SHA256
- - Its metadata
- - Its parent's name.
- Assuming the parent's name is derived by this same rigor, then
- a simple induction proves the content addressability.
-
- Args:
- ctx: Rule context.
- layer: The layer's artifact for which to compute the name.
- Returns:
- The artifact that will contains the name for the layer.
- """
- metadata = ctx.new_file(ctx.label.name + ".metadata-name")
- layer_sha = _sha256(ctx, layer)
- _metadata_action(ctx, layer, layer_sha, metadata)
- return _sha256(ctx, metadata)
-
+ """Compute the layer's name.
+
+ This function synthesizes a version of the layer's metadata in which
+ the final name is replaced by the SHA256 of the layer blob.
+
+ This makes the name of the layer a function of:
+ - Its layer's SHA256
+ - Its metadata
+ - Its parent's name.
+ Assuming the parent's name is derived with this same rigor, a
+ simple induction proves the content addressability.
+
+ Args:
+ ctx: Rule context.
+ layer: The layer's artifact for which to compute the name.
+ Returns:
+ The artifact that will contain the name for the layer.
+ """
+ metadata = ctx.new_file(ctx.label.name + ".metadata-name")
+ layer_sha = _sha256(ctx, layer)
+ _metadata_action(ctx, layer, layer_sha, metadata)
+ return _sha256(ctx, metadata)
def _repository_name(ctx):
- """Compute the repository name for the current rule."""
- if ctx.attr.legacy_repository_naming:
- # Legacy behavior, off by default.
- return _join_path(ctx.attr.repository, ctx.label.package.replace("/", "_"))
- # Newer Docker clients support multi-level names, which are a part of
- # the v2 registry specification.
- return _join_path(ctx.attr.repository, ctx.label.package)
+ """Compute the repository name for the current rule."""
+ if ctx.attr.legacy_repository_naming:
+ # Legacy behavior, off by default.
+ return _join_path(ctx.attr.repository, ctx.label.package.replace("/", "_"))
+ # Newer Docker clients support multi-level names, which are a part of
+ # the v2 registry specification.
+ return _join_path(ctx.attr.repository, ctx.label.package)
def _create_image(ctx, layers, identifier, config, name, metadata, tags):
- """Create the new image."""
- args = [
- "--output=" + ctx.outputs.layer.path,
- "--id=@" + identifier.path,
- "--config=" + config.path,
- ] + ["--tag=" + tag for tag in tags]
-
- args += ["--layer=@%s=%s" % (l["name"].path, l["layer"].path) for l in layers]
- inputs = [identifier, config] + [l["name"] for l in layers] + [l["layer"] for l in layers]
-
- if name:
- args += ["--legacy_id=@" + name.path]
- inputs += [name]
-
- if metadata:
- args += ["--metadata=" + metadata.path]
- inputs += [metadata]
-
- # If we have been provided a base image, add it.
- if ctx.attr.base and not hasattr(ctx.attr.base, "docker_layers"):
- legacy_base = _get_base_artifact(ctx)
- if legacy_base:
- args += ["--legacy_base=%s" % legacy_base.path]
- inputs += [legacy_base]
-
- # TODO(mattmoor): Does this properly handle naked tarballs? (excl. above)
- base = _get_base_artifact(ctx)
- if base:
- args += ["--base=%s" % base.path]
- inputs += [base]
- ctx.action(
- executable = ctx.executable.create_image,
- arguments = args,
- inputs = inputs,
- outputs = [ctx.outputs.layer],
- mnemonic = "CreateImage",
- )
-
+ """Create the new image."""
+ args = [
+ "--output=" + ctx.outputs.layer.path,
+ "--id=@" + identifier.path,
+ "--config=" + config.path,
+ ] + ["--tag=" + tag for tag in tags]
+
+ args += ["--layer=@%s=%s" % (l["name"].path, l["layer"].path) for l in layers]
+ inputs = [identifier, config] + [l["name"] for l in layers] + [l["layer"] for l in layers]
+
+ if name:
+ args += ["--legacy_id=@" + name.path]
+ inputs += [name]
+
+ if metadata:
+ args += ["--metadata=" + metadata.path]
+ inputs += [metadata]
+
+ # If we have been provided a base image, add it.
+ if ctx.attr.base and not hasattr(ctx.attr.base, "docker_layers"):
+ legacy_base = _get_base_artifact(ctx)
+ if legacy_base:
+ args += ["--legacy_base=%s" % legacy_base.path]
+ inputs += [legacy_base]
+
+ # TODO(mattmoor): Does this properly handle naked tarballs? (excl. above)
+ base = _get_base_artifact(ctx)
+ if base:
+ args += ["--base=%s" % base.path]
+ inputs += [base]
+ ctx.action(
+ executable = ctx.executable.create_image,
+ arguments = args,
+ inputs = inputs,
+ outputs = [ctx.outputs.layer],
+ mnemonic = "CreateImage",
+ )
def _docker_build_impl(ctx):
- """Implementation for the docker_build rule."""
- layer = _build_layer(ctx)
- layer_sha = _sha256(ctx, layer)
-
- config = _image_config(ctx, [layer_sha])
- identifier = _sha256(ctx, config)
-
- name = _compute_layer_name(ctx, layer)
- metadata = _metadata(ctx, layer, name)
-
- # Construct a temporary name based on the build target.
- tags = [_repository_name(ctx) + ":" + ctx.label.name]
-
- # creating a partial image so only pass the layers that belong to it
- image_layer = {"layer": layer, "name": layer_sha}
- _create_image(ctx, [image_layer], identifier, config, name, metadata, tags)
-
- # Compute the layers transitive provider.
- # This must includes all layers of the image, including:
- # - The layer introduced by this rule.
- # - The layers transitively introduced by docker_build deps.
- # - Layers introduced by a static tarball base.
- # This is because downstream tooling should just be able to depend on
- # the availability and completeness of this field.
- layers = [
- {"layer": ctx.outputs.layer, "id": identifier, "name": name}
- ] + _get_layers(ctx, ctx.attr.base)
-
- # Generate the incremental load statement
- _incr_load(ctx, layers, {tag_name: {"name": name, "id": identifier}
- for tag_name in tags},
- ctx.outputs.executable)
-
- _assemble_image(ctx, reverse(layers), {tag_name: name for tag_name in tags},
- ctx.outputs.out)
- runfiles = ctx.runfiles(
- files = ([l["name"] for l in layers] +
- [l["id"] for l in layers] +
- [l["layer"] for l in layers]))
- return struct(runfiles = runfiles,
- files = depset([ctx.outputs.layer]),
- docker_layers = layers)
-
+ """Implementation for the docker_build rule."""
+ layer = _build_layer(ctx)
+ layer_sha = _sha256(ctx, layer)
+
+ config = _image_config(ctx, [layer_sha])
+ identifier = _sha256(ctx, config)
+
+ name = _compute_layer_name(ctx, layer)
+ metadata = _metadata(ctx, layer, name)
+
+ # Construct a temporary name based on the build target.
+ tags = [_repository_name(ctx) + ":" + ctx.label.name]
+
+ # We are creating a partial image, so pass only the layers that belong to it.
+ image_layer = {"layer": layer, "name": layer_sha}
+ _create_image(ctx, [image_layer], identifier, config, name, metadata, tags)
+
+ # Compute the layers transitive provider.
+ # This must include all layers of the image:
+ # - The layer introduced by this rule.
+ # - The layers transitively introduced by docker_build deps.
+ # - Layers introduced by a static tarball base.
+ # This is because downstream tooling should just be able to depend on
+ # the availability and completeness of this field.
+ layers = [
+ {"layer": ctx.outputs.layer, "id": identifier, "name": name},
+ ] + _get_layers(ctx, ctx.attr.base)
+
+ # Generate the incremental load statement
+ _incr_load(
+ ctx,
+ layers,
+ {
+ tag_name: {"name": name, "id": identifier}
+ for tag_name in tags
+ },
+ ctx.outputs.executable,
+ )
+
+ _assemble_image(
+ ctx,
+ reverse(layers),
+ {tag_name: name for tag_name in tags},
+ ctx.outputs.out,
+ )
+ runfiles = ctx.runfiles(
+ files = ([l["name"] for l in layers] +
+ [l["id"] for l in layers] +
+ [l["layer"] for l in layers]),
+ )
+ return struct(
+ runfiles = runfiles,
+ files = depset([ctx.outputs.layer]),
+ docker_layers = layers,
+ )
docker_build_ = rule(
implementation = _docker_build_impl,
attrs = dict({
- "base": attr.label(allow_files=docker_filetype),
+ "base": attr.label(allow_files = docker_filetype),
"data_path": attr.string(),
- "directory": attr.string(default="/"),
- "tars": attr.label_list(allow_files=tar_filetype),
- "debs": attr.label_list(allow_files=deb_filetype),
- "files": attr.label_list(allow_files=True),
- "legacy_repository_naming": attr.bool(default=False),
- "mode": attr.string(default="0555"),
+ "directory": attr.string(default = "/"),
+ "tars": attr.label_list(allow_files = tar_filetype),
+ "debs": attr.label_list(allow_files = deb_filetype),
+ "files": attr.label_list(allow_files = True),
+ "legacy_repository_naming": attr.bool(default = False),
+ "mode": attr.string(default = "0555"),
"symlinks": attr.string_dict(),
"entrypoint": attr.string_list(),
"cmd": attr.string_list(),
@@ -333,38 +360,43 @@ docker_build_ = rule(
"ports": attr.string_list(), # Skylark doesn't support int_list...
"volumes": attr.string_list(),
"workdir": attr.string(),
- "repository": attr.string(default="bazel"),
+ "repository": attr.string(default = "bazel"),
# Implicit dependencies.
"label_files": attr.label_list(
- allow_files=True),
+ allow_files = True,
+ ),
"label_file_strings": attr.string_list(),
"build_layer": attr.label(
- default=Label("//tools/build_defs/pkg:build_tar"),
- cfg="host",
- executable=True,
- allow_files=True),
+ default = Label("//tools/build_defs/pkg:build_tar"),
+ cfg = "host",
+ executable = True,
+ allow_files = True,
+ ),
"create_image": attr.label(
- default=Label("//tools/build_defs/docker:create_image"),
- cfg="host",
- executable=True,
- allow_files=True),
+ default = Label("//tools/build_defs/docker:create_image"),
+ cfg = "host",
+ executable = True,
+ allow_files = True,
+ ),
"rewrite_tool": attr.label(
- default=Label("//tools/build_defs/docker:rewrite_json"),
- cfg="host",
- executable=True,
- allow_files=True),
+ default = Label("//tools/build_defs/docker:rewrite_json"),
+ cfg = "host",
+ executable = True,
+ allow_files = True,
+ ),
"create_image_config": attr.label(
- default=Label("//tools/build_defs/docker:create_image_config"),
- cfg="host",
- executable=True,
- allow_files=True)
+ default = Label("//tools/build_defs/docker:create_image_config"),
+ cfg = "host",
+ executable = True,
+ allow_files = True,
+ ),
}.items() + _hash_tools.items() + _layer_tools.items()),
outputs = {
"out": "%{name}.tar",
"layer": "%{name}-layer.tar",
},
- executable = True)
-
+ executable = True,
+)
# This validates the two forms of value accepted by
# ENTRYPOINT and CMD, turning them into a canonical
@@ -387,15 +419,14 @@ docker_build_ = rule(
# ],
# NOTE: prefacing a command with 'exec' just ends up with the former
def _validate_command(name, argument):
- if type(argument) == "string":
- return ["/bin/sh", "-c", argument]
- elif type(argument) == "list":
- return argument
- elif argument:
- fail("The %s attribute must be a string or list, if specified." % name)
- else:
- return None
-
+ if type(argument) == "string":
+ return ["/bin/sh", "-c", argument]
+ elif type(argument) == "list":
+ return argument
+ elif argument:
+ fail("The %s attribute must be a string or list, if specified." % name)
+ else:
+ return None
# Produces a new docker image tarball compatible with 'docker load', which
# is a single additional layer atop 'base'. The goal is to have relatively
@@ -472,39 +503,39 @@ def _validate_command(name, argument):
# },
# )
def docker_build(**kwargs):
- """Package a docker image.
-
- This rule generates a sequence of genrules the last of which is named 'name',
- so the dependency graph works out properly. The output of this rule is a
- tarball compatible with 'docker save/load' with the structure:
- {layer-name}:
- layer.tar
- VERSION
- json
- {image-config-sha256}.json
- ...
- manifest.json
- repositories
- top # an implementation detail of our rules, not consumed by Docker.
- This rule appends a single new layer to the tarball of this form provided
- via the 'base' parameter.
-
- The images produced by this rule are always named 'bazel/tmp:latest' when
- loaded (an internal detail). The expectation is that the images produced
- by these rules will be uploaded using the 'docker_push' rule below.
-
- Args:
- **kwargs: See above.
- """
- if "cmd" in kwargs:
- kwargs["cmd"] = _validate_command("cmd", kwargs["cmd"])
- for reserved in ["label_files", "label_file_strings"]:
- if reserved in kwargs:
- fail("reserved for internal use by docker_build macro", attr=reserved)
- if "labels" in kwargs:
- files = sorted(depset([v[1:] for v in kwargs["labels"].values() if v[0] == "@"]))
- kwargs["label_files"] = files
- kwargs["label_file_strings"] = files
- if "entrypoint" in kwargs:
- kwargs["entrypoint"] = _validate_command("entrypoint", kwargs["entrypoint"])
- docker_build_(**kwargs)
+ """Package a docker image.
+
+ This rule generates a sequence of genrules, the last of which is named 'name',
+ so the dependency graph works out properly. The output of this rule is a
+ tarball compatible with 'docker save/load' with the structure:
+ {layer-name}:
+ layer.tar
+ VERSION
+ json
+ {image-config-sha256}.json
+ ...
+ manifest.json
+ repositories
+ top # an implementation detail of our rules, not consumed by Docker.
+ This rule appends a single new layer to a tarball of this form,
+ provided via the 'base' parameter.
+
+ The images produced by this rule are always named 'bazel/tmp:latest' when
+ loaded (an internal detail). The expectation is that the images produced
+ by these rules will be uploaded using the 'docker_push' rule below.
+
+ Args:
+ **kwargs: See above.
+ """
+ if "cmd" in kwargs:
+ kwargs["cmd"] = _validate_command("cmd", kwargs["cmd"])
+ for reserved in ["label_files", "label_file_strings"]:
+ if reserved in kwargs:
+ fail("reserved for internal use by docker_build macro", attr = reserved)
+ if "labels" in kwargs:
+ files = sorted(depset([v[1:] for v in kwargs["labels"].values() if v[0] == "@"]))
+ kwargs["label_files"] = files
+ kwargs["label_file_strings"] = files
+ if "entrypoint" in kwargs:
+ kwargs["entrypoint"] = _validate_command("entrypoint", kwargs["entrypoint"])
+ docker_build_(**kwargs)
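As a reviewer's aid, here is a minimal usage sketch of the docker_build macro reformatted above. The target, base, and file names are hypothetical, and the load statement assumes an in-repo reference to build.bzl; cmd is given in string form to exercise the '/bin/sh -c' canonicalization performed by _validate_command:

    load("//tools/build_defs/docker:build.bzl", "docker_build")

    docker_build(
        name = "app_image",        # hypothetical target name
        base = "@ubuntu//:image",  # hypothetical base image tarball
        files = ["app_binary"],    # hypothetical; placed under `directory`
        directory = "/app",
        cmd = "/app/app_binary",   # string form becomes ["/bin/sh", "-c", ...]
        env = {"PORT": "8080"},
        ports = ["8080"],          # strings; Skylark doesn't support int_list
    )

Loading the resulting %{name}.tar with 'docker load' yields the layered structure described in the docstring above.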
diff --git a/tools/build_defs/docker/bundle.bzl b/tools/build_defs/docker/bundle.bzl
index 7aca21553b..f2ad4139eb 100644
--- a/tools/build_defs/docker/bundle.bzl
+++ b/tools/build_defs/docker/bundle.bzl
@@ -13,72 +13,77 @@
# limitations under the License.
"""Rule for bundling Docker images into a tarball."""
-load(":label.bzl", _string_to_label="string_to_label")
-load(":layers.bzl",
- _assemble_image="assemble",
- _get_layers="get_from_target",
- _incr_load="incremental_load",
- _layer_tools="tools")
+load(":label.bzl", _string_to_label = "string_to_label")
+load(
+ ":layers.bzl",
+ _assemble_image = "assemble",
+ _get_layers = "get_from_target",
+ _incr_load = "incremental_load",
+ _layer_tools = "tools",
+)
load(":list.bzl", "reverse")
-
def _docker_bundle_impl(ctx):
- """Implementation for the docker_bundle rule."""
-
- # Compute the set of layers from the image_targes.
- image_target_dict = _string_to_label(
- ctx.attr.image_targets, ctx.attr.image_target_strings)
-
- seen_names = []
- layers = []
- for image in ctx.attr.image_targets:
- # TODO(mattmoor): Add support for naked tarballs.
- for layer in _get_layers(ctx, image):
- if layer["name"].path in seen_names:
- continue
- seen_names.append(layer["name"].path)
- layers.append(layer)
-
- images = dict()
- for unresolved_tag in ctx.attr.images:
- # Allow users to put make variables into the tag name.
- tag = ctx.expand_make_variables("images", unresolved_tag, {})
-
- target = ctx.attr.images[unresolved_tag]
- target = image_target_dict[target]
- images[tag] = _get_layers(ctx, target)[0]
-
- _incr_load(ctx, layers, images, ctx.outputs.executable)
-
- _assemble_image(ctx, reverse(layers), {
- # Create a new dictionary with the same keyspace that
- # points to the name of the layer.
- k: images[k]["name"]
- for k in images
- }, ctx.outputs.out)
-
- runfiles = ctx.runfiles(
- files = ([l["name"] for l in layers] +
- [l["id"] for l in layers] +
- [l["layer"] for l in layers]))
-
- return struct(runfiles = runfiles,
- files = depset())
-
+ """Implementation for the docker_bundle rule."""
+
+ # Compute the set of layers from the image_targets.
+ image_target_dict = _string_to_label(
+ ctx.attr.image_targets,
+ ctx.attr.image_target_strings,
+ )
+
+ seen_names = []
+ layers = []
+ for image in ctx.attr.image_targets:
+ # TODO(mattmoor): Add support for naked tarballs.
+ for layer in _get_layers(ctx, image):
+ if layer["name"].path in seen_names:
+ continue
+ seen_names.append(layer["name"].path)
+ layers.append(layer)
+
+ images = dict()
+ for unresolved_tag in ctx.attr.images:
+ # Allow users to put make variables into the tag name.
+ tag = ctx.expand_make_variables("images", unresolved_tag, {})
+
+ target = ctx.attr.images[unresolved_tag]
+ target = image_target_dict[target]
+ images[tag] = _get_layers(ctx, target)[0]
+
+ _incr_load(ctx, layers, images, ctx.outputs.executable)
+
+ _assemble_image(ctx, reverse(layers), {
+ # Create a new dictionary with the same keyspace that
+ # points to the name of the layer.
+ k: images[k]["name"]
+ for k in images
+ }, ctx.outputs.out)
+
+ runfiles = ctx.runfiles(
+ files = ([l["name"] for l in layers] +
+ [l["id"] for l in layers] +
+ [l["layer"] for l in layers]),
+ )
+
+ return struct(
+ runfiles = runfiles,
+ files = depset(),
+ )
docker_bundle_ = rule(
implementation = _docker_bundle_impl,
attrs = dict({
"images": attr.string_dict(),
# Implicit dependencies.
- "image_targets": attr.label_list(allow_files=True),
+ "image_targets": attr.label_list(allow_files = True),
"image_target_strings": attr.string_list(),
}.items() + _layer_tools.items()),
outputs = {
"out": "%{name}.tar",
},
- executable = True)
-
+ executable = True,
+)
# Produces a new docker image tarball compatible with 'docker load', which
# contains the N listed 'images', each aliased with their key.
@@ -92,17 +97,17 @@ docker_bundle_ = rule(
# }
# )
def docker_bundle(**kwargs):
- """Package several docker images into a single tarball.
+ """Package several docker images into a single tarball.
- Args:
- **kwargs: See above.
- """
- for reserved in ["image_targets", "image_target_strings"]:
- if reserved in kwargs:
- fail("reserved for internal use by docker_bundle macro", attr=reserved)
+ Args:
+ **kwargs: See above.
+ """
+ for reserved in ["image_targets", "image_target_strings"]:
+ if reserved in kwargs:
+ fail("reserved for internal use by docker_bundle macro", attr = reserved)
- if "images" in kwargs:
- kwargs["image_targets"] = kwargs["images"].values()
- kwargs["image_target_strings"] = kwargs["images"].values()
+ if "images" in kwargs:
+ kwargs["image_targets"] = kwargs["images"].values()
+ kwargs["image_target_strings"] = kwargs["images"].values()
- docker_bundle_(**kwargs)
+ docker_bundle_(**kwargs)
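A hedged sketch of docker_bundle usage, filling out the truncated example in the comment above; the tags and image targets are hypothetical. Note the macro copies images.values() into both image_targets (resolved labels) and image_target_strings (verbatim strings) so the implementation can map tags back to targets:

    load("//tools/build_defs/docker:bundle.bzl", "docker_bundle")

    docker_bundle(
        name = "bundle",
        images = {
            # tag -> image target; make variables in the tag are expanded
            "gcr.io/example/frontend:latest": ":frontend_image",
            "gcr.io/example/backend:latest": ":backend_image",
        },
    )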
diff --git a/tools/build_defs/docker/label.bzl b/tools/build_defs/docker/label.bzl
index 8297f4563f..5842384604 100644
--- a/tools/build_defs/docker/label.bzl
+++ b/tools/build_defs/docker/label.bzl
@@ -14,10 +14,10 @@
"""Rules for dealing with labels and their string form."""
def string_to_label(label_list, string_list):
- """Form a mapping from label strings to the resolved label."""
- label_string_dict = dict()
- for i in range(len(label_list)):
- string = string_list[i]
- label = label_list[i]
- label_string_dict[string] = label
- return label_string_dict
+ """Form a mapping from label strings to the resolved label."""
+ label_string_dict = dict()
+ for i in range(len(label_list)):
+ string = string_list[i]
+ label = label_list[i]
+ label_string_dict[string] = label
+ return label_string_dict
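string_to_label supports the macro/rule pairing used by docker_build and docker_bundle above: a macro passes the same values once through a label_list (resolved to File objects) and once through a string_list (kept verbatim), and the implementation zips the parallel lists back into a map. A small illustrative sketch with hypothetical values:

    load(":label.bzl", "string_to_label")

    # labels  = [<File a.txt>, <File b.txt>]    (from ctx.files.label_files)
    # strings = ["//pkg:a.txt", "//pkg:b.txt"]  (from ctx.attr.label_file_strings)
    # string_to_label(labels, strings)
    #   == {"//pkg:a.txt": <File a.txt>, "//pkg:b.txt": <File b.txt>}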
diff --git a/tools/build_defs/docker/layers.bzl b/tools/build_defs/docker/layers.bzl
index e011f025f3..7ca2409561 100644
--- a/tools/build_defs/docker/layers.bzl
+++ b/tools/build_defs/docker/layers.bzl
@@ -14,68 +14,72 @@
"""Tools for dealing with Docker Image layers."""
load(":list.bzl", "reverse")
-load(":path.bzl", _get_runfile_path="runfile")
+load(":path.bzl", _get_runfile_path = "runfile")
def get_from_target(unused_ctx, target):
- if hasattr(target, "docker_layers"):
- return target.docker_layers
- else:
- # TODO(mattmoor): Use containerregistry.client's FromTarball
- # to create an entry from a tarball base image.
- return []
-
+ if hasattr(target, "docker_layers"):
+ return target.docker_layers
+ else:
+ # TODO(mattmoor): Use containerregistry.client's FromTarball
+ # to create an entry from a tarball base image.
+ return []
def assemble(ctx, layers, tags_to_names, output):
- """Create the full image from the list of layers."""
- layers = [l["layer"] for l in layers]
- args = [
- "--output=" + output.path,
- ] + [
- "--tags=" + tag + "=@" + tags_to_names[tag].path
- for tag in tags_to_names
- ] + ["--layer=" + l.path for l in layers]
- inputs = layers + tags_to_names.values()
- ctx.action(
- executable = ctx.executable.join_layers,
- arguments = args,
- inputs = inputs,
- outputs = [output],
- mnemonic = "JoinLayers"
- )
-
+ """Create the full image from the list of layers."""
+ layers = [l["layer"] for l in layers]
+ args = [
+ "--output=" + output.path,
+ ] + [
+ "--tags=" + tag + "=@" + tags_to_names[tag].path
+ for tag in tags_to_names
+ ] + ["--layer=" + l.path for l in layers]
+ inputs = layers + tags_to_names.values()
+ ctx.action(
+ executable = ctx.executable.join_layers,
+ arguments = args,
+ inputs = inputs,
+ outputs = [output],
+ mnemonic = "JoinLayers",
+ )
def incremental_load(ctx, layers, images, output):
- """Generate the incremental load statement."""
- ctx.template_action(
- template = ctx.file.incremental_load_template,
- substitutions = {
- "%{load_statements}": "\n".join([
- "incr_load '%s' '%s' '%s'" % (_get_runfile_path(ctx, l["name"]),
- _get_runfile_path(ctx, l["id"]),
- _get_runfile_path(ctx, l["layer"]))
- # The last layer is the first in the list of layers.
- # We reverse to load the layer from the parent to the child.
- for l in reverse(layers)]),
- "%{tag_statements}": "\n".join([
- "tag_layer '%s' '%s' '%s'" % (
- img,
- _get_runfile_path(ctx, images[img]["name"]),
- _get_runfile_path(ctx, images[img]["id"]))
- for img in images
- ])
- },
- output = output,
- executable = True)
-
+ """Generate the incremental load statement."""
+ ctx.template_action(
+ template = ctx.file.incremental_load_template,
+ substitutions = {
+ "%{load_statements}": "\n".join([
+ "incr_load '%s' '%s' '%s'" % (
+ _get_runfile_path(ctx, l["name"]),
+ _get_runfile_path(ctx, l["id"]),
+ _get_runfile_path(ctx, l["layer"]),
+ )
+ # The last layer is the first in the list of layers.
+ # We reverse to load the layer from the parent to the child.
+ for l in reverse(layers)
+ ]),
+ "%{tag_statements}": "\n".join([
+ "tag_layer '%s' '%s' '%s'" % (
+ img,
+ _get_runfile_path(ctx, images[img]["name"]),
+ _get_runfile_path(ctx, images[img]["id"]),
+ )
+ for img in images
+ ]),
+ },
+ output = output,
+ executable = True,
+ )
tools = {
"incremental_load_template": attr.label(
- default=Label("//tools/build_defs/docker:incremental_load_template"),
- single_file=True,
- allow_files=True),
+ default = Label("//tools/build_defs/docker:incremental_load_template"),
+ single_file = True,
+ allow_files = True,
+ ),
"join_layers": attr.label(
- default=Label("//tools/build_defs/docker:join_layers"),
- cfg="host",
- executable=True,
- allow_files=True)
+ default = Label("//tools/build_defs/docker:join_layers"),
+ cfg = "host",
+ executable = True,
+ allow_files = True,
+ ),
}
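The tools dict above is meant to be merged into a consuming rule's attrs, as docker_build_ and docker_bundle_ do with _layer_tools.items(). A minimal sketch of that pattern, with a hypothetical rule name:

    load(":layers.bzl", _get_layers = "get_from_target", _layer_tools = "tools")

    def _impl(ctx):
        # Empty for bases without the docker_layers provider
        # (e.g. a plain tarball, per the TODO in get_from_target).
        layers = _get_layers(ctx, ctx.attr.base)
        return struct(docker_layers = layers)

    my_layer_rule = rule(  # hypothetical
        implementation = _impl,
        attrs = dict({
            "base": attr.label(allow_files = True),
        }.items() + _layer_tools.items()),
    )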
diff --git a/tools/build_defs/docker/list.bzl b/tools/build_defs/docker/list.bzl
index 21036a75d6..f10e117932 100644
--- a/tools/build_defs/docker/list.bzl
+++ b/tools/build_defs/docker/list.bzl
@@ -13,9 +13,8 @@
# limitations under the License.
"""Methods for manipulating lists."""
-
def reverse(lst):
- result = []
- for el in lst:
- result = [el] + result
- return result
+ result = []
+ for el in lst:
+ result = [el] + result
+ return result
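reverse builds its result by prepending each element to an accumulator, so the last element of the input comes first. For example:

    load(":list.bzl", "reverse")

    # reverse(["base", "middle", "top"]) == ["top", "middle", "base"]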
diff --git a/tools/build_defs/docker/path.bzl b/tools/build_defs/docker/path.bzl
index 03cd4155d1..4e9f13aac2 100644
--- a/tools/build_defs/docker/path.bzl
+++ b/tools/build_defs/docker/path.bzl
@@ -13,57 +13,53 @@
# limitations under the License.
"""Rules for manipulating paths."""
-
def dirname(path):
- """Returns the directory's name."""
- last_sep = path.rfind("/")
- if last_sep == -1:
- return "" # The artifact is at the top level.
- return path[:last_sep]
-
+ """Returns the directory's name."""
+ last_sep = path.rfind("/")
+ if last_sep == -1:
+ return "" # The artifact is at the top level.
+ return path[:last_sep]
def join(directory, path):
- """Compute the relative data path prefix from the data_path attribute."""
- if not path:
- return directory
- if path[0] == "/":
- return path[1:]
- if directory == "/":
- return path
- return directory + "/" + path
-
+ """Compute the relative data path prefix from the data_path attribute."""
+ if not path:
+ return directory
+ if path[0] == "/":
+ return path[1:]
+ if directory == "/":
+ return path
+ return directory + "/" + path
def canonicalize(path):
- """Canonicalize the input path."""
- if not path:
- return path
- # Strip ./ from the beginning if specified.
- # There is no way to handle .// correctly (no function that would make
- # that possible and Skylark is not turing complete) so just consider it
- # as an absolute path. A path of / should preserve the entire
- # path up to the repository root.
- if path == "/":
- return path
- if len(path) >= 2 and path[0:2] == "./":
- path = path[2:]
- if not path or path == ".": # Relative to current package
- return ""
- elif path[0] == "/": # Absolute path
- return path
- else: # Relative to a sub-directory
- return path
+ """Canonicalize the input path."""
+ if not path:
+ return path
+ # Strip ./ from the beginning if specified.
+ # There is no way to handle .// correctly (no function that would make
+ # that possible and Skylark is not Turing-complete) so just consider it
+ # as an absolute path. A path of / should preserve the entire
+ # path up to the repository root.
+ if path == "/":
+ return path
+ if len(path) >= 2 and path[0:2] == "./":
+ path = path[2:]
+ if not path or path == ".": # Relative to current package
+ return ""
+ elif path[0] == "/": # Absolute path
+ return path
+ else: # Relative to a sub-directory
+ return path
def strip_prefix(path, prefix):
- """Returns the path with the specified prefix removed."""
- if path.startswith(prefix):
- return path[len(prefix):]
- return path
-
+ """Returns the path with the specified prefix removed."""
+ if path.startswith(prefix):
+ return path[len(prefix):]
+ return path
def runfile(ctx, f):
- """Return the runfiles relative path of f."""
- if ctx.workspace_name:
- return ctx.workspace_name + "/" + f.short_path
- else:
- return f.short_path
+ """Return the runfiles relative path of f."""
+ if ctx.workspace_name:
+ return ctx.workspace_name + "/" + f.short_path
+ else:
+ return f.short_path
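The behavior of these helpers, summarized with hypothetical inputs; each line can be checked against the definitions above:

    load(":path.bzl", "canonicalize", "dirname", "join", "strip_prefix")

    # dirname("a/b/c.txt")         == "a/b"
    # join("/app", "bin/tool")     == "/app/bin/tool"
    # join("/app", "/etc/hosts")   == "etc/hosts"  (leading "/" is repo-root relative)
    # canonicalize("./foo")        == "foo"
    # strip_prefix("a/b/c", "a/")  == "b/c"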
diff --git a/tools/build_defs/docker/serialize.bzl b/tools/build_defs/docker/serialize.bzl
index 58fe163851..e684387f35 100644
--- a/tools/build_defs/docker/serialize.bzl
+++ b/tools/build_defs/docker/serialize.bzl
@@ -13,7 +13,6 @@
# limitations under the License.
"""Methods for serializing objects."""
-
def dict_to_associative_list(dict_value):
- """Serializes a dict to an associative list."""
- return ",".join(["%s=%s" % (k, dict_value[k]) for k in dict_value])
+ """Serializes a dict to an associative list."""
+ return ",".join(["%s=%s" % (k, dict_value[k]) for k in dict_value])
diff --git a/tools/build_defs/hash/hash.bzl b/tools/build_defs/hash/hash.bzl
index 62741540b2..3eb3cd6af1 100644
--- a/tools/build_defs/hash/hash.bzl
+++ b/tools/build_defs/hash/hash.bzl
@@ -14,21 +14,22 @@
"""Functions for producing the hash of an artifact."""
def sha256(ctx, artifact):
- """Create an action to compute the SHA-256 of an artifact."""
- out = ctx.new_file(artifact.basename + ".sha256")
- ctx.action(
- executable = ctx.executable.sha256,
- arguments = [artifact.path, out.path],
- inputs = [artifact],
- outputs = [out],
- mnemonic = "SHA256")
- return out
-
+ """Create an action to compute the SHA-256 of an artifact."""
+ out = ctx.new_file(artifact.basename + ".sha256")
+ ctx.action(
+ executable = ctx.executable.sha256,
+ arguments = [artifact.path, out.path],
+ inputs = [artifact],
+ outputs = [out],
+ mnemonic = "SHA256",
+ )
+ return out
tools = {
"sha256": attr.label(
- default=Label("//tools/build_defs/hash:sha256"),
- cfg="host",
- executable=True,
- allow_files=True)
+ default = Label("//tools/build_defs/hash:sha256"),
+ cfg = "host",
+ executable = True,
+ allow_files = True,
+ ),
}
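As with the docker layer tools, this tools dict is merged into a consuming rule's attrs (build.bzl does so via _hash_tools.items()). A minimal sketch with a hypothetical rule:

    load("//tools/build_defs/hash:hash.bzl", _hash_tools = "tools", _sha256 = "sha256")

    def _impl(ctx):
        # Declares <basename>.sha256 and registers the SHA256 action above.
        digest = _sha256(ctx, ctx.file.src)
        return struct(files = depset([digest]))

    checksum = rule(  # hypothetical
        implementation = _impl,
        attrs = dict({
            "src": attr.label(allow_files = True, single_file = True),
        }.items() + _hash_tools.items()),
    )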
diff --git a/tools/build_defs/pkg/path.bzl b/tools/build_defs/pkg/path.bzl
index 0b544299ee..4fc9357802 100644
--- a/tools/build_defs/pkg/path.bzl
+++ b/tools/build_defs/pkg/path.bzl
@@ -14,43 +14,43 @@
"""Helper functions that don't depend on Skylark, so can be unit tested."""
def _short_path_dirname(path):
- """Returns the directory's name of the short path of an artifact."""
- sp = path.short_path
- last_pkg = sp.rfind("/")
- if last_pkg == -1:
- # Top-level BUILD file.
- return ""
- return sp[:last_pkg]
+ """Returns the directory's name of the short path of an artifact."""
+ sp = path.short_path
+ last_pkg = sp.rfind("/")
+ if last_pkg == -1:
+ # Top-level BUILD file.
+ return ""
+ return sp[:last_pkg]
def dest_path(f, strip_prefix):
- """Returns the short path of f, stripped of strip_prefix."""
- if strip_prefix == None:
- # If no strip_prefix was specified, use the package of the
- # given input as the strip_prefix.
- strip_prefix = _short_path_dirname(f)
- if not strip_prefix:
+ """Returns the short path of f, stripped of strip_prefix."""
+ if strip_prefix == None:
+ # If no strip_prefix was specified, use the package of the
+ # given input as the strip_prefix.
+ strip_prefix = _short_path_dirname(f)
+ if not strip_prefix:
+ return f.short_path
+ if f.short_path.startswith(strip_prefix):
+ return f.short_path[len(strip_prefix):]
return f.short_path
- if f.short_path.startswith(strip_prefix):
- return f.short_path[len(strip_prefix):]
- return f.short_path
def compute_data_path(out, data_path):
- """Compute the relative data path prefix from the data_path attribute."""
- if data_path:
- # Strip ./ from the beginning if specified.
- # There is no way to handle .// correctly (no function that would make
- # that possible and Skylark is not turing complete) so just consider it
- # as an absolute path.
- if len(data_path) >= 2 and data_path[0:2] == "./":
- data_path = data_path[2:]
- if not data_path or data_path == ".": # Relative to current package
- return _short_path_dirname(out)
- elif data_path[0] == "/": # Absolute path
- return data_path[1:]
- else: # Relative to a sub-directory
- tmp_short_path_dirname = _short_path_dirname(out)
- if tmp_short_path_dirname:
- return tmp_short_path_dirname + "/" + data_path
- return data_path
- else:
- return None
+ """Compute the relative data path prefix from the data_path attribute."""
+ if data_path:
+ # Strip ./ from the beginning if specified.
+ # There is no way to handle .// correctly (no function that would make
+ # that possible and Skylark is not Turing-complete) so just consider it
+ # as an absolute path.
+ if len(data_path) >= 2 and data_path[0:2] == "./":
+ data_path = data_path[2:]
+ if not data_path or data_path == ".": # Relative to current package
+ return _short_path_dirname(out)
+ elif data_path[0] == "/": # Absolute path
+ return data_path[1:]
+ else: # Relative to a sub-directory
+ tmp_short_path_dirname = _short_path_dirname(out)
+ if tmp_short_path_dirname:
+ return tmp_short_path_dirname + "/" + data_path
+ return data_path
+ else:
+ return None
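Worked examples for the two helpers, using a hypothetical artifact f whose short_path is "mypkg/data/config.txt":

    # dest_path(f, None)            == "/config.txt"   (default strips f's package dir)
    # dest_path(f, "mypkg/")        == "data/config.txt"
    # compute_data_path(out, "/x")  == "x"              (absolute: repo-root relative)
    # compute_data_path(out, ".")   == directory of out's short path
    # compute_data_path(out, None)  == None             (pkg_tar then strips per file)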
diff --git a/tools/build_defs/pkg/pkg.bzl b/tools/build_defs/pkg/pkg.bzl
index 8cbe03ceec..f18a736d87 100644
--- a/tools/build_defs/pkg/pkg.bzl
+++ b/tools/build_defs/pkg/pkg.bzl
@@ -12,251 +12,263 @@
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rules for manipulation of various packaging."""
-load(":path.bzl", "dest_path", "compute_data_path")
+
+load(":path.bzl", "compute_data_path", "dest_path")
# Filetype to restrict inputs
tar_filetype = [".tar", ".tar.gz", ".tgz", ".tar.xz", ".tar.bz2"]
deb_filetype = [".deb", ".udeb"]
def _pkg_tar_impl(ctx):
- """Implementation of the pkg_tar rule."""
- # Compute the relative path
- data_path = compute_data_path(ctx.outputs.out, ctx.attr.strip_prefix)
+ """Implementation of the pkg_tar rule."""
- build_tar = ctx.executable.build_tar
- args = [
- "--output=" + ctx.outputs.out.path,
- "--directory=" + ctx.attr.package_dir,
- "--mode=" + ctx.attr.mode,
- "--owner=" + ctx.attr.owner,
- "--owner_name=" + ctx.attr.ownername,
- ]
+ # Compute the relative path
+ data_path = compute_data_path(ctx.outputs.out, ctx.attr.strip_prefix)
- file_inputs = ctx.files.srcs[:]
+ build_tar = ctx.executable.build_tar
+ args = [
+ "--output=" + ctx.outputs.out.path,
+ "--directory=" + ctx.attr.package_dir,
+ "--mode=" + ctx.attr.mode,
+ "--owner=" + ctx.attr.owner,
+ "--owner_name=" + ctx.attr.ownername,
+ ]
- # Add runfiles if requested
- if ctx.attr.include_runfiles:
- for f in ctx.attr.srcs:
- if hasattr(f, "default_runfiles"):
- run_files = f.default_runfiles.files.to_list()
- file_inputs += run_files
+ file_inputs = ctx.files.srcs[:]
- args += ["--file=%s=%s" % (f.path, dest_path(f, data_path))
- for f in file_inputs]
- for target, f_dest_path in ctx.attr.files.items():
- target_files = target.files.to_list()
- if len(target_files) != 1:
- fail("Inputs to pkg_tar.files_map must describe exactly one file.")
- file_inputs += [target_files[0]]
- args += ["--file=%s=%s" % (target_files[0].path, f_dest_path)]
- if ctx.attr.modes:
- args += ["--modes=%s=%s" % (key, ctx.attr.modes[key]) for key in ctx.attr.modes]
- if ctx.attr.owners:
- args += ["--owners=%s=%s" % (key, ctx.attr.owners[key]) for key in ctx.attr.owners]
- if ctx.attr.ownernames:
- args += ["--owner_names=%s=%s" % (key, ctx.attr.ownernames[key])
- for key in ctx.attr.ownernames]
- if ctx.attr.empty_files:
- args += ["--empty_file=%s" % empty_file for empty_file in ctx.attr.empty_files]
- if ctx.attr.empty_dirs:
- args += ["--empty_dir=%s" % empty_dir for empty_dir in ctx.attr.empty_dirs]
- if ctx.attr.extension:
- dotPos = ctx.attr.extension.find('.')
- if dotPos > 0:
- dotPos += 1
- args += ["--compression=%s" % ctx.attr.extension[dotPos:]]
- args += ["--tar=" + f.path for f in ctx.files.deps]
- args += ["--link=%s:%s" % (k, ctx.attr.symlinks[k])
- for k in ctx.attr.symlinks]
- arg_file = ctx.actions.declare_file(ctx.label.name + ".args")
- ctx.actions.write(arg_file, "\n".join(args))
+ # Add runfiles if requested
+ if ctx.attr.include_runfiles:
+ for f in ctx.attr.srcs:
+ if hasattr(f, "default_runfiles"):
+ run_files = f.default_runfiles.files.to_list()
+ file_inputs += run_files
- ctx.actions.run_shell(
- command = "%s --flagfile=%s" % (build_tar.path, arg_file.path),
- inputs = file_inputs + ctx.files.deps + [arg_file, build_tar],
- outputs = [ctx.outputs.out],
- mnemonic="PackageTar",
- use_default_shell_env = True,
- )
+ args += [
+ "--file=%s=%s" % (f.path, dest_path(f, data_path))
+ for f in file_inputs
+ ]
+ for target, f_dest_path in ctx.attr.files.items():
+ target_files = target.files.to_list()
+ if len(target_files) != 1:
+ fail("Inputs to pkg_tar.files_map must describe exactly one file.")
+ file_inputs += [target_files[0]]
+ args += ["--file=%s=%s" % (target_files[0].path, f_dest_path)]
+ if ctx.attr.modes:
+ args += ["--modes=%s=%s" % (key, ctx.attr.modes[key]) for key in ctx.attr.modes]
+ if ctx.attr.owners:
+ args += ["--owners=%s=%s" % (key, ctx.attr.owners[key]) for key in ctx.attr.owners]
+ if ctx.attr.ownernames:
+ args += [
+ "--owner_names=%s=%s" % (key, ctx.attr.ownernames[key])
+ for key in ctx.attr.ownernames
+ ]
+ if ctx.attr.empty_files:
+ args += ["--empty_file=%s" % empty_file for empty_file in ctx.attr.empty_files]
+ if ctx.attr.empty_dirs:
+ args += ["--empty_dir=%s" % empty_dir for empty_dir in ctx.attr.empty_dirs]
+ if ctx.attr.extension:
+ dotPos = ctx.attr.extension.find(".")
+ if dotPos > 0:
+ dotPos += 1
+ args += ["--compression=%s" % ctx.attr.extension[dotPos:]]
+ args += ["--tar=" + f.path for f in ctx.files.deps]
+ args += [
+ "--link=%s:%s" % (k, ctx.attr.symlinks[k])
+ for k in ctx.attr.symlinks
+ ]
+ arg_file = ctx.actions.declare_file(ctx.label.name + ".args")
+ ctx.actions.write(arg_file, "\n".join(args))
+ ctx.actions.run_shell(
+ command = "%s --flagfile=%s" % (build_tar.path, arg_file.path),
+ inputs = file_inputs + ctx.files.deps + [arg_file, build_tar],
+ outputs = [ctx.outputs.out],
+ mnemonic = "PackageTar",
+ use_default_shell_env = True,
+ )
def _pkg_deb_impl(ctx):
- """The implementation for the pkg_deb rule."""
- files = [ctx.file.data]
- args = [
- "--output=" + ctx.outputs.deb.path,
- "--changes=" + ctx.outputs.changes.path,
- "--data=" + ctx.file.data.path,
- "--package=" + ctx.attr.package,
- "--architecture=" + ctx.attr.architecture,
- "--maintainer=" + ctx.attr.maintainer,
- ]
- if ctx.attr.preinst:
- args += ["--preinst=@" + ctx.file.preinst.path]
- files += [ctx.file.preinst]
- if ctx.attr.postinst:
- args += ["--postinst=@" + ctx.file.postinst.path]
- files += [ctx.file.postinst]
- if ctx.attr.prerm:
- args += ["--prerm=@" + ctx.file.prerm.path]
- files += [ctx.file.prerm]
- if ctx.attr.postrm:
- args += ["--postrm=@" + ctx.file.postrm.path]
- files += [ctx.file.postrm]
+ """The implementation for the pkg_deb rule."""
+ files = [ctx.file.data]
+ args = [
+ "--output=" + ctx.outputs.deb.path,
+ "--changes=" + ctx.outputs.changes.path,
+ "--data=" + ctx.file.data.path,
+ "--package=" + ctx.attr.package,
+ "--architecture=" + ctx.attr.architecture,
+ "--maintainer=" + ctx.attr.maintainer,
+ ]
+ if ctx.attr.preinst:
+ args += ["--preinst=@" + ctx.file.preinst.path]
+ files += [ctx.file.preinst]
+ if ctx.attr.postinst:
+ args += ["--postinst=@" + ctx.file.postinst.path]
+ files += [ctx.file.postinst]
+ if ctx.attr.prerm:
+ args += ["--prerm=@" + ctx.file.prerm.path]
+ files += [ctx.file.prerm]
+ if ctx.attr.postrm:
+ args += ["--postrm=@" + ctx.file.postrm.path]
+ files += [ctx.file.postrm]
- # Conffiles can be specified by a file or a string list
- if ctx.attr.conffiles_file:
- if ctx.attr.conffiles:
- fail("Both conffiles and conffiles_file attributes were specified")
- args += ["--conffile=@" + ctx.file.conffiles_file.path]
- files += [ctx.file.conffiles_file]
- elif ctx.attr.conffiles:
- args += ["--conffile=%s" % cf for cf in ctx.attr.conffiles]
+ # Conffiles can be specified by a file or a string list
+ if ctx.attr.conffiles_file:
+ if ctx.attr.conffiles:
+ fail("Both conffiles and conffiles_file attributes were specified")
+ args += ["--conffile=@" + ctx.file.conffiles_file.path]
+ files += [ctx.file.conffiles_file]
+ elif ctx.attr.conffiles:
+ args += ["--conffile=%s" % cf for cf in ctx.attr.conffiles]
- # Version and description can be specified by a file or inlined
- if ctx.attr.version_file:
- if ctx.attr.version:
- fail("Both version and version_file attributes were specified")
- args += ["--version=@" + ctx.file.version_file.path]
- files += [ctx.file.version_file]
- elif ctx.attr.version:
- args += ["--version=" + ctx.attr.version]
- else:
- fail("Neither version_file nor version attribute was specified")
+ # Version and description can be specified by a file or inlined
+ if ctx.attr.version_file:
+ if ctx.attr.version:
+ fail("Both version and version_file attributes were specified")
+ args += ["--version=@" + ctx.file.version_file.path]
+ files += [ctx.file.version_file]
+ elif ctx.attr.version:
+ args += ["--version=" + ctx.attr.version]
+ else:
+ fail("Neither version_file nor version attribute was specified")
- if ctx.attr.description_file:
- if ctx.attr.description:
- fail("Both description and description_file attributes were specified")
- args += ["--description=@" + ctx.file.description_file.path]
- files += [ctx.file.description_file]
- elif ctx.attr.description:
- args += ["--description=" + ctx.attr.description]
- else:
- fail("Neither description_file nor description attribute was specified")
+ if ctx.attr.description_file:
+ if ctx.attr.description:
+ fail("Both description and description_file attributes were specified")
+ args += ["--description=@" + ctx.file.description_file.path]
+ files += [ctx.file.description_file]
+ elif ctx.attr.description:
+ args += ["--description=" + ctx.attr.description]
+ else:
+ fail("Neither description_file nor description attribute was specified")
- # Built using can also be specified by a file or inlined (but is not mandatory)
- if ctx.attr.built_using_file:
- if ctx.attr.built_using:
- fail("Both build_using and built_using_file attributes were specified")
- args += ["--built_using=@" + ctx.file.built_using_file.path]
- files += [ctx.file.built_using_file]
- elif ctx.attr.built_using:
- args += ["--built_using=" + ctx.attr.built_using]
+ # Built using can also be specified by a file or inlined (but is not mandatory)
+ if ctx.attr.built_using_file:
+ if ctx.attr.built_using:
+ fail("Both build_using and built_using_file attributes were specified")
+ args += ["--built_using=@" + ctx.file.built_using_file.path]
+ files += [ctx.file.built_using_file]
+ elif ctx.attr.built_using:
+ args += ["--built_using=" + ctx.attr.built_using]
- if ctx.attr.priority:
- args += ["--priority=" + ctx.attr.priority]
- if ctx.attr.section:
- args += ["--section=" + ctx.attr.section]
- if ctx.attr.homepage:
- args += ["--homepage=" + ctx.attr.homepage]
+ if ctx.attr.priority:
+ args += ["--priority=" + ctx.attr.priority]
+ if ctx.attr.section:
+ args += ["--section=" + ctx.attr.section]
+ if ctx.attr.homepage:
+ args += ["--homepage=" + ctx.attr.homepage]
- args += ["--distribution=" + ctx.attr.distribution]
- args += ["--urgency=" + ctx.attr.urgency]
- args += ["--depends=" + d for d in ctx.attr.depends]
- args += ["--suggests=" + d for d in ctx.attr.suggests]
- args += ["--enhances=" + d for d in ctx.attr.enhances]
- args += ["--conflicts=" + d for d in ctx.attr.conflicts]
- args += ["--pre_depends=" + d for d in ctx.attr.predepends]
- args += ["--recommends=" + d for d in ctx.attr.recommends]
+ args += ["--distribution=" + ctx.attr.distribution]
+ args += ["--urgency=" + ctx.attr.urgency]
+ args += ["--depends=" + d for d in ctx.attr.depends]
+ args += ["--suggests=" + d for d in ctx.attr.suggests]
+ args += ["--enhances=" + d for d in ctx.attr.enhances]
+ args += ["--conflicts=" + d for d in ctx.attr.conflicts]
+ args += ["--pre_depends=" + d for d in ctx.attr.predepends]
+ args += ["--recommends=" + d for d in ctx.attr.recommends]
- ctx.action(
- executable = ctx.executable.make_deb,
- arguments = args,
- inputs = files,
- outputs = [ctx.outputs.deb, ctx.outputs.changes],
- mnemonic="MakeDeb"
- )
- ctx.action(
- command = "ln -s %s %s" % (ctx.outputs.deb.basename, ctx.outputs.out.path),
- inputs = [ctx.outputs.deb],
- outputs = [ctx.outputs.out])
+ ctx.action(
+ executable = ctx.executable.make_deb,
+ arguments = args,
+ inputs = files,
+ outputs = [ctx.outputs.deb, ctx.outputs.changes],
+ mnemonic = "MakeDeb",
+ )
+ ctx.action(
+ command = "ln -s %s %s" % (ctx.outputs.deb.basename, ctx.outputs.out.path),
+ inputs = [ctx.outputs.deb],
+ outputs = [ctx.outputs.out],
+ )
# A rule for creating a tar file, see README.md
_real_pkg_tar = rule(
implementation = _pkg_tar_impl,
attrs = {
"strip_prefix": attr.string(),
- "package_dir": attr.string(default="/"),
- "deps": attr.label_list(allow_files=tar_filetype),
- "srcs": attr.label_list(allow_files=True),
- "files": attr.label_keyed_string_dict(allow_files=True),
- "mode": attr.string(default="0555"),
+ "package_dir": attr.string(default = "/"),
+ "deps": attr.label_list(allow_files = tar_filetype),
+ "srcs": attr.label_list(allow_files = True),
+ "files": attr.label_keyed_string_dict(allow_files = True),
+ "mode": attr.string(default = "0555"),
"modes": attr.string_dict(),
- "owner": attr.string(default="0.0"),
- "ownername": attr.string(default="."),
+ "owner": attr.string(default = "0.0"),
+ "ownername": attr.string(default = "."),
"owners": attr.string_dict(),
"ownernames": attr.string_dict(),
- "extension": attr.string(default="tar"),
+ "extension": attr.string(default = "tar"),
"symlinks": attr.string_dict(),
"empty_files": attr.string_list(),
- "include_runfiles": attr.bool(default=False, mandatory=False),
+ "include_runfiles": attr.bool(default = False, mandatory = False),
"empty_dirs": attr.string_list(),
# Implicit dependencies.
"build_tar": attr.label(
- default=Label("//tools/build_defs/pkg:build_tar"),
- cfg="host",
- executable=True,
- allow_files=True)
+ default = Label("//tools/build_defs/pkg:build_tar"),
+ cfg = "host",
+ executable = True,
+ allow_files = True,
+ ),
},
outputs = {
"out": "%{name}.%{extension}",
},
- executable = False)
+ executable = False,
+)
def pkg_tar(**kwargs):
- # Compatibility with older versions of pkg_tar that define files as
- # a flat list of labels.
- if "srcs" not in kwargs:
- if "files" in kwargs:
- if not hasattr(kwargs["files"], "items"):
- label = "%s//%s:%s" % (native.repository_name(), native.package_name(), kwargs["name"])
- print("%s: you provided a non dictionary to the pkg_tar `files` attribute. " % (label,) +
- "This attribute was renamed to `srcs`. " +
- "Consider renaming it in your BUILD file.")
- kwargs["srcs"] = kwargs.pop("files")
- _real_pkg_tar(**kwargs)
+ # Compatibility with older versions of pkg_tar that define files as
+ # a flat list of labels.
+ if "srcs" not in kwargs:
+ if "files" in kwargs:
+ if not hasattr(kwargs["files"], "items"):
+ label = "%s//%s:%s" % (native.repository_name(), native.package_name(), kwargs["name"])
+ print("%s: you provided a non dictionary to the pkg_tar `files` attribute. " % (label,) +
+ "This attribute was renamed to `srcs`. " +
+ "Consider renaming it in your BUILD file.")
+ kwargs["srcs"] = kwargs.pop("files")
+ _real_pkg_tar(**kwargs)
# A rule for creating a deb file, see README.md
pkg_deb = rule(
implementation = _pkg_deb_impl,
attrs = {
- "data": attr.label(mandatory=True, allow_files=tar_filetype, single_file=True),
- "package": attr.string(mandatory=True),
- "architecture": attr.string(default="all"),
- "distribution": attr.string(default="unstable"),
- "urgency": attr.string(default="medium"),
- "maintainer": attr.string(mandatory=True),
- "preinst": attr.label(allow_files=True, single_file=True),
- "postinst": attr.label(allow_files=True, single_file=True),
- "prerm": attr.label(allow_files=True, single_file=True),
- "postrm": attr.label(allow_files=True, single_file=True),
- "conffiles_file": attr.label(allow_files=True, single_file=True),
- "conffiles": attr.string_list(default=[]),
- "version_file": attr.label(allow_files=True, single_file=True),
+ "data": attr.label(mandatory = True, allow_files = tar_filetype, single_file = True),
+ "package": attr.string(mandatory = True),
+ "architecture": attr.string(default = "all"),
+ "distribution": attr.string(default = "unstable"),
+ "urgency": attr.string(default = "medium"),
+ "maintainer": attr.string(mandatory = True),
+ "preinst": attr.label(allow_files = True, single_file = True),
+ "postinst": attr.label(allow_files = True, single_file = True),
+ "prerm": attr.label(allow_files = True, single_file = True),
+ "postrm": attr.label(allow_files = True, single_file = True),
+ "conffiles_file": attr.label(allow_files = True, single_file = True),
+ "conffiles": attr.string_list(default = []),
+ "version_file": attr.label(allow_files = True, single_file = True),
"version": attr.string(),
- "description_file": attr.label(allow_files=True, single_file=True),
+ "description_file": attr.label(allow_files = True, single_file = True),
"description": attr.string(),
- "built_using_file": attr.label(allow_files=True, single_file=True),
+ "built_using_file": attr.label(allow_files = True, single_file = True),
"built_using": attr.string(),
"priority": attr.string(),
"section": attr.string(),
"homepage": attr.string(),
- "depends": attr.string_list(default=[]),
- "suggests": attr.string_list(default=[]),
- "enhances": attr.string_list(default=[]),
- "conflicts": attr.string_list(default=[]),
- "predepends": attr.string_list(default=[]),
- "recommends": attr.string_list(default=[]),
+ "depends": attr.string_list(default = []),
+ "suggests": attr.string_list(default = []),
+ "enhances": attr.string_list(default = []),
+ "conflicts": attr.string_list(default = []),
+ "predepends": attr.string_list(default = []),
+ "recommends": attr.string_list(default = []),
# Implicit dependencies.
"make_deb": attr.label(
- default=Label("//tools/build_defs/pkg:make_deb"),
- cfg="host",
- executable=True,
- allow_files=True)
+ default = Label("//tools/build_defs/pkg:make_deb"),
+ cfg = "host",
+ executable = True,
+ allow_files = True,
+ ),
},
outputs = {
"out": "%{name}.deb",
"deb": "%{package}_%{version}_%{architecture}.deb",
- "changes": "%{package}_%{version}_%{architecture}.changes"
+ "changes": "%{package}_%{version}_%{architecture}.changes",
},
- executable = False)
+ executable = False,
+)
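A combined usage sketch for the two rules above; the names, version, and maintainer are hypothetical. pkg_deb consumes the tarball produced by pkg_tar, and exactly one of version/version_file (and of description/description_file) may be set:

    load("//tools/build_defs/pkg:pkg.bzl", "pkg_deb", "pkg_tar")

    pkg_tar(
        name = "app_data",            # hypothetical
        srcs = ["//app:binary"],      # hypothetical
        package_dir = "/usr/bin",
        mode = "0755",
        extension = "tar.gz",         # compression derived from the suffix
    )

    pkg_deb(
        name = "app_deb",             # hypothetical
        data = ":app_data",
        package = "app",
        version = "1.0",
        maintainer = "nobody@example.com",
        description = "Example package",
    )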
diff --git a/tools/build_defs/pkg/rpm.bzl b/tools/build_defs/pkg/rpm.bzl
index f5d257fea7..f3e006429c 100644
--- a/tools/build_defs/pkg/rpm.bzl
+++ b/tools/build_defs/pkg/rpm.bzl
@@ -18,109 +18,114 @@ rpm_filetype = [".rpm"]
spec_filetype = [".spec"]
def _pkg_rpm_impl(ctx):
- """Implements to pkg_rpm rule."""
-
- files = []
- args = ["--name=" + ctx.label.name]
-
- # Version can be specified by a file or inlined.
- if ctx.attr.version_file:
- if ctx.attr.version:
- fail("Both version and version_file attributes were specified")
- args += ["--version=@" + ctx.file.version_file.path]
- files += [ctx.file.version_file]
- elif ctx.attr.version:
- args += ["--version=" + ctx.attr.version]
-
- # Release can be specified by a file or inlined.
- if ctx.attr.release_file:
- if ctx.attr.release:
- fail("Both release and release_file attributes were specified")
- args += ["--release=@" + ctx.file.release_file.path]
- files += [ctx.file.release_file]
- elif ctx.attr.release:
- args += ["--release=" + ctx.attr.release]
-
- if ctx.attr.architecture:
- args += ["--arch=" + ctx.attr.architecture]
-
- if not ctx.attr.spec_file:
- fail("spec_file was not specified")
-
- # Expand the spec file template.
- spec_file = ctx.actions.declare_file("%s.spec" % ctx.label.name)
- # Create the default substitutions based on the data files.
- substitutions = {}
- for data_file in ctx.files.data:
- key = "{%s}" % data_file.basename
- substitutions[key] = data_file.path
- ctx.actions.expand_template(
- template = ctx.file.spec_file,
- output = spec_file,
- substitutions = substitutions)
- args += ["--spec_file=" + spec_file.path]
- files += [spec_file]
-
- args += ["--out_file=" + ctx.outputs.rpm.path]
-
- # Add data files.
- if ctx.file.changelog:
- files += [ctx.file.changelog]
- args += [ctx.file.changelog.path]
- files += ctx.files.data
-
- for f in ctx.files.data:
- args += [f.path]
-
- if ctx.attr.debug:
- args += ["--debug"]
-
- # Call the generator script.
- # TODO(katre): Generate a source RPM.
- ctx.actions.run(
- executable = ctx.executable._make_rpm,
- use_default_shell_env = True,
- arguments = args,
- inputs = files,
- outputs = [ctx.outputs.rpm],
- mnemonic = "MakeRpm")
-
- # Link the RPM to the expected output name.
- ctx.actions.run(
- executable = "ln",
- arguments = [
- "-s",
- ctx.outputs.rpm.basename,
- ctx.outputs.out.path,
- ],
- inputs = [ctx.outputs.rpm],
- outputs = [ctx.outputs.out])
-
- # Link the RPM to the RPM-recommended output name.
- if "rpm_nvra" in dir(ctx.outputs):
+ """Implements to pkg_rpm rule."""
+
+ files = []
+ args = ["--name=" + ctx.label.name]
+
+ # Version can be specified by a file or inlined.
+ if ctx.attr.version_file:
+ if ctx.attr.version:
+ fail("Both version and version_file attributes were specified")
+ args += ["--version=@" + ctx.file.version_file.path]
+ files += [ctx.file.version_file]
+ elif ctx.attr.version:
+ args += ["--version=" + ctx.attr.version]
+
+ # Release can be specified by a file or inlined.
+ if ctx.attr.release_file:
+ if ctx.attr.release:
+ fail("Both release and release_file attributes were specified")
+ args += ["--release=@" + ctx.file.release_file.path]
+ files += [ctx.file.release_file]
+ elif ctx.attr.release:
+ args += ["--release=" + ctx.attr.release]
+
+ if ctx.attr.architecture:
+ args += ["--arch=" + ctx.attr.architecture]
+
+ if not ctx.attr.spec_file:
+ fail("spec_file was not specified")
+
+ # Expand the spec file template.
+ spec_file = ctx.actions.declare_file("%s.spec" % ctx.label.name)
+
+ # Create the default substitutions based on the data files.
+ substitutions = {}
+ for data_file in ctx.files.data:
+ key = "{%s}" % data_file.basename
+ substitutions[key] = data_file.path
+ ctx.actions.expand_template(
+ template = ctx.file.spec_file,
+ output = spec_file,
+ substitutions = substitutions,
+ )
+ args += ["--spec_file=" + spec_file.path]
+ files += [spec_file]
+
+ args += ["--out_file=" + ctx.outputs.rpm.path]
+
+ # Add data files.
+ if ctx.file.changelog:
+ files += [ctx.file.changelog]
+ args += [ctx.file.changelog.path]
+ files += ctx.files.data
+
+ for f in ctx.files.data:
+ args += [f.path]
+
+ if ctx.attr.debug:
+ args += ["--debug"]
+
+ # Call the generator script.
+ # TODO(katre): Generate a source RPM.
+ ctx.actions.run(
+ executable = ctx.executable._make_rpm,
+ use_default_shell_env = True,
+ arguments = args,
+ inputs = files,
+ outputs = [ctx.outputs.rpm],
+ mnemonic = "MakeRpm",
+ )
+
+ # Link the RPM to the expected output name.
ctx.actions.run(
executable = "ln",
arguments = [
- "-s",
- ctx.outputs.rpm.basename,
- ctx.outputs.rpm_nvra.path,
+ "-s",
+ ctx.outputs.rpm.basename,
+ ctx.outputs.out.path,
],
inputs = [ctx.outputs.rpm],
- outputs = [ctx.outputs.rpm_nvra])
+ outputs = [ctx.outputs.out],
+ )
+
+ # Link the RPM to the RPM-recommended output name.
+ if "rpm_nvra" in dir(ctx.outputs):
+ ctx.actions.run(
+ executable = "ln",
+ arguments = [
+ "-s",
+ ctx.outputs.rpm.basename,
+ ctx.outputs.rpm_nvra.path,
+ ],
+ inputs = [ctx.outputs.rpm],
+ outputs = [ctx.outputs.rpm_nvra],
+ )
def _pkg_rpm_outputs(version, release):
- outputs = {
- "out": "%{name}.rpm",
- "rpm": "%{name}-%{architecture}.rpm",
- }
+ outputs = {
+ "out": "%{name}.rpm",
+ "rpm": "%{name}-%{architecture}.rpm",
+ }
- # The "rpm_nvra" output follows the recommended package naming convention of
- # Name-Version-Release.Arch.rpm
- # See http://ftp.rpm.org/max-rpm/ch-rpm-file-format.html
- if version and release:
- outputs["rpm_nvra"] = "%{name}-%{version}-%{release}.%{architecture}.rpm"
+ # The "rpm_nvra" output follows the recommended package naming convention of
+ # Name-Version-Release.Arch.rpm
+ # See http://ftp.rpm.org/max-rpm/ch-rpm-file-format.html
+ if version and release:
+ outputs["rpm_nvra"] = "%{name}-%{version}-%{release}.%{architecture}.rpm"
- return outputs
+ return outputs
# Define the rule.
pkg_rpm = rule(
@@ -144,9 +149,9 @@ pkg_rpm = rule(
mandatory = True,
allow_files = True,
),
- "release_file": attr.label(allow_files=True, single_file=True),
+ "release_file": attr.label(allow_files = True, single_file = True),
"release": attr.string(),
- "debug": attr.bool(default=False),
+ "debug": attr.bool(default = False),
# Implicit dependencies.
"_make_rpm": attr.label(
diff --git a/tools/build_defs/repo/git.bzl b/tools/build_defs/repo/git.bzl
index 4972037d47..1ea5804d56 100644
--- a/tools/build_defs/repo/git.bzl
+++ b/tools/build_defs/repo/git.bzl
@@ -13,35 +13,35 @@
# limitations under the License.
"""Rules for cloning external git repositories."""
-load("@bazel_tools//tools/build_defs/repo:utils.bzl", "workspace_and_buildfile", "patch")
-
+load("@bazel_tools//tools/build_defs/repo:utils.bzl", "patch", "workspace_and_buildfile")
def _clone_or_update(ctx):
- if ((not ctx.attr.tag and not ctx.attr.commit) or
- (ctx.attr.tag and ctx.attr.commit)):
- fail('Exactly one of commit and tag must be provided')
- shallow = ''
- if ctx.attr.commit:
- ref = ctx.attr.commit
- else:
- ref = 'tags/' + ctx.attr.tag
- shallow = '--depth=1'
- directory=str(ctx.path('.'))
- if ctx.attr.strip_prefix:
- directory = directory + "-tmp"
- if ctx.attr.shallow_since:
- if ctx.attr.tag:
- fail('shallow_since not allowed if a tag is specified; --depth=1 will be used for tags')
- shallow='--shallow-since=%s' % ctx.attr.shallow_since
-
- if (ctx.attr.verbose):
- print('git.bzl: Cloning or updating %s repository %s using strip_prefix of [%s]' %
- (' (%s)' % shallow if shallow else '',
- ctx.name,
- ctx.attr.strip_prefix if ctx.attr.strip_prefix else 'None',
- ))
- bash_exe = ctx.os.environ["BAZEL_SH"] if "BAZEL_SH" in ctx.os.environ else "bash"
- st = ctx.execute([bash_exe, '-c', """
+ if ((not ctx.attr.tag and not ctx.attr.commit) or
+ (ctx.attr.tag and ctx.attr.commit)):
+ fail("Exactly one of commit and tag must be provided")
+ shallow = ""
+ if ctx.attr.commit:
+ ref = ctx.attr.commit
+ else:
+ ref = "tags/" + ctx.attr.tag
+ shallow = "--depth=1"
+ directory = str(ctx.path("."))
+ if ctx.attr.strip_prefix:
+ directory = directory + "-tmp"
+ if ctx.attr.shallow_since:
+ if ctx.attr.tag:
+ fail("shallow_since not allowed if a tag is specified; --depth=1 will be used for tags")
+ shallow = "--shallow-since=%s" % ctx.attr.shallow_since
+
+ if (ctx.attr.verbose):
+ print("git.bzl: Cloning or updating %s repository %s using strip_prefix of [%s]" %
+ (
+ " (%s)" % shallow if shallow else "",
+ ctx.name,
+ ctx.attr.strip_prefix if ctx.attr.strip_prefix else "None",
+ ))
+ bash_exe = ctx.os.environ["BAZEL_SH"] if "BAZEL_SH" in ctx.os.environ else "bash"
+ st = ctx.execute([bash_exe, "-c", """
set -ex
( cd {working_dir} &&
if ! ( cd '{dir_link}' && [[ "$(git rev-parse --git-dir)" == '.git' ]] ) >/dev/null 2>&1; then
@@ -52,70 +52,67 @@ set -ex
git reset --hard {ref} || ((git fetch {shallow} origin {ref}:{ref} || git fetch origin {ref}:{ref}) && git reset --hard {ref})
git clean -xdf )
""".format(
- working_dir=ctx.path('.').dirname,
- dir_link=ctx.path('.'),
- directory=directory,
- remote=ctx.attr.remote,
- ref=ref,
- shallow=shallow,
- )])
-
- if st.return_code:
- fail('error cloning %s:\n%s' % (ctx.name, st.stderr))
-
- if ctx.attr.strip_prefix:
- dest_link="{}/{}".format(directory, ctx.attr.strip_prefix)
- if not ctx.path(dest_link).exists:
- fail("strip_prefix at {} does not exist in repo".format(ctx.attr.strip_prefix))
-
- ctx.symlink(dest_link, ctx.path('.'))
- if ctx.attr.init_submodules:
- st = ctx.execute([bash_exe, '-c', """
+ working_dir = ctx.path(".").dirname,
+ dir_link = ctx.path("."),
+ directory = directory,
+ remote = ctx.attr.remote,
+ ref = ref,
+ shallow = shallow,
+ )])
+
+ if st.return_code:
+ fail("error cloning %s:\n%s" % (ctx.name, st.stderr))
+
+ if ctx.attr.strip_prefix:
+ dest_link = "{}/{}".format(directory, ctx.attr.strip_prefix)
+ if not ctx.path(dest_link).exists:
+ fail("strip_prefix at {} does not exist in repo".format(ctx.attr.strip_prefix))
+
+ ctx.symlink(dest_link, ctx.path("."))
+ if ctx.attr.init_submodules:
+ st = ctx.execute([bash_exe, "-c", """
set -ex
( cd '{directory}'
git submodule update --init --checkout --force )
""".format(
- directory=ctx.path('.'),
- )])
- if st.return_code:
- fail('error updating submodules %s:\n%s' % (ctx.name, st.stderr))
-
+ directory = ctx.path("."),
+ )])
+ if st.return_code:
+ fail("error updating submodules %s:\n%s" % (ctx.name, st.stderr))
def _new_git_repository_implementation(ctx):
- if ((not ctx.attr.build_file and not ctx.attr.build_file_content) or
- (ctx.attr.build_file and ctx.attr.build_file_content)):
- fail('Exactly one of build_file and build_file_content must be provided.')
- _clone_or_update(ctx)
- workspace_and_buildfile(ctx)
- patch(ctx)
+ if ((not ctx.attr.build_file and not ctx.attr.build_file_content) or
+ (ctx.attr.build_file and ctx.attr.build_file_content)):
+ fail("Exactly one of build_file and build_file_content must be provided.")
+ _clone_or_update(ctx)
+ workspace_and_buildfile(ctx)
+ patch(ctx)
def _git_repository_implementation(ctx):
- _clone_or_update(ctx)
- patch(ctx)
-
+ _clone_or_update(ctx)
+ patch(ctx)
_common_attrs = {
- 'remote': attr.string(mandatory=True),
- 'commit': attr.string(default=''),
- 'shallow_since': attr.string(default=''),
- 'tag': attr.string(default=''),
- 'init_submodules': attr.bool(default=False),
- 'verbose': attr.bool(default=False),
- 'strip_prefix': attr.string(default=''),
- 'patches': attr.label_list(default=[]),
- 'patch_tool': attr.string(default="patch"),
- 'patch_cmds': attr.string_list(default=[]),
+ "remote": attr.string(mandatory = True),
+ "commit": attr.string(default = ""),
+ "shallow_since": attr.string(default = ""),
+ "tag": attr.string(default = ""),
+ "init_submodules": attr.bool(default = False),
+ "verbose": attr.bool(default = False),
+ "strip_prefix": attr.string(default = ""),
+ "patches": attr.label_list(default = []),
+ "patch_tool": attr.string(default = "patch"),
+ "patch_cmds": attr.string_list(default = []),
}
-
new_git_repository = repository_rule(
implementation = _new_git_repository_implementation,
attrs = dict(_common_attrs.items() + {
- 'build_file': attr.label(allow_single_file=True),
- 'build_file_content': attr.string(),
- 'workspace_file': attr.label(),
- 'workspace_file_content': attr.string(),
- }.items())
+ "build_file": attr.label(allow_single_file = True),
+ "build_file_content": attr.string(),
+ "workspace_file": attr.label(),
+ "workspace_file_content": attr.string(),
+ }.items()),
)
"""Clone an external git repository.
@@ -168,8 +165,8 @@ Args:
"""
git_repository = repository_rule(
- implementation=_git_repository_implementation,
- attrs=_common_attrs,
+ implementation = _git_repository_implementation,
+ attrs = _common_attrs,
)
"""Clone an external git repository.
diff --git a/tools/build_defs/repo/git_repositories.bzl b/tools/build_defs/repo/git_repositories.bzl
index 1519fbc1db..14d2815e37 100644
--- a/tools/build_defs/repo/git_repositories.bzl
+++ b/tools/build_defs/repo/git_repositories.bzl
@@ -20,13 +20,13 @@ load(
)
def git_repository(**kwargs):
- print("The git_repository rule has been moved. Please load " +
- "@bazel_tools//tools/build_defs/repo:git.bzl instead. This redirect " +
- "will be removed in the future.")
- original_git_repository(**kwargs)
+ print("The git_repository rule has been moved. Please load " +
+ "@bazel_tools//tools/build_defs/repo:git.bzl instead. This redirect " +
+ "will be removed in the future.")
+ original_git_repository(**kwargs)
def new_git_repository(**kwargs):
- print("The new_git_repository rule has been moved. Please load " +
- "@bazel_tools//tools/build_defs/repo:git.bzl instead. This redirect " +
- "will be removed in the future.")
- original_new_git_repository(**kwargs)
+ print("The new_git_repository rule has been moved. Please load " +
+ "@bazel_tools//tools/build_defs/repo:git.bzl instead. This redirect " +
+ "will be removed in the future.")
+ original_new_git_repository(**kwargs)
diff --git a/tools/build_defs/repo/http.bzl b/tools/build_defs/repo/http.bzl
index 80ab58e8e6..02879f4b7e 100644
--- a/tools/build_defs/repo/http.bzl
+++ b/tools/build_defs/repo/http.bzl
@@ -30,31 +30,36 @@ These rules are improved versions of the native http rules and will eventually
replace the native rules.
"""
-load("@bazel_tools//tools/build_defs/repo:utils.bzl", "workspace_and_buildfile", "patch")
+load("@bazel_tools//tools/build_defs/repo:utils.bzl", "patch", "workspace_and_buildfile")
def _http_archive_impl(ctx):
- """Implementation of the http_archive rule."""
- if not ctx.attr.url and not ctx.attr.urls:
- ctx.fail("At least of of url and urls must be provided")
- if ctx.attr.build_file and ctx.attr.build_file_content:
- ctx.fail("Only one of build_file and build_file_content can be provided.")
-
- if ctx.attr.build_file:
- print("ctx.attr.build_file %s, path %s" %
- (str(ctx.attr.build_file), ctx.path(ctx.attr.build_file)))
- for patchfile in ctx.attr.patches:
- print("patch file %s, path %s" % (patchfile, ctx.path(patchfile)))
-
- all_urls = []
- if ctx.attr.urls:
- all_urls = ctx.attr.urls
- if ctx.attr.url:
- all_urls = [ctx.attr.url] + all_urls
-
- ctx.download_and_extract(all_urls, "", ctx.attr.sha256, ctx.attr.type,
- ctx.attr.strip_prefix)
- patch(ctx)
- workspace_and_buildfile(ctx)
+ """Implementation of the http_archive rule."""
+ if not ctx.attr.url and not ctx.attr.urls:
+ ctx.fail("At least of of url and urls must be provided")
+ if ctx.attr.build_file and ctx.attr.build_file_content:
+ ctx.fail("Only one of build_file and build_file_content can be provided.")
+
+ if ctx.attr.build_file:
+ print("ctx.attr.build_file %s, path %s" %
+ (str(ctx.attr.build_file), ctx.path(ctx.attr.build_file)))
+ for patchfile in ctx.attr.patches:
+ print("patch file %s, path %s" % (patchfile, ctx.path(patchfile)))
+
+ all_urls = []
+ if ctx.attr.urls:
+ all_urls = ctx.attr.urls
+ if ctx.attr.url:
+ all_urls = [ctx.attr.url] + all_urls
+
+ ctx.download_and_extract(
+ all_urls,
+ "",
+ ctx.attr.sha256,
+ ctx.attr.type,
+ ctx.attr.strip_prefix,
+ )
+ patch(ctx)
+ workspace_and_buildfile(ctx)
_HTTP_FILE_BUILD = """
package(default_visibility = ["//visibility:public"])
@@ -66,11 +71,15 @@ filegroup(
"""
def _http_file_impl(ctx):
- """Implementation of the http_file rule."""
- ctx.download(ctx.attr.urls, "file/downloaded", ctx.attr.sha256,
- ctx.attr.executable)
- ctx.file("WORKSPACE", "workspace(name = \"{name}\")".format(name=ctx.name))
- ctx.file("file/BUILD", _HTTP_FILE_BUILD)
+ """Implementation of the http_file rule."""
+ ctx.download(
+ ctx.attr.urls,
+ "file/downloaded",
+ ctx.attr.sha256,
+ ctx.attr.executable,
+ )
+ ctx.file("WORKSPACE", "workspace(name = \"{name}\")".format(name = ctx.name))
+ ctx.file("file/BUILD", _HTTP_FILE_BUILD)
_HTTP_JAR_BUILD = """
package(default_visibility = ["//visibility:public"])
@@ -90,16 +99,15 @@ filegroup(
"""
def _http_jar_impl(ctx):
- """Implementation of the http_jar rule."""
- all_urls = []
- if ctx.attr.urls:
- all_urls = ctx.attr.urls
- if ctx.attr.url:
- all_urls = [ctx.attr.url] + all_urls
- ctx.download(all_urls, "jar/downloaded.jar", ctx.attr.sha256)
- ctx.file("WORKSPACE", "workspace(name = \"{name}\")".format(name=ctx.name))
- ctx.file("jar/BUILD", _HTTP_JAR_BUILD)
-
+ """Implementation of the http_jar rule."""
+ all_urls = []
+ if ctx.attr.urls:
+ all_urls = ctx.attr.urls
+ if ctx.attr.url:
+ all_urls = [ctx.attr.url] + all_urls
+ ctx.download(all_urls, "jar/downloaded.jar", ctx.attr.sha256)
+ ctx.file("WORKSPACE", "workspace(name = \"{name}\")".format(name = ctx.name))
+ ctx.file("jar/BUILD", _HTTP_JAR_BUILD)
_http_archive_attrs = {
"url": attr.string(),
@@ -109,14 +117,13 @@ _http_archive_attrs = {
"type": attr.string(),
"build_file": attr.label(),
"build_file_content": attr.string(),
- "patches": attr.label_list(default=[]),
- "patch_tool": attr.string(default="patch"),
- "patch_cmds": attr.string_list(default=[]),
+ "patches": attr.label_list(default = []),
+ "patch_tool": attr.string(default = "patch"),
+ "patch_cmds": attr.string_list(default = []),
"workspace_file": attr.label(),
"workspace_file_content": attr.string(),
}
-
http_archive = repository_rule(
implementation = _http_archive_impl,
attrs = _http_archive_attrs,
@@ -235,13 +242,12 @@ Args:
patch_cmds: sequence of commands to be applied after patches are applied.
"""
-
http_file = repository_rule(
implementation = _http_file_impl,
attrs = {
"executable": attr.bool(),
"sha256": attr.string(),
- "urls": attr.string_list(mandatory=True),
+ "urls": attr.string_list(mandatory = True),
},
)
"""Downloads a file from a URL and makes it available to be used as a file
diff --git a/tools/build_defs/repo/java.bzl b/tools/build_defs/repo/java.bzl
index 9803658204..1e40ba4a9d 100644
--- a/tools/build_defs/repo/java.bzl
+++ b/tools/build_defs/repo/java.bzl
@@ -172,8 +172,8 @@ reasonably expected to already be provided.
load("@bazel_tools//tools/build_defs/repo:jvm.bzl", "jvm_import_external")
def java_import_external(jar_sha256, **kwargs):
- jvm_import_external(
- rule_name = "java_import",
- jar_sha256 = jar_sha256,
- **kwargs
- )
+ jvm_import_external(
+ rule_name = "java_import",
+ jar_sha256 = jar_sha256,
+ **kwargs
+ )
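java_import_external is a thin shim that fixes rule_name to "java_import" and forwards everything else to jvm_import_external, so call sites look like this sketch; the name, URL, and checksum are placeholders:

    # Hypothetical WORKSPACE usage; values are placeholders.
    load("@bazel_tools//tools/build_defs/repo:java.bzl", "java_import_external")

    java_import_external(
        name = "com_example_lib",
        jar_sha256 = "0000000000000000000000000000000000000000000000000000000000000000",
        jar_urls = ["https://repo.example.com/com/example/lib/1.0/lib-1.0.jar"],
        licenses = ["notice"],  # mandatory in jvm_import_external
    )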
diff --git a/tools/build_defs/repo/jvm.bzl b/tools/build_defs/repo/jvm.bzl
index f0d80ffe3e..b0acd2417f 100644
--- a/tools/build_defs/repo/jvm.bzl
+++ b/tools/build_defs/repo/jvm.bzl
@@ -44,124 +44,134 @@ _PASS_PROPS = (
)
def _jvm_import_external(repository_ctx):
- """Implementation of `java_import_external` rule."""
- if (repository_ctx.attr.generated_linkable_rule_name and
- not repository_ctx.attr.neverlink):
- fail("Only use generated_linkable_rule_name if neverlink is set")
- name = repository_ctx.attr.generated_rule_name or repository_ctx.name
- urls = repository_ctx.attr.jar_urls
- sha = repository_ctx.attr.jar_sha256
- path = repository_ctx.name + ".jar"
- for url in urls:
- if url.endswith(".jar"):
- path = url[url.rindex("/") + 1:]
- break
- srcurls = repository_ctx.attr.srcjar_urls
- srcsha = repository_ctx.attr.srcjar_sha256
- srcpath = repository_ctx.name + "-src.jar" if srcurls else ""
- for url in srcurls:
- if url.endswith(".jar"):
- srcpath = url[url.rindex("/") + 1:].replace("-sources.jar", "-src.jar")
- break
- lines = [_HEADER, ""]
- if repository_ctx.attr.rule_load:
- lines.append(repository_ctx.attr.rule_load)
+ """Implementation of `java_import_external` rule."""
+ if (repository_ctx.attr.generated_linkable_rule_name and
+ not repository_ctx.attr.neverlink):
+ fail("Only use generated_linkable_rule_name if neverlink is set")
+ name = repository_ctx.attr.generated_rule_name or repository_ctx.name
+ urls = repository_ctx.attr.jar_urls
+ sha = repository_ctx.attr.jar_sha256
+ path = repository_ctx.name + ".jar"
+ for url in urls:
+ if url.endswith(".jar"):
+ path = url[url.rindex("/") + 1:]
+ break
+ srcurls = repository_ctx.attr.srcjar_urls
+ srcsha = repository_ctx.attr.srcjar_sha256
+ srcpath = repository_ctx.name + "-src.jar" if srcurls else ""
+ for url in srcurls:
+ if url.endswith(".jar"):
+ srcpath = url[url.rindex("/") + 1:].replace("-sources.jar", "-src.jar")
+ break
+ lines = [_HEADER, ""]
+ if repository_ctx.attr.rule_load:
+ lines.append(repository_ctx.attr.rule_load)
+ lines.append("")
+ if repository_ctx.attr.default_visibility:
+ lines.append("package(default_visibility = %s)" % (
+ repository_ctx.attr.default_visibility
+ ))
+ lines.append("")
+ lines.append("licenses(%s)" % repr(repository_ctx.attr.licenses))
lines.append("")
- if repository_ctx.attr.default_visibility:
- lines.append("package(default_visibility = %s)" % (
- repository_ctx.attr.default_visibility))
- lines.append("")
- lines.append("licenses(%s)" % repr(repository_ctx.attr.licenses))
- lines.append("")
- lines.extend(_serialize_given_rule_import(
- repository_ctx.attr.rule_name, name, path, srcpath, repository_ctx.attr, _PASS_PROPS, repository_ctx.attr.additional_rule_attrs))
- if (repository_ctx.attr.neverlink and
- repository_ctx.attr.generated_linkable_rule_name):
lines.extend(_serialize_given_rule_import(
repository_ctx.attr.rule_name,
- repository_ctx.attr.generated_linkable_rule_name,
+ name,
path,
srcpath,
repository_ctx.attr,
- [p for p in _PASS_PROPS if p != "neverlink"],
- repository_ctx.attr.additional_rule_attrs))
- extra = repository_ctx.attr.extra_build_file_content
- if extra:
- lines.append(extra)
- if not extra.endswith("\n"):
- lines.append("")
- repository_ctx.download(urls, path, sha)
- if srcurls:
- repository_ctx.download(srcurls, srcpath, srcsha)
- repository_ctx.file("BUILD", "\n".join(lines))
- repository_ctx.file("jar/BUILD", "\n".join([
- _HEADER,
- "",
- "package(default_visibility = %r)" % (
- repository_ctx.attr.visibility or
- repository_ctx.attr.default_visibility),
- "",
- "alias(",
- " name = \"jar\",",
- " actual = \"@%s\"," % repository_ctx.name,
- ")",
- "",
- ]))
+ _PASS_PROPS,
+ repository_ctx.attr.additional_rule_attrs,
+ ))
+ if (repository_ctx.attr.neverlink and
+ repository_ctx.attr.generated_linkable_rule_name):
+ lines.extend(_serialize_given_rule_import(
+ repository_ctx.attr.rule_name,
+ repository_ctx.attr.generated_linkable_rule_name,
+ path,
+ srcpath,
+ repository_ctx.attr,
+ [p for p in _PASS_PROPS if p != "neverlink"],
+ repository_ctx.attr.additional_rule_attrs,
+ ))
+ extra = repository_ctx.attr.extra_build_file_content
+ if extra:
+ lines.append(extra)
+ if not extra.endswith("\n"):
+ lines.append("")
+ repository_ctx.download(urls, path, sha)
+ if srcurls:
+ repository_ctx.download(srcurls, srcpath, srcsha)
+ repository_ctx.file("BUILD", "\n".join(lines))
+ repository_ctx.file("jar/BUILD", "\n".join([
+ _HEADER,
+ "",
+ "package(default_visibility = %r)" % (
+ repository_ctx.attr.visibility or
+ repository_ctx.attr.default_visibility
+ ),
+ "",
+ "alias(",
+ " name = \"jar\",",
+ " actual = \"@%s\"," % repository_ctx.name,
+ ")",
+ "",
+ ]))
def _convert_to_url(artifact, server_urls):
parts = artifact.split(":")
- group_id_part = parts[0].replace(".","/")
+ group_id_part = parts[0].replace(".", "/")
artifact_id = parts[1]
version = parts[2]
packaging = "jar"
classifier_part = ""
if len(parts) == 4:
- packaging = parts[2]
- version = parts[3]
+ packaging = parts[2]
+ version = parts[3]
elif len(parts) == 5:
- packaging = parts[2]
- classifier_part = "-"+parts[3]
- version = parts[4]
+ packaging = parts[2]
+ classifier_part = "-" + parts[3]
+ version = parts[4]
final_name = artifact_id + "-" + version + classifier_part + "." + packaging
- url_suffix = group_id_part+"/"+artifact_id + "/" + version + "/" + final_name
+ url_suffix = group_id_part + "/" + artifact_id + "/" + version + "/" + final_name
urls = []
for server_url in server_urls:
- urls.append(_concat_with_needed_slash(server_url, url_suffix))
+ urls.append(_concat_with_needed_slash(server_url, url_suffix))
return urls
def _concat_with_needed_slash(server_url, url_suffix):
- if server_url.endswith("/"):
- return server_url + url_suffix
- else:
- return server_url + "/" + url_suffix
+ if server_url.endswith("/"):
+ return server_url + url_suffix
+ else:
+ return server_url + "/" + url_suffix
def _serialize_given_rule_import(rule_name, name, path, srcpath, attrs, props, additional_rule_attrs):
- lines = [
- "%s(" % rule_name,
- " name = %s," % repr(name),
- " jars = [%s]," % repr(path),
- ]
- if srcpath:
- lines.append(" srcjar = %s," % repr(srcpath))
- for prop in props:
- value = getattr(attrs, prop, None)
- if value:
- if prop.endswith("_"):
- prop = prop[:-1]
- lines.append(" %s = %s," % (prop, repr(value)))
- for attr_key in additional_rule_attrs:
- lines.append(" %s = %s," % (attr_key, additional_rule_attrs[attr_key]))
- lines.append(")")
- lines.append("")
- return lines
+ lines = [
+ "%s(" % rule_name,
+ " name = %s," % repr(name),
+ " jars = [%s]," % repr(path),
+ ]
+ if srcpath:
+ lines.append(" srcjar = %s," % repr(srcpath))
+ for prop in props:
+ value = getattr(attrs, prop, None)
+ if value:
+ if prop.endswith("_"):
+ prop = prop[:-1]
+ lines.append(" %s = %s," % (prop, repr(value)))
+ for attr_key in additional_rule_attrs:
+ lines.append(" %s = %s," % (attr_key, additional_rule_attrs[attr_key]))
+ lines.append(")")
+ lines.append("")
+ return lines
jvm_import_external = repository_rule(
- implementation=_jvm_import_external,
- attrs={
- "rule_name": attr.string(mandatory=True),
- "licenses": attr.string_list(mandatory=True, allow_empty=False),
- "jar_urls": attr.string_list(mandatory=True, allow_empty=False),
+ implementation = _jvm_import_external,
+ attrs = {
+ "rule_name": attr.string(mandatory = True),
+ "licenses": attr.string_list(mandatory = True, allow_empty = False),
+ "jar_urls": attr.string_list(mandatory = True, allow_empty = False),
"jar_sha256": attr.string(),
"rule_load": attr.string(),
"additional_rule_attrs": attr.string_dict(),
@@ -174,12 +184,13 @@ jvm_import_external = repository_rule(
"neverlink": attr.bool(),
"generated_rule_name": attr.string(),
"generated_linkable_rule_name": attr.string(),
- "default_visibility": attr.string_list(default=["//visibility:public"]),
+ "default_visibility": attr.string_list(default = ["//visibility:public"]),
"extra_build_file_content": attr.string(),
- })
+ },
+)
def jvm_maven_import_external(artifact, server_urls, **kwargs):
- jvm_import_external(
- jar_urls = _convert_to_url(artifact, server_urls),
- **kwargs
- )
\ No newline at end of file
+ jvm_import_external(
+ jar_urls = _convert_to_url(artifact, server_urls),
+ **kwargs
+ )
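To trace _convert_to_url on a concrete coordinate: a hand-worked sketch assuming a Maven Central style server URL (the coordinate and repository name are illustrative):

    load("@bazel_tools//tools/build_defs/repo:jvm.bzl", "jvm_maven_import_external")

    jvm_maven_import_external(
        name = "com_google_guava_guava",
        artifact = "com.google.guava:guava:18.0",
        server_urls = ["https://repo1.maven.org/maven2/"],
        rule_name = "java_import",
        licenses = ["notice"],
    )
    # _convert_to_url splits the coordinate into group/artifact/version, so the
    # jar is fetched from:
    #   https://repo1.maven.org/maven2/com/google/guava/guava/18.0/guava-18.0.jar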
diff --git a/tools/build_defs/repo/maven_rules.bzl b/tools/build_defs/repo/maven_rules.bzl
index 953532dd98..8e18b9ddd2 100644
--- a/tools/build_defs/repo/maven_rules.bzl
+++ b/tools/build_defs/repo/maven_rules.bzl
@@ -44,101 +44,96 @@ DEPS = ["mvn", "openssl", "awk"]
MVN_PLUGIN = "org.apache.maven.plugins:maven-dependency-plugin:2.10"
-
def _execute(ctx, command):
- return ctx.execute(["bash", "-c", """
+ return ctx.execute(["bash", "-c", """
set -ex
%s""" % command])
-
# Fail fast
def _check_dependencies(ctx):
- for dep in DEPS:
- if ctx.which(dep) == None:
- fail("%s requires %s as a dependency. Please check your PATH." % (ctx.name, dep))
-
+ for dep in DEPS:
+ if ctx.which(dep) == None:
+ fail("%s requires %s as a dependency. Please check your PATH." % (ctx.name, dep))
def _validate_attr(ctx):
- if hasattr(ctx.attr, "server") and (ctx.attr.server != None):
- fail("%s specifies a 'server' attribute which is currently not supported." % ctx.name)
-
+ if hasattr(ctx.attr, "server") and (ctx.attr.server != None):
+ fail("%s specifies a 'server' attribute which is currently not supported." % ctx.name)
def _artifact_dir(coordinates):
- return "/".join(coordinates.group_id.split(".") +
- [coordinates.artifact_id, coordinates.version])
-
+ return "/".join(coordinates.group_id.split(".") +
+ [coordinates.artifact_id, coordinates.version])
# Creates a struct containing the different parts of an artifact's FQN.
# If the fully_qualified_name does not specify a packaging and the rule does
# not set a default packaging then JAR is assumed.
-def _create_coordinates(fully_qualified_name, packaging="jar"):
- parts = fully_qualified_name.split(":")
- classifier = None
-
- if len(parts) == 3:
- group_id, artifact_id, version = parts
- # Updates the FQN with the default packaging so that the Maven plugin
- # downloads the correct artifact.
- fully_qualified_name = "%s:%s" % (fully_qualified_name, packaging)
- elif len(parts) == 4:
- group_id, artifact_id, version, packaging = parts
- elif len(parts) == 5:
- group_id, artifact_id, version, packaging, classifier = parts
- else:
- fail("Invalid fully qualified name for artifact: %s" % fully_qualified_name)
-
- return struct(
- fully_qualified_name = fully_qualified_name,
- group_id = group_id,
- artifact_id = artifact_id,
- packaging = packaging,
- classifier = classifier,
- version = version,
- )
-
+def _create_coordinates(fully_qualified_name, packaging = "jar"):
+ parts = fully_qualified_name.split(":")
+ classifier = None
+
+ if len(parts) == 3:
+ group_id, artifact_id, version = parts
+
+ # Updates the FQN with the default packaging so that the Maven plugin
+ # downloads the correct artifact.
+ fully_qualified_name = "%s:%s" % (fully_qualified_name, packaging)
+ elif len(parts) == 4:
+ group_id, artifact_id, version, packaging = parts
+ elif len(parts) == 5:
+ group_id, artifact_id, version, packaging, classifier = parts
+ else:
+ fail("Invalid fully qualified name for artifact: %s" % fully_qualified_name)
+
+ return struct(
+ fully_qualified_name = fully_qualified_name,
+ group_id = group_id,
+ artifact_id = artifact_id,
+ packaging = packaging,
+ classifier = classifier,
+ version = version,
+ )
# NOTE: Please use this method to define ALL paths that the maven_*
# rules use. Doing otherwise will lead to inconsistencies and/or errors.
#
# CONVENTION: *_path refers to files, *_dir refers to directories.
def _create_paths(ctx, coordinates):
- """Creates a struct that contains the paths to create the cache WORKSPACE"""
-
- # e.g. guava-18.0.jar
- artifact_filename = "%s-%s" % (coordinates.artifact_id,
- coordinates.version)
- if coordinates.classifier:
- artifact_filename += "-" + coordinates.classifier
- artifact_filename += "." + coordinates.packaging
- sha1_filename = "%s.sha1" % artifact_filename
+ """Creates a struct that contains the paths to create the cache WORKSPACE"""
- # e.g. com/google/guava/guava/18.0
- relative_artifact_dir = _artifact_dir(coordinates)
-
- # The symlink to the actual artifact is stored in this dir, along with the
- # BUILD file. The dir has the same name as the packaging to support syntax
- # like @guava//jar and @google_play_services//aar.
- symlink_dir = coordinates.packaging
-
- m2 = ".m2"
- m2_repo = "/".join([m2, "repository"]) # .m2/repository
-
- return struct(
- artifact_filename = artifact_filename,
- sha1_filename = sha1_filename,
-
- symlink_dir = ctx.path(symlink_dir),
-
- # e.g. external/com_google_guava_guava/ \
- # .m2/repository/com/google/guava/guava/18.0/guava-18.0.jar
- artifact_path = ctx.path("/".join([m2_repo, relative_artifact_dir, artifact_filename])),
- artifact_dir = ctx.path("/".join([m2_repo, relative_artifact_dir])),
-
- sha1_path = ctx.path("/".join([m2_repo, relative_artifact_dir, sha1_filename])),
-
- # e.g. external/com_google_guava_guava/jar/guava-18.0.jar
- symlink_artifact_path = ctx.path("/".join([symlink_dir, artifact_filename])),
- )
+ # e.g. guava-18.0.jar
+ artifact_filename = "%s-%s" % (
+ coordinates.artifact_id,
+ coordinates.version,
+ )
+ if coordinates.classifier:
+ artifact_filename += "-" + coordinates.classifier
+ artifact_filename += "." + coordinates.packaging
+ sha1_filename = "%s.sha1" % artifact_filename
+
+ # e.g. com/google/guava/guava/18.0
+ relative_artifact_dir = _artifact_dir(coordinates)
+
+ # The symlink to the actual artifact is stored in this dir, along with the
+ # BUILD file. The dir has the same name as the packaging to support syntax
+ # like @guava//jar and @google_play_services//aar.
+ symlink_dir = coordinates.packaging
+
+ m2 = ".m2"
+ m2_repo = "/".join([m2, "repository"]) # .m2/repository
+
+ return struct(
+ artifact_filename = artifact_filename,
+ sha1_filename = sha1_filename,
+ symlink_dir = ctx.path(symlink_dir),
+
+ # e.g. external/com_google_guava_guava/ \
+ # .m2/repository/com/google/guava/guava/18.0/guava-18.0.jar
+ artifact_path = ctx.path("/".join([m2_repo, relative_artifact_dir, artifact_filename])),
+ artifact_dir = ctx.path("/".join([m2_repo, relative_artifact_dir])),
+ sha1_path = ctx.path("/".join([m2_repo, relative_artifact_dir, sha1_filename])),
+
+ # e.g. external/com_google_guava_guava/jar/guava-18.0.jar
+ symlink_artifact_path = ctx.path("/".join([symlink_dir, artifact_filename])),
+ )
_maven_jar_build_file_template = """
# DO NOT EDIT: automatically generated BUILD file for maven_jar rule {rule_name}
@@ -178,98 +173,95 @@ filegroup(
# Provides the syntax "@jar_name//jar" for dependencies
def _generate_build_file(ctx, template, paths):
- deps_string = "\n".join(["'%s'," % dep for dep in ctx.attr.deps])
- contents = template.format(
- rule_name = ctx.name,
- artifact_filename = paths.artifact_filename,
- deps_string = deps_string)
- ctx.file('%s/BUILD' % paths.symlink_dir, contents, False)
-
+ deps_string = "\n".join(["'%s'," % dep for dep in ctx.attr.deps])
+ contents = template.format(
+ rule_name = ctx.name,
+ artifact_filename = paths.artifact_filename,
+ deps_string = deps_string,
+ )
+ ctx.file("%s/BUILD" % paths.symlink_dir, contents, False)
def _file_exists(ctx, filename):
- return _execute(ctx, "[[ -f %s ]] && exit 0 || exit 1" % filename).return_code == 0
-
+ return _execute(ctx, "[[ -f %s ]] && exit 0 || exit 1" % filename).return_code == 0
# Constructs the maven command to retrieve the dependencies from remote
# repositories using the dependency plugin, and executes it.
def _mvn_download(ctx, paths, fully_qualified_name):
- # If a custom settings file exists, we'll use that. If not, Maven will use the default settings.
- mvn_flags = ""
- if hasattr(ctx.attr, "settings") and ctx.attr.settings != None:
- ctx.symlink(ctx.attr.settings, "settings.xml")
- mvn_flags += "-s %s " % "settings.xml"
-
- # dependency:get step. Downloads the artifact into the local repository.
- mvn_get = MVN_PLUGIN + ":get"
- mvn_artifact = "-Dartifact=%s" % fully_qualified_name
- mvn_transitive = "-Dtransitive=false"
- if hasattr(ctx.attr, "repository") and ctx.attr.repository != "":
- mvn_flags += "-Dmaven.repo.remote=%s " % ctx.attr.repository
- command = " ".join(["mvn", mvn_flags, mvn_get, mvn_transitive, mvn_artifact])
- exec_result = _execute(ctx, command)
- if exec_result.return_code != 0:
- fail("%s\n%s\nFailed to fetch Maven dependency" % (exec_result.stdout, exec_result.stderr))
-
- # dependency:copy step. Moves the artifact from the local repository into //external.
- mvn_copy = MVN_PLUGIN + ":copy"
- mvn_output_dir = "-DoutputDirectory=%s" % paths.artifact_dir
- command = " ".join(["mvn", mvn_flags, mvn_copy, mvn_artifact, mvn_output_dir])
- exec_result = _execute(ctx, command)
- if exec_result.return_code != 0:
- fail("%s\n%s\nFailed to fetch Maven dependency" % (exec_result.stdout, exec_result.stderr))
-
+ # If a custom settings file exists, we'll use that. If not, Maven will use the default settings.
+ mvn_flags = ""
+ if hasattr(ctx.attr, "settings") and ctx.attr.settings != None:
+ ctx.symlink(ctx.attr.settings, "settings.xml")
+ mvn_flags += "-s %s " % "settings.xml"
+
+ # dependency:get step. Downloads the artifact into the local repository.
+ mvn_get = MVN_PLUGIN + ":get"
+ mvn_artifact = "-Dartifact=%s" % fully_qualified_name
+ mvn_transitive = "-Dtransitive=false"
+ if hasattr(ctx.attr, "repository") and ctx.attr.repository != "":
+ mvn_flags += "-Dmaven.repo.remote=%s " % ctx.attr.repository
+ command = " ".join(["mvn", mvn_flags, mvn_get, mvn_transitive, mvn_artifact])
+ exec_result = _execute(ctx, command)
+ if exec_result.return_code != 0:
+ fail("%s\n%s\nFailed to fetch Maven dependency" % (exec_result.stdout, exec_result.stderr))
+
+ # dependency:copy step. Moves the artifact from the local repository into //external.
+ mvn_copy = MVN_PLUGIN + ":copy"
+ mvn_output_dir = "-DoutputDirectory=%s" % paths.artifact_dir
+ command = " ".join(["mvn", mvn_flags, mvn_copy, mvn_artifact, mvn_output_dir])
+ exec_result = _execute(ctx, command)
+ if exec_result.return_code != 0:
+ fail("%s\n%s\nFailed to fetch Maven dependency" % (exec_result.stdout, exec_result.stderr))
def _check_sha1(ctx, paths, sha1):
- actual_sha1 = _execute(ctx, "openssl sha1 %s | awk '{printf $2}'" % paths.artifact_path).stdout
-
- if sha1.lower() != actual_sha1.lower():
- fail(("{rule_name} has SHA-1 of {actual_sha1}, " +
- "does not match expected SHA-1 ({expected_sha1})").format(
- rule_name = ctx.name,
- expected_sha1 = sha1,
- actual_sha1 = actual_sha1))
- else:
- _execute(ctx, "echo %s %s > %s" % (sha1, paths.artifact_path, paths.sha1_path))
-
+ actual_sha1 = _execute(ctx, "openssl sha1 %s | awk '{printf $2}'" % paths.artifact_path).stdout
+
+ if sha1.lower() != actual_sha1.lower():
+ fail(("{rule_name} has SHA-1 of {actual_sha1}, " +
+ "does not match expected SHA-1 ({expected_sha1})").format(
+ rule_name = ctx.name,
+ expected_sha1 = sha1,
+ actual_sha1 = actual_sha1,
+ ))
+ else:
+ _execute(ctx, "echo %s %s > %s" % (sha1, paths.artifact_path, paths.sha1_path))
def _maven_artifact_impl(ctx, default_rule_packaging, build_file_template):
- # Ensure that we have all of the dependencies installed
- _check_dependencies(ctx)
-
- # Provide warnings and errors about attributes
- _validate_attr(ctx)
+ # Ensure that we have all of the dependencies installed
+ _check_dependencies(ctx)
- # Create a struct to contain the different parts of the artifact FQN
- coordinates = _create_coordinates(ctx.attr.artifact, default_rule_packaging)
+ # Provide warnings and errors about attributes
+ _validate_attr(ctx)
- # Create a struct to store the relative and absolute paths needed for this rule
- paths = _create_paths(ctx, coordinates)
+ # Create a struct to contain the different parts of the artifact FQN
+ coordinates = _create_coordinates(ctx.attr.artifact, default_rule_packaging)
- _generate_build_file(
- ctx = ctx,
- template = build_file_template,
- paths = paths,
- )
+ # Create a struct to store the relative and absolute paths needed for this rule
+ paths = _create_paths(ctx, coordinates)
- if _execute(ctx, "mkdir -p %s" % paths.symlink_dir).return_code != 0:
- fail("%s: Failed to create dirs in execution root.\n" % ctx.name)
+ _generate_build_file(
+ ctx = ctx,
+ template = build_file_template,
+ paths = paths,
+ )
- # Download the artifact
- _mvn_download(
- ctx = ctx,
- paths = paths,
- fully_qualified_name = coordinates.fully_qualified_name
- )
+ if _execute(ctx, "mkdir -p %s" % paths.symlink_dir).return_code != 0:
+ fail("%s: Failed to create dirs in execution root.\n" % ctx.name)
- if (ctx.attr.sha1 != ""):
- _check_sha1(
+ # Download the artifact
+ _mvn_download(
ctx = ctx,
paths = paths,
- sha1 = ctx.attr.sha1,
+ fully_qualified_name = coordinates.fully_qualified_name,
)
- ctx.symlink(paths.artifact_path, paths.symlink_artifact_path)
+ if (ctx.attr.sha1 != ""):
+ _check_sha1(
+ ctx = ctx,
+ paths = paths,
+ sha1 = ctx.attr.sha1,
+ )
+ ctx.symlink(paths.artifact_path, paths.symlink_artifact_path)
_common_maven_rule_attrs = {
"artifact": attr.string(
@@ -285,11 +277,10 @@ _common_maven_rule_attrs = {
}
def _maven_jar_impl(ctx):
- _maven_artifact_impl(ctx, "jar", _maven_jar_build_file_template)
-
+ _maven_artifact_impl(ctx, "jar", _maven_jar_build_file_template)
def _maven_aar_impl(ctx):
- _maven_artifact_impl(ctx, "aar", _maven_aar_build_file_template)
+ _maven_artifact_impl(ctx, "aar", _maven_aar_build_file_template)
maven_jar = repository_rule(
implementation = _maven_jar_impl,
@@ -298,18 +289,17 @@ maven_jar = repository_rule(
"repository": attr.string(default = ""),
"server": attr.label(default = None),
}.items()),
- local=False,
+ local = False,
)
maven_aar = repository_rule(
- implementation=_maven_aar_impl,
- attrs=_common_maven_rule_attrs,
- local=False,
+ implementation = _maven_aar_impl,
+ attrs = _common_maven_rule_attrs,
+ local = False,
)
-
def _maven_dependency_plugin_impl(ctx):
- _BUILD_FILE = """
+ _BUILD_FILE = """
# DO NOT EDIT: automatically generated BUILD file for maven_dependency_plugin
filegroup(
@@ -318,9 +308,9 @@ filegroup(
visibility = ['//visibility:public']
)
"""
- ctx.file("BUILD", _BUILD_FILE, False)
+ ctx.file("BUILD", _BUILD_FILE, False)
- _SETTINGS_XML = """
+ _SETTINGS_XML = """
<!-- # DO NOT EDIT: automatically generated settings.xml for maven_dependency_plugin -->
<settings xmlns="http://maven.apache.org/SETTINGS/1.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
@@ -336,27 +326,25 @@ filegroup(
</mirrors>
</settings>
""".format(
- localRepository = ctx.path("repository"),
- mirror = MAVEN_CENTRAL_URL,
- )
- settings_path = ctx.path("settings.xml")
- ctx.file("%s" % settings_path, _SETTINGS_XML, False)
-
- # Download the plugin with transitive dependencies
- mvn_flags = "-s %s" % settings_path
- mvn_get = MVN_PLUGIN + ":get"
- mvn_artifact = "-Dartifact=%s" % MVN_PLUGIN
- command = " ".join(["mvn", mvn_flags, mvn_get, mvn_artifact])
+ localRepository = ctx.path("repository"),
+ mirror = MAVEN_CENTRAL_URL,
+ )
+ settings_path = ctx.path("settings.xml")
+ ctx.file("%s" % settings_path, _SETTINGS_XML, False)
- exec_result = _execute(ctx, command)
- if exec_result.return_code != 0:
- fail("%s\nFailed to fetch Maven dependency" % exec_result.stderr)
+ # Download the plugin with transitive dependencies
+ mvn_flags = "-s %s" % settings_path
+ mvn_get = MVN_PLUGIN + ":get"
+ mvn_artifact = "-Dartifact=%s" % MVN_PLUGIN
+ command = " ".join(["mvn", mvn_flags, mvn_get, mvn_artifact])
+ exec_result = _execute(ctx, command)
+ if exec_result.return_code != 0:
+ fail("%s\nFailed to fetch Maven dependency" % exec_result.stderr)
_maven_dependency_plugin = repository_rule(
- implementation=_maven_dependency_plugin_impl,
+ implementation = _maven_dependency_plugin_impl,
)
-
def maven_dependency_plugin():
- _maven_dependency_plugin(name = "m2")
+ _maven_dependency_plugin(name = "m2")
diff --git a/tools/build_defs/repo/utils.bzl b/tools/build_defs/repo/utils.bzl
index d46b90eafa..f355f9a30e 100644
--- a/tools/build_defs/repo/utils.bzl
+++ b/tools/build_defs/repo/utils.bzl
@@ -27,55 +27,54 @@ load(
"""
def workspace_and_buildfile(ctx):
- """Utility function for writing WORKSPACE and, if requested, a BUILD file.
+ """Utility function for writing WORKSPACE and, if requested, a BUILD file.
- It assumes the paramters name, build_file, and build_file_contents to be
- present in ctx.attr, the latter two possibly with value None.
+ It assumes the parameters name, build_file, and build_file_content to be
+ present in ctx.attr, the latter two possibly with value None.
- Args:
- ctx: The repository context of the repository rule calling this utility
- function.
- """
- if ctx.attr.build_file and ctx.attr.build_file_content:
- ctx.fail("Only one of build_file and build_file_content can be provided.")
+ Args:
+ ctx: The repository context of the repository rule calling this utility
+ function.
+ """
+ if ctx.attr.build_file and ctx.attr.build_file_content:
+ ctx.fail("Only one of build_file and build_file_content can be provided.")
- if ctx.attr.workspace_file and ctx.attr.workspace_file_content:
- ctx.fail("Only one of workspace_file and workspace_file_content can be provided.")
+ if ctx.attr.workspace_file and ctx.attr.workspace_file_content:
+ ctx.fail("Only one of workspace_file and workspace_file_content can be provided.")
- if ctx.attr.workspace_file:
- bash_exe = ctx.os.environ["BAZEL_SH"] if "BAZEL_SH" in ctx.os.environ else "bash"
- ctx.execute([bash_exe, "-c", "rm -f WORKSPACE"])
- ctx.symlink(ctx.attr.workspace_file, "WORKSPACE")
- elif ctx.attr.workspace_file_content:
- bash_exe = ctx.os.environ["BAZEL_SH"] if "BAZEL_SH" in ctx.os.environ else "bash"
- ctx.execute([bash_exe, "-c", "rm -f WORKSPACE"])
- ctx.file("WORKSPACE", ctx.attr.build_file_content)
- else:
- ctx.file("WORKSPACE", "workspace(name = \"{name}\")\n".format(name=ctx.name))
+ if ctx.attr.workspace_file:
+ bash_exe = ctx.os.environ["BAZEL_SH"] if "BAZEL_SH" in ctx.os.environ else "bash"
+ ctx.execute([bash_exe, "-c", "rm -f WORKSPACE"])
+ ctx.symlink(ctx.attr.workspace_file, "WORKSPACE")
+ elif ctx.attr.workspace_file_content:
+ bash_exe = ctx.os.environ["BAZEL_SH"] if "BAZEL_SH" in ctx.os.environ else "bash"
+ ctx.execute([bash_exe, "-c", "rm -f WORKSPACE"])
+ ctx.file("WORKSPACE", ctx.attr.build_file_content)
+ else:
+ ctx.file("WORKSPACE", "workspace(name = \"{name}\")\n".format(name = ctx.name))
- if ctx.attr.build_file:
- bash_exe = ctx.os.environ["BAZEL_SH"] if "BAZEL_SH" in ctx.os.environ else "bash"
- ctx.execute([bash_exe, "-c", "rm -f BUILD BUILD.bazel"])
- ctx.symlink(ctx.attr.build_file, "BUILD")
- elif ctx.attr.build_file_content:
- bash_exe = ctx.os.environ["BAZEL_SH"] if "BAZEL_SH" in ctx.os.environ else "bash"
- ctx.execute([bash_exe, "-c", "rm -f BUILD.bazel"])
- ctx.file("BUILD", ctx.attr.build_file_content)
+ if ctx.attr.build_file:
+ bash_exe = ctx.os.environ["BAZEL_SH"] if "BAZEL_SH" in ctx.os.environ else "bash"
+ ctx.execute([bash_exe, "-c", "rm -f BUILD BUILD.bazel"])
+ ctx.symlink(ctx.attr.build_file, "BUILD")
+ elif ctx.attr.build_file_content:
+ bash_exe = ctx.os.environ["BAZEL_SH"] if "BAZEL_SH" in ctx.os.environ else "bash"
+ ctx.execute([bash_exe, "-c", "rm -f BUILD.bazel"])
+ ctx.file("BUILD", ctx.attr.build_file_content)
def patch(ctx):
- """Implementation of patching an already extracted repository"""
- bash_exe = ctx.os.environ["BAZEL_SH"] if "BAZEL_SH" in ctx.os.environ else "bash"
- for patchfile in ctx.attr.patches:
- command = "{patchtool} -p0 < {patchfile}".format(
- patchtool=ctx.attr.patch_tool,
- patchfile=ctx.path(patchfile))
- st = ctx.execute([bash_exe, "-c", command])
- if st.return_code:
- fail("Error applying patch %s:\n%s" % (str(patchfile), st.stderr))
- for cmd in ctx.attr.patch_cmds:
- st = ctx.execute([bash_exe, "-c", cmd])
- if st.return_code:
- fail("Error applying patch command %s:\n%s%s"
- % (cmd, st.stdout, st.stderr))
-
-
+ """Implementation of patching an already extracted repository"""
+ bash_exe = ctx.os.environ["BAZEL_SH"] if "BAZEL_SH" in ctx.os.environ else "bash"
+ for patchfile in ctx.attr.patches:
+ command = "{patchtool} -p0 < {patchfile}".format(
+ patchtool = ctx.attr.patch_tool,
+ patchfile = ctx.path(patchfile),
+ )
+ st = ctx.execute([bash_exe, "-c", command])
+ if st.return_code:
+ fail("Error applying patch %s:\n%s" % (str(patchfile), st.stderr))
+ for cmd in ctx.attr.patch_cmds:
+ st = ctx.execute([bash_exe, "-c", cmd])
+ if st.return_code:
+ fail("Error applying patch command %s:\n%s%s" %
+ (cmd, st.stdout, st.stderr))
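patch() runs `patch_tool -p0 < patchfile` for each entry in patches, then each patch_cmds entry through bash, so the attributes wired up in git.bzl and http.bzl above are used like this sketch; file names and commands are placeholders:

    # Hypothetical WORKSPACE usage; patch file and command are placeholders.
    http_archive(
        name = "patched_dep",
        urls = ["https://example.com/dep-1.0.tar.gz"],
        sha256 = "0000000000000000000000000000000000000000000000000000000000000000",
        strip_prefix = "dep-1.0",
        patches = ["//third_party:dep-1.0-fix.patch"],  # applied with -p0
        patch_cmds = ["rm -f BUILD"],                   # run after the patches
    )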
diff --git a/tools/build_rules/genproto.bzl b/tools/build_rules/genproto.bzl
index f935e52b68..f63ba005f2 100644
--- a/tools/build_rules/genproto.bzl
+++ b/tools/build_rules/genproto.bzl
@@ -18,25 +18,27 @@
proto_filetype = [".proto"]
def cc_grpc_library(name, src):
- basename = src[:-len(".proto")]
- protoc_label = str(Label("//third_party/protobuf:protoc"))
- cpp_plugin_label = str(Label("//third_party/grpc:cpp_plugin"))
- native.genrule(
- name = name + "_codegen",
- srcs = [src],
- tools = [protoc_label, cpp_plugin_label],
- cmd = "\\\n".join([
- "$(location " + protoc_label + ")",
- " --plugin=protoc-gen-grpc=$(location " + cpp_plugin_label + ")",
- " --cpp_out=$(GENDIR)",
- " --grpc_out=$(GENDIR)",
- " $(location " + src + ")"]),
- outs = [basename + ".grpc.pb.h", basename + ".grpc.pb.cc", basename + ".pb.cc", basename + ".pb.h"])
-
- native.cc_library(
- name = name,
- srcs = [basename + ".grpc.pb.cc", basename + ".pb.cc"],
- hdrs = [basename + ".grpc.pb.h", basename + ".pb.h"],
- deps = [str(Label("//third_party/grpc:grpc++_unsecure"))],
- includes = ["."])
+ basename = src[:-len(".proto")]
+ protoc_label = str(Label("//third_party/protobuf:protoc"))
+ cpp_plugin_label = str(Label("//third_party/grpc:cpp_plugin"))
+ native.genrule(
+ name = name + "_codegen",
+ srcs = [src],
+ tools = [protoc_label, cpp_plugin_label],
+ cmd = "\\\n".join([
+ "$(location " + protoc_label + ")",
+ " --plugin=protoc-gen-grpc=$(location " + cpp_plugin_label + ")",
+ " --cpp_out=$(GENDIR)",
+ " --grpc_out=$(GENDIR)",
+ " $(location " + src + ")",
+ ]),
+ outs = [basename + ".grpc.pb.h", basename + ".grpc.pb.cc", basename + ".pb.cc", basename + ".pb.h"],
+ )
+ native.cc_library(
+ name = name,
+ srcs = [basename + ".grpc.pb.cc", basename + ".pb.cc"],
+ hdrs = [basename + ".grpc.pb.h", basename + ".pb.h"],
+ deps = [str(Label("//third_party/grpc:grpc++_unsecure"))],
+ includes = ["."],
+ )
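Assuming the in-repo load path, cc_grpc_library is used as in the sketch below; the proto file name is a placeholder. The genrule emits both the plain protobuf and the gRPC stubs, and the cc_library wraps all four generated files:

    # Hypothetical BUILD usage; load path and file name are assumptions.
    load("//tools/build_rules:genproto.bzl", "cc_grpc_library")

    cc_grpc_library(
        name = "echo_service",
        src = "echo_service.proto",
    )
    # Generates echo_service.pb.{h,cc} and echo_service.grpc.pb.{h,cc},
    # exposed together as the cc_library ":echo_service".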
diff --git a/tools/build_rules/java_rules_skylark.bzl b/tools/build_rules/java_rules_skylark.bzl
index ec103830d2..50a657728e 100644
--- a/tools/build_rules/java_rules_skylark.bzl
+++ b/tools/build_rules/java_rules_skylark.bzl
@@ -20,177 +20,183 @@ srcjar_filetype = FileType([".jar", ".srcjar"])
# production ready.
def java_library_impl(ctx):
- javac_options = ctx.fragments.java.default_javac_flags
- class_jar = ctx.outputs.class_jar
- compile_time_jars = depset(order="topological")
- runtime_jars = depset(order="topological")
- for dep in ctx.attr.deps:
- compile_time_jars += dep.compile_time_jars
- runtime_jars += dep.runtime_jars
-
- jars = jar_filetype.filter(ctx.files.jars)
- neverlink_jars = jar_filetype.filter(ctx.files.neverlink_jars)
- compile_time_jars += jars + neverlink_jars
- runtime_jars += jars
- compile_time_jars_list = list(compile_time_jars) # TODO: This is weird.
-
- build_output = class_jar.path + ".build_output"
- java_output = class_jar.path + ".build_java"
- javalist_output = class_jar.path + ".build_java_list"
- sources = ctx.files.srcs
-
- sources_param_file = ctx.new_file(ctx.bin_dir, class_jar, "-2.params")
- ctx.file_action(
- output = sources_param_file,
- content = cmd_helper.join_paths("\n", depset(sources)),
- executable = False)
-
- # Cleaning build output directory
- cmd = "set -e;rm -rf " + build_output + " " + java_output + " " + javalist_output + "\n"
- cmd += "mkdir " + build_output + " " + java_output + "\n"
- files = " @" + sources_param_file.path
-
- if ctx.files.srcjars:
- files += " @" + javalist_output
- for file in ctx.files.srcjars:
- cmd += "%s tf %s | grep '\.java$' | sed 's|^|%s/|' >> %s\n" % (ctx.file._jar.path, file.path, java_output, javalist_output)
- cmd += "unzip %s -d %s >/dev/null\n" % (file.path, java_output)
-
- if ctx.files.srcs or ctx.files.srcjars:
- cmd += ctx.file._javac.path
- cmd += " " + " ".join(javac_options)
- if compile_time_jars:
- cmd += " -classpath '" + cmd_helper.join_paths(ctx.configuration.host_path_separator, compile_time_jars) + "'"
- cmd += " -d " + build_output + files + "\n"
-
- # We haven't got a good story for where these should end up, so
- # stick them in the root of the jar.
- for r in ctx.files.resources:
- cmd += "cp %s %s\n" % (r.path, build_output)
- cmd += (ctx.file._jar.path + " cf " + class_jar.path + " -C " + build_output + " .\n" +
- "touch " + build_output + "\n")
- ctx.action(
- inputs = (sources + compile_time_jars_list + [sources_param_file] +
- [ctx.file._jar] + ctx.files._jdk + ctx.files.resources + ctx.files.srcjars),
- outputs = [class_jar],
- mnemonic='JavacBootstrap',
- command=cmd,
- use_default_shell_env=True)
-
- runfiles = ctx.runfiles(collect_data = True)
-
- return struct(files = depset([class_jar]),
- compile_time_jars = compile_time_jars + [class_jar],
- runtime_jars = runtime_jars + [class_jar],
- runfiles = runfiles)
-
+ javac_options = ctx.fragments.java.default_javac_flags
+ class_jar = ctx.outputs.class_jar
+ compile_time_jars = depset(order = "topological")
+ runtime_jars = depset(order = "topological")
+ for dep in ctx.attr.deps:
+ compile_time_jars += dep.compile_time_jars
+ runtime_jars += dep.runtime_jars
+
+ jars = jar_filetype.filter(ctx.files.jars)
+ neverlink_jars = jar_filetype.filter(ctx.files.neverlink_jars)
+ compile_time_jars += jars + neverlink_jars
+ runtime_jars += jars
+ compile_time_jars_list = list(compile_time_jars) # TODO: This is weird.
+
+ build_output = class_jar.path + ".build_output"
+ java_output = class_jar.path + ".build_java"
+ javalist_output = class_jar.path + ".build_java_list"
+ sources = ctx.files.srcs
+
+ sources_param_file = ctx.new_file(ctx.bin_dir, class_jar, "-2.params")
+ ctx.file_action(
+ output = sources_param_file,
+ content = cmd_helper.join_paths("\n", depset(sources)),
+ executable = False,
+ )
+
+ # Cleaning build output directory
+ cmd = "set -e;rm -rf " + build_output + " " + java_output + " " + javalist_output + "\n"
+ cmd += "mkdir " + build_output + " " + java_output + "\n"
+ files = " @" + sources_param_file.path
+
+ if ctx.files.srcjars:
+ files += " @" + javalist_output
+ for file in ctx.files.srcjars:
+ cmd += "%s tf %s | grep '\.java$' | sed 's|^|%s/|' >> %s\n" % (ctx.file._jar.path, file.path, java_output, javalist_output)
+ cmd += "unzip %s -d %s >/dev/null\n" % (file.path, java_output)
+
+ if ctx.files.srcs or ctx.files.srcjars:
+ cmd += ctx.file._javac.path
+ cmd += " " + " ".join(javac_options)
+ if compile_time_jars:
+ cmd += " -classpath '" + cmd_helper.join_paths(ctx.configuration.host_path_separator, compile_time_jars) + "'"
+ cmd += " -d " + build_output + files + "\n"
+
+ # We haven't got a good story for where these should end up, so
+ # stick them in the root of the jar.
+ for r in ctx.files.resources:
+ cmd += "cp %s %s\n" % (r.path, build_output)
+ cmd += (ctx.file._jar.path + " cf " + class_jar.path + " -C " + build_output + " .\n" +
+ "touch " + build_output + "\n")
+ ctx.action(
+ inputs = (sources + compile_time_jars_list + [sources_param_file] +
+ [ctx.file._jar] + ctx.files._jdk + ctx.files.resources + ctx.files.srcjars),
+ outputs = [class_jar],
+ mnemonic = "JavacBootstrap",
+ command = cmd,
+ use_default_shell_env = True,
+ )
+
+ runfiles = ctx.runfiles(collect_data = True)
+
+ return struct(
+ files = depset([class_jar]),
+ compile_time_jars = compile_time_jars + [class_jar],
+ runtime_jars = runtime_jars + [class_jar],
+ runfiles = runfiles,
+ )
def java_binary_impl(ctx):
- library_result = java_library_impl(ctx)
-
- deploy_jar = ctx.outputs.deploy_jar
- manifest = ctx.outputs.manifest
- build_output = deploy_jar.path + ".build_output"
- main_class = ctx.attr.main_class
- ctx.file_action(
- output = manifest,
- content = "Main-Class: " + main_class + "\n",
- executable = False)
-
- # Cleaning build output directory
- cmd = "set -e;rm -rf " + build_output + ";mkdir " + build_output + "\n"
- for jar in library_result.runtime_jars:
- cmd += "unzip -qn " + jar.path + " -d " + build_output + "\n"
- cmd += (ctx.file._jar.path + " cmf " + manifest.path + " " +
- deploy_jar.path + " -C " + build_output + " .\n" +
- "touch " + build_output + "\n")
-
- ctx.action(
- inputs=list(library_result.runtime_jars) + [manifest] + ctx.files._jdk,
- outputs=[deploy_jar],
- mnemonic='Deployjar',
- command=cmd,
- use_default_shell_env=True)
-
- # Write the wrapper.
- executable = ctx.outputs.executable
- ctx.file_action(
- output = executable,
- content = '\n'.join([
- "#!/bin/bash",
- "# autogenerated - do not edit.",
- "case \"$0\" in",
- "/*) self=\"$0\" ;;",
- "*) self=\"$PWD/$0\";;",
- "esac",
- "",
- "if [[ -z \"$JAVA_RUNFILES\" ]]; then",
- " if [[ -e \"${self}.runfiles\" ]]; then",
- " export JAVA_RUNFILES=\"${self}.runfiles\"",
- " fi",
- " if [[ -n \"$JAVA_RUNFILES\" ]]; then",
- " export TEST_SRCDIR=${TEST_SRCDIR:-$JAVA_RUNFILES}",
- " fi",
- "fi",
- "",
-
- "jvm_bin=%s" % (ctx.file._java.path),
- "if [[ ! -x ${jvm_bin} ]]; then",
- " jvm_bin=$(which java)",
- "fi",
-
- # We extract the .so into a temp dir. If only we could mmap
- # directly from the zip file.
- "DEPLOY=$(dirname $self)/$(basename %s)" % deploy_jar.path,
-
- # This works both on Darwin and Linux, with the darwin path
- # looking like tmp.XXXXXXXX.{random}
- "SO_DIR=$(mktemp -d -t tmp.XXXXXXXXX)",
- "function cleanup() {",
- " rm -rf ${SO_DIR}",
- "}",
- "trap cleanup EXIT",
- "unzip -q -d ${SO_DIR} ${DEPLOY} \"*.so\" \"*.dll\" \"*.dylib\" >& /dev/null",
- ("${jvm_bin} -Djava.library.path=${SO_DIR} %s -jar $DEPLOY \"$@\""
- % ' '.join(ctx.attr.jvm_flags)) ,
- "",
+ library_result = java_library_impl(ctx)
+
+ deploy_jar = ctx.outputs.deploy_jar
+ manifest = ctx.outputs.manifest
+ build_output = deploy_jar.path + ".build_output"
+ main_class = ctx.attr.main_class
+ ctx.file_action(
+ output = manifest,
+ content = "Main-Class: " + main_class + "\n",
+ executable = False,
+ )
+
+ # Cleaning build output directory
+ cmd = "set -e;rm -rf " + build_output + ";mkdir " + build_output + "\n"
+ for jar in library_result.runtime_jars:
+ cmd += "unzip -qn " + jar.path + " -d " + build_output + "\n"
+ cmd += (ctx.file._jar.path + " cmf " + manifest.path + " " +
+ deploy_jar.path + " -C " + build_output + " .\n" +
+ "touch " + build_output + "\n")
+
+ ctx.action(
+ inputs = list(library_result.runtime_jars) + [manifest] + ctx.files._jdk,
+ outputs = [deploy_jar],
+ mnemonic = "Deployjar",
+ command = cmd,
+ use_default_shell_env = True,
+ )
+
+ # Write the wrapper.
+ executable = ctx.outputs.executable
+ ctx.file_action(
+ output = executable,
+ content = "\n".join([
+ "#!/bin/bash",
+ "# autogenerated - do not edit.",
+ "case \"$0\" in",
+ "/*) self=\"$0\" ;;",
+ "*) self=\"$PWD/$0\";;",
+ "esac",
+ "",
+ "if [[ -z \"$JAVA_RUNFILES\" ]]; then",
+ " if [[ -e \"${self}.runfiles\" ]]; then",
+ " export JAVA_RUNFILES=\"${self}.runfiles\"",
+ " fi",
+ " if [[ -n \"$JAVA_RUNFILES\" ]]; then",
+ " export TEST_SRCDIR=${TEST_SRCDIR:-$JAVA_RUNFILES}",
+ " fi",
+ "fi",
+ "",
+ "jvm_bin=%s" % (ctx.file._java.path),
+ "if [[ ! -x ${jvm_bin} ]]; then",
+ " jvm_bin=$(which java)",
+ "fi",
+
+ # We extract the .so into a temp dir. If only we could mmap
+ # directly from the zip file.
+ "DEPLOY=$(dirname $self)/$(basename %s)" % deploy_jar.path,
+
+ # This works both on Darwin and Linux, with the darwin path
+ # looking like tmp.XXXXXXXX.{random}
+ "SO_DIR=$(mktemp -d -t tmp.XXXXXXXXX)",
+ "function cleanup() {",
+ " rm -rf ${SO_DIR}",
+ "}",
+ "trap cleanup EXIT",
+ "unzip -q -d ${SO_DIR} ${DEPLOY} \"*.so\" \"*.dll\" \"*.dylib\" >& /dev/null",
+ ("${jvm_bin} -Djava.library.path=${SO_DIR} %s -jar $DEPLOY \"$@\"" %
+ " ".join(ctx.attr.jvm_flags)),
+ "",
]),
- executable = True)
-
- runfiles = ctx.runfiles(files = [deploy_jar, executable] + ctx.files._jdk, collect_data = True)
- files_to_build = depset([deploy_jar, manifest, executable])
- files_to_build += library_result.files
+ executable = True,
+ )
- return struct(files = files_to_build, runfiles = runfiles)
+ runfiles = ctx.runfiles(files = [deploy_jar, executable] + ctx.files._jdk, collect_data = True)
+ files_to_build = depset([deploy_jar, manifest, executable])
+ files_to_build += library_result.files
+ return struct(files = files_to_build, runfiles = runfiles)
def java_import_impl(ctx):
- # TODO(bazel-team): Why do we need to filter here? The attribute
- # already says only jars are allowed.
- jars = depset(jar_filetype.filter(ctx.files.jars))
- neverlink_jars = depset(jar_filetype.filter(ctx.files.neverlink_jars))
- runfiles = ctx.runfiles(collect_data = True)
- return struct(files = jars,
- compile_time_jars = jars + neverlink_jars,
- runtime_jars = jars,
- runfiles = runfiles)
-
+ # TODO(bazel-team): Why do we need to filter here? The attribute
+ # already says only jars are allowed.
+ jars = depset(jar_filetype.filter(ctx.files.jars))
+ neverlink_jars = depset(jar_filetype.filter(ctx.files.neverlink_jars))
+ runfiles = ctx.runfiles(collect_data = True)
+ return struct(
+ files = jars,
+ compile_time_jars = jars + neverlink_jars,
+ runtime_jars = jars,
+ runfiles = runfiles,
+ )
java_library_attrs = {
- "_java": attr.label(default=Label("//tools/jdk:java"), single_file=True),
- "_javac": attr.label(default=Label("//tools/jdk:javac"), single_file=True),
- "_jar": attr.label(default=Label("//tools/jdk:jar"), single_file=True),
- "_jdk": attr.label(default=Label("//tools/jdk:jdk"), allow_files=True),
- "data": attr.label_list(allow_files=True, cfg="data"),
- "resources": attr.label_list(allow_files=True),
- "srcs": attr.label_list(allow_files=java_filetype),
- "jars": attr.label_list(allow_files=jar_filetype),
- "neverlink_jars": attr.label_list(allow_files=jar_filetype),
- "srcjars": attr.label_list(allow_files=srcjar_filetype),
+ "_java": attr.label(default = Label("//tools/jdk:java"), single_file = True),
+ "_javac": attr.label(default = Label("//tools/jdk:javac"), single_file = True),
+ "_jar": attr.label(default = Label("//tools/jdk:jar"), single_file = True),
+ "_jdk": attr.label(default = Label("//tools/jdk:jdk"), allow_files = True),
+ "data": attr.label_list(allow_files = True, cfg = "data"),
+ "resources": attr.label_list(allow_files = True),
+ "srcs": attr.label_list(allow_files = java_filetype),
+ "jars": attr.label_list(allow_files = jar_filetype),
+ "neverlink_jars": attr.label_list(allow_files = jar_filetype),
+ "srcjars": attr.label_list(allow_files = srcjar_filetype),
"deps": attr.label_list(
- allow_files=False,
- providers = ["compile_time_jars", "runtime_jars"]),
- }
+ allow_files = False,
+ providers = ["compile_time_jars", "runtime_jars"],
+ ),
+}
java_library = rule(
java_library_impl,
@@ -198,7 +204,7 @@ java_library = rule(
outputs = {
"class_jar": "lib%{name}.jar",
},
- fragments = ['java', 'cpp'],
+ fragments = ["java", "cpp"],
)
# A copy to avoid conflict with native rule.
@@ -208,56 +214,60 @@ bootstrap_java_library = rule(
outputs = {
"class_jar": "lib%{name}.jar",
},
- fragments = ['java'],
+ fragments = ["java"],
)
java_binary_attrs_common = dict(java_library_attrs)
java_binary_attrs_common.update({
"jvm_flags": attr.string_list(),
- "jvm": attr.label(default=Label("//tools/jdk:jdk"), allow_files=True),
+ "jvm": attr.label(default = Label("//tools/jdk:jdk"), allow_files = True),
})
java_binary_attrs = dict(java_binary_attrs_common)
-java_binary_attrs["main_class"] = attr.string(mandatory=True)
+java_binary_attrs["main_class"] = attr.string(mandatory = True)
java_binary_outputs = {
"class_jar": "lib%{name}.jar",
"deploy_jar": "%{name}_deploy.jar",
- "manifest": "%{name}_MANIFEST.MF"
+ "manifest": "%{name}_MANIFEST.MF",
}
-java_binary = rule(java_binary_impl,
- executable = True,
- attrs = java_binary_attrs,
- outputs = java_binary_outputs,
- fragments = ['java', 'cpp'],
+java_binary = rule(
+ java_binary_impl,
+ executable = True,
+ attrs = java_binary_attrs,
+ outputs = java_binary_outputs,
+ fragments = ["java", "cpp"],
)
# A copy to avoid conflict with native rule
-bootstrap_java_binary = rule(java_binary_impl,
- executable = True,
- attrs = java_binary_attrs,
- outputs = java_binary_outputs,
- fragments = ['java'],
+bootstrap_java_binary = rule(
+ java_binary_impl,
+ executable = True,
+ attrs = java_binary_attrs,
+ outputs = java_binary_outputs,
+ fragments = ["java"],
)
-java_test = rule(java_binary_impl,
- executable = True,
- attrs = dict(java_binary_attrs_common.items() + [
- ("main_class", attr.string(default="org.junit.runner.JUnitCore")),
- # TODO(bazel-team): it would be better if we could offer a
- # test_class attribute, but the "args" attribute is hard
- # coded in the bazel infrastructure.
- ]),
- outputs = java_binary_outputs,
- test = True,
- fragments = ['java', 'cpp'],
+java_test = rule(
+ java_binary_impl,
+ executable = True,
+ attrs = dict(java_binary_attrs_common.items() + [
+ ("main_class", attr.string(default = "org.junit.runner.JUnitCore")),
+ # TODO(bazel-team): it would be better if we could offer a
+ # test_class attribute, but the "args" attribute is hard
+ # coded in the bazel infrastructure.
+ ]),
+ outputs = java_binary_outputs,
+ test = True,
+ fragments = ["java", "cpp"],
)
java_import = rule(
java_import_impl,
attrs = {
- "jars": attr.label_list(allow_files=jar_filetype),
- "srcjar": attr.label(allow_files=srcjar_filetype),
- "neverlink_jars": attr.label_list(allow_files=jar_filetype, default=[]),
- })
+ "jars": attr.label_list(allow_files = jar_filetype),
+ "srcjar": attr.label(allow_files = srcjar_filetype),
+ "neverlink_jars": attr.label_list(allow_files = jar_filetype, default = []),
+ },
+)
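As a usage sketch for the Java rules reformatted above: the bootstrap_ copies exist precisely to avoid clashing with the native rules, so a BUILD file would typically load them under an alias. The load path follows this file's location; target and file names are invented for illustration.

    # Hypothetical BUILD file; labels and file names are assumptions.
    load(
        "//tools/build_rules:java_rules_skylark.bzl",
        java_binary = "bootstrap_java_binary",
        java_library = "bootstrap_java_library",
    )

    java_library(
        name = "greeter",
        srcs = ["Greeter.java"],       # filtered by java_filetype
        resources = ["greeting.txt"],
    )

    java_binary(
        name = "hello",
        srcs = ["Hello.java"],
        deps = [":greeter"],           # deps must provide compile_time_jars/runtime_jars
        main_class = "Hello",          # mandatory for java_binary
    )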
diff --git a/tools/build_rules/test_rules.bzl b/tools/build_rules/test_rules.bzl
index d815129687..c365021c74 100644
--- a/tools/build_rules/test_rules.bzl
+++ b/tools/build_rules/test_rules.bzl
@@ -18,31 +18,33 @@
### or sometimes pass depending on a trivial computation.
def success_target(ctx, msg):
- """Return a success for an analysis test.
-
- The test rule must have an executable output.
-
- Args:
- ctx: the Bazel rule context
- msg: an informative message to display
-
- Returns:
- a suitable rule implementation struct(),
- with actions that always succeed at execution time.
- """
- exe = ctx.outputs.executable
- dat = ctx.new_file(ctx.genfiles_dir, exe, ".dat")
- ctx.actions.write(
- output=dat,
- content=msg)
- ctx.actions.write(
- output=exe,
- content="cat " + dat.path + " ; echo",
- is_executable=True)
- return struct(runfiles=ctx.runfiles([exe, dat]))
+ """Return a success for an analysis test.
+
+ The test rule must have an executable output.
+
+ Args:
+ ctx: the Bazel rule context
+ msg: an informative message to display
+
+ Returns:
+ a suitable rule implementation struct(),
+ with actions that always succeed at execution time.
+ """
+ exe = ctx.outputs.executable
+ dat = ctx.new_file(ctx.genfiles_dir, exe, ".dat")
+ ctx.actions.write(
+ output = dat,
+ content = msg,
+ )
+ ctx.actions.write(
+ output = exe,
+ content = "cat " + dat.path + " ; echo",
+ is_executable = True,
+ )
+ return struct(runfiles = ctx.runfiles([exe, dat]))
def _successful_test_impl(ctx):
- return success_target(ctx, ctx.attr.msg)
+ return success_target(ctx, ctx.attr.msg)
successful_test = rule(
attrs = {"msg": attr.string(mandatory = True)},
@@ -52,32 +54,35 @@ successful_test = rule(
)
def failure_target(ctx, msg):
- """Return a failure for an analysis test.
-
- The test rule must have an executable output.
-
- Args:
- ctx: the Bazel rule context
- msg: an informative message to display
-
- Returns:
- a suitable rule implementation struct(),
- with actions that always fail at execution time.
- """
- ### fail(msg) ### <--- This would fail at analysis time.
- exe = ctx.outputs.executable
- dat = ctx.new_file(ctx.genfiles_dir, exe, ".dat")
- ctx.file_action(
- output=dat,
- content=msg)
- ctx.file_action(
- output=exe,
- content="(cat " + dat.short_path + " ; echo ) >&2 ; exit 1",
- executable=True)
- return struct(runfiles=ctx.runfiles([exe, dat]))
+ """Return a failure for an analysis test.
+
+ The test rule must have an executable output.
+
+ Args:
+ ctx: the Bazel rule context
+ msg: an informative message to display
+
+ Returns:
+ a suitable rule implementation struct(),
+ with actions that always fail at execution time.
+ """
+
+ ### fail(msg) ### <--- This would fail at analysis time.
+ exe = ctx.outputs.executable
+ dat = ctx.new_file(ctx.genfiles_dir, exe, ".dat")
+ ctx.file_action(
+ output = dat,
+ content = msg,
+ )
+ ctx.file_action(
+ output = exe,
+ content = "(cat " + dat.short_path + " ; echo ) >&2 ; exit 1",
+ executable = True,
+ )
+ return struct(runfiles = ctx.runfiles([exe, dat]))
def _failed_test_impl(ctx):
- return failure_target(ctx, ctx.attr.msg)
+ return failure_target(ctx, ctx.attr.msg)
failed_test = rule(
attrs = {"msg": attr.string(mandatory = True)},
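For orientation, the two trivial test rules above are instantiated directly from a BUILD file; a minimal sketch with invented target names:

    load("//tools/build_rules:test_rules.bzl", "failed_test", "successful_test")

    # Always passes: the generated script merely cats the message.
    successful_test(
        name = "always_green",
        msg = "analysis-time expectation was met",
    )

    # Always fails at execution time (deliberately not at analysis time).
    failed_test(
        name = "always_red",
        msg = "analysis-time expectation was not met",
    )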
@@ -88,151 +93,166 @@ failed_test = rule(
### Second, general purpose utilities
-def assert_(condition, string="assertion failed", *args):
- """Trivial assertion mechanism.
+def assert_(condition, string = "assertion failed", *args):
+ """Trivial assertion mechanism.
- Args:
- condition: a generalized boolean expected to be true
- string: a format string for the error message should the assertion fail
- *args: format arguments for the error message should the assertion fail
+ Args:
+ condition: a generalized boolean expected to be true
+ string: a format string for the error message should the assertion fail
+ *args: format arguments for the error message should the assertion fail
- Returns:
- None.
+ Returns:
+ None.
- Raises:
- an error if the condition isn't true.
- """
+ Raises:
+ an error if the condition isn't true.
+ """
- if not condition:
- fail(string % args)
+ if not condition:
+ fail(string % args)
def strip_prefix(prefix, string):
- assert_(string.startswith(prefix),
- "%s does not start with %s", string, prefix)
- return string[len(prefix):len(string)]
-
-def expectation_description(expect=None, expect_failure=None):
- """Turn expectation of result or error into a string."""
- if expect_failure:
- return "failure " + str(expect_failure)
- else:
- return "result " + repr(expect)
+ assert_(
+ string.startswith(prefix),
+ "%s does not start with %s",
+ string,
+ prefix,
+ )
+ return string[len(prefix):len(string)]
+
+def expectation_description(expect = None, expect_failure = None):
+ """Turn expectation of result or error into a string."""
+ if expect_failure:
+ return "failure " + str(expect_failure)
+ else:
+ return "result " + repr(expect)
def check_results(result, failure, expect, expect_failure):
- """See if actual computation results match expectations.
-
- Args:
- result: the result returned by the test if it ran to completion
- failure: the failure message caught while testing, if any
- expect: the expected result for a successful test, if no failure expected
- expect_failure: the expected failure message for the test, if any
-
- Returns:
- a pair (tuple) of a boolean (true if success) and a message (string).
- """
- wanted = expectation_description(expect, expect_failure)
- found = expectation_description(result, failure)
- if wanted == found:
- return (True, "successfully computed " + wanted)
- else:
- return (False, "expect " + wanted + " but found " + found)
-
-def load_results(name, result=None, failure=None,
- expect=None, expect_failure=None):
- """issue load-time results of a test.
-
- Args:
- name: the name of the Bazel rule at load time.
- result: the result returned by the test if it ran to completion
- failure: the failure message caught while testing, if any
- expect: the expected result for a successful test, if no failure expected
- expect_failure: the expected failure message for the test, if any
-
- Returns:
- None, after issuing a rule that will succeed at execution time if
- expectations were met.
- """
- (is_success, msg) = check_results(result, failure, expect, expect_failure)
- this_test = successful_test if is_success else failed_test
- return this_test(name=name, msg=msg)
-
-def analysis_results(ctx, result=None, failure=None,
- expect=None, expect_failure=None):
- """issue analysis-time results of a test.
-
- Args:
- ctx: the Bazel rule context
- result: the result returned by the test if it ran to completion
- failure: the failure message caught while testing, if any
- expect: the expected result for a successful test, if no failure expected
- expect_failure: the expected failure message for the test, if any
-
- Returns:
- a suitable rule implementation struct(),
- with actions that succeed at execution time if expectation were met,
- or fail at execution time if they didn't.
- """
- (is_success, msg) = check_results(result, failure, expect, expect_failure)
- this_test = success_target if is_success else failure_target
- return this_test(ctx, msg)
+ """See if actual computation results match expectations.
+
+ Args:
+ result: the result returned by the test if it ran to completion
+ failure: the failure message caught while testing, if any
+ expect: the expected result for a successful test, if no failure expected
+ expect_failure: the expected failure message for the test, if any
+
+ Returns:
+ a pair (tuple) of a boolean (true if success) and a message (string).
+ """
+ wanted = expectation_description(expect, expect_failure)
+ found = expectation_description(result, failure)
+ if wanted == found:
+ return (True, "successfully computed " + wanted)
+ else:
+ return (False, "expect " + wanted + " but found " + found)
+
+def load_results(
+ name,
+ result = None,
+ failure = None,
+ expect = None,
+ expect_failure = None):
+    """Issue load-time results of a test.
+
+ Args:
+ name: the name of the Bazel rule at load time.
+ result: the result returned by the test if it ran to completion
+ failure: the failure message caught while testing, if any
+ expect: the expected result for a successful test, if no failure expected
+ expect_failure: the expected failure message for the test, if any
+
+ Returns:
+ None, after issuing a rule that will succeed at execution time if
+ expectations were met.
+ """
+ (is_success, msg) = check_results(result, failure, expect, expect_failure)
+ this_test = successful_test if is_success else failed_test
+ return this_test(name = name, msg = msg)
+
+def analysis_results(
+ ctx,
+ result = None,
+ failure = None,
+ expect = None,
+ expect_failure = None):
+    """Issue analysis-time results of a test.
+
+ Args:
+ ctx: the Bazel rule context
+ result: the result returned by the test if it ran to completion
+ failure: the failure message caught while testing, if any
+ expect: the expected result for a successful test, if no failure expected
+ expect_failure: the expected failure message for the test, if any
+
+ Returns:
+ a suitable rule implementation struct(),
+        with actions that succeed at execution time if expectations were met,
+ or fail at execution time if they didn't.
+ """
+ (is_success, msg) = check_results(result, failure, expect, expect_failure)
+ this_test = success_target if is_success else failure_target
+ return this_test(ctx, msg)
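To make the matching above concrete, a small worked example of check_results and load_results (all values are invented):

    # expectation_description() renders both sides as strings, so comparing
    # "result 42" against "result 42" succeeds here.
    (ok, msg) = check_results(
        result = 42,            # actual outcome of the computation under test
        failure = None,         # no error was caught
        expect = 42,            # expected outcome
        expect_failure = None,  # no error was expected
    )
    # ok == True, msg == "successfully computed result 42"

    # At load time, the same comparison can be turned into a test target:
    load_results(name = "answer_is_42", result = 42, expect = 42)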
### Simple tests
def _rule_test_impl(ctx):
- """check that a rule generates the desired outputs and providers."""
- rule_ = ctx.attr.rule
- rule_name = str(rule_.label)
- exe = ctx.outputs.executable
- if ctx.attr.generates:
- # Generate the proper prefix to remove from generated files.
- prefix_parts = []
-
- if rule_.label.workspace_root:
- # Create a prefix that is correctly relative to the output of this rule.
- prefix_parts = ["..", strip_prefix("external/", rule_.label.workspace_root)]
-
- if rule_.label.package:
- prefix_parts.append(rule_.label.package)
-
- prefix = "/".join(prefix_parts)
-
- if prefix:
- # If the prefix isn't empty, it needs a trailing slash.
- prefix = prefix + "/"
-
- # TODO(bazel-team): Use set() instead of sorted() once
- # set comparison is implemented.
- # TODO(bazel-team): Use a better way to determine if two paths refer to
- # the same file.
- generates = sorted(ctx.attr.generates)
- generated = sorted([strip_prefix(prefix, f.short_path)
- for f in rule_.files.to_list()])
- if generates != generated:
- fail("rule %s generates %s not %s"
- % (rule_name, repr(generated), repr(generates)))
- provides = ctx.attr.provides
- if provides:
- files = []
- commands = []
- for k in provides.keys():
- if hasattr(rule_, k):
- v = repr(getattr(rule_, k))
- else:
- fail(("rule %s doesn't provide attribute %s. "
- + "Its list of attributes is: %s")
- % (rule_name, k, dir(rule_)))
- file_ = ctx.new_file(ctx.genfiles_dir, exe, "." + k)
- files += [file_]
- regexp = provides[k]
- commands += [
- "if ! grep %s %s ; then echo 'bad %s:' ; cat %s ; echo ; exit 1 ; fi"
- % (repr(regexp), file_.short_path, k, file_.short_path)]
- ctx.file_action(output=file_, content=v)
- script = "\n".join(commands + ["true"])
- ctx.file_action(output=exe, content=script, executable=True)
- return struct(runfiles=ctx.runfiles([exe] + files))
- else:
- return success_target(ctx, "success")
+ """check that a rule generates the desired outputs and providers."""
+ rule_ = ctx.attr.rule
+ rule_name = str(rule_.label)
+ exe = ctx.outputs.executable
+ if ctx.attr.generates:
+ # Generate the proper prefix to remove from generated files.
+ prefix_parts = []
+
+ if rule_.label.workspace_root:
+ # Create a prefix that is correctly relative to the output of this rule.
+ prefix_parts = ["..", strip_prefix("external/", rule_.label.workspace_root)]
+
+ if rule_.label.package:
+ prefix_parts.append(rule_.label.package)
+
+ prefix = "/".join(prefix_parts)
+
+ if prefix:
+ # If the prefix isn't empty, it needs a trailing slash.
+ prefix = prefix + "/"
+
+ # TODO(bazel-team): Use set() instead of sorted() once
+ # set comparison is implemented.
+ # TODO(bazel-team): Use a better way to determine if two paths refer to
+ # the same file.
+ generates = sorted(ctx.attr.generates)
+ generated = sorted([
+ strip_prefix(prefix, f.short_path)
+ for f in rule_.files.to_list()
+ ])
+ if generates != generated:
+ fail("rule %s generates %s not %s" %
+ (rule_name, repr(generated), repr(generates)))
+ provides = ctx.attr.provides
+ if provides:
+ files = []
+ commands = []
+ for k in provides.keys():
+ if hasattr(rule_, k):
+ v = repr(getattr(rule_, k))
+ else:
+ fail(("rule %s doesn't provide attribute %s. " +
+ "Its list of attributes is: %s") %
+ (rule_name, k, dir(rule_)))
+ file_ = ctx.new_file(ctx.genfiles_dir, exe, "." + k)
+ files += [file_]
+ regexp = provides[k]
+ commands += [
+ "if ! grep %s %s ; then echo 'bad %s:' ; cat %s ; echo ; exit 1 ; fi" %
+ (repr(regexp), file_.short_path, k, file_.short_path),
+ ]
+ ctx.file_action(output = file_, content = v)
+ script = "\n".join(commands + ["true"])
+ ctx.file_action(output = exe, content = script, executable = True)
+ return struct(runfiles = ctx.runfiles([exe] + files))
+ else:
+ return success_target(ctx, "success")
rule_test = rule(
attrs = {
@@ -246,36 +266,42 @@ rule_test = rule(
)
def _file_test_impl(ctx):
- """check that a file has a given content."""
- exe = ctx.outputs.executable
- file_ = ctx.file.file
- content = ctx.attr.content
- regexp = ctx.attr.regexp
- matches = ctx.attr.matches
- if bool(content) == bool(regexp):
- fail("Must specify one and only one of content or regexp")
- if content and matches != -1:
- fail("matches only makes sense with regexp")
- if content:
- dat = ctx.new_file(ctx.genfiles_dir, exe, ".dat")
- ctx.file_action(
- output=dat,
- content=content)
+    """check that a file has the given content."""
+ exe = ctx.outputs.executable
+ file_ = ctx.file.file
+ content = ctx.attr.content
+ regexp = ctx.attr.regexp
+ matches = ctx.attr.matches
+ if bool(content) == bool(regexp):
+ fail("Must specify one and only one of content or regexp")
+ if content and matches != -1:
+ fail("matches only makes sense with regexp")
+ if content:
+ dat = ctx.new_file(ctx.genfiles_dir, exe, ".dat")
+ ctx.file_action(
+ output = dat,
+ content = content,
+ )
+ ctx.file_action(
+ output = exe,
+ content = "diff -u %s %s" % (dat.short_path, file_.short_path),
+ executable = True,
+ )
+ return struct(runfiles = ctx.runfiles([exe, dat, file_]))
+ if matches != -1:
+ script = "[ %s == $(grep -c %s %s) ]" % (
+ matches,
+ repr(regexp),
+ file_.short_path,
+ )
+ else:
+ script = "grep %s %s" % (repr(regexp), file_.short_path)
ctx.file_action(
- output=exe,
- content="diff -u %s %s" % (dat.short_path, file_.short_path),
- executable=True)
- return struct(runfiles=ctx.runfiles([exe, dat, file_]))
- if matches != -1:
- script = "[ %s == $(grep -c %s %s) ]" % (
- matches, repr(regexp), file_.short_path)
- else:
- script = "grep %s %s" % (repr(regexp), file_.short_path)
- ctx.file_action(
- output=exe,
- content=script,
- executable=True)
- return struct(runfiles=ctx.runfiles([exe, file_]))
+ output = exe,
+ content = script,
+ executable = True,
+ )
+ return struct(runfiles = ctx.runfiles([exe, file_]))
file_test = rule(
attrs = {
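Before the next file, a hedged BUILD sketch of rule_test and file_test; the attribute names come from the implementations above, while the targets and contents are invented:

    load("//tools/build_rules:test_rules.bzl", "file_test", "rule_test")

    # Asserts that :gen produces exactly the listed outputs.
    rule_test(
        name = "gen_outputs_test",
        rule = ":gen",
        generates = ["gen.txt"],
    )

    # Asserts file content either exactly (content =) or by pattern
    # (regexp =, optionally with an exact match count via matches =).
    file_test(
        name = "gen_content_test",
        file = "gen.txt",
        regexp = "hello",
        matches = 1,
    )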
diff --git a/tools/build_rules/utilities.bzl b/tools/build_rules/utilities.bzl
index 5574c63187..2dc290c798 100644
--- a/tools/build_rules/utilities.bzl
+++ b/tools/build_rules/utilities.bzl
@@ -16,23 +16,22 @@
"""This rule exposes the source jar of a java_*_library rule as a label."""
def _java_library_srcs_impl(ctx):
- if len(ctx.attr.deps) != 1:
- fail("Only one deps value supported", "deps")
- dep = ctx.attr.deps[0]
- return [DefaultInfo(files=depset(dep.java.source_jars))]
-
+ if len(ctx.attr.deps) != 1:
+ fail("Only one deps value supported", "deps")
+ dep = ctx.attr.deps[0]
+ return [DefaultInfo(files = depset(dep.java.source_jars))]
_java_library_srcs = rule(
- implementation=_java_library_srcs_impl,
- attrs={
- "deps":
- attr.label_list(
- mandatory=True,
- non_empty=True,
- providers=["java"],)
- })
-
+ implementation = _java_library_srcs_impl,
+ attrs = {
+ "deps": attr.label_list(
+ mandatory = True,
+ non_empty = True,
+ providers = ["java"],
+ ),
+ },
+)
-def java_library_srcs(name, deps, visibility=None, **kwargs):
- """Provides the source jars generated by a java_*_library rule."""
- _java_library_srcs(name=name, deps=deps, visibility=visibility, **kwargs)
+def java_library_srcs(name, deps, visibility = None, **kwargs):
+ """Provides the source jars generated by a java_*_library rule."""
+ _java_library_srcs(name = name, deps = deps, visibility = visibility, **kwargs)
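A short illustrative use of this wrapper (labels invented):

    load("//tools/build_rules:utilities.bzl", "java_library_srcs")

    # Exposes the source jars of :mylib as the default files of :mylib_srcs;
    # deps must contain exactly one java_*_library target.
    java_library_srcs(
        name = "mylib_srcs",
        deps = [":mylib"],
    )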
diff --git a/tools/cpp/alias_rules.bzl b/tools/cpp/alias_rules.bzl
index fa2423ad97..2ab85d8882 100644
--- a/tools/cpp/alias_rules.bzl
+++ b/tools/cpp/alias_rules.bzl
@@ -13,8 +13,9 @@
# limitations under the License.
"""Skylark rules that stub out C++-related alias rules."""
+
def cc_toolchain_alias(name):
- if hasattr(native, "cc_toolchain_alias"):
- native.cc_toolchain_alias(name=name)
- else:
- pass
+ if hasattr(native, "cc_toolchain_alias"):
+ native.cc_toolchain_alias(name = name)
+ else:
+ pass
diff --git a/tools/cpp/cc_configure.bzl b/tools/cpp/cc_configure.bzl
index 01736aa09e..57954f6f71 100644
--- a/tools/cpp/cc_configure.bzl
+++ b/tools/cpp/cc_configure.bzl
@@ -23,35 +23,37 @@ load(
)
def cc_autoconf_impl(repository_ctx, overriden_tools = dict()):
- paths = resolve_labels(repository_ctx, [
- "@bazel_tools//tools/cpp:BUILD.static.freebsd",
- "@bazel_tools//tools/cpp:CROSSTOOL",
- "@bazel_tools//tools/cpp:dummy_toolchain.bzl",
- ])
+ paths = resolve_labels(repository_ctx, [
+ "@bazel_tools//tools/cpp:BUILD.static.freebsd",
+ "@bazel_tools//tools/cpp:CROSSTOOL",
+ "@bazel_tools//tools/cpp:dummy_toolchain.bzl",
+ ])
- repository_ctx.symlink(
- paths["@bazel_tools//tools/cpp:dummy_toolchain.bzl"], "dummy_toolchain.bzl")
- env = repository_ctx.os.environ
- cpu_value = get_cpu_value(repository_ctx)
- if "BAZEL_DO_NOT_DETECT_CPP_TOOLCHAIN" in env and env["BAZEL_DO_NOT_DETECT_CPP_TOOLCHAIN"] == "1":
- repository_ctx.symlink(Label("@bazel_tools//tools/cpp:CROSSTOOL.empty"), "CROSSTOOL")
- repository_ctx.symlink(Label("@bazel_tools//tools/cpp:BUILD.empty"), "BUILD")
- elif cpu_value == "freebsd":
- # This is defaulting to the static crosstool, we should eventually
- # autoconfigure this platform too. Theorically, FreeBSD should be
- # straightforward to add but we cannot run it in a docker container so
- # skipping until we have proper tests for FreeBSD.
- repository_ctx.symlink(paths["@bazel_tools//tools/cpp:CROSSTOOL"], "CROSSTOOL")
- repository_ctx.symlink(paths["@bazel_tools//tools/cpp:BUILD.static.freebsd"], "BUILD")
- elif cpu_value == "x64_windows":
- # TODO(ibiryukov): overriden_tools are only supported in configure_unix_toolchain.
- # We might want to add that to Windows too(at least for msys toolchain).
- configure_windows_toolchain(repository_ctx)
- elif (cpu_value == "darwin" and
- ("BAZEL_USE_CPP_ONLY_TOOLCHAIN" not in env or env["BAZEL_USE_CPP_ONLY_TOOLCHAIN"] != "1")):
- configure_osx_toolchain(repository_ctx, overriden_tools)
- else:
- configure_unix_toolchain(repository_ctx, cpu_value, overriden_tools)
+ repository_ctx.symlink(
+ paths["@bazel_tools//tools/cpp:dummy_toolchain.bzl"],
+ "dummy_toolchain.bzl",
+ )
+ env = repository_ctx.os.environ
+ cpu_value = get_cpu_value(repository_ctx)
+ if "BAZEL_DO_NOT_DETECT_CPP_TOOLCHAIN" in env and env["BAZEL_DO_NOT_DETECT_CPP_TOOLCHAIN"] == "1":
+ repository_ctx.symlink(Label("@bazel_tools//tools/cpp:CROSSTOOL.empty"), "CROSSTOOL")
+ repository_ctx.symlink(Label("@bazel_tools//tools/cpp:BUILD.empty"), "BUILD")
+ elif cpu_value == "freebsd":
+        # This is defaulting to the static crosstool; we should eventually
+        # autoconfigure this platform too. Theoretically, FreeBSD should be
+        # straightforward to add, but we cannot run it in a docker container,
+        # so we skip it until we have proper tests for FreeBSD.
+ repository_ctx.symlink(paths["@bazel_tools//tools/cpp:CROSSTOOL"], "CROSSTOOL")
+ repository_ctx.symlink(paths["@bazel_tools//tools/cpp:BUILD.static.freebsd"], "BUILD")
+ elif cpu_value == "x64_windows":
+ # TODO(ibiryukov): overriden_tools are only supported in configure_unix_toolchain.
+        # We might want to add that to Windows too (at least for the msys toolchain).
+ configure_windows_toolchain(repository_ctx)
+ elif (cpu_value == "darwin" and
+ ("BAZEL_USE_CPP_ONLY_TOOLCHAIN" not in env or env["BAZEL_USE_CPP_ONLY_TOOLCHAIN"] != "1")):
+ configure_osx_toolchain(repository_ctx, overriden_tools)
+ else:
+ configure_unix_toolchain(repository_ctx, cpu_value, overriden_tools)
cc_autoconf = repository_rule(
environ = [
@@ -92,9 +94,10 @@ cc_autoconf = repository_rule(
)
def cc_configure():
- """A C++ configuration rules that generate the crosstool file."""
- cc_autoconf(name="local_config_cc")
- native.bind(name="cc_toolchain", actual="@local_config_cc//:toolchain")
- native.register_toolchains(
- # Use register_toolchain's target pattern expansion to register all toolchains in the package.
- "@local_config_cc//:all")
+    """A C++ configuration rule that generates the crosstool file."""
+ cc_autoconf(name = "local_config_cc")
+ native.bind(name = "cc_toolchain", actual = "@local_config_cc//:toolchain")
+ native.register_toolchains(
+ # Use register_toolchain's target pattern expansion to register all toolchains in the package.
+ "@local_config_cc//:all",
+ )
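For context, cc_configure is called from a WORKSPACE file; a minimal sketch of the standard wiring (the exact invocation may vary by Bazel version):

    # WORKSPACE (sketch)
    load("@bazel_tools//tools/cpp:cc_configure.bzl", "cc_configure")

    # Creates @local_config_cc, binds //external:cc_toolchain, and registers
    # @local_config_cc//:all, exactly as the function body above shows.
    cc_configure()

Setting BAZEL_DO_NOT_DETECT_CPP_TOOLCHAIN=1 in the environment short-circuits detection via the first branch of cc_autoconf_impl.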
diff --git a/tools/cpp/compiler_flag.bzl b/tools/cpp/compiler_flag.bzl
index 4b134a1bf3..622a0b4c99 100644
--- a/tools/cpp/compiler_flag.bzl
+++ b/tools/cpp/compiler_flag.bzl
@@ -17,12 +17,12 @@
load("//tools/cpp:toolchain_utils.bzl", "find_cpp_toolchain")
def _compiler_flag_impl(ctx):
- toolchain = find_cpp_toolchain(ctx)
- return [config_common.FeatureFlagInfo(value = toolchain.compiler)]
+ toolchain = find_cpp_toolchain(ctx)
+ return [config_common.FeatureFlagInfo(value = toolchain.compiler)]
compiler_flag = rule(
implementation = _compiler_flag_impl,
attrs = {
- "_cc_toolchain": attr.label(default=Label("//tools/cpp:current_cc_toolchain")),
- }
+ "_cc_toolchain": attr.label(default = Label("//tools/cpp:current_cc_toolchain")),
+ },
)
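Because the rule above surfaces the configured compiler as a feature flag value, it can drive a config_setting; a hedged sketch (target names and the compiler string are assumptions):

    load("//tools/cpp:compiler_flag.bzl", "compiler_flag")

    compiler_flag(name = "compiler")

    # Matches when the C++ toolchain reports "clang" as its compiler.
    config_setting(
        name = "clang_build",
        flag_values = {":compiler": "clang"},
    )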
diff --git a/tools/cpp/crosstool_lib.bzl b/tools/cpp/crosstool_lib.bzl
index a6c4916947..67db4c5237 100644
--- a/tools/cpp/crosstool_lib.bzl
+++ b/tools/cpp/crosstool_lib.bzl
@@ -14,400 +14,458 @@
# limitations under the License.
"""Library of common crosstool features."""
-load("@bazel_tools//tools/cpp:crosstool_utils.bzl",
- "feature",
- "simple_feature",
- "flag_set",
- "flag_group",
- "flags",
- "COMPILE_ACTIONS",
- "LINK_ACTIONS",
- "ARCHIVE_ACTIONS")
-
+load(
+ "@bazel_tools//tools/cpp:crosstool_utils.bzl",
+ "ARCHIVE_ACTIONS",
+ "COMPILE_ACTIONS",
+ "LINK_ACTIONS",
+ "feature",
+ "flag_group",
+ "flag_set",
+ "flags",
+ "simple_feature",
+)
def get_features_to_appear_first(platform):
- """Returns standard features that should appear in the top of the toolchain.
+    """Returns standard features that should appear at the top of the toolchain.
- Args:
- platform: one of [ k8, darwin, msvc ]
+ Args:
+ platform: one of [ k8, darwin, msvc ]
- Returns:
- a collection of features to be put into crosstool
- """
- return [
- simple_feature("no_legacy_features", [], []),
- simple_feature(
- "legacy_compile_flags",
- COMPILE_ACTIONS,
- ["%{legacy_compile_flags}"],
- expand_if_all_available=["legacy_compile_flags"],
- iterate_over="legacy_compile_flags"),
- simple_feature(
- "dependency_file",
- COMPILE_ACTIONS,
- ["-MD", "-MF", "%{dependency_file}"],
- expand_if_all_available=["dependency_file"]),
- simple_feature(
- "random_seed",
- COMPILE_ACTIONS,
- ["-frandom-seed=%{output_file}"]),
- simple_feature(
- "pic",
- COMPILE_ACTIONS,
- ["-fPIC"],
- expand_if_all_available=["pic"]),
- simple_feature(
- "per_object_debug_info",
- COMPILE_ACTIONS,
- ["-gsplit-dwarf"],
- expand_if_all_available=["per_object_debug_info_file"]),
- simple_feature(
- "preprocessor_defines",
- COMPILE_ACTIONS,
- ["-D%{preprocessor_defines}"],
- iterate_over="preprocessor_defines",
- expand_if_all_available=["preprocessor_defines"]),
- simple_feature(
- "includes",
- COMPILE_ACTIONS,
- ["-include", "%{includes}"],
- iterate_over="includes",
- expand_if_all_available=["includes"]),
- simple_feature(
- "quote_include_paths",
- COMPILE_ACTIONS,
- ["-iquote", "%{quote_include_paths}"],
- iterate_over="quote_include_paths",
- expand_if_all_available=["quote_include_paths"]),
- simple_feature(
- "include_paths",
- COMPILE_ACTIONS,
- ["-I%{include_paths}"],
- iterate_over="include_paths",
- expand_if_all_available=["include_paths"]),
- simple_feature(
- "system_include_paths",
- COMPILE_ACTIONS,
- ["-isystem", "%{system_include_paths}"],
- iterate_over="system_include_paths",
- expand_if_all_available=["system_include_paths"]),
- simple_feature(
- "symbol_counts",
- LINK_ACTIONS,
- ["-Wl,--print-symbol-counts=%{symbol_counts_output}"],
- expand_if_all_available=["symbol_counts_output"]),
- simple_feature(
- "shared_flag",
- LINK_ACTIONS,
- ["-shared"],
- expand_if_all_available=["symbol_counts_output"]),
- simple_feature(
- "output_execpath_flags",
- LINK_ACTIONS,
- ["-o", "%{output_execpath}"],
- expand_if_all_available=["output_execpath"]),
- simple_feature(
- "runtime_library_search_directories",
- LINK_ACTIONS,
- [_runtime_library_directory_flag(platform)],
- iterate_over="runtime_library_search_directories",
- expand_if_all_available=["runtime_library_search_directories"]),
- simple_feature(
- "library_search_directories",
- LINK_ACTIONS,
- ["-L%{library_search_directories}"],
- iterate_over="library_search_directories",
- expand_if_all_available=["library_search_directories"]),
- simple_feature("_archiver_flags", ARCHIVE_ACTIONS, _archiver_flags(platform)),
- feature(
- "libraries_to_link", [
- flag_set(ARCHIVE_ACTIONS, [
- flag_group([
+ Returns:
+ a collection of features to be put into crosstool
+ """
+ return [
+ simple_feature("no_legacy_features", [], []),
+ simple_feature(
+ "legacy_compile_flags",
+ COMPILE_ACTIONS,
+ ["%{legacy_compile_flags}"],
+ expand_if_all_available = ["legacy_compile_flags"],
+ iterate_over = "legacy_compile_flags",
+ ),
+ simple_feature(
+ "dependency_file",
+ COMPILE_ACTIONS,
+ ["-MD", "-MF", "%{dependency_file}"],
+ expand_if_all_available = ["dependency_file"],
+ ),
+ simple_feature(
+ "random_seed",
+ COMPILE_ACTIONS,
+ ["-frandom-seed=%{output_file}"],
+ ),
+ simple_feature(
+ "pic",
+ COMPILE_ACTIONS,
+ ["-fPIC"],
+ expand_if_all_available = ["pic"],
+ ),
+ simple_feature(
+ "per_object_debug_info",
+ COMPILE_ACTIONS,
+ ["-gsplit-dwarf"],
+ expand_if_all_available = ["per_object_debug_info_file"],
+ ),
+ simple_feature(
+ "preprocessor_defines",
+ COMPILE_ACTIONS,
+ ["-D%{preprocessor_defines}"],
+ iterate_over = "preprocessor_defines",
+ expand_if_all_available = ["preprocessor_defines"],
+ ),
+ simple_feature(
+ "includes",
+ COMPILE_ACTIONS,
+ ["-include", "%{includes}"],
+ iterate_over = "includes",
+ expand_if_all_available = ["includes"],
+ ),
+ simple_feature(
+ "quote_include_paths",
+ COMPILE_ACTIONS,
+ ["-iquote", "%{quote_include_paths}"],
+ iterate_over = "quote_include_paths",
+ expand_if_all_available = ["quote_include_paths"],
+ ),
+ simple_feature(
+ "include_paths",
+ COMPILE_ACTIONS,
+ ["-I%{include_paths}"],
+ iterate_over = "include_paths",
+ expand_if_all_available = ["include_paths"],
+ ),
+ simple_feature(
+ "system_include_paths",
+ COMPILE_ACTIONS,
+ ["-isystem", "%{system_include_paths}"],
+ iterate_over = "system_include_paths",
+ expand_if_all_available = ["system_include_paths"],
+ ),
+ simple_feature(
+ "symbol_counts",
+ LINK_ACTIONS,
+ ["-Wl,--print-symbol-counts=%{symbol_counts_output}"],
+ expand_if_all_available = ["symbol_counts_output"],
+ ),
+ simple_feature(
+ "shared_flag",
+ LINK_ACTIONS,
+ ["-shared"],
+ expand_if_all_available = ["symbol_counts_output"],
+ ),
+ simple_feature(
+ "output_execpath_flags",
+ LINK_ACTIONS,
+ ["-o", "%{output_execpath}"],
+ expand_if_all_available = ["output_execpath"],
+ ),
+ simple_feature(
+ "runtime_library_search_directories",
+ LINK_ACTIONS,
+ [_runtime_library_directory_flag(platform)],
+ iterate_over = "runtime_library_search_directories",
+ expand_if_all_available = ["runtime_library_search_directories"],
+ ),
+ simple_feature(
+ "library_search_directories",
+ LINK_ACTIONS,
+ ["-L%{library_search_directories}"],
+ iterate_over = "library_search_directories",
+ expand_if_all_available = ["library_search_directories"],
+ ),
+ simple_feature("_archiver_flags", ARCHIVE_ACTIONS, _archiver_flags(platform)),
+ feature(
+ "libraries_to_link",
+ [
+ flag_set(ARCHIVE_ACTIONS, [
flag_group(
- flags("%{libraries_to_link.name}"),
- expand_if_equal=[["libraries_to_link.type", "object_file"]]),
+ [
+ flag_group(
+ flags("%{libraries_to_link.name}"),
+ expand_if_equal = [["libraries_to_link.type", "object_file"]],
+ ),
+ flag_group(
+ flags("%{libraries_to_link.object_files}"),
+ expand_if_equal = [["libraries_to_link.type", "object_file_group"]],
+ iterate_over = "libraries_to_link.object_files",
+ ),
+ ],
+ iterate_over = "libraries_to_link",
+ expand_if_all_available = ["libraries_to_link"],
+ ),
+ ]),
+ flag_set(LINK_ACTIONS, [
flag_group(
- flags("%{libraries_to_link.object_files}"),
- expand_if_equal=[["libraries_to_link.type", "object_file_group"]],
- iterate_over="libraries_to_link.object_files"),
- ],
- iterate_over="libraries_to_link",
- expand_if_all_available=["libraries_to_link"])
- ]),
- flag_set(LINK_ACTIONS, [
- flag_group([
- flag_group(
- flags("-Wl,--start-lib"),
- expand_if_equal=[["libraries_to_link.type", "object_file_group"]]
- ),
- ] +
- _libraries_to_link_flag_groupss(platform) + [
- flag_group(
- flags("-Wl,--end-lib"),
- expand_if_equal=[["libraries_to_link.type", "object_file_group"]]
- ),
- ],
- iterate_over="libraries_to_link"
- ),
- flag_group(flags("-Wl,@%{thinlto_param_file}"), expand_if_true=["thinlto_param_file"])
- ])
- ]),
- simple_feature(
- "force_pic_flags",
- ["c++-link-executable"],
- ["-pie"],
- expand_if_all_available=["force_pic"]),
- simple_feature(
- "user_link_flags",
- LINK_ACTIONS,
- ["%{user_link_flags}"],
- iterate_over="user_link_flags",
- expand_if_all_available=["user_link_flags"]),
- simple_feature(
- "legacy_link_flags",
- LINK_ACTIONS,
- ["%{legacy_link_flags}"],
- iterate_over="legacy_link_flags",
- expand_if_all_available=["legacy_link_flags"]),
- simple_feature(
- "fission_support",
- LINK_ACTIONS,
- ["-Wl,--gdb-index"],
- expand_if_all_available=["is_using_fission"]),
- simple_feature(
- "strip_debug_symbols",
- LINK_ACTIONS,
- ["-Wl,-S"],
- expand_if_all_available=["strip_debug_symbols"]),
- _coverage_feature(platform),
- simple_feature("strip_flags", ["strip"], _strip_flags(platform)),
+ [
+ flag_group(
+ flags("-Wl,--start-lib"),
+ expand_if_equal = [["libraries_to_link.type", "object_file_group"]],
+ ),
+ ] +
+ _libraries_to_link_flag_groupss(platform) + [
+ flag_group(
+ flags("-Wl,--end-lib"),
+ expand_if_equal = [["libraries_to_link.type", "object_file_group"]],
+ ),
+ ],
+ iterate_over = "libraries_to_link",
+ ),
+ flag_group(flags("-Wl,@%{thinlto_param_file}"), expand_if_true = ["thinlto_param_file"]),
+ ]),
+ ],
+ ),
+ simple_feature(
+ "force_pic_flags",
+ ["c++-link-executable"],
+ ["-pie"],
+ expand_if_all_available = ["force_pic"],
+ ),
+ simple_feature(
+ "user_link_flags",
+ LINK_ACTIONS,
+ ["%{user_link_flags}"],
+ iterate_over = "user_link_flags",
+ expand_if_all_available = ["user_link_flags"],
+ ),
+ simple_feature(
+ "legacy_link_flags",
+ LINK_ACTIONS,
+ ["%{legacy_link_flags}"],
+ iterate_over = "legacy_link_flags",
+ expand_if_all_available = ["legacy_link_flags"],
+ ),
+ simple_feature(
+ "fission_support",
+ LINK_ACTIONS,
+ ["-Wl,--gdb-index"],
+ expand_if_all_available = ["is_using_fission"],
+ ),
+ simple_feature(
+ "strip_debug_symbols",
+ LINK_ACTIONS,
+ ["-Wl,-S"],
+ expand_if_all_available = ["strip_debug_symbols"],
+ ),
+ _coverage_feature(platform),
+ simple_feature("strip_flags", ["strip"], _strip_flags(platform)),
]
-
def get_features_to_appear_last(platform):
- """Returns standard features that should appear at the end of the toolchain.
+ """Returns standard features that should appear at the end of the toolchain.
- Args:
- platform: one of [ k8, darwin, msvc ]
-
- Returns:
- a collection of features to be put into crosstool
- """
- return [
- simple_feature(
- "user_compile_flags",
- COMPILE_ACTIONS,
- ["%{user_compile_flags}"],
- expand_if_all_available=["user_compile_flags"],
- iterate_over="user_compile_flags"),
- simple_feature(
- "sysroot",
- COMPILE_ACTIONS + LINK_ACTIONS,
- ["--sysroot=%{sysroot}"],
- expand_if_all_available=["sysroot"]),
- simple_feature(
- "unfiltered_compile_flags",
- COMPILE_ACTIONS,
- ["%{unfiltered_compile_flags}"],
- expand_if_all_available=["unfiltered_compile_flags"],
- iterate_over="unfiltered_compile_flags"),
- simple_feature(
- "linker_param_file",
- LINK_ACTIONS,
- [_linker_param_file_flag(platform)],
- expand_if_all_available=["linker_param_file"]),
- simple_feature(
- "archiver_param_file",
- ARCHIVE_ACTIONS,
- [_archiver_param_file_flag(platform)],
- expand_if_all_available=["linker_param_file"]),
- simple_feature(
- "compiler_input_flags",
- COMPILE_ACTIONS,
- ["-c", "%{source_file}"],
- expand_if_all_available=["source_file"]),
- feature(
- "compiler_output_flags", [
- flag_set(COMPILE_ACTIONS,[
- flag_group(
- flags("-S"),
- expand_if_all_available=["output_assembly_file"],
- ),
- flag_group(
- flags("-E"),
- expand_if_all_available=["output_preprocess_file"],
- ),
- flag_group(
- flags("-o", "%{output_file}"),
- expand_if_all_available=["output_file"],
- ),
- ])
- ]
- ),
- ]
+ Args:
+        platform: one of [ k8, darwin, msvc ]
+
+ Returns:
+ a collection of features to be put into crosstool
+ """
+ return [
+ simple_feature(
+ "user_compile_flags",
+ COMPILE_ACTIONS,
+ ["%{user_compile_flags}"],
+ expand_if_all_available = ["user_compile_flags"],
+ iterate_over = "user_compile_flags",
+ ),
+ simple_feature(
+ "sysroot",
+ COMPILE_ACTIONS + LINK_ACTIONS,
+ ["--sysroot=%{sysroot}"],
+ expand_if_all_available = ["sysroot"],
+ ),
+ simple_feature(
+ "unfiltered_compile_flags",
+ COMPILE_ACTIONS,
+ ["%{unfiltered_compile_flags}"],
+ expand_if_all_available = ["unfiltered_compile_flags"],
+ iterate_over = "unfiltered_compile_flags",
+ ),
+ simple_feature(
+ "linker_param_file",
+ LINK_ACTIONS,
+ [_linker_param_file_flag(platform)],
+ expand_if_all_available = ["linker_param_file"],
+ ),
+ simple_feature(
+ "archiver_param_file",
+ ARCHIVE_ACTIONS,
+ [_archiver_param_file_flag(platform)],
+ expand_if_all_available = ["linker_param_file"],
+ ),
+ simple_feature(
+ "compiler_input_flags",
+ COMPILE_ACTIONS,
+ ["-c", "%{source_file}"],
+ expand_if_all_available = ["source_file"],
+ ),
+ feature(
+ "compiler_output_flags",
+ [
+ flag_set(COMPILE_ACTIONS, [
+ flag_group(
+ flags("-S"),
+ expand_if_all_available = ["output_assembly_file"],
+ ),
+ flag_group(
+ flags("-E"),
+ expand_if_all_available = ["output_preprocess_file"],
+ ),
+ flag_group(
+ flags("-o", "%{output_file}"),
+ expand_if_all_available = ["output_file"],
+ ),
+ ]),
+ ],
+ ),
+ ]
def _is_linux(platform):
- return platform == "k8"
-
+ return platform == "k8"
def _is_darwin(platform):
- return platform == "darwin"
-
+ return platform == "darwin"
def _is_msvc(platform):
- return platform == "msvc"
-
+ return platform == "msvc"
def _coverage_feature(use_llvm_format):
- if use_llvm_format:
- compile_flags = flags("-fprofile-instr-generate", "-fcoverage-mapping")
- link_flags = flags("-fprofile-instr-generate")
- else:
- compile_flags = flags("-fprofile-arcs", "-ftest-coverage")
- link_flags = flags("--coverage")
- return feature(
- "coverage",
- [
- flag_set(COMPILE_ACTIONS, [ flag_group(compile_flags) ]),
- flag_set(LINK_ACTIONS, [ flag_group(link_flags) ]),
- ],
- enabled = False,
- provides = "profile")
-
+ if use_llvm_format:
+ compile_flags = flags("-fprofile-instr-generate", "-fcoverage-mapping")
+ link_flags = flags("-fprofile-instr-generate")
+ else:
+ compile_flags = flags("-fprofile-arcs", "-ftest-coverage")
+ link_flags = flags("--coverage")
+ return feature(
+ "coverage",
+ [
+ flag_set(COMPILE_ACTIONS, [flag_group(compile_flags)]),
+ flag_set(LINK_ACTIONS, [flag_group(link_flags)]),
+ ],
+ enabled = False,
+ provides = "profile",
+ )
def _runtime_library_directory_flag(platform):
- if _is_linux(platform):
- return "-Wl,-rpath,$ORIGIN/%{runtime_library_search_directories}"
- elif _is_darwin(platform):
- return "-Wl,-rpath,@loader_path/%{runtime_library_search_directories}"
- elif _is_msvc(platform):
- fail("todo")
- else:
- fail("Unsupported platform: " + platform)
-
+ if _is_linux(platform):
+ return "-Wl,-rpath,$ORIGIN/%{runtime_library_search_directories}"
+ elif _is_darwin(platform):
+ return "-Wl,-rpath,@loader_path/%{runtime_library_search_directories}"
+ elif _is_msvc(platform):
+ fail("todo")
+ else:
+ fail("Unsupported platform: " + platform)
def _archiver_flags(platform):
- if _is_linux(platform):
- return ["rcsD", "%{output_execpath}"]
- elif _is_darwin(platform):
- return ["-static", "-s", "-o", "%{output_execpath}"]
- elif _is_msvc(platform):
- fail("todo")
- else:
- fail("Unsupported platform: " + platform)
-
+ if _is_linux(platform):
+ return ["rcsD", "%{output_execpath}"]
+ elif _is_darwin(platform):
+ return ["-static", "-s", "-o", "%{output_execpath}"]
+ elif _is_msvc(platform):
+ fail("todo")
+ else:
+ fail("Unsupported platform: " + platform)
def _library_to_link_with_worce_load(variable_type, variable, flag = "", iterate = False):
- return [
- flag_group([
- flag_group(
- flags(
- "-Wl,-force_load," + flag + "%{" + variable + "}",
- expand_if_true=["libraries_to_link.is_whole_archive"])),
- flag_group(
- flags(
- flag + "%{" + variable + "}",
- expand_if_false=["libraries_to_link.is_whole_archive"])),
- ],
- iterate_over=variable if iterate else None,
- expand_if_equal=[["libraries_to_link.type", variable_type]]),
- ]
-
-
-def _libraries_to_link_flag_groupss(platform):
- if _is_linux(platform):
- return [
- flag_group(
- flags("-Wl,-whole-archive"),
- expand_if_true=["libraries_to_link.is_whole_archive"]),
- flag_group(
- flags("-Wl,--start-lib"),
- expand_if_equal=[["libraries_to_link.type", "object_file_group"]]),
- flag_group(
- flags("%{libraries_to_link.object_files}"),
- iterate_over="libraries_to_link.object_files",
- expand_if_equal=[["libraries_to_link.type", "object_file_group"]]),
- flag_group(
- flags("-Wl,--end-lib"),
- expand_if_equal=[["libraries_to_link.type", "object_file_group"]]),
- flag_group(
- flags("%{libraries_to_link.name}"),
- expand_if_equal=[["libraries_to_link.type", "object_file"]]),
- flag_group(
- flags("%{libraries_to_link.name}"),
- expand_if_equal=[["libraries_to_link.type", "interface_library"]]),
- flag_group(
- flags("%{libraries_to_link.name}"),
- expand_if_equal=[["libraries_to_link.type", "static_library"]]),
- flag_group(
- flags("-l%{libraries_to_link.name}"),
- expand_if_equal=[["libraries_to_link.type", "dynamic_library"]]),
- flag_group(
- flags("-l:%{libraries_to_link.name}"),
- expand_if_equal=[["libraries_to_link.type", "versioned_dynamic_library"]]),
- flag_group(
- flags("-Wl,-no-whole-archive"),
- expand_if_true=["libraries_to_link.is_whole_archive"]),
- ]
- if _is_darwin(platform):
return [
flag_group(
- flags("-Wl,--start-lib"),
- expand_if_equal=[["libraries_to_link.type", "object_file_group"]]),
- _library_to_link_with_worce_load(
- "object_file_group", "libraries_to_link.object_files", iterate = True),
- flag_group(
- flags("-Wl,--end-lib"),
- expand_if_equal=[["libraries_to_link.type", "object_file_group"]]),
- _library_to_link_with_worce_load("object_file", "libraries_to_link.name"),
- _library_to_link_with_worce_load("interface_library", "libraries_to_link.name"),
- _library_to_link_with_worce_load("static_library", "libraries_to_link.name"),
- _library_to_link_with_worce_load("dynamic_library", "libraries_to_link.name", flag="-l"),
- _library_to_link_with_worce_load("versioned_dynamic_library", "libraries_to_link.name", flag="-l:"),
+ [
+ flag_group(
+ flags(
+ "-Wl,-force_load," + flag + "%{" + variable + "}",
+ expand_if_true = ["libraries_to_link.is_whole_archive"],
+ ),
+ ),
+ flag_group(
+ flags(
+ flag + "%{" + variable + "}",
+ expand_if_false = ["libraries_to_link.is_whole_archive"],
+ ),
+ ),
+ ],
+ iterate_over = variable if iterate else None,
+ expand_if_equal = [["libraries_to_link.type", variable_type]],
+ ),
]
- elif _is_msvc(platform):
- fail("todo")
- else:
- fail("Unsupported platform: " + platform)
+def _libraries_to_link_flag_groupss(platform):
+ if _is_linux(platform):
+ return [
+ flag_group(
+ flags("-Wl,-whole-archive"),
+ expand_if_true = ["libraries_to_link.is_whole_archive"],
+ ),
+ flag_group(
+ flags("-Wl,--start-lib"),
+ expand_if_equal = [["libraries_to_link.type", "object_file_group"]],
+ ),
+ flag_group(
+ flags("%{libraries_to_link.object_files}"),
+ iterate_over = "libraries_to_link.object_files",
+ expand_if_equal = [["libraries_to_link.type", "object_file_group"]],
+ ),
+ flag_group(
+ flags("-Wl,--end-lib"),
+ expand_if_equal = [["libraries_to_link.type", "object_file_group"]],
+ ),
+ flag_group(
+ flags("%{libraries_to_link.name}"),
+ expand_if_equal = [["libraries_to_link.type", "object_file"]],
+ ),
+ flag_group(
+ flags("%{libraries_to_link.name}"),
+ expand_if_equal = [["libraries_to_link.type", "interface_library"]],
+ ),
+ flag_group(
+ flags("%{libraries_to_link.name}"),
+ expand_if_equal = [["libraries_to_link.type", "static_library"]],
+ ),
+ flag_group(
+ flags("-l%{libraries_to_link.name}"),
+ expand_if_equal = [["libraries_to_link.type", "dynamic_library"]],
+ ),
+ flag_group(
+ flags("-l:%{libraries_to_link.name}"),
+ expand_if_equal = [["libraries_to_link.type", "versioned_dynamic_library"]],
+ ),
+ flag_group(
+ flags("-Wl,-no-whole-archive"),
+ expand_if_true = ["libraries_to_link.is_whole_archive"],
+ ),
+ ]
+ if _is_darwin(platform):
+ return [
+ flag_group(
+ flags("-Wl,--start-lib"),
+ expand_if_equal = [["libraries_to_link.type", "object_file_group"]],
+ ),
+ _library_to_link_with_worce_load(
+ "object_file_group",
+ "libraries_to_link.object_files",
+ iterate = True,
+ ),
+ flag_group(
+ flags("-Wl,--end-lib"),
+ expand_if_equal = [["libraries_to_link.type", "object_file_group"]],
+ ),
+ _library_to_link_with_worce_load("object_file", "libraries_to_link.name"),
+ _library_to_link_with_worce_load("interface_library", "libraries_to_link.name"),
+ _library_to_link_with_worce_load("static_library", "libraries_to_link.name"),
+ _library_to_link_with_worce_load("dynamic_library", "libraries_to_link.name", flag = "-l"),
+ _library_to_link_with_worce_load("versioned_dynamic_library", "libraries_to_link.name", flag = "-l:"),
+ ]
+ elif _is_msvc(platform):
+ fail("todo")
+ else:
+ fail("Unsupported platform: " + platform)
def _strip_flags(platform):
- if _is_linux(platform):
- return [ "-S", "-p", "-o", "%{output_file}",
- "-R", ".gnu.switches.text.quote_paths",
- "-R", ".gnu.switches.text.bracket_paths",
- "-R", ".gnu.switches.text.system_paths",
- "-R", ".gnu.switches.text.cpp_defines",
- "-R", ".gnu.switches.text.cpp_includes",
- "-R", ".gnu.switches.text.cl_args",
- "-R", ".gnu.switches.text.lipo_info",
- "-R", ".gnu.switches.text.annotation", ]
- elif _is_darwin(platform):
- return ["-S", "-o", "%{output_file}"]
- elif _is_msvc(platform):
- fail("todo")
- else:
- fail("Unsupported platform: " + platform)
-
+ if _is_linux(platform):
+ return [
+ "-S",
+ "-p",
+ "-o",
+ "%{output_file}",
+ "-R",
+ ".gnu.switches.text.quote_paths",
+ "-R",
+ ".gnu.switches.text.bracket_paths",
+ "-R",
+ ".gnu.switches.text.system_paths",
+ "-R",
+ ".gnu.switches.text.cpp_defines",
+ "-R",
+ ".gnu.switches.text.cpp_includes",
+ "-R",
+ ".gnu.switches.text.cl_args",
+ "-R",
+ ".gnu.switches.text.lipo_info",
+ "-R",
+ ".gnu.switches.text.annotation",
+ ]
+ elif _is_darwin(platform):
+ return ["-S", "-o", "%{output_file}"]
+ elif _is_msvc(platform):
+ fail("todo")
+ else:
+ fail("Unsupported platform: " + platform)
def _linker_param_file_flag(platform):
- if _is_linux(platform):
- return "-Wl,@%{linker_param_file}"
- elif _is_darwin(platform):
- return "-Wl,@%{linker_param_file}"
- elif _is_msvc(platform):
- fail("todo")
- else:
- fail("Unsupported platform: " + platform)
-
+ if _is_linux(platform):
+ return "-Wl,@%{linker_param_file}"
+ elif _is_darwin(platform):
+ return "-Wl,@%{linker_param_file}"
+ elif _is_msvc(platform):
+ fail("todo")
+ else:
+ fail("Unsupported platform: " + platform)
def _archiver_param_file_flag(platform):
- if _is_linux(platform):
- return "@%{linker_param_file}"
- elif _is_darwin(platform):
- return "@%{linker_param_file}"
- elif _is_msvc(platform):
- fail("todo")
- else:
- fail("Unsupported platform: " + platform)
+ if _is_linux(platform):
+ return "@%{linker_param_file}"
+ elif _is_darwin(platform):
+ return "@%{linker_param_file}"
+ elif _is_msvc(platform):
+ fail("todo")
+ else:
+ fail("Unsupported platform: " + platform)
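A hedged sketch of how these two helpers compose when generating a CROSSTOOL; the writer function and the middle section are invented for illustration:

    load(
        "@bazel_tools//tools/cpp:crosstool_lib.bzl",
        "get_features_to_appear_first",
        "get_features_to_appear_last",
    )

    def _toolchain_features_text(platform):
        # Both helpers return lists of "feature { ... }" message strings; a
        # toolchain author concatenates platform-specific features between them.
        parts = get_features_to_appear_first(platform)
        # parts += _my_platform_features(platform)  # hypothetical, not defined here
        parts += get_features_to_appear_last(platform)
        return "".join(parts)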
diff --git a/tools/cpp/crosstool_utils.bzl b/tools/cpp/crosstool_utils.bzl
index 238d0cff6d..8ddeb86259 100644
--- a/tools/cpp/crosstool_utils.bzl
+++ b/tools/cpp/crosstool_utils.bzl
@@ -43,233 +43,242 @@ ARCHIVE_ACTIONS = [
# All remaining actions used by C++ rules that are configured in the CROSSTOOL
OTHER_ACTIONS = [
- "strip"
+ "strip",
]
-
def action_config(action_name, tool_path):
- """Emit action_config message.
-
- Examples:
- action_config("c-compile", "/usr/bin/gcc") ->
- action_config {
- config_name: 'c-compile'
- action_name: 'c-compile'
- tool {
- tool_path: '/usr/bin/gcc'
+ """Emit action_config message.
+
+ Examples:
+ action_config("c-compile", "/usr/bin/gcc") ->
+ action_config {
+ config_name: 'c-compile'
+ action_name: 'c-compile'
+ tool {
+ tool_path: '/usr/bin/gcc'
+ }
}
- }
-
- Args:
- action_name: name of the action
- tool_path: absolute or CROSSTOOL-relative path to the tool
-
- Returns:
- a string to be placed into the CROSSTOOL
- """
- if action_name == None or action_name == "":
- fail("action_name must be present")
- if tool_path == None or tool_path == "":
- fail("tool_path must be present")
- return """
+
+ Args:
+ action_name: name of the action
+ tool_path: absolute or CROSSTOOL-relative path to the tool
+
+ Returns:
+ a string to be placed into the CROSSTOOL
+ """
+ if action_name == None or action_name == "":
+ fail("action_name must be present")
+ if tool_path == None or tool_path == "":
+ fail("tool_path must be present")
+ return """
action_config {{
config_name: '{action_name}'
action_name: '{action_name}'
tool {{
tool_path: '{tool_path}'
}}
- }}""".format(action_name=action_name, tool_path=tool_path)
-
+ }}""".format(action_name = action_name, tool_path = tool_path)
def feature(name, flag_sets, enabled = True, provides = None):
- """Emit feature message.
-
- Examples:
- feature("fully_static_link", flag_sets, enabled = False) ->
- feature {
- name: 'fully_static_link'
- enabled = false
- <flags_sets>
- }
-
- Args:
- name: name of the feature
- flag_sets: a collection of flag_set messages
- enabled: whether this feature is turned on by default
- provides: a symbol this feature provides, used to implement mutually incompatible features
-
- Returns:
- a string to be placed into the CROSSTOOL
- """
- if name == None or name == "":
- fail("feature name must be present")
- return """
+ """Emit feature message.
+
+ Examples:
+ feature("fully_static_link", flag_sets, enabled = False) ->
+ feature {
+ name: 'fully_static_link'
+ enabled = false
+ <flags_sets>
+ }
+
+ Args:
+ name: name of the feature
+ flag_sets: a collection of flag_set messages
+ enabled: whether this feature is turned on by default
+ provides: a symbol this feature provides, used to implement mutually incompatible features
+
+ Returns:
+ a string to be placed into the CROSSTOOL
+ """
+ if name == None or name == "":
+ fail("feature name must be present")
+ return """
feature {{
name: '{name}'
enabled: {enabled}{provides}{flag_sets}
}}""".format(
- provides=("\n provides: '%s'" % provides if provides != None else ""),
- name=name,
- enabled=_to_proto_value(enabled),
- flag_sets="".join(flag_sets))
+ provides = ("\n provides: '%s'" % provides if provides != None else ""),
+ name = name,
+ enabled = _to_proto_value(enabled),
+ flag_sets = "".join(flag_sets),
+ )
+def simple_feature(
+ name,
+ actions,
+ flags,
+ enabled = True,
+ provides = None,
+ expand_if_all_available = [],
+ iterate_over = None):
+ """Sugar for emitting simple feature message.
+
+ Examples:
+ simple_feature("foo", ['c-compile'], flags("-foo")) ->
+ feature {
+ name: 'foo'
+ flag_set {
+ action: 'c-compile'
+ flag_group {
+ flag: '-foo'
+ }
+ }
+ }
-def simple_feature(name, actions, flags, enabled = True, provides = None,
- expand_if_all_available = [], iterate_over = None):
- """Sugar for emitting simple feature message.
+ Args:
+ name: name of the feature
+ actions: for which actions should flags be emitted
+ flags: a collection of flag messages
+ enabled: whether this feature is turned on by default
+ provides: a symbol this feature provides, used to implement mutually incompatible features
+ expand_if_all_available: specify which build variables need to be present
+ for this group to be expanded
+ iterate_over: expand this flag_group for every item in the build variable
+
+ Returns:
+ a string to be placed into the CROSSTOOL
+ """
+ if len(flags) == 0:
+ return feature(name, [])
+ else:
+ return feature(
+ name,
+ [flag_set(
+ actions,
+ [flag_group(
+ [flag(f) for f in flags],
+ iterate_over = iterate_over,
+ expand_if_all_available = expand_if_all_available,
+ )],
+ )],
+ enabled = enabled,
+ provides = provides,
+ )
+
+def flag_set(actions, flag_groups):
+ """Emit flag_set message.
- Examples:
- simple_feature("foo", ['c-compile'], flags("-foo")) ->
- feature {
- name: 'foo'
+ Examples:
+ flag_set(['c-compile'], flag_groups) ->
flag_set {
action: 'c-compile'
- flag_group {
- flag: '-foo'
- }
+ <flag_groups>
}
- }
-
- Args:
- name: name of the feature
- actions: for which actions should flags be emitted
- flags: a collection of flag messages
- enabled: whether this feature is turned on by default
- provides: a symbol this feature provides, used to implement mutually incompatible features
- expand_if_all_available: specify which build variables need to be present
- for this group to be expanded
- iterate_over: expand this flag_group for every item in the build variable
-
- Returns:
- a string to be placed into the CROSSTOOL
- """
- if len(flags) == 0:
- return feature(name, [])
- else:
- return feature(
- name,
- [flag_set(
- actions,
- [flag_group(
- [flag(f) for f in flags],
- iterate_over=iterate_over,
- expand_if_all_available=expand_if_all_available)])],
- enabled = enabled,
- provides = provides)
-
-def flag_set(actions, flag_groups):
- """Emit flag_set message.
-
- Examples:
- flag_set(['c-compile'], flag_groups) ->
- flag_set {
- action: 'c-compile'
- <flag_groups>
- }
-
- Args:
- actions: for which actions should flags be emitted
- flag_groups: a collection of flag_group messages
-
- Returns:
- a string to be placed into the CROSSTOOL
- """
- if actions == None or len(actions) == 0:
- fail("empty actions list is not allowed for flag_set")
- if flag_groups == None or len(flag_groups) == 0:
- fail("empty flag_groups list is not allowed for flag_set")
- actions_string = ""
- for action in actions: actions_string += "\n action: '%s'" % action
-
- return """
+ Args:
+ actions: for which actions should flags be emitted
+ flag_groups: a collection of flag_group messages
+
+ Returns:
+ a string to be placed into the CROSSTOOL
+ """
+ if actions == None or len(actions) == 0:
+ fail("empty actions list is not allowed for flag_set")
+ if flag_groups == None or len(flag_groups) == 0:
+ fail("empty flag_groups list is not allowed for flag_set")
+ actions_string = ""
+ for action in actions:
+ actions_string += "\n action: '%s'" % action
+
+ return """
flag_set {{{actions}{flag_groups}
- }}""".format(actions=actions_string, flag_groups="".join(flag_groups))
-
+ }}""".format(actions = actions_string, flag_groups = "".join(flag_groups))
def flag_group(
- content, expand_if_all_available = [], expand_if_none_available = [], expand_if_true = [],
- expand_if_false = [], expand_if_equal = [], iterate_over = None):
- """Emit flag_group message.
-
- Examples:
- flag_group(flags("-foo %{output_file}"), expand_if_all_available="output_file") ->
- flag_group { expand_if_all_available: "output_file"
- flag: "-foo %{output_file}"
- }
-
- Args:
- content: a collection of flag messages or a collection of flag_group messages
- expand_if_all_available: specify which build variables need to be present
- for this group to be expanded
- expand_if_none_available: specify which build variables need to be missing
- for this group to be expanded
- expand_if_true: specify which build variables need to be truthy for this group
- to be expanded
- expand_if_false: specify which build variables need to be falsey for this group
- to be expanded
- expand_if_equal: [[var1, value1], [var2, value2]...] specify what values
- should specific build variables have for this group to be expanded
- iterate_over: expand this flag_group for every item in the build variable
-
- Returns:
- a string to be placed into the CROSSTOOL
- """
- if content == None or len(content)== 0:
- fail("flag_group without flags is not allowed")
- conditions = ""
- for var in expand_if_all_available:
- conditions += "\n expand_if_all_available: '%s'" % var
- for var in expand_if_none_available:
- conditions += "\n expand_if_none_available: '%s'" % var
- for var in expand_if_true:
- conditions += "\n expand_if_true: '%s'" % var
- for var in expand_if_false:
- conditions += "\n expand_if_false: '%s'" % var
- for var in expand_if_equal:
- conditions += "\n expand_if_equal { variable: '%s' value: '%s' }" % (var[0], var[1])
- return """
+ content,
+ expand_if_all_available = [],
+ expand_if_none_available = [],
+ expand_if_true = [],
+ expand_if_false = [],
+ expand_if_equal = [],
+ iterate_over = None):
+ """Emit flag_group message.
+
+ Examples:
+        flag_group(flags("-foo %{output_file}"), expand_if_all_available = ["output_file"]) ->
+ flag_group { expand_if_all_available: "output_file"
+ flag: "-foo %{output_file}"
+ }
+
+ Args:
+ content: a collection of flag messages or a collection of flag_group messages
+ expand_if_all_available: specify which build variables need to be present
+ for this group to be expanded
+ expand_if_none_available: specify which build variables need to be missing
+ for this group to be expanded
+ expand_if_true: specify which build variables need to be truthy for this group
+ to be expanded
+ expand_if_false: specify which build variables need to be falsey for this group
+ to be expanded
+ expand_if_equal: [[var1, value1], [var2, value2]...] specify what values
+ should specific build variables have for this group to be expanded
+ iterate_over: expand this flag_group for every item in the build variable
+
+ Returns:
+ a string to be placed into the CROSSTOOL
+ """
+ if content == None or len(content) == 0:
+ fail("flag_group without flags is not allowed")
+ conditions = ""
+ for var in expand_if_all_available:
+ conditions += "\n expand_if_all_available: '%s'" % var
+ for var in expand_if_none_available:
+ conditions += "\n expand_if_none_available: '%s'" % var
+ for var in expand_if_true:
+ conditions += "\n expand_if_true: '%s'" % var
+ for var in expand_if_false:
+ conditions += "\n expand_if_false: '%s'" % var
+ for var in expand_if_equal:
+ conditions += "\n expand_if_equal { variable: '%s' value: '%s' }" % (var[0], var[1])
+ return """
flag_group {{{conditions}{iterate_over}{content}
}}""".format(
- content="".join(content),
- iterate_over=("\n iterate_over: '%s'" % iterate_over if iterate_over != None else ""),
- conditions=conditions)
-
+ content = "".join(content),
+ iterate_over = ("\n iterate_over: '%s'" % iterate_over if iterate_over != None else ""),
+ conditions = conditions,
+ )
def flag(flag):
- """Emit flag field.
+ """Emit flag field.
- Examples:
- flag("-foo") -> flag: '-foo'
+ Examples:
+ flag("-foo") -> flag: '-foo'
- Args:
- flag: value to be emitted to the command line
-
- Returns:
- a string to be placed into the CROSSTOOL
- """
- return "\n flag: '%s'" % flag
+ Args:
+ flag: value to be emitted to the command line
+ Returns:
+ a string to be placed into the CROSSTOOL
+ """
+ return "\n flag: '%s'" % flag
def flags(*flags):
- """Sugar for emitting sequence of flag fields.
-
- Examples:
- flags("-foo", "-bar") ->
- flag: '-foo'
- flag: '-bar'
+ """Sugar for emitting sequence of flag fields.
- Args:
- *flags: values to be emitted to the command line
+ Examples:
+ flags("-foo", "-bar") ->
+ flag: '-foo'
+ flag: '-bar'
- Returns:
- a string to be placed into the CROSSTOOL
- """
- return [flag(f) for f in flags]
+ Args:
+ *flags: values to be emitted to the command line
+ Returns:
+ a string to be placed into the CROSSTOOL
+ """
+ return [flag(f) for f in flags]
def _to_proto_value(boolean):
- if boolean:
- return "true"
- else:
- return "false"
+ if boolean:
+ return "true"
+ else:
+ return "false"
diff --git a/tools/cpp/dummy_toolchain.bzl b/tools/cpp/dummy_toolchain.bzl
index c787f7315d..45c0285d23 100644
--- a/tools/cpp/dummy_toolchain.bzl
+++ b/tools/cpp/dummy_toolchain.bzl
@@ -14,10 +14,10 @@
# limitations under the License.
"""Skylark rule that stubs a toolchain."""
+
def _dummy_toolchain_impl(ctx):
- ctx = ctx # unused argument
- toolchain = platform_common.ToolchainInfo()
- return [toolchain]
+ ctx = ctx # unused argument
+ toolchain = platform_common.ToolchainInfo()
+ return [toolchain]
dummy_toolchain = rule(_dummy_toolchain_impl, attrs = {})
-
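For context, a stub toolchain like this is typically registered from a BUILD file; a hedged sketch with illustrative target names (not taken from this change):

    load("//tools/cpp:dummy_toolchain.bzl", "dummy_toolchain")

    # Bare ToolchainInfo provider, used only to satisfy toolchain resolution.
    dummy_toolchain(name = "dummy_cc_toolchain_impl")

    toolchain(
        name = "dummy_cc_toolchain",
        toolchain = ":dummy_cc_toolchain_impl",
        toolchain_type = "//tools/cpp:toolchain_type",
    )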
diff --git a/tools/cpp/lib_cc_configure.bzl b/tools/cpp/lib_cc_configure.bzl
index 899980638e..d7a46a3428 100644
--- a/tools/cpp/lib_cc_configure.bzl
+++ b/tools/cpp/lib_cc_configure.bzl
@@ -14,181 +14,185 @@
# limitations under the License.
"""Base library for configuring the C++ toolchain."""
-
def resolve_labels(repository_ctx, labels):
- """Resolves a collection of labels to their paths.
-
- Label resolution can cause the evaluation of Skylark functions to restart.
- For functions with side-effects (like the auto-configuration functions, which
- inspect the system and touch the file system), such restarts are costly.
- We cannot avoid the restarts, but we can minimize their penalty by resolving
- all labels upfront.
+ """Resolves a collection of labels to their paths.
- Among other things, doing less work on restarts can cut analysis times by
- several seconds and may also prevent tickling kernel conditions that cause
- build failures. See https://github.com/bazelbuild/bazel/issues/5196 for
- more details.
+ Label resolution can cause the evaluation of Skylark functions to restart.
+ For functions with side-effects (like the auto-configuration functions, which
+ inspect the system and touch the file system), such restarts are costly.
+ We cannot avoid the restarts, but we can minimize their penalty by resolving
+ all labels upfront.
- Args:
- repository_ctx: The context with which to resolve the labels.
- labels: Labels to be resolved expressed as a list of strings.
+ Among other things, doing less work on restarts can cut analysis times by
+ several seconds and may also prevent tickling kernel conditions that cause
+ build failures. See https://github.com/bazelbuild/bazel/issues/5196 for
+ more details.
- Returns:
- A dictionary with the labels as keys and their paths as values.
- """
- return dict([(label, repository_ctx.path(Label(label))) for label in labels])
+ Args:
+ repository_ctx: The context with which to resolve the labels.
+ labels: Labels to be resolved expressed as a list of strings.
+ Returns:
+ A dictionary with the labels as keys and their paths as values.
+ """
+ return dict([(label, repository_ctx.path(Label(label))) for label in labels])
def escape_string(arg):
- """Escape percent sign (%) in the string so it can appear in the Crosstool."""
- if arg != None:
- return str(arg).replace("%", "%%")
- else:
- return None
-
+ """Escape percent sign (%) in the string so it can appear in the Crosstool."""
+ if arg != None:
+ return str(arg).replace("%", "%%")
+ else:
+ return None
def split_escaped(string, delimiter):
- """Split string on the delimiter unless %-escaped.
-
- Examples:
- Basic usage:
- split_escaped("a:b:c", ":") -> [ "a", "b", "c" ]
-
- Delimeter that is not supposed to be splitten on has to be %-escaped:
- split_escaped("a%:b", ":") -> [ "a:b" ]
-
- Literal % can be represented by escaping it as %%:
- split_escaped("a%%b", ":") -> [ "a%b" ]
-
- Consecutive delimiters produce empty strings:
- split_escaped("a::b", ":") -> [ "a", "", "", "b" ]
-
- Args:
- string: The string to be split.
- delimiter: Non-empty string not containing %-sign to be used as a
- delimiter.
-
- Returns:
- A list of substrings.
- """
- if delimiter == "": fail("Delimiter cannot be empty")
- if delimiter.find("%") != -1: fail("Delimiter cannot contain %-sign")
-
- i = 0
- result = []
- accumulator = []
- length = len(string)
- delimiter_length = len(delimiter)
- # Iterate over the length of string since Skylark doesn't have while loops
- for _ in range(length):
- if i >= length:
- break
- if i + 2 <= length and string[i : i + 2] == "%%":
- accumulator.append("%")
- i += 2
- elif (i + 1 + delimiter_length <= length and
- string[i : i + 1 + delimiter_length] == "%" + delimiter):
- accumulator.append(delimiter)
- i += 1 + delimiter_length
- elif i + delimiter_length <= length and string[i : i + delimiter_length] == delimiter:
- result.append(''.join(accumulator))
- accumulator = []
- i += delimiter_length
- else:
- accumulator.append(string[i])
- i += 1
-
- # Append the last group still in accumulator
- result.append(''.join(accumulator))
- return result
-
+ """Split string on the delimiter unless %-escaped.
+
+ Examples:
+ Basic usage:
+ split_escaped("a:b:c", ":") -> [ "a", "b", "c" ]
+
+      Delimiter that is not supposed to be split on has to be %-escaped:
+ split_escaped("a%:b", ":") -> [ "a:b" ]
+
+ Literal % can be represented by escaping it as %%:
+ split_escaped("a%%b", ":") -> [ "a%b" ]
+
+ Consecutive delimiters produce empty strings:
+ split_escaped("a::b", ":") -> [ "a", "", "", "b" ]
+
+ Args:
+ string: The string to be split.
+ delimiter: Non-empty string not containing %-sign to be used as a
+ delimiter.
+
+ Returns:
+ A list of substrings.
+ """
+ if delimiter == "":
+ fail("Delimiter cannot be empty")
+ if delimiter.find("%") != -1:
+ fail("Delimiter cannot contain %-sign")
+
+ i = 0
+ result = []
+ accumulator = []
+ length = len(string)
+ delimiter_length = len(delimiter)
+
+ # Iterate over the length of string since Skylark doesn't have while loops
+ for _ in range(length):
+ if i >= length:
+ break
+ if i + 2 <= length and string[i:i + 2] == "%%":
+ accumulator.append("%")
+ i += 2
+ elif (i + 1 + delimiter_length <= length and
+ string[i:i + 1 + delimiter_length] == "%" + delimiter):
+ accumulator.append(delimiter)
+ i += 1 + delimiter_length
+ elif i + delimiter_length <= length and string[i:i + delimiter_length] == delimiter:
+ result.append("".join(accumulator))
+ accumulator = []
+ i += delimiter_length
+ else:
+ accumulator.append(string[i])
+ i += 1
+
+ # Append the last group still in accumulator
+ result.append("".join(accumulator))
+ return result
def auto_configure_fail(msg):
- """Output failure message when auto configuration fails."""
- red = "\033[0;31m"
- no_color = "\033[0m"
- fail("\n%sAuto-Configuration Error:%s %s\n" % (red, no_color, msg))
-
+ """Output failure message when auto configuration fails."""
+ red = "\033[0;31m"
+ no_color = "\033[0m"
+ fail("\n%sAuto-Configuration Error:%s %s\n" % (red, no_color, msg))
def auto_configure_warning(msg):
- """Output warning message during auto configuration."""
- yellow = "\033[1;33m"
- no_color = "\033[0m"
- print("\n%sAuto-Configuration Warning:%s %s\n" % (yellow, no_color, msg))
-
+ """Output warning message during auto configuration."""
+ yellow = "\033[1;33m"
+ no_color = "\033[0m"
+ print("\n%sAuto-Configuration Warning:%s %s\n" % (yellow, no_color, msg))
def get_env_var(repository_ctx, name, default = None, enable_warning = True):
- """Find an environment variable in system path. Doesn't %-escape the value!"""
- if name in repository_ctx.os.environ:
- return repository_ctx.os.environ[name]
- if default != None:
- if enable_warning:
- auto_configure_warning("'%s' environment variable is not set, using '%s' as default" % (name, default))
- return default
- auto_configure_fail("'%s' environment variable is not set" % name)
-
+ """Find an environment variable in system path. Doesn't %-escape the value!"""
+ if name in repository_ctx.os.environ:
+ return repository_ctx.os.environ[name]
+ if default != None:
+ if enable_warning:
+ auto_configure_warning("'%s' environment variable is not set, using '%s' as default" % (name, default))
+ return default
+ auto_configure_fail("'%s' environment variable is not set" % name)
def which(repository_ctx, cmd, default = None):
- """A wrapper around repository_ctx.which() to provide a fallback value. Doesn't %-escape the value!"""
- result = repository_ctx.which(cmd)
- return default if result == None else str(result)
-
+ """A wrapper around repository_ctx.which() to provide a fallback value. Doesn't %-escape the value!"""
+ result = repository_ctx.which(cmd)
+ return default if result == None else str(result)
def which_cmd(repository_ctx, cmd, default = None):
- """Find cmd in PATH using repository_ctx.which() and fail if cannot find it. Doesn't %-escape the cmd!"""
- result = repository_ctx.which(cmd)
- if result != None:
+ """Find cmd in PATH using repository_ctx.which() and fail if cannot find it. Doesn't %-escape the cmd!"""
+ result = repository_ctx.which(cmd)
+ if result != None:
+ return str(result)
+ path = get_env_var(repository_ctx, "PATH")
+ if default != None:
+ auto_configure_warning("Cannot find %s in PATH, using '%s' as default.\nPATH=%s" % (cmd, default, path))
+ return default
+ auto_configure_fail("Cannot find %s in PATH, please make sure %s is installed and add its directory in PATH.\nPATH=%s" % (cmd, cmd, path))
return str(result)
- path = get_env_var(repository_ctx, "PATH")
- if default != None:
- auto_configure_warning("Cannot find %s in PATH, using '%s' as default.\nPATH=%s" % (cmd, default, path))
- return default
- auto_configure_fail("Cannot find %s in PATH, please make sure %s is installed and add its directory in PATH.\nPATH=%s" % (cmd, cmd, path))
- return str(result)
-
-
-def execute(repository_ctx, command, environment = None,
- expect_failure = False):
- """Execute a command, return stdout if succeed and throw an error if it fails. Doesn't %-escape the result!"""
- if environment:
- result = repository_ctx.execute(command, environment = environment)
- else:
- result = repository_ctx.execute(command)
- if expect_failure != (result.return_code != 0):
- if expect_failure:
- auto_configure_fail(
- "expected failure, command %s, stderr: (%s)" % (
- command, result.stderr))
- else:
- auto_configure_fail(
- "non-zero exit code: %d, command %s, stderr: (%s)" % (
- result.return_code, command, result.stderr))
- stripped_stdout = result.stdout.strip()
- if not stripped_stdout:
- auto_configure_fail(
- "empty output from command %s, stderr: (%s)" % (command, result.stderr))
- return stripped_stdout
+def execute(
+ repository_ctx,
+ command,
+ environment = None,
+ expect_failure = False):
+ """Execute a command, return stdout if succeed and throw an error if it fails. Doesn't %-escape the result!"""
+ if environment:
+ result = repository_ctx.execute(command, environment = environment)
+ else:
+ result = repository_ctx.execute(command)
+ if expect_failure != (result.return_code != 0):
+ if expect_failure:
+ auto_configure_fail(
+ "expected failure, command %s, stderr: (%s)" % (
+ command,
+ result.stderr,
+ ),
+ )
+ else:
+ auto_configure_fail(
+ "non-zero exit code: %d, command %s, stderr: (%s)" % (
+ result.return_code,
+ command,
+ result.stderr,
+ ),
+ )
+ stripped_stdout = result.stdout.strip()
+ if not stripped_stdout:
+ auto_configure_fail(
+ "empty output from command %s, stderr: (%s)" % (command, result.stderr),
+ )
+ return stripped_stdout
def get_cpu_value(repository_ctx):
- """Compute the cpu_value based on the OS name. Doesn't %-escape the result!"""
- os_name = repository_ctx.os.name.lower()
- if os_name.startswith("mac os"):
- return "darwin"
- if os_name.find("freebsd") != -1:
- return "freebsd"
- if os_name.find("windows") != -1:
- return "x64_windows"
- # Use uname to figure out whether we are on x86_32 or x86_64
- result = repository_ctx.execute(["uname", "-m"])
- if result.stdout.strip() in ["power", "ppc64le", "ppc", "ppc64"]:
- return "ppc"
- if result.stdout.strip() in ["arm", "armv7l", "aarch64"]:
- return "arm"
- return "k8" if result.stdout.strip() in ["amd64", "x86_64", "x64"] else "piii"
-
+ """Compute the cpu_value based on the OS name. Doesn't %-escape the result!"""
+ os_name = repository_ctx.os.name.lower()
+ if os_name.startswith("mac os"):
+ return "darwin"
+ if os_name.find("freebsd") != -1:
+ return "freebsd"
+ if os_name.find("windows") != -1:
+ return "x64_windows"
+
+ # Use uname to figure out whether we are on x86_32 or x86_64
+ result = repository_ctx.execute(["uname", "-m"])
+ if result.stdout.strip() in ["power", "ppc64le", "ppc", "ppc64"]:
+ return "ppc"
+ if result.stdout.strip() in ["arm", "armv7l", "aarch64"]:
+ return "arm"
+ return "k8" if result.stdout.strip() in ["amd64", "x86_64", "x64"] else "piii"
def is_cc_configure_debug(repository_ctx):
- """Returns True if CC_CONFIGURE_DEBUG is set to 1."""
- env = repository_ctx.os.environ
- return "CC_CONFIGURE_DEBUG" in env and env["CC_CONFIGURE_DEBUG"] == "1"
+ """Returns True if CC_CONFIGURE_DEBUG is set to 1."""
+ env = repository_ctx.os.environ
+ return "CC_CONFIGURE_DEBUG" in env and env["CC_CONFIGURE_DEBUG"] == "1"
diff --git a/tools/cpp/osx_cc_configure.bzl b/tools/cpp/osx_cc_configure.bzl
index 6787141ac6..aef9e34776 100644
--- a/tools/cpp/osx_cc_configure.bzl
+++ b/tools/cpp/osx_cc_configure.bzl
@@ -15,125 +15,146 @@
"""Configuring the C++ toolchain on macOS."""
load("@bazel_tools//tools/osx:xcode_configure.bzl", "run_xcode_locator")
-
load(
"@bazel_tools//tools/cpp:lib_cc_configure.bzl",
"escape_string",
"resolve_labels",
)
-
load(
"@bazel_tools//tools/cpp:unix_cc_configure.bzl",
- "get_escaped_cxx_inc_directories",
- "get_env",
+ "configure_unix_toolchain",
"find_cc",
- "configure_unix_toolchain"
+ "get_env",
+ "get_escaped_cxx_inc_directories",
)
-
def _get_escaped_xcode_cxx_inc_directories(repository_ctx, cc, xcode_toolchains):
- """Compute the list of default C++ include paths on Xcode-enabled darwin.
-
- Args:
- repository_ctx: The repository context.
- cc: The default C++ compiler on the local system.
- xcode_toolchains: A list containing the xcode toolchains available
- Returns:
- include_paths: A list of builtin include paths.
- """
+ """Compute the list of default C++ include paths on Xcode-enabled darwin.
- # TODO(cparsons): Falling back to the default C++ compiler builtin include
- # paths shouldn't be unnecessary once all actions are using xcrun.
- include_dirs = get_escaped_cxx_inc_directories(repository_ctx, cc, "-xc++")
- for toolchain in xcode_toolchains:
- include_dirs.append(escape_string(toolchain.developer_dir))
- return include_dirs
+ Args:
+ repository_ctx: The repository context.
+ cc: The default C++ compiler on the local system.
+ xcode_toolchains: A list containing the xcode toolchains available
+ Returns:
+ include_paths: A list of builtin include paths.
+ """
+    # TODO(cparsons): Falling back to the default C++ compiler builtin include
+    # paths should be unnecessary once all actions are using xcrun.
+ include_dirs = get_escaped_cxx_inc_directories(repository_ctx, cc, "-xc++")
+ for toolchain in xcode_toolchains:
+ include_dirs.append(escape_string(toolchain.developer_dir))
+ return include_dirs
def configure_osx_toolchain(repository_ctx, overriden_tools):
- """Configure C++ toolchain on macOS."""
- paths = resolve_labels(repository_ctx, [
- "@bazel_tools//tools/cpp:osx_cc_wrapper.sh.tpl",
- "@bazel_tools//tools/objc:libtool.sh",
- "@bazel_tools//tools/objc:make_hashed_objlist.py",
- "@bazel_tools//tools/objc:xcrunwrapper.sh",
- "@bazel_tools//tools/osx/crosstool:BUILD.tpl",
- "@bazel_tools//tools/osx/crosstool:CROSSTOOL.tpl",
- "@bazel_tools//tools/osx/crosstool:osx_archs.bzl",
- "@bazel_tools//tools/osx/crosstool:wrapped_ar.tpl",
- "@bazel_tools//tools/osx/crosstool:wrapped_clang.cc",
- "@bazel_tools//tools/osx/crosstool:wrapped_clang.tpl",
- "@bazel_tools//tools/osx/crosstool:wrapped_clang_pp.tpl",
- "@bazel_tools//tools/osx:xcode_locator.m",
- ])
+ """Configure C++ toolchain on macOS."""
+ paths = resolve_labels(repository_ctx, [
+ "@bazel_tools//tools/cpp:osx_cc_wrapper.sh.tpl",
+ "@bazel_tools//tools/objc:libtool.sh",
+ "@bazel_tools//tools/objc:make_hashed_objlist.py",
+ "@bazel_tools//tools/objc:xcrunwrapper.sh",
+ "@bazel_tools//tools/osx/crosstool:BUILD.tpl",
+ "@bazel_tools//tools/osx/crosstool:CROSSTOOL.tpl",
+ "@bazel_tools//tools/osx/crosstool:osx_archs.bzl",
+ "@bazel_tools//tools/osx/crosstool:wrapped_ar.tpl",
+ "@bazel_tools//tools/osx/crosstool:wrapped_clang.cc",
+ "@bazel_tools//tools/osx/crosstool:wrapped_clang.tpl",
+ "@bazel_tools//tools/osx/crosstool:wrapped_clang_pp.tpl",
+ "@bazel_tools//tools/osx:xcode_locator.m",
+ ])
- xcode_toolchains = []
- (xcode_toolchains, xcodeloc_err) = run_xcode_locator(
- repository_ctx,
- paths["@bazel_tools//tools/osx:xcode_locator.m"])
- if xcode_toolchains:
- cc = find_cc(repository_ctx, overriden_tools = {})
- repository_ctx.template(
- "cc_wrapper.sh",
- paths["@bazel_tools//tools/cpp:osx_cc_wrapper.sh.tpl"],
- {
- "%{cc}": escape_string(str(cc)),
- "%{env}": escape_string(get_env(repository_ctx)),
- })
- repository_ctx.symlink(
- paths["@bazel_tools//tools/objc:xcrunwrapper.sh"], "xcrunwrapper.sh")
- repository_ctx.symlink(
- paths["@bazel_tools//tools/objc:libtool.sh"], "libtool")
- repository_ctx.symlink(
- paths["@bazel_tools//tools/objc:make_hashed_objlist.py"],
- "make_hashed_objlist.py")
- repository_ctx.symlink(
- paths["@bazel_tools//tools/osx/crosstool:wrapped_ar.tpl"],
- "wrapped_ar")
- repository_ctx.symlink(
- paths["@bazel_tools//tools/osx/crosstool:BUILD.tpl"],
- "BUILD")
- repository_ctx.symlink(
- paths["@bazel_tools//tools/osx/crosstool:osx_archs.bzl"],
- "osx_archs.bzl")
+ xcode_toolchains = []
+ (xcode_toolchains, xcodeloc_err) = run_xcode_locator(
+ repository_ctx,
+ paths["@bazel_tools//tools/osx:xcode_locator.m"],
+ )
+ if xcode_toolchains:
+ cc = find_cc(repository_ctx, overriden_tools = {})
+ repository_ctx.template(
+ "cc_wrapper.sh",
+ paths["@bazel_tools//tools/cpp:osx_cc_wrapper.sh.tpl"],
+ {
+ "%{cc}": escape_string(str(cc)),
+ "%{env}": escape_string(get_env(repository_ctx)),
+ },
+ )
+ repository_ctx.symlink(
+ paths["@bazel_tools//tools/objc:xcrunwrapper.sh"],
+ "xcrunwrapper.sh",
+ )
+ repository_ctx.symlink(
+ paths["@bazel_tools//tools/objc:libtool.sh"],
+ "libtool",
+ )
+ repository_ctx.symlink(
+ paths["@bazel_tools//tools/objc:make_hashed_objlist.py"],
+ "make_hashed_objlist.py",
+ )
+ repository_ctx.symlink(
+ paths["@bazel_tools//tools/osx/crosstool:wrapped_ar.tpl"],
+ "wrapped_ar",
+ )
+ repository_ctx.symlink(
+ paths["@bazel_tools//tools/osx/crosstool:BUILD.tpl"],
+ "BUILD",
+ )
+ repository_ctx.symlink(
+ paths["@bazel_tools//tools/osx/crosstool:osx_archs.bzl"],
+ "osx_archs.bzl",
+ )
- wrapped_clang_src_path = str(repository_ctx.path(
- paths["@bazel_tools//tools/osx/crosstool:wrapped_clang.cc"]))
- xcrun_result = repository_ctx.execute(["env", "-i", "xcrun", "clang", "-std=c++11", "-lc++",
- "-o", "wrapped_clang", wrapped_clang_src_path], 30)
- if (xcrun_result.return_code == 0):
- repository_ctx.symlink("wrapped_clang", "wrapped_clang_pp")
- else:
- # If for some reason wrapped_clang couldn't be built, fall back to
- # using the bash scripts that don't support dSYM generation. This is to
- # avoid completely breaking a build. This should be removed after a whole
- # release cycle to keep from increasing code maintenance, if we haven't
- # received any issues as requested below.
- error_msg = (
- "return code {code}, stderr: {err}, stdout: {out}").format(
- code=xcrun_result.return_code,
- err=xcrun_result.stderr,
- out=xcrun_result.stdout)
- print("wrapped_clang failed to generate. This shouldn't cause " +
- "problems, but please file an issue at " +
- "https://github.com/bazelbuild/bazel/issues with the following:\n" +
- error_msg)
- repository_ctx.symlink(
- paths["@bazel_tools//tools/osx/crosstool:wrapped_clang.tpl"],
- "wrapped_clang")
- repository_ctx.symlink(
- paths["@bazel_tools//tools/osx/crosstool:wrapped_clang_pp.tpl"],
- "wrapped_clang_pp")
+ wrapped_clang_src_path = str(repository_ctx.path(
+ paths["@bazel_tools//tools/osx/crosstool:wrapped_clang.cc"],
+ ))
+ xcrun_result = repository_ctx.execute([
+ "env",
+ "-i",
+ "xcrun",
+ "clang",
+ "-std=c++11",
+ "-lc++",
+ "-o",
+ "wrapped_clang",
+ wrapped_clang_src_path,
+ ], 30)
+ if (xcrun_result.return_code == 0):
+ repository_ctx.symlink("wrapped_clang", "wrapped_clang_pp")
+ else:
+ # If for some reason wrapped_clang couldn't be built, fall back to
+ # using the bash scripts that don't support dSYM generation. This is to
+ # avoid completely breaking a build. This should be removed after a whole
+ # release cycle to keep from increasing code maintenance, if we haven't
+ # received any issues as requested below.
+ error_msg = (
+ "return code {code}, stderr: {err}, stdout: {out}"
+ ).format(
+ code = xcrun_result.return_code,
+ err = xcrun_result.stderr,
+ out = xcrun_result.stdout,
+ )
+ print("wrapped_clang failed to generate. This shouldn't cause " +
+ "problems, but please file an issue at " +
+ "https://github.com/bazelbuild/bazel/issues with the following:\n" +
+ error_msg)
+ repository_ctx.symlink(
+ paths["@bazel_tools//tools/osx/crosstool:wrapped_clang.tpl"],
+ "wrapped_clang",
+ )
+ repository_ctx.symlink(
+ paths["@bazel_tools//tools/osx/crosstool:wrapped_clang_pp.tpl"],
+ "wrapped_clang_pp",
+ )
- escaped_include_paths = _get_escaped_xcode_cxx_inc_directories(repository_ctx, cc, xcode_toolchains)
- escaped_cxx_include_directories = []
- for path in escaped_include_paths:
- escaped_cxx_include_directories.append(("cxx_builtin_include_directory: \"%s\"" % path))
- if xcodeloc_err:
- escaped_cxx_include_directories.append("# Error: " + xcodeloc_err + "\n")
- repository_ctx.template(
- "CROSSTOOL",
- paths["@bazel_tools//tools/osx/crosstool:CROSSTOOL.tpl"],
- {"%{cxx_builtin_include_directory}": "\n".join(escaped_cxx_include_directories)})
- else:
- configure_unix_toolchain(repository_ctx, cpu_value = "darwin", overriden_tools = overriden_tools)
+ escaped_include_paths = _get_escaped_xcode_cxx_inc_directories(repository_ctx, cc, xcode_toolchains)
+ escaped_cxx_include_directories = []
+ for path in escaped_include_paths:
+ escaped_cxx_include_directories.append(("cxx_builtin_include_directory: \"%s\"" % path))
+ if xcodeloc_err:
+ escaped_cxx_include_directories.append("# Error: " + xcodeloc_err + "\n")
+ repository_ctx.template(
+ "CROSSTOOL",
+ paths["@bazel_tools//tools/osx/crosstool:CROSSTOOL.tpl"],
+ {"%{cxx_builtin_include_directory}": "\n".join(escaped_cxx_include_directories)},
+ )
+ else:
+ configure_unix_toolchain(repository_ctx, cpu_value = "darwin", overriden_tools = overriden_tools)
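Distilled, the control flow above is: run the Xcode locator, build the Xcode-aware CROSSTOOL when toolchains are found, and otherwise fall back to the generic Unix path. A sketch (xcode_locator_path stands in for the resolved label path above):

    (toolchains, err) = run_xcode_locator(repository_ctx, xcode_locator_path)
    if toolchains:
        # Symlink the crosstool wrappers and template the Xcode CROSSTOOL,
        # appending `err` as a comment if the locator reported one.
        pass
    else:
        configure_unix_toolchain(repository_ctx, cpu_value = "darwin", overriden_tools = {})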
diff --git a/tools/cpp/toolchain_utils.bzl b/tools/cpp/toolchain_utils.bzl
index 5ab9ade50c..e879299cd1 100644
--- a/tools/cpp/toolchain_utils.bzl
+++ b/tools/cpp/toolchain_utils.bzl
@@ -21,20 +21,20 @@ the CppConfiguration.
"""
def find_cpp_toolchain(ctx):
- """
- Finds the c++ toolchain.
+ """
+ Finds the c++ toolchain.
- If the c++ toolchain is in use, returns it. Otherwise, returns a c++
- toolchain derived from legacy toolchain selection.
+ If the c++ toolchain is in use, returns it. Otherwise, returns a c++
+ toolchain derived from legacy toolchain selection.
- Args:
- ctx: The rule context for which to find a toolchain.
+ Args:
+ ctx: The rule context for which to find a toolchain.
- Returns:
- A CcToolchainProvider.
- """
+ Returns:
+ A CcToolchainProvider.
+ """
- if Label("@bazel_tools//tools/cpp:toolchain_type") in ctx.fragments.platform.enabled_toolchain_types:
- return ctx.toolchains["@bazel_tools//tools/cpp:toolchain_type"]
- else:
- return ctx.attr._cc_toolchain[cc_common.CcToolchainInfo]
+ if Label("@bazel_tools//tools/cpp:toolchain_type") in ctx.fragments.platform.enabled_toolchain_types:
+ return ctx.toolchains["@bazel_tools//tools/cpp:toolchain_type"]
+ else:
+ return ctx.attr._cc_toolchain[cc_common.CcToolchainInfo]
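A rule that calls find_cpp_toolchain() has to advertise both lookup paths used above; a minimal consumer sketch (the rule name is hypothetical):

    load("@bazel_tools//tools/cpp:toolchain_utils.bzl", "find_cpp_toolchain")

    def _my_rule_impl(ctx):
        cc_toolchain = find_cpp_toolchain(ctx)  # CcToolchainProvider
        return []

    my_rule = rule(
        implementation = _my_rule_impl,
        attrs = {
            # Legacy fallback for when the toolchain_type branch is disabled.
            "_cc_toolchain": attr.label(
                default = Label("@bazel_tools//tools/cpp:current_cc_toolchain"),
            ),
        },
        # Needed because the helper reads ctx.fragments.platform.
        fragments = ["platform"],
        toolchains = ["@bazel_tools//tools/cpp:toolchain_type"],
    )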
diff --git a/tools/cpp/unix_cc_configure.bzl b/tools/cpp/unix_cc_configure.bzl
index d4b25c8776..82cb60d5b9 100644
--- a/tools/cpp/unix_cc_configure.bzl
+++ b/tools/cpp/unix_cc_configure.bzl
@@ -14,11 +14,10 @@
# limitations under the License.
"""Configuring the C++ toolchain on Unix platforms."""
-
load(
"@bazel_tools//tools/cpp:lib_cc_configure.bzl",
- "auto_configure_warning",
"auto_configure_fail",
+ "auto_configure_warning",
"escape_string",
"get_env_var",
"resolve_labels",
@@ -27,349 +26,371 @@ load(
)
def _uniq(iterable):
- """Remove duplicates from a list."""
+ """Remove duplicates from a list."""
- unique_elements = {element: None for element in iterable}
- return unique_elements.keys()
+ unique_elements = {element: None for element in iterable}
+ return unique_elements.keys()
def _prepare_include_path(repo_ctx, path):
- """Resolve and sanitize include path before outputting it into the crosstool.
+ """Resolve and sanitize include path before outputting it into the crosstool.
- Args:
- repo_ctx: repository_ctx object.
- path: an include path to be sanitized.
+ Args:
+ repo_ctx: repository_ctx object.
+ path: an include path to be sanitized.
- Returns:
- Sanitized include path that can be written to the crosstoot. Resulting path
- is absolute if it is outside the repository and relative otherwise.
- """
+ Returns:
+ Sanitized include path that can be written to the crosstoot. Resulting path
+ is absolute if it is outside the repository and relative otherwise.
+ """
- repo_root = str(repo_ctx.path("."))
- # We're on UNIX, so the path delimiter is '/'.
- repo_root += "/"
- path = str(repo_ctx.path(path))
- if path.startswith(repo_root):
- return escape_string(path[len(repo_root):])
- return escape_string(path)
+ repo_root = str(repo_ctx.path("."))
+
+ # We're on UNIX, so the path delimiter is '/'.
+ repo_root += "/"
+ path = str(repo_ctx.path(path))
+ if path.startswith(repo_root):
+ return escape_string(path[len(repo_root):])
+ return escape_string(path)
def _get_value(it):
- """Convert `it` in serialized protobuf format."""
- if type(it) == "int":
- return str(it)
- elif type(it) == "bool":
- return "true" if it else "false"
- else:
- return "\"%s\"" % it
-
-
-def _build_crosstool(d, prefix=" "):
- """Convert `d` to a string version of a CROSSTOOL file content."""
- lines = []
- for k in d:
- if type(d[k]) == "list":
- for it in d[k]:
- lines.append("%s%s: %s" % (prefix, k, _get_value(it)))
+ """Convert `it` in serialized protobuf format."""
+ if type(it) == "int":
+ return str(it)
+ elif type(it) == "bool":
+ return "true" if it else "false"
else:
- lines.append("%s%s: %s" % (prefix, k, _get_value(d[k])))
- return "\n".join(lines)
-
+ return "\"%s\"" % it
+
+def _build_crosstool(d, prefix = " "):
+ """Convert `d` to a string version of a CROSSTOOL file content."""
+ lines = []
+ for k in d:
+ if type(d[k]) == "list":
+ for it in d[k]:
+ lines.append("%s%s: %s" % (prefix, k, _get_value(it)))
+ else:
+ lines.append("%s%s: %s" % (prefix, k, _get_value(d[k])))
+ return "\n".join(lines)
def _build_tool_path(d):
- """Build the list of %-escaped tool_path for the CROSSTOOL file."""
- lines = []
- for k in d:
- lines.append(" tool_path {name: \"%s\" path: \"%s\" }" % (k, escape_string(d[k])))
- return "\n".join(lines)
+ """Build the list of %-escaped tool_path for the CROSSTOOL file."""
+ lines = []
+ for k in d:
+ lines.append(" tool_path {name: \"%s\" path: \"%s\" }" % (k, escape_string(d[k])))
+ return "\n".join(lines)
def _find_tool(repository_ctx, tool, overriden_tools):
- """Find a tool for repository, taking overriden tools into account."""
- if tool in overriden_tools:
- return overriden_tools[tool]
- return which(repository_ctx, tool, "/usr/bin/" + tool)
+ """Find a tool for repository, taking overriden tools into account."""
+ if tool in overriden_tools:
+ return overriden_tools[tool]
+ return which(repository_ctx, tool, "/usr/bin/" + tool)
def _get_tool_paths(repository_ctx, overriden_tools):
- """Compute the path to the various tools. Doesn't %-escape the result!"""
- return dict({k: _find_tool(repository_ctx, k, overriden_tools)
- for k in [
- "ar",
- "ld",
- "cpp",
- "gcc",
- "dwp",
- "gcov",
- "nm",
- "objcopy",
- "objdump",
- "strip",
- ]}.items())
+ """Compute the path to the various tools. Doesn't %-escape the result!"""
+ return dict({
+ k: _find_tool(repository_ctx, k, overriden_tools)
+ for k in [
+ "ar",
+ "ld",
+ "cpp",
+ "gcc",
+ "dwp",
+ "gcov",
+ "nm",
+ "objcopy",
+ "objdump",
+ "strip",
+ ]
+ }.items())
def _escaped_cplus_include_paths(repository_ctx):
- """Use ${CPLUS_INCLUDE_PATH} to compute the %-escaped list of flags for cxxflag."""
- if "CPLUS_INCLUDE_PATH" in repository_ctx.os.environ:
- result = []
- for p in repository_ctx.os.environ["CPLUS_INCLUDE_PATH"].split(":"):
- p = escape_string(str(repository_ctx.path(p))) # Normalize the path
- result.append("-I" + p)
- return result
- else:
- return []
-
+ """Use ${CPLUS_INCLUDE_PATH} to compute the %-escaped list of flags for cxxflag."""
+ if "CPLUS_INCLUDE_PATH" in repository_ctx.os.environ:
+ result = []
+ for p in repository_ctx.os.environ["CPLUS_INCLUDE_PATH"].split(":"):
+ p = escape_string(str(repository_ctx.path(p))) # Normalize the path
+ result.append("-I" + p)
+ return result
+ else:
+ return []
_INC_DIR_MARKER_BEGIN = "#include <...>"
# OSX add " (framework directory)" at the end of line, strip it.
_OSX_FRAMEWORK_SUFFIX = " (framework directory)"
-_OSX_FRAMEWORK_SUFFIX_LEN = len(_OSX_FRAMEWORK_SUFFIX)
+_OSX_FRAMEWORK_SUFFIX_LEN = len(_OSX_FRAMEWORK_SUFFIX)
def _cxx_inc_convert(path):
- """Convert path returned by cc -E xc++ in a complete path. Doesn't %-escape the path!"""
- path = path.strip()
- if path.endswith(_OSX_FRAMEWORK_SUFFIX):
- path = path[:-_OSX_FRAMEWORK_SUFFIX_LEN].strip()
- return path
-
+ """Convert path returned by cc -E xc++ in a complete path. Doesn't %-escape the path!"""
+ path = path.strip()
+ if path.endswith(_OSX_FRAMEWORK_SUFFIX):
+ path = path[:-_OSX_FRAMEWORK_SUFFIX_LEN].strip()
+ return path
def get_escaped_cxx_inc_directories(repository_ctx, cc, lang_flag, additional_flags = []):
- """Compute the list of default %-escaped C++ include directories."""
- result = repository_ctx.execute([cc, "-E", lang_flag, "-", "-v"] + additional_flags)
- index1 = result.stderr.find(_INC_DIR_MARKER_BEGIN)
- if index1 == -1:
- return []
- index1 = result.stderr.find("\n", index1)
- if index1 == -1:
- return []
- index2 = result.stderr.rfind("\n ")
- if index2 == -1 or index2 < index1:
- return []
- index2 = result.stderr.find("\n", index2 + 1)
- if index2 == -1:
- inc_dirs = result.stderr[index1 + 1:]
- else:
- inc_dirs = result.stderr[index1 + 1:index2].strip()
-
- return [_prepare_include_path(repository_ctx, _cxx_inc_convert(p))
- for p in inc_dirs.split("\n")]
+ """Compute the list of default %-escaped C++ include directories."""
+ result = repository_ctx.execute([cc, "-E", lang_flag, "-", "-v"] + additional_flags)
+ index1 = result.stderr.find(_INC_DIR_MARKER_BEGIN)
+ if index1 == -1:
+ return []
+ index1 = result.stderr.find("\n", index1)
+ if index1 == -1:
+ return []
+ index2 = result.stderr.rfind("\n ")
+ if index2 == -1 or index2 < index1:
+ return []
+ index2 = result.stderr.find("\n", index2 + 1)
+ if index2 == -1:
+ inc_dirs = result.stderr[index1 + 1:]
+ else:
+ inc_dirs = result.stderr[index1 + 1:index2].strip()
+ return [
+ _prepare_include_path(repository_ctx, _cxx_inc_convert(p))
+ for p in inc_dirs.split("\n")
+ ]
def _is_option_supported(repository_ctx, cc, option):
- """Checks that `option` is supported by the C compiler. Doesn't %-escape the option."""
- result = repository_ctx.execute([
- cc,
- option,
- "-o",
- "/dev/null",
- "-c",
- str(repository_ctx.path("tools/cpp/empty.cc"))
- ])
- return result.stderr.find(option) == -1
-
+ """Checks that `option` is supported by the C compiler. Doesn't %-escape the option."""
+ result = repository_ctx.execute([
+ cc,
+ option,
+ "-o",
+ "/dev/null",
+ "-c",
+ str(repository_ctx.path("tools/cpp/empty.cc")),
+ ])
+ return result.stderr.find(option) == -1
def _add_option_if_supported(repository_ctx, cc, option):
- """Returns `[option]` if supported, `[]` otherwise. Doesn't %-escape the option."""
- return [option] if _is_option_supported(repository_ctx, cc, option) else []
-
+ """Returns `[option]` if supported, `[]` otherwise. Doesn't %-escape the option."""
+ return [option] if _is_option_supported(repository_ctx, cc, option) else []
def _is_gold_supported(repository_ctx, cc):
- """Checks that `gold` is supported by the C compiler."""
- result = repository_ctx.execute([
- cc,
- "-fuse-ld=gold",
- "-o",
- "/dev/null",
- # Some macos clang versions don't fail when setting -fuse-ld=gold, adding
- # these lines to force it to. This also means that we will not detect
- # gold when only a very old (year 2010 and older) is present.
- "-Wl,--start-lib",
- "-Wl,--end-lib",
- str(repository_ctx.path("tools/cpp/empty.cc"))
- ])
- return result.return_code == 0
+ """Checks that `gold` is supported by the C compiler."""
+ result = repository_ctx.execute([
+ cc,
+ "-fuse-ld=gold",
+ "-o",
+ "/dev/null",
+ # Some macos clang versions don't fail when setting -fuse-ld=gold, adding
+ # these lines to force it to. This also means that we will not detect
+        # gold when only a very old gold (year 2010 and older) is present.
+ "-Wl,--start-lib",
+ "-Wl,--end-lib",
+ str(repository_ctx.path("tools/cpp/empty.cc")),
+ ])
+ return result.return_code == 0
def _get_no_canonical_prefixes_opt(repository_ctx, cc):
- # If the compiler sometimes rewrites paths in the .d files without symlinks
- # (ie when they're shorter), it confuses Bazel's logic for verifying all
- # #included header files are listed as inputs to the action.
-
- # The '-fno-canonical-system-headers' should be enough, but clang does not
- # support it, so we also try '-no-canonical-prefixes' if first option does
- # not work.
- opt = _add_option_if_supported(repository_ctx, cc,
- "-fno-canonical-system-headers")
- if len(opt) == 0:
- return _add_option_if_supported(repository_ctx, cc,
- "-no-canonical-prefixes")
- return opt
+ # If the compiler sometimes rewrites paths in the .d files without symlinks
+ # (ie when they're shorter), it confuses Bazel's logic for verifying all
+ # #included header files are listed as inputs to the action.
+
+ # The '-fno-canonical-system-headers' should be enough, but clang does not
+ # support it, so we also try '-no-canonical-prefixes' if first option does
+ # not work.
+ opt = _add_option_if_supported(
+ repository_ctx,
+ cc,
+ "-fno-canonical-system-headers",
+ )
+ if len(opt) == 0:
+ return _add_option_if_supported(
+ repository_ctx,
+ cc,
+ "-no-canonical-prefixes",
+ )
+ return opt
def _crosstool_content(repository_ctx, cc, cpu_value, darwin):
- """Return the content for the CROSSTOOL file, in a dictionary."""
- supports_gold_linker = _is_gold_supported(repository_ctx, cc)
- cc_path = repository_ctx.path(cc)
- if not str(cc_path).startswith(str(repository_ctx.path(".")) + '/'):
- # cc is outside the repository, set -B
- bin_search_flag = ["-B" + escape_string(str(cc_path.dirname))]
- else:
- # cc is inside the repository, don't set -B.
- bin_search_flag = []
-
- escaped_cxx_include_directories = _uniq(
- get_escaped_cxx_inc_directories(repository_ctx, cc, "-xc") +
- get_escaped_cxx_inc_directories(repository_ctx, cc, "-xc++") +
- get_escaped_cxx_inc_directories(
- repository_ctx, cc, "-xc", _get_no_canonical_prefixes_opt(repository_ctx, cc)) +
- get_escaped_cxx_inc_directories(
- repository_ctx, cc, "-xc++", _get_no_canonical_prefixes_opt(repository_ctx, cc)))
- return {
- "abi_version": escape_string(get_env_var(repository_ctx, "ABI_VERSION", "local", False)),
- "abi_libc_version": escape_string(get_env_var(repository_ctx, "ABI_LIBC_VERSION", "local", False)),
- "builtin_sysroot": "",
- "compiler": escape_string(get_env_var(repository_ctx, "BAZEL_COMPILER", "compiler", False)),
- "host_system_name": escape_string(get_env_var(repository_ctx, "BAZEL_HOST_SYSTEM", "local", False)),
- "needsPic": True,
- "supports_gold_linker": supports_gold_linker,
- "supports_incremental_linker": False,
- "supports_fission": False,
- "supports_interface_shared_objects": False,
- "supports_normalizing_ar": False,
- "supports_start_end_lib": supports_gold_linker,
- "target_libc": "macosx" if darwin else escape_string(get_env_var(repository_ctx, "BAZEL_TARGET_LIBC", "local", False)),
- "target_cpu": escape_string(get_env_var(repository_ctx, "BAZEL_TARGET_CPU", cpu_value, False)),
- "target_system_name": escape_string(get_env_var(repository_ctx, "BAZEL_TARGET_SYSTEM", "local", False)),
- "cxx_flag": [
- "-std=c++0x",
- ] + _escaped_cplus_include_paths(repository_ctx),
- "linker_flag": (
- ["-fuse-ld=gold"] if supports_gold_linker else []
- ) + _add_option_if_supported(
- repository_ctx, cc, "-Wl,-no-as-needed"
- ) + _add_option_if_supported(
- repository_ctx, cc, "-Wl,-z,relro,-z,now"
- ) + ([
- "-undefined",
- "dynamic_lookup",
- "-headerpad_max_install_names",
- ] if darwin else bin_search_flag + [
- # Always have -B/usr/bin, see https://github.com/bazelbuild/bazel/issues/760.
- "-B/usr/bin",
- # Gold linker only? Can we enable this by default?
- # "-Wl,--warn-execstack",
- # "-Wl,--detect-odr-violations"
- ] + _add_option_if_supported(
- # Have gcc return the exit code from ld.
- repository_ctx, cc, "-pass-exit-codes")
- ) + split_escaped(
- get_env_var(repository_ctx, "BAZEL_LINKOPTS", "-lstdc++:-lm", False), ":"),
- "cxx_builtin_include_directory": escaped_cxx_include_directories,
- "objcopy_embed_flag": ["-I", "binary"],
- "unfiltered_cxx_flag":
- _get_no_canonical_prefixes_opt(repository_ctx, cc) + [
- # Make C++ compilation deterministic. Use linkstamping instead of these
- # compiler symbols.
- "-Wno-builtin-macro-redefined",
- "-D__DATE__=\\\"redacted\\\"",
- "-D__TIMESTAMP__=\\\"redacted\\\"",
- "-D__TIME__=\\\"redacted\\\""
- ],
- "compiler_flag": [
- # Security hardening requires optimization.
- # We need to undef it as some distributions now have it enabled by default.
- "-U_FORTIFY_SOURCE",
- "-fstack-protector",
- # All warnings are enabled. Maybe enable -Werror as well?
- "-Wall",
- # Enable a few more warnings that aren't part of -Wall.
- ] + (["-Wthread-safety", "-Wself-assign"] if darwin else bin_search_flag + [
- # Always have -B/usr/bin, see https://github.com/bazelbuild/bazel/issues/760.
- "-B/usr/bin",
- ]) + (
- # Disable problematic warnings.
- _add_option_if_supported(repository_ctx, cc, "-Wunused-but-set-parameter") +
- # has false positives
- _add_option_if_supported(repository_ctx, cc, "-Wno-free-nonheap-object") +
- # Enable coloring even if there's no attached terminal. Bazel removes the
- # escape sequences if --nocolor is specified.
- _add_option_if_supported(repository_ctx, cc, "-fcolor-diagnostics")) + [
- # Keep stack frames for debugging, even in opt mode.
- "-fno-omit-frame-pointer",
- ],
- }
-
+ """Return the content for the CROSSTOOL file, in a dictionary."""
+ supports_gold_linker = _is_gold_supported(repository_ctx, cc)
+ cc_path = repository_ctx.path(cc)
+ if not str(cc_path).startswith(str(repository_ctx.path(".")) + "/"):
+ # cc is outside the repository, set -B
+ bin_search_flag = ["-B" + escape_string(str(cc_path.dirname))]
+ else:
+ # cc is inside the repository, don't set -B.
+ bin_search_flag = []
+
+ escaped_cxx_include_directories = _uniq(
+ get_escaped_cxx_inc_directories(repository_ctx, cc, "-xc") +
+ get_escaped_cxx_inc_directories(repository_ctx, cc, "-xc++") +
+ get_escaped_cxx_inc_directories(
+ repository_ctx,
+ cc,
+ "-xc",
+ _get_no_canonical_prefixes_opt(repository_ctx, cc),
+ ) +
+ get_escaped_cxx_inc_directories(
+ repository_ctx,
+ cc,
+ "-xc++",
+ _get_no_canonical_prefixes_opt(repository_ctx, cc),
+ ),
+ )
+ return {
+ "abi_version": escape_string(get_env_var(repository_ctx, "ABI_VERSION", "local", False)),
+ "abi_libc_version": escape_string(get_env_var(repository_ctx, "ABI_LIBC_VERSION", "local", False)),
+ "builtin_sysroot": "",
+ "compiler": escape_string(get_env_var(repository_ctx, "BAZEL_COMPILER", "compiler", False)),
+ "host_system_name": escape_string(get_env_var(repository_ctx, "BAZEL_HOST_SYSTEM", "local", False)),
+ "needsPic": True,
+ "supports_gold_linker": supports_gold_linker,
+ "supports_incremental_linker": False,
+ "supports_fission": False,
+ "supports_interface_shared_objects": False,
+ "supports_normalizing_ar": False,
+ "supports_start_end_lib": supports_gold_linker,
+ "target_libc": "macosx" if darwin else escape_string(get_env_var(repository_ctx, "BAZEL_TARGET_LIBC", "local", False)),
+ "target_cpu": escape_string(get_env_var(repository_ctx, "BAZEL_TARGET_CPU", cpu_value, False)),
+ "target_system_name": escape_string(get_env_var(repository_ctx, "BAZEL_TARGET_SYSTEM", "local", False)),
+ "cxx_flag": [
+ "-std=c++0x",
+ ] + _escaped_cplus_include_paths(repository_ctx),
+ "linker_flag": (
+ ["-fuse-ld=gold"] if supports_gold_linker else []
+ ) + _add_option_if_supported(
+ repository_ctx,
+ cc,
+ "-Wl,-no-as-needed",
+ ) + _add_option_if_supported(
+ repository_ctx,
+ cc,
+ "-Wl,-z,relro,-z,now",
+ ) + (
+ [
+ "-undefined",
+ "dynamic_lookup",
+ "-headerpad_max_install_names",
+ ] if darwin else bin_search_flag + [
+ # Always have -B/usr/bin, see https://github.com/bazelbuild/bazel/issues/760.
+ "-B/usr/bin",
+ # Gold linker only? Can we enable this by default?
+ # "-Wl,--warn-execstack",
+ # "-Wl,--detect-odr-violations"
+ ] + _add_option_if_supported(
+ # Have gcc return the exit code from ld.
+ repository_ctx,
+ cc,
+ "-pass-exit-codes",
+ )
+ ) + split_escaped(
+ get_env_var(repository_ctx, "BAZEL_LINKOPTS", "-lstdc++:-lm", False),
+ ":",
+ ),
+ "cxx_builtin_include_directory": escaped_cxx_include_directories,
+ "objcopy_embed_flag": ["-I", "binary"],
+ "unfiltered_cxx_flag": _get_no_canonical_prefixes_opt(repository_ctx, cc) + [
+ # Make C++ compilation deterministic. Use linkstamping instead of these
+ # compiler symbols.
+ "-Wno-builtin-macro-redefined",
+ "-D__DATE__=\\\"redacted\\\"",
+ "-D__TIMESTAMP__=\\\"redacted\\\"",
+ "-D__TIME__=\\\"redacted\\\"",
+ ],
+ "compiler_flag": [
+ # Security hardening requires optimization.
+ # We need to undef it as some distributions now have it enabled by default.
+ "-U_FORTIFY_SOURCE",
+ "-fstack-protector",
+ # All warnings are enabled. Maybe enable -Werror as well?
+ "-Wall",
+ # Enable a few more warnings that aren't part of -Wall.
+ ] + (["-Wthread-safety", "-Wself-assign"] if darwin else bin_search_flag + [
+ # Always have -B/usr/bin, see https://github.com/bazelbuild/bazel/issues/760.
+ "-B/usr/bin",
+ ]) + (
+ # Disable problematic warnings.
+ _add_option_if_supported(repository_ctx, cc, "-Wunused-but-set-parameter") +
+ # has false positives
+ _add_option_if_supported(repository_ctx, cc, "-Wno-free-nonheap-object") +
+ # Enable coloring even if there's no attached terminal. Bazel removes the
+ # escape sequences if --nocolor is specified.
+ _add_option_if_supported(repository_ctx, cc, "-fcolor-diagnostics")
+ ) + [
+ # Keep stack frames for debugging, even in opt mode.
+ "-fno-omit-frame-pointer",
+ ],
+ }
def _opt_content(darwin):
- """Return the content of the opt specific section of the CROSSTOOL file."""
- return {
- "compiler_flag": [
- # No debug symbols.
- # Maybe we should enable https://gcc.gnu.org/wiki/DebugFission for opt or
- # even generally? However, that can't happen here, as it requires special
- # handling in Bazel.
- "-g0",
-
- # Conservative choice for -O
- # -O3 can increase binary size and even slow down the resulting binaries.
- # Profile first and / or use FDO if you need better performance than this.
- "-O2",
-
- # Security hardening on by default.
- # Conservative choice; -D_FORTIFY_SOURCE=2 may be unsafe in some cases.
- "-D_FORTIFY_SOURCE=1",
-
- # Disable assertions
- "-DNDEBUG",
-
- # Removal of unused code and data at link time (can this increase binary size in some cases?).
- "-ffunction-sections",
- "-fdata-sections"
- ],
- "linker_flag": [] if darwin else ["-Wl,--gc-sections"]
- }
-
+ """Return the content of the opt specific section of the CROSSTOOL file."""
+ return {
+ "compiler_flag": [
+ # No debug symbols.
+ # Maybe we should enable https://gcc.gnu.org/wiki/DebugFission for opt or
+ # even generally? However, that can't happen here, as it requires special
+ # handling in Bazel.
+ "-g0",
+
+ # Conservative choice for -O
+ # -O3 can increase binary size and even slow down the resulting binaries.
+ # Profile first and / or use FDO if you need better performance than this.
+ "-O2",
+
+ # Security hardening on by default.
+ # Conservative choice; -D_FORTIFY_SOURCE=2 may be unsafe in some cases.
+ "-D_FORTIFY_SOURCE=1",
+
+ # Disable assertions
+ "-DNDEBUG",
+
+ # Removal of unused code and data at link time (can this increase binary size in some cases?).
+ "-ffunction-sections",
+ "-fdata-sections",
+ ],
+ "linker_flag": [] if darwin else ["-Wl,--gc-sections"],
+ }
def _dbg_content():
- """Return the content of the dbg specific section of the CROSSTOOL file."""
- # Enable debug symbols
- return {"compiler_flag": "-g"}
+ """Return the content of the dbg specific section of the CROSSTOOL file."""
+ # Enable debug symbols
+ return {"compiler_flag": "-g"}
def get_env(repository_ctx):
- """Convert the environment in a list of export if in Homebrew. Doesn't %-escape the result!"""
- env = repository_ctx.os.environ
- if "HOMEBREW_RUBY_PATH" in env:
- return "\n".join([
- "export %s='%s'" % (k, env[k].replace("'", "'\\''"))
- for k in env
- if k != "_" and k.find(".") == -1
- ])
- else:
- return ""
-
+ """Convert the environment in a list of export if in Homebrew. Doesn't %-escape the result!"""
+ env = repository_ctx.os.environ
+ if "HOMEBREW_RUBY_PATH" in env:
+ return "\n".join([
+ "export %s='%s'" % (k, env[k].replace("'", "'\\''"))
+ for k in env
+ if k != "_" and k.find(".") == -1
+ ])
+ else:
+ return ""
def _coverage_feature(repository_ctx, darwin):
- use_llvm_cov = "1" == get_env_var(
- repository_ctx,
- "BAZEL_USE_LLVM_NATIVE_COVERAGE",
- default="0",
- enable_warning=False)
- if darwin or use_llvm_cov:
- compile_flags = """flag_group {
+ use_llvm_cov = "1" == get_env_var(
+ repository_ctx,
+ "BAZEL_USE_LLVM_NATIVE_COVERAGE",
+ default = "0",
+ enable_warning = False,
+ )
+ if darwin or use_llvm_cov:
+ compile_flags = """flag_group {
flag: '-fprofile-instr-generate'
flag: '-fcoverage-mapping'
}"""
- link_flags = """flag_group {
+ link_flags = """flag_group {
flag: '-fprofile-instr-generate'
}"""
- else:
- # gcc requires --coverage being passed for compilation and linking
- # https://gcc.gnu.org/onlinedocs/gcc/Instrumentation-Options.html#Instrumentation-Options
- compile_flags = """flag_group {
+ else:
+ # gcc requires --coverage being passed for compilation and linking
+ # https://gcc.gnu.org/onlinedocs/gcc/Instrumentation-Options.html#Instrumentation-Options
+ compile_flags = """flag_group {
flag: '--coverage'
}"""
- link_flags = """flag_group {
+ link_flags = """flag_group {
flag: '--coverage'
}"""
- # Note that we also set --coverage for c++-link-nodeps-dynamic-library. The
- # generated code contains references to gcov symbols, and the dynamic linker
- # can't resolve them unless the library is linked against gcov.
- return """
+
+ # Note that we also set --coverage for c++-link-nodeps-dynamic-library. The
+ # generated code contains references to gcov symbols, and the dynamic linker
+ # can't resolve them unless the library is linked against gcov.
+ return """
feature {
name: 'coverage'
provides: 'profile'
@@ -392,111 +413,127 @@ def _coverage_feature(repository_ctx, darwin):
"""
def _find_generic(repository_ctx, name, env_name, overriden_tools, warn = False):
- """Find a generic C++ toolchain tool. Doesn't %-escape the result."""
-
- if name in overriden_tools:
- return overriden_tools[name]
-
- result = name
- env_value = repository_ctx.os.environ.get(env_name)
- env_value_with_paren = ""
- if env_value != None:
- env_value = env_value.strip()
- if env_value:
- result = env_value
- env_value_with_paren = " (%s)" % env_value
- if result.startswith("/"):
- # Absolute path, maybe we should make this suported by our which function.
+ """Find a generic C++ toolchain tool. Doesn't %-escape the result."""
+
+ if name in overriden_tools:
+ return overriden_tools[name]
+
+ result = name
+ env_value = repository_ctx.os.environ.get(env_name)
+ env_value_with_paren = ""
+ if env_value != None:
+ env_value = env_value.strip()
+ if env_value:
+ result = env_value
+ env_value_with_paren = " (%s)" % env_value
+ if result.startswith("/"):
+        # Absolute path, maybe we should make this supported by our which function.
+ return result
+ result = repository_ctx.which(result)
+ if result == None:
+ msg = ("Cannot find %s or %s%s; either correct your path or set the %s" +
+ " environment variable") % (name, env_name, env_value_with_paren, env_name)
+ if warn:
+ auto_configure_warning(msg)
+ else:
+ auto_configure_fail(msg)
return result
- result = repository_ctx.which(result)
- if result == None:
- msg = ("Cannot find %s or %s%s; either correct your path or set the %s"
- + " environment variable") % (name, env_name, env_value_with_paren, env_name)
- if warn:
- auto_configure_warning(msg)
- else:
- auto_configure_fail(msg)
- return result
def find_cc(repository_ctx, overriden_tools):
- return _find_generic(repository_ctx, "gcc", "CC", overriden_tools)
+ return _find_generic(repository_ctx, "gcc", "CC", overriden_tools)
def configure_unix_toolchain(repository_ctx, cpu_value, overriden_tools):
- """Configure C++ toolchain on Unix platforms."""
- paths = resolve_labels(repository_ctx, [
- "@bazel_tools//tools/cpp:BUILD.tpl",
- "@bazel_tools//tools/cpp:CROSSTOOL.tpl",
- "@bazel_tools//tools/cpp:linux_cc_wrapper.sh.tpl",
- "@bazel_tools//tools/cpp:osx_cc_wrapper.sh.tpl",
- ])
-
- repository_ctx.file("tools/cpp/empty.cc", "int main() {}")
- darwin = cpu_value == "darwin"
-
- cc = _find_generic(repository_ctx, "gcc", "CC", overriden_tools)
- overriden_tools = dict(overriden_tools)
- overriden_tools["gcc"] = cc
- overriden_tools["gcov"] = _find_generic(
- repository_ctx, "gcov", "GCOV", overriden_tools, warn = True)
- if darwin:
- overriden_tools["gcc"] = "cc_wrapper.sh"
- overriden_tools["ar"] = "/usr/bin/libtool"
-
- tool_paths = _get_tool_paths(repository_ctx, overriden_tools)
- crosstool_content = _crosstool_content(repository_ctx, cc, cpu_value, darwin)
- opt_content = _opt_content(darwin)
- dbg_content = _dbg_content()
-
- repository_ctx.template(
- "BUILD",
- paths["@bazel_tools//tools/cpp:BUILD.tpl"],
- {
- "%{name}": cpu_value,
- "%{supports_param_files}": "0" if darwin else "1",
- "%{cc_compiler_deps}": ":cc_wrapper" if darwin else ":empty",
- "%{compiler}": get_env_var(
- repository_ctx, "BAZEL_COMPILER", "compiler", False),
- })
-
- cc_wrapper_src = (
- "@bazel_tools//tools/cpp:osx_cc_wrapper.sh.tpl"
- if darwin else "@bazel_tools//tools/cpp:linux_cc_wrapper.sh.tpl")
- repository_ctx.template(
- "cc_wrapper.sh",
- paths[cc_wrapper_src],
- {
- "%{cc}": escape_string(str(cc)),
- "%{env}": escape_string(get_env(repository_ctx)),
- })
-
- repository_ctx.template(
- "CROSSTOOL",
- paths["@bazel_tools//tools/cpp:CROSSTOOL.tpl"],
- {
- "%{cpu}": escape_string(cpu_value),
- "%{default_toolchain_name}": escape_string(
- get_env_var(repository_ctx,
- "CC_TOOLCHAIN_NAME",
- "local",
- False)),
- "%{toolchain_name}": escape_string(
- get_env_var(repository_ctx, "CC_TOOLCHAIN_NAME", "local", False)),
- "%{content}": _build_crosstool(crosstool_content) + "\n" +
- _build_tool_path(tool_paths),
- "%{opt_content}": _build_crosstool(opt_content, " "),
- "%{dbg_content}": _build_crosstool(dbg_content, " "),
- "%{cxx_builtin_include_directory}": "",
- "%{coverage}": _coverage_feature(repository_ctx, darwin),
- "%{msvc_env_tmp}": "",
- "%{msvc_env_path}": "",
- "%{msvc_env_include}": "",
- "%{msvc_env_lib}": "",
- "%{msvc_cl_path}": "",
- "%{msvc_ml_path}": "",
- "%{msvc_link_path}": "",
- "%{msvc_lib_path}": "",
- "%{msys_x64_mingw_content}": "",
- "%{dbg_mode_debug}": "",
- "%{fastbuild_mode_debug}": "",
- "%{compilation_mode_content}": "",
- })
+ """Configure C++ toolchain on Unix platforms."""
+ paths = resolve_labels(repository_ctx, [
+ "@bazel_tools//tools/cpp:BUILD.tpl",
+ "@bazel_tools//tools/cpp:CROSSTOOL.tpl",
+ "@bazel_tools//tools/cpp:linux_cc_wrapper.sh.tpl",
+ "@bazel_tools//tools/cpp:osx_cc_wrapper.sh.tpl",
+ ])
+
+ repository_ctx.file("tools/cpp/empty.cc", "int main() {}")
+ darwin = cpu_value == "darwin"
+
+ cc = _find_generic(repository_ctx, "gcc", "CC", overriden_tools)
+ overriden_tools = dict(overriden_tools)
+ overriden_tools["gcc"] = cc
+ overriden_tools["gcov"] = _find_generic(
+ repository_ctx,
+ "gcov",
+ "GCOV",
+ overriden_tools,
+ warn = True,
+ )
+ if darwin:
+ overriden_tools["gcc"] = "cc_wrapper.sh"
+ overriden_tools["ar"] = "/usr/bin/libtool"
+
+ tool_paths = _get_tool_paths(repository_ctx, overriden_tools)
+ crosstool_content = _crosstool_content(repository_ctx, cc, cpu_value, darwin)
+ opt_content = _opt_content(darwin)
+ dbg_content = _dbg_content()
+
+ repository_ctx.template(
+ "BUILD",
+ paths["@bazel_tools//tools/cpp:BUILD.tpl"],
+ {
+ "%{name}": cpu_value,
+ "%{supports_param_files}": "0" if darwin else "1",
+ "%{cc_compiler_deps}": ":cc_wrapper" if darwin else ":empty",
+ "%{compiler}": get_env_var(
+ repository_ctx,
+ "BAZEL_COMPILER",
+ "compiler",
+ False,
+ ),
+ },
+ )
+
+ cc_wrapper_src = (
+ "@bazel_tools//tools/cpp:osx_cc_wrapper.sh.tpl" if darwin else "@bazel_tools//tools/cpp:linux_cc_wrapper.sh.tpl"
+ )
+ repository_ctx.template(
+ "cc_wrapper.sh",
+ paths[cc_wrapper_src],
+ {
+ "%{cc}": escape_string(str(cc)),
+ "%{env}": escape_string(get_env(repository_ctx)),
+ },
+ )
+
+ repository_ctx.template(
+ "CROSSTOOL",
+ paths["@bazel_tools//tools/cpp:CROSSTOOL.tpl"],
+ {
+ "%{cpu}": escape_string(cpu_value),
+ "%{default_toolchain_name}": escape_string(
+ get_env_var(
+ repository_ctx,
+ "CC_TOOLCHAIN_NAME",
+ "local",
+ False,
+ ),
+ ),
+ "%{toolchain_name}": escape_string(
+ get_env_var(repository_ctx, "CC_TOOLCHAIN_NAME", "local", False),
+ ),
+ "%{content}": _build_crosstool(crosstool_content) + "\n" +
+ _build_tool_path(tool_paths),
+ "%{opt_content}": _build_crosstool(opt_content, " "),
+ "%{dbg_content}": _build_crosstool(dbg_content, " "),
+ "%{cxx_builtin_include_directory}": "",
+ "%{coverage}": _coverage_feature(repository_ctx, darwin),
+ "%{msvc_env_tmp}": "",
+ "%{msvc_env_path}": "",
+ "%{msvc_env_include}": "",
+ "%{msvc_env_lib}": "",
+ "%{msvc_cl_path}": "",
+ "%{msvc_ml_path}": "",
+ "%{msvc_link_path}": "",
+ "%{msvc_lib_path}": "",
+ "%{msys_x64_mingw_content}": "",
+ "%{dbg_mode_debug}": "",
+ "%{fastbuild_mode_debug}": "",
+ "%{compilation_mode_content}": "",
+ },
+ )
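To make the serialization helpers concrete, a worked example of _build_crosstool() with hypothetical values (lists become repeated CROSSTOOL fields; _get_value() lowercases booleans and quotes strings):

    _build_crosstool({
        "needsPic": True,
        "cxx_flag": ["-std=c++0x", "-Ithird_party/foo"],
    })
    # ->
    #   needsPic: true
    #   cxx_flag: "-std=c++0x"
    #   cxx_flag: "-Ithird_party/foo"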
diff --git a/tools/cpp/windows_cc_configure.bzl b/tools/cpp/windows_cc_configure.bzl
index 8c3210231e..11dc0ad608 100644
--- a/tools/cpp/windows_cc_configure.bzl
+++ b/tools/cpp/windows_cc_configure.bzl
@@ -16,318 +16,437 @@
load(
"@bazel_tools//tools/cpp:lib_cc_configure.bzl",
- "escape_string",
"auto_configure_fail",
"auto_configure_warning",
- "get_env_var",
- "which",
- "which_cmd",
+ "escape_string",
"execute",
+ "get_env_var",
"is_cc_configure_debug",
"resolve_labels",
+ "which",
+ "which_cmd",
)
def _get_escaped_windows_msys_crosstool_content(repository_ctx, use_mingw = False):
- """Return the content of msys crosstool which is still the default CROSSTOOL on Windows."""
- bazel_sh = get_env_var(repository_ctx, "BAZEL_SH").replace("\\", "/").lower()
- tokens = bazel_sh.rsplit("/", 1)
- prefix = "mingw64" if use_mingw else "usr"
- msys_root = None
- if tokens[0].endswith("/usr/bin"):
- msys_root = tokens[0][:len(tokens[0]) - len("usr/bin")]
- elif tokens[0].endswith("/bin"):
- msys_root = tokens[0][:len(tokens[0]) - len("bin")]
- if not msys_root:
- auto_configure_fail(
- "Could not determine MSYS/Cygwin root from BAZEL_SH (%s)" % bazel_sh)
- escaped_msys_root = escape_string(msys_root)
- return (((
- ' abi_version: "local"\n' +
- ' abi_libc_version: "local"\n' +
- ' builtin_sysroot: ""\n' +
- ' compiler: "msys-gcc"\n' +
- ' host_system_name: "local"\n' +
- ' needsPic: false\n' +
- ' target_libc: "msys"\n' +
- ' target_cpu: "x64_windows"\n' +
- ' target_system_name: "local"\n') if not use_mingw else '') +
- ' tool_path { name: "ar" path: "%s%s/bin/ar" }\n' % (escaped_msys_root, prefix) +
- ' tool_path { name: "compat-ld" path: "%s%s/bin/ld" }\n' % (escaped_msys_root, prefix) +
- ' tool_path { name: "cpp" path: "%s%s/bin/cpp" }\n' % (escaped_msys_root, prefix) +
- ' tool_path { name: "dwp" path: "%s%s/bin/dwp" }\n' % (escaped_msys_root, prefix) +
- ' tool_path { name: "gcc" path: "%s%s/bin/gcc" }\n' % (escaped_msys_root, prefix) +
- ' artifact_name_pattern { category_name: "executable" prefix: "" extension: ".exe"}\n' +
- ' cxx_flag: "-std=gnu++0x"\n' +
- ' linker_flag: "-lstdc++"\n' +
- ' cxx_builtin_include_directory: "%s%s/"\n' % (escaped_msys_root, prefix) +
- ' tool_path { name: "gcov" path: "%s%s/bin/gcov" }\n' % (escaped_msys_root, prefix) +
- ' tool_path { name: "ld" path: "%s%s/bin/ld" }\n' % (escaped_msys_root, prefix) +
- ' tool_path { name: "nm" path: "%s%s/bin/nm" }\n' % (escaped_msys_root, prefix) +
- ' tool_path { name: "objcopy" path: "%s%s/bin/objcopy" }\n' % (escaped_msys_root, prefix) +
- ' objcopy_embed_flag: "-I"\n' +
- ' objcopy_embed_flag: "binary"\n' +
- ' tool_path { name: "objdump" path: "%s%s/bin/objdump" }\n' % (escaped_msys_root, prefix) +
- ' tool_path { name: "strip" path: "%s%s/bin/strip" }'% (escaped_msys_root, prefix) +
- ' feature { name: "targets_windows" implies: "copy_dynamic_libraries_to_binary" enabled: true }' +
- ' feature { name: "copy_dynamic_libraries_to_binary" }' )
+ """Return the content of msys crosstool which is still the default CROSSTOOL on Windows."""
+ bazel_sh = get_env_var(repository_ctx, "BAZEL_SH").replace("\\", "/").lower()
+ tokens = bazel_sh.rsplit("/", 1)
+ prefix = "mingw64" if use_mingw else "usr"
+ msys_root = None
+ if tokens[0].endswith("/usr/bin"):
+ msys_root = tokens[0][:len(tokens[0]) - len("usr/bin")]
+ elif tokens[0].endswith("/bin"):
+ msys_root = tokens[0][:len(tokens[0]) - len("bin")]
+ if not msys_root:
+ auto_configure_fail(
+ "Could not determine MSYS/Cygwin root from BAZEL_SH (%s)" % bazel_sh,
+ )
+ escaped_msys_root = escape_string(msys_root)
+ return (((
+ ' abi_version: "local"\n' +
+ ' abi_libc_version: "local"\n' +
+ ' builtin_sysroot: ""\n' +
+ ' compiler: "msys-gcc"\n' +
+ ' host_system_name: "local"\n' +
+ " needsPic: false\n" +
+ ' target_libc: "msys"\n' +
+ ' target_cpu: "x64_windows"\n' +
+ ' target_system_name: "local"\n'
+ ) if not use_mingw else "") +
+ ' tool_path { name: "ar" path: "%s%s/bin/ar" }\n' % (escaped_msys_root, prefix) +
+ ' tool_path { name: "compat-ld" path: "%s%s/bin/ld" }\n' % (escaped_msys_root, prefix) +
+ ' tool_path { name: "cpp" path: "%s%s/bin/cpp" }\n' % (escaped_msys_root, prefix) +
+ ' tool_path { name: "dwp" path: "%s%s/bin/dwp" }\n' % (escaped_msys_root, prefix) +
+ ' tool_path { name: "gcc" path: "%s%s/bin/gcc" }\n' % (escaped_msys_root, prefix) +
+ ' artifact_name_pattern { category_name: "executable" prefix: "" extension: ".exe"}\n' +
+ ' cxx_flag: "-std=gnu++0x"\n' +
+ ' linker_flag: "-lstdc++"\n' +
+ ' cxx_builtin_include_directory: "%s%s/"\n' % (escaped_msys_root, prefix) +
+ ' tool_path { name: "gcov" path: "%s%s/bin/gcov" }\n' % (escaped_msys_root, prefix) +
+ ' tool_path { name: "ld" path: "%s%s/bin/ld" }\n' % (escaped_msys_root, prefix) +
+ ' tool_path { name: "nm" path: "%s%s/bin/nm" }\n' % (escaped_msys_root, prefix) +
+ ' tool_path { name: "objcopy" path: "%s%s/bin/objcopy" }\n' % (escaped_msys_root, prefix) +
+ ' objcopy_embed_flag: "-I"\n' +
+ ' objcopy_embed_flag: "binary"\n' +
+ ' tool_path { name: "objdump" path: "%s%s/bin/objdump" }\n' % (escaped_msys_root, prefix) +
+ ' tool_path { name: "strip" path: "%s%s/bin/strip" }' % (escaped_msys_root, prefix) +
+ ' feature { name: "targets_windows" implies: "copy_dynamic_libraries_to_binary" enabled: true }' +
+ ' feature { name: "copy_dynamic_libraries_to_binary" }')
def _get_system_root(repository_ctx):
- r"""Get System root path on Windows, default is C:\\Windows. Doesn't %-escape the result."""
- if "SYSTEMROOT" in repository_ctx.os.environ:
- return escape_string(repository_ctx.os.environ["SYSTEMROOT"])
- auto_configure_warning("SYSTEMROOT is not set, using default SYSTEMROOT=C:\\Windows")
- return "C:\\Windows"
+ """Get System root path on Windows, default is C:\\\Windows. Doesn't %-escape the result."""
+ if "SYSTEMROOT" in repository_ctx.os.environ:
+ return escape_string(repository_ctx.os.environ["SYSTEMROOT"])
+ auto_configure_warning("SYSTEMROOT is not set, using default SYSTEMROOT=C:\\Windows")
+ return "C:\\Windows"
def _find_cuda(repository_ctx):
- """Find out if and where cuda is installed. Doesn't %-escape the result."""
- if "CUDA_PATH" in repository_ctx.os.environ:
- return repository_ctx.os.environ["CUDA_PATH"]
- nvcc = which(repository_ctx, "nvcc.exe")
- if nvcc:
- return nvcc[:-len("/bin/nvcc.exe")]
- return None
+ """Find out if and where cuda is installed. Doesn't %-escape the result."""
+ if "CUDA_PATH" in repository_ctx.os.environ:
+ return repository_ctx.os.environ["CUDA_PATH"]
+ nvcc = which(repository_ctx, "nvcc.exe")
+ if nvcc:
+ return nvcc[:-len("/bin/nvcc.exe")]
+ return None
def _find_python(repository_ctx):
- """Find where is python on Windows. Doesn't %-escape the result."""
- if "BAZEL_PYTHON" in repository_ctx.os.environ:
- python_binary = repository_ctx.os.environ["BAZEL_PYTHON"]
- if not python_binary.endswith(".exe"):
- python_binary = python_binary + ".exe"
+ """Find where is python on Windows. Doesn't %-escape the result."""
+ if "BAZEL_PYTHON" in repository_ctx.os.environ:
+ python_binary = repository_ctx.os.environ["BAZEL_PYTHON"]
+ if not python_binary.endswith(".exe"):
+ python_binary = python_binary + ".exe"
+ return python_binary
+ auto_configure_warning("'BAZEL_PYTHON' is not set, start looking for python in PATH.")
+ python_binary = which_cmd(repository_ctx, "python.exe")
+ auto_configure_warning("Python found at %s" % python_binary)
return python_binary
- auto_configure_warning("'BAZEL_PYTHON' is not set, start looking for python in PATH.")
- python_binary = which_cmd(repository_ctx, "python.exe")
- auto_configure_warning("Python found at %s" % python_binary)
- return python_binary
def _add_system_root(repository_ctx, env):
- r"""Running VCVARSALL.BAT and VCVARSQUERYREGISTRY.BAT need %SYSTEMROOT%\\system32 in PATH."""
- if "PATH" not in env:
- env["PATH"] = ""
- env["PATH"] = env["PATH"] + ";" + _get_system_root(repository_ctx) + "\\system32"
- return env
+ """Running VCVARSALL.BAT and VCVARSQUERYREGISTRY.BAT need %SYSTEMROOT%\\\\system32 in PATH."""
+ if "PATH" not in env:
+ env["PATH"] = ""
+ env["PATH"] = env["PATH"] + ";" + _get_system_root(repository_ctx) + "\\system32"
+ return env
def find_vc_path(repository_ctx):
- """Find Visual C++ build tools install path. Doesn't %-escape the result."""
- # 1. Check if BAZEL_VC or BAZEL_VS is already set by user.
- if "BAZEL_VC" in repository_ctx.os.environ:
- return repository_ctx.os.environ["BAZEL_VC"]
-
- if "BAZEL_VS" in repository_ctx.os.environ:
- return repository_ctx.os.environ["BAZEL_VS"] + "\\VC\\"
- auto_configure_warning("'BAZEL_VC' is not set, " +
- "start looking for the latest Visual C++ installed.")
-
- # 2. Check if VS%VS_VERSION%COMNTOOLS is set, if true then try to find and use
- # vcvarsqueryregistry.bat to detect VC++.
- auto_configure_warning("Looking for VS%VERSION%COMNTOOLS environment variables, " +
- "eg. VS140COMNTOOLS")
- for vscommontools_env in ["VS140COMNTOOLS", "VS120COMNTOOLS",
- "VS110COMNTOOLS", "VS100COMNTOOLS", "VS90COMNTOOLS"]:
- if vscommontools_env not in repository_ctx.os.environ:
- continue
- vcvarsqueryregistry = repository_ctx.os.environ[vscommontools_env] + "\\vcvarsqueryregistry.bat"
- if not repository_ctx.path(vcvarsqueryregistry).exists:
- continue
- repository_ctx.file("get_vc_dir.bat",
- "@echo off\n" +
- "call \"" + vcvarsqueryregistry + "\"\n" +
- "echo %VCINSTALLDIR%", True)
- env = _add_system_root(repository_ctx, repository_ctx.os.environ)
- vc_dir = execute(repository_ctx, ["./get_vc_dir.bat"], environment=env)
-
+ """Find Visual C++ build tools install path. Doesn't %-escape the result."""
+
+ # 1. Check if BAZEL_VC or BAZEL_VS is already set by user.
+ if "BAZEL_VC" in repository_ctx.os.environ:
+ return repository_ctx.os.environ["BAZEL_VC"]
+
+ if "BAZEL_VS" in repository_ctx.os.environ:
+ return repository_ctx.os.environ["BAZEL_VS"] + "\\VC\\"
+ auto_configure_warning("'BAZEL_VC' is not set, " +
+ "start looking for the latest Visual C++ installed.")
+
+    # 2. Check if VS%VS_VERSION%COMNTOOLS is set; if it is, try to find and use
+    #    vcvarsqueryregistry.bat to detect VC++.
+ auto_configure_warning("Looking for VS%VERSION%COMNTOOLS environment variables, " +
+                           "e.g. VS140COMNTOOLS")
+ for vscommontools_env in [
+ "VS140COMNTOOLS",
+ "VS120COMNTOOLS",
+ "VS110COMNTOOLS",
+ "VS100COMNTOOLS",
+ "VS90COMNTOOLS",
+ ]:
+ if vscommontools_env not in repository_ctx.os.environ:
+ continue
+ vcvarsqueryregistry = repository_ctx.os.environ[vscommontools_env] + "\\vcvarsqueryregistry.bat"
+ if not repository_ctx.path(vcvarsqueryregistry).exists:
+ continue
+ repository_ctx.file(
+ "get_vc_dir.bat",
+ "@echo off\n" +
+ "call \"" + vcvarsqueryregistry + "\"\n" +
+ "echo %VCINSTALLDIR%",
+ True,
+ )
+ env = _add_system_root(repository_ctx, repository_ctx.os.environ)
+ vc_dir = execute(repository_ctx, ["./get_vc_dir.bat"], environment = env)
+
+ auto_configure_warning("Visual C++ build tools found at %s" % vc_dir)
+ return vc_dir
+
+    # 3. The user might have cleared all environment variables; if so, look for Visual C++ through the registry.
+ # Works for all VS versions, including Visual Studio 2017.
+ auto_configure_warning("Looking for Visual C++ through registry")
+ reg_binary = _get_system_root(repository_ctx) + "\\system32\\reg.exe"
+ vc_dir = None
+ for key, suffix in (("VC7", ""), ("VS7", "\\VC")):
+ for version in ["15.0", "14.0", "12.0", "11.0", "10.0", "9.0", "8.0"]:
+ if vc_dir:
+ break
+ result = repository_ctx.execute([reg_binary, "query", "HKEY_LOCAL_MACHINE\\SOFTWARE\\Wow6432Node\\Microsoft\\VisualStudio\\SxS\\" + key, "/v", version])
+ if is_cc_configure_debug(repository_ctx):
+ auto_configure_warning("registry query result for VC %s:\n\nSTDOUT(start)\n%s\nSTDOUT(end)\nSTDERR(start):\n%s\nSTDERR(end)\n" %
+ (version, result.stdout, result.stderr))
+ if not result.stderr:
+ for line in result.stdout.split("\n"):
+ line = line.strip()
+ if line.startswith(version) and line.find("REG_SZ") != -1:
+ vc_dir = line[line.find("REG_SZ") + len("REG_SZ"):].strip() + suffix
+
+ if not vc_dir:
+ return None
auto_configure_warning("Visual C++ build tools found at %s" % vc_dir)
return vc_dir
- # 3. User might clean up all environment variables, if so looking for Visual C++ through registry.
- # Works for all VS versions, including Visual Studio 2017.
- auto_configure_warning("Looking for Visual C++ through registry")
- reg_binary = _get_system_root(repository_ctx) + "\\system32\\reg.exe"
- vc_dir = None
- for key, suffix in (("VC7", ""), ("VS7", "\\VC")):
- for version in ["15.0", "14.0", "12.0", "11.0", "10.0", "9.0", "8.0"]:
- if vc_dir:
- break
- result = repository_ctx.execute([reg_binary, "query", "HKEY_LOCAL_MACHINE\\SOFTWARE\\Wow6432Node\\Microsoft\\VisualStudio\\SxS\\" + key, "/v", version])
- if is_cc_configure_debug(repository_ctx):
- auto_configure_warning("registry query result for VC %s:\n\nSTDOUT(start)\n%s\nSTDOUT(end)\nSTDERR(start):\n%s\nSTDERR(end)\n" %
- (version, result.stdout, result.stderr))
- if not result.stderr:
- for line in result.stdout.split("\n"):
- line = line.strip()
- if line.startswith(version) and line.find("REG_SZ") != -1:
- vc_dir = line[line.find("REG_SZ") + len("REG_SZ"):].strip() + suffix
-
- if not vc_dir:
- return None
- auto_configure_warning("Visual C++ build tools found at %s" % vc_dir)
- return vc_dir
-
def _is_vs_2017(vc_path):
- """Check if the installed VS version is Visual Studio 2017."""
- # In VS 2017, the location of VC is like:
- # C:\Program Files (x86)\Microsoft Visual Studio\2017\BuildTools\VC\
- # In VS 2015 or older version, it is like:
- # C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\
- return vc_path.find("2017") != -1
+ """Check if the installed VS version is Visual Studio 2017."""
+
+ # In VS 2017, the location of VC is like:
+ # C:\Program Files (x86)\Microsoft Visual Studio\2017\BuildTools\VC\
+ # In VS 2015 or older version, it is like:
+ # C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\
+ return vc_path.find("2017") != -1
def _find_vcvarsall_bat_script(repository_ctx, vc_path):
- """Find vcvarsall.bat script. Doesn't %-escape the result."""
- if _is_vs_2017(vc_path):
- vcvarsall = vc_path + "\\Auxiliary\\Build\\VCVARSALL.BAT"
- else:
- vcvarsall = vc_path + "\\VCVARSALL.BAT"
+ """Find vcvarsall.bat script. Doesn't %-escape the result."""
+ if _is_vs_2017(vc_path):
+ vcvarsall = vc_path + "\\Auxiliary\\Build\\VCVARSALL.BAT"
+ else:
+ vcvarsall = vc_path + "\\VCVARSALL.BAT"
- if not repository_ctx.path(vcvarsall).exists:
- return None
+ if not repository_ctx.path(vcvarsall).exists:
+ return None
- return vcvarsall
+ return vcvarsall
def setup_vc_env_vars(repository_ctx, vc_path):
- """Get environment variables set by VCVARSALL.BAT. Doesn't %-escape the result!"""
- vcvarsall = _find_vcvarsall_bat_script(repository_ctx, vc_path)
- if not vcvarsall:
- return None
- repository_ctx.file("get_env.bat",
- "@echo off\n" +
- "call \"" + vcvarsall + "\" amd64 > NUL \n" +
- "echo PATH=%PATH%,INCLUDE=%INCLUDE%,LIB=%LIB%,WINDOWSSDKDIR=%WINDOWSSDKDIR% \n", True)
- env = _add_system_root(repository_ctx,
- {"PATH": "", "INCLUDE": "", "LIB": "", "WINDOWSSDKDIR": ""})
- envs = execute(repository_ctx, ["./get_env.bat"], environment=env).split(",")
- env_map = {}
- for env in envs:
- key, value = env.split("=", 1)
- env_map[key] = escape_string(value.replace("\\", "\\\\"))
- return env_map
+ """Get environment variables set by VCVARSALL.BAT. Doesn't %-escape the result!"""
+ vcvarsall = _find_vcvarsall_bat_script(repository_ctx, vc_path)
+ if not vcvarsall:
+ return None
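+    # Write a throwaway batch script that calls vcvarsall.bat for amd64 and
+    # echoes the resulting PATH/INCLUDE/LIB/WINDOWSSDKDIR on one line, then
+    # parse those values back out of its stdout.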
+ repository_ctx.file(
+ "get_env.bat",
+ "@echo off\n" +
+ "call \"" + vcvarsall + "\" amd64 > NUL \n" +
+ "echo PATH=%PATH%,INCLUDE=%INCLUDE%,LIB=%LIB%,WINDOWSSDKDIR=%WINDOWSSDKDIR% \n",
+ True,
+ )
+ env = _add_system_root(
+ repository_ctx,
+ {"PATH": "", "INCLUDE": "", "LIB": "", "WINDOWSSDKDIR": ""},
+ )
+ envs = execute(repository_ctx, ["./get_env.bat"], environment = env).split(",")
+ env_map = {}
+ for env in envs:
+ key, value = env.split("=", 1)
+ env_map[key] = escape_string(value.replace("\\", "\\\\"))
+ return env_map
def find_msvc_tool(repository_ctx, vc_path, tool):
- """Find the exact path of a specific build tool in MSVC. Doesn't %-escape the result."""
- tool_path = ""
- if _is_vs_2017(vc_path):
- # For VS 2017, the tools are under a directory like:
- # C:\Program Files (x86)\Microsoft Visual Studio\2017\BuildTools\VC\Tools\MSVC\14.10.24930\bin\HostX64\x64
- dirs = repository_ctx.path(vc_path + "\\Tools\\MSVC").readdir()
- if len(dirs) < 1:
- return None
- # Normally there should be only one child directory under %VC_PATH%\TOOLS\MSVC,
- # but iterate every directory to be more robust.
- for path in dirs:
- tool_path = str(path) + "\\bin\\HostX64\\x64\\" + tool
- if repository_ctx.path(tool_path).exists:
- break
- else:
- # For VS 2015 and older version, the tools are under:
- # C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\bin\amd64
- tool_path = vc_path + "\\bin\\amd64\\" + tool
-
- if not repository_ctx.path(tool_path).exists:
- return None
+ """Find the exact path of a specific build tool in MSVC. Doesn't %-escape the result."""
+ tool_path = ""
+ if _is_vs_2017(vc_path):
+ # For VS 2017, the tools are under a directory like:
+ # C:\Program Files (x86)\Microsoft Visual Studio\2017\BuildTools\VC\Tools\MSVC\14.10.24930\bin\HostX64\x64
+ dirs = repository_ctx.path(vc_path + "\\Tools\\MSVC").readdir()
+ if len(dirs) < 1:
+ return None
+
+ # Normally there should be only one child directory under %VC_PATH%\TOOLS\MSVC,
+ # but iterate every directory to be more robust.
+ for path in dirs:
+ tool_path = str(path) + "\\bin\\HostX64\\x64\\" + tool
+ if repository_ctx.path(tool_path).exists:
+ break
+ else:
+        # For VS 2015 and older versions, the tools are under:
+ # C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\bin\amd64
+ tool_path = vc_path + "\\bin\\amd64\\" + tool
+
+ if not repository_ctx.path(tool_path).exists:
+ return None
- return tool_path
+ return tool_path
def _find_missing_vc_tools(repository_ctx, vc_path):
- """Check if any required tool is missing under given VC path."""
- missing_tools = []
- if not _find_vcvarsall_bat_script(repository_ctx, vc_path):
- missing_tools.append("VCVARSALL.BAT")
+ """Check if any required tool is missing under given VC path."""
+ missing_tools = []
+ if not _find_vcvarsall_bat_script(repository_ctx, vc_path):
+ missing_tools.append("VCVARSALL.BAT")
- for tool in ["cl.exe", "link.exe", "lib.exe", "ml64.exe"]:
- if not find_msvc_tool(repository_ctx, vc_path, tool):
- missing_tools.append(tool)
+ for tool in ["cl.exe", "link.exe", "lib.exe", "ml64.exe"]:
+ if not find_msvc_tool(repository_ctx, vc_path, tool):
+ missing_tools.append(tool)
- return missing_tools
+ return missing_tools
def _is_support_whole_archive(repository_ctx, vc_path):
- """Run MSVC linker alone to see if it supports /WHOLEARCHIVE."""
- env = repository_ctx.os.environ
- if "NO_WHOLE_ARCHIVE_OPTION" in env and env["NO_WHOLE_ARCHIVE_OPTION"] == "1":
- return False
- linker = find_msvc_tool(repository_ctx, vc_path, "link.exe")
- result = execute(repository_ctx, [linker], expect_failure = True)
- return result.find("/WHOLEARCHIVE") != -1
+ """Run MSVC linker alone to see if it supports /WHOLEARCHIVE."""
+ env = repository_ctx.os.environ
+ if "NO_WHOLE_ARCHIVE_OPTION" in env and env["NO_WHOLE_ARCHIVE_OPTION"] == "1":
+ return False
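+    # link.exe invoked with no arguments prints its usage text and exits
+    # non-zero; the usage mentions /WHOLEARCHIVE only when the flag is supported.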
+ linker = find_msvc_tool(repository_ctx, vc_path, "link.exe")
+ result = execute(repository_ctx, [linker], expect_failure = True)
+ return result.find("/WHOLEARCHIVE") != -1
def _is_support_debug_fastlink(repository_ctx, vc_path):
- """Run MSVC linker alone to see if it supports /DEBUG:FASTLINK."""
- linker = find_msvc_tool(repository_ctx, vc_path, "link.exe")
- result = execute(repository_ctx, [linker], expect_failure = True)
- return result.find("/DEBUG[:{FASTLINK|FULL|NONE}]") != -1
+ """Run MSVC linker alone to see if it supports /DEBUG:FASTLINK."""
+ linker = find_msvc_tool(repository_ctx, vc_path, "link.exe")
+ result = execute(repository_ctx, [linker], expect_failure = True)
+ return result.find("/DEBUG[:{FASTLINK|FULL|NONE}]") != -1
def _is_use_msvc_wrapper(repository_ctx):
- """Returns True if USE_MSVC_WRAPPER is set to 1."""
- env = repository_ctx.os.environ
- return "USE_MSVC_WRAPPER" in env and env["USE_MSVC_WRAPPER"] == "1"
+ """Returns True if USE_MSVC_WRAPPER is set to 1."""
+ env = repository_ctx.os.environ
+ return "USE_MSVC_WRAPPER" in env and env["USE_MSVC_WRAPPER"] == "1"
def _get_compilation_mode_content():
- """Return the content for adding flags for different compilation modes when using MSVC wrapper."""
- return "\n".join([
- " compilation_mode_flags {",
- " mode: DBG",
- " compiler_flag: '-Xcompilation-mode=dbg'",
- " linker_flag: '-Xcompilation-mode=dbg'",
- " }",
- " compilation_mode_flags {",
- " mode: FASTBUILD",
- " compiler_flag: '-Xcompilation-mode=fastbuild'",
- " linker_flag: '-Xcompilation-mode=fastbuild'",
- " }",
- " compilation_mode_flags {",
- " mode: OPT",
- " compiler_flag: '-Xcompilation-mode=opt'",
- " linker_flag: '-Xcompilation-mode=opt'",
- " }"])
+ """Return the content for adding flags for different compilation modes when using MSVC wrapper."""
+ return "\n".join([
+ " compilation_mode_flags {",
+ " mode: DBG",
+ " compiler_flag: '-Xcompilation-mode=dbg'",
+ " linker_flag: '-Xcompilation-mode=dbg'",
+ " }",
+ " compilation_mode_flags {",
+ " mode: FASTBUILD",
+ " compiler_flag: '-Xcompilation-mode=fastbuild'",
+ " linker_flag: '-Xcompilation-mode=fastbuild'",
+ " }",
+ " compilation_mode_flags {",
+ " mode: OPT",
+ " compiler_flag: '-Xcompilation-mode=opt'",
+ " linker_flag: '-Xcompilation-mode=opt'",
+ " }",
+ ])
def _escaped_cuda_compute_capabilities(repository_ctx):
- """Returns a %-escaped list of strings representing cuda compute capabilities."""
-
- if "CUDA_COMPUTE_CAPABILITIES" not in repository_ctx.os.environ:
- return ["3.5", "5.2"]
- capabilities_str = escape_string(repository_ctx.os.environ["CUDA_COMPUTE_CAPABILITIES"])
- capabilities = capabilities_str.split(",")
- for capability in capabilities:
- # Workaround for Skylark's lack of support for regex. This check should
- # be equivalent to checking:
- # if re.match("[0-9]+.[0-9]+", capability) == None:
- parts = capability.split(".")
- if len(parts) != 2 or not parts[0].isdigit() or not parts[1].isdigit():
- auto_configure_fail("Invalid compute capability: %s" % capability)
- return capabilities
+ """Returns a %-escaped list of strings representing cuda compute capabilities."""
+
+ if "CUDA_COMPUTE_CAPABILITIES" not in repository_ctx.os.environ:
+ return ["3.5", "5.2"]
+ capabilities_str = escape_string(repository_ctx.os.environ["CUDA_COMPUTE_CAPABILITIES"])
+ capabilities = capabilities_str.split(",")
+ for capability in capabilities:
+ # Workaround for Skylark's lack of support for regex. This check should
+ # be equivalent to checking:
+ # if re.match("[0-9]+.[0-9]+", capability) == None:
+ parts = capability.split(".")
+ if len(parts) != 2 or not parts[0].isdigit() or not parts[1].isdigit():
+ auto_configure_fail("Invalid compute capability: %s" % capability)
+ return capabilities
def configure_windows_toolchain(repository_ctx):
- """Configure C++ toolchain on Windows."""
- paths = resolve_labels(repository_ctx, [
- "@bazel_tools//tools/cpp:BUILD.static.windows",
- "@bazel_tools//tools/cpp:CROSSTOOL",
- "@bazel_tools//tools/cpp:CROSSTOOL.tpl",
- "@bazel_tools//tools/cpp:vc_installation_error.bat.tpl",
- "@bazel_tools//tools/cpp:wrapper/bin/call_python.bat.tpl",
- "@bazel_tools//tools/cpp:wrapper/bin/pydir/msvc_tools.py.tpl",
- ])
-
- repository_ctx.symlink(paths["@bazel_tools//tools/cpp:BUILD.static.windows"], "BUILD")
-
- vc_path = find_vc_path(repository_ctx)
- missing_tools = None
- if not vc_path:
- repository_ctx.template(
- "vc_installation_error.bat",
- paths["@bazel_tools//tools/cpp:vc_installation_error.bat.tpl"],
- {"%{vc_error_message}": ""})
- else:
- missing_tools = _find_missing_vc_tools(repository_ctx, vc_path)
- if missing_tools:
- message = "\r\n".join([
- "echo. 1>&2",
- "echo Visual C++ build tools seems to be installed at %s 1>&2" % vc_path,
- "echo But Bazel can't find the following tools: 1>&2",
- "echo %s 1>&2" % ", ".join(missing_tools),
- "echo. 1>&2",
- ])
- repository_ctx.template(
- "vc_installation_error.bat",
- paths["@bazel_tools//tools/cpp:vc_installation_error.bat.tpl"],
- {"%{vc_error_message}": message})
-
- if not vc_path or missing_tools:
+ """Configure C++ toolchain on Windows."""
+ paths = resolve_labels(repository_ctx, [
+ "@bazel_tools//tools/cpp:BUILD.static.windows",
+ "@bazel_tools//tools/cpp:CROSSTOOL",
+ "@bazel_tools//tools/cpp:CROSSTOOL.tpl",
+ "@bazel_tools//tools/cpp:vc_installation_error.bat.tpl",
+ "@bazel_tools//tools/cpp:wrapper/bin/call_python.bat.tpl",
+ "@bazel_tools//tools/cpp:wrapper/bin/pydir/msvc_tools.py.tpl",
+ ])
+
+ repository_ctx.symlink(paths["@bazel_tools//tools/cpp:BUILD.static.windows"], "BUILD")
+
+ vc_path = find_vc_path(repository_ctx)
+ missing_tools = None
+ if not vc_path:
+ repository_ctx.template(
+ "vc_installation_error.bat",
+ paths["@bazel_tools//tools/cpp:vc_installation_error.bat.tpl"],
+ {"%{vc_error_message}": ""},
+ )
+ else:
+ missing_tools = _find_missing_vc_tools(repository_ctx, vc_path)
+ if missing_tools:
+ message = "\r\n".join([
+ "echo. 1>&2",
+ "echo Visual C++ build tools seems to be installed at %s 1>&2" % vc_path,
+ "echo But Bazel can't find the following tools: 1>&2",
+ "echo %s 1>&2" % ", ".join(missing_tools),
+ "echo. 1>&2",
+ ])
+ repository_ctx.template(
+ "vc_installation_error.bat",
+ paths["@bazel_tools//tools/cpp:vc_installation_error.bat.tpl"],
+ {"%{vc_error_message}": message},
+ )
+
+ if not vc_path or missing_tools:
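+        # Without a usable VC installation, point every MSVC tool at the error
+        # script so MSVC builds fail with a clear message, while the MSYS
+        # toolchain configured below remains usable.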
+ repository_ctx.template(
+ "CROSSTOOL",
+ paths["@bazel_tools//tools/cpp:CROSSTOOL.tpl"],
+ {
+ "%{cpu}": "x64_windows",
+ "%{default_toolchain_name}": "msvc_x64",
+ "%{toolchain_name}": "msys_x64",
+ "%{msvc_env_tmp}": "",
+ "%{msvc_env_path}": "",
+ "%{msvc_env_include}": "",
+ "%{msvc_env_lib}": "",
+ "%{msvc_cl_path}": "vc_installation_error.bat",
+ "%{msvc_ml_path}": "vc_installation_error.bat",
+ "%{msvc_link_path}": "vc_installation_error.bat",
+ "%{msvc_lib_path}": "vc_installation_error.bat",
+ "%{dbg_mode_debug}": "/DEBUG",
+ "%{fastbuild_mode_debug}": "/DEBUG",
+ "%{compilation_mode_content}": "",
+ "%{content}": _get_escaped_windows_msys_crosstool_content(repository_ctx),
+ "%{msys_x64_mingw_content}": _get_escaped_windows_msys_crosstool_content(repository_ctx, use_mingw = True),
+ "%{opt_content}": "",
+ "%{dbg_content}": "",
+ "%{link_content}": "",
+ "%{cxx_builtin_include_directory}": "",
+ "%{coverage}": "",
+ },
+ )
+ return
+
+ env = setup_vc_env_vars(repository_ctx, vc_path)
+ escaped_paths = escape_string(env["PATH"])
+ escaped_include_paths = escape_string(env["INCLUDE"])
+ escaped_lib_paths = escape_string(env["LIB"])
+ escaped_tmp_dir = escape_string(
+ get_env_var(repository_ctx, "TMP", "C:\\Windows\\Temp").replace("\\", "\\\\"),
+ )
+ msvc_cl_path = find_msvc_tool(repository_ctx, vc_path, "cl.exe").replace("\\", "/")
+ msvc_ml_path = find_msvc_tool(repository_ctx, vc_path, "ml64.exe").replace("\\", "/")
+ msvc_link_path = find_msvc_tool(repository_ctx, vc_path, "link.exe").replace("\\", "/")
+ msvc_lib_path = find_msvc_tool(repository_ctx, vc_path, "lib.exe").replace("\\", "/")
+ escaped_cxx_include_directories = []
+ compilation_mode_content = ""
+
+ if _is_use_msvc_wrapper(repository_ctx):
+ if _is_support_whole_archive(repository_ctx, vc_path):
+ support_whole_archive = "True"
+ else:
+ support_whole_archive = "False"
+ nvcc_tmp_dir_name = escaped_tmp_dir + "\\\\nvcc_inter_files_tmp_dir"
+
+ # Make sure nvcc.exe is in PATH
+ cuda_path = _find_cuda(repository_ctx)
+ if cuda_path:
+ escaped_paths = escape_string(cuda_path.replace("\\", "\\\\") + "/bin;") + escaped_paths
+ escaped_compute_capabilities = _escaped_cuda_compute_capabilities(repository_ctx)
+ repository_ctx.template(
+ "wrapper/bin/pydir/msvc_tools.py",
+ paths["@bazel_tools//tools/cpp:wrapper/bin/pydir/msvc_tools.py.tpl"],
+ {
+ "%{lib_tool}": escape_string(msvc_lib_path),
+ "%{support_whole_archive}": support_whole_archive,
+ "%{cuda_compute_capabilities}": ", ".join(
+ ["\"%s\"" % c for c in escaped_compute_capabilities],
+ ),
+ "%{nvcc_tmp_dir_name}": nvcc_tmp_dir_name,
+ },
+ )
+
+        # nvcc will generate some source files under %{nvcc_tmp_dir_name}.
+        # The generated files are guaranteed to have unique names, so they can share the same tmp directory.
+ escaped_cxx_include_directories += ["cxx_builtin_include_directory: \"%s\"" % nvcc_tmp_dir_name]
+ msvc_wrapper = repository_ctx.path(
+ paths["@bazel_tools//tools/cpp:CROSSTOOL"],
+ ).dirname.get_child(
+ "wrapper",
+ ).get_child("bin")
+ for f in ["msvc_cl.bat", "msvc_link.bat", "msvc_nop.bat"]:
+ repository_ctx.symlink(msvc_wrapper.get_child(f), "wrapper/bin/" + f)
+ msvc_wrapper = msvc_wrapper.get_child("pydir")
+ for f in ["msvc_cl.py", "msvc_link.py"]:
+ repository_ctx.symlink(msvc_wrapper.get_child(f), "wrapper/bin/pydir/" + f)
+ python_binary = _find_python(repository_ctx)
+ repository_ctx.template(
+ "wrapper/bin/call_python.bat",
+ paths["@bazel_tools//tools/cpp:wrapper/bin/call_python.bat.tpl"],
+ {"%{python_binary}": escape_string(python_binary)},
+ )
+ msvc_cl_path = "wrapper/bin/msvc_cl.bat"
+ msvc_link_path = "wrapper/bin/msvc_link.bat"
+ msvc_lib_path = "wrapper/bin/msvc_link.bat"
+ compilation_mode_content = _get_compilation_mode_content()
+
+ for path in escaped_include_paths.split(";"):
+ if path:
+ escaped_cxx_include_directories.append("cxx_builtin_include_directory: \"%s\"" % path)
+
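+    # Newer MSVC linkers support /DEBUG:FASTLINK and /DEBUG:FULL; probe for
+    # that support here and fall back to plain /DEBUG below when it is absent.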
+ support_debug_fastlink = _is_support_debug_fastlink(repository_ctx, vc_path)
+
repository_ctx.template(
"CROSSTOOL",
paths["@bazel_tools//tools/cpp:CROSSTOOL.tpl"],
@@ -335,111 +454,23 @@ def configure_windows_toolchain(repository_ctx):
"%{cpu}": "x64_windows",
"%{default_toolchain_name}": "msvc_x64",
"%{toolchain_name}": "msys_x64",
- "%{msvc_env_tmp}": "",
- "%{msvc_env_path}": "",
- "%{msvc_env_include}": "",
- "%{msvc_env_lib}": "",
- "%{msvc_cl_path}": "vc_installation_error.bat",
- "%{msvc_ml_path}": "vc_installation_error.bat",
- "%{msvc_link_path}": "vc_installation_error.bat",
- "%{msvc_lib_path}": "vc_installation_error.bat",
- "%{dbg_mode_debug}": "/DEBUG",
- "%{fastbuild_mode_debug}": "/DEBUG",
- "%{compilation_mode_content}": "",
+ "%{msvc_env_tmp}": escaped_tmp_dir,
+ "%{msvc_env_path}": escaped_paths,
+ "%{msvc_env_include}": escaped_include_paths,
+ "%{msvc_env_lib}": escaped_lib_paths,
+ "%{msvc_cl_path}": msvc_cl_path,
+ "%{msvc_ml_path}": msvc_ml_path,
+ "%{msvc_link_path}": msvc_link_path,
+ "%{msvc_lib_path}": msvc_lib_path,
+ "%{dbg_mode_debug}": "/DEBUG:FULL" if support_debug_fastlink else "/DEBUG",
+ "%{fastbuild_mode_debug}": "/DEBUG:FASTLINK" if support_debug_fastlink else "/DEBUG",
+ "%{compilation_mode_content}": compilation_mode_content,
"%{content}": _get_escaped_windows_msys_crosstool_content(repository_ctx),
"%{msys_x64_mingw_content}": _get_escaped_windows_msys_crosstool_content(repository_ctx, use_mingw = True),
"%{opt_content}": "",
"%{dbg_content}": "",
"%{link_content}": "",
- "%{cxx_builtin_include_directory}": "",
+ "%{cxx_builtin_include_directory}": "\n".join(escaped_cxx_include_directories),
"%{coverage}": "",
- })
- return
-
- env = setup_vc_env_vars(repository_ctx, vc_path)
- escaped_paths = escape_string(env["PATH"])
- escaped_include_paths = escape_string(env["INCLUDE"])
- escaped_lib_paths = escape_string(env["LIB"])
- escaped_tmp_dir = escape_string(
- get_env_var(repository_ctx, "TMP", "C:\\Windows\\Temp").replace("\\", "\\\\"))
- msvc_cl_path = find_msvc_tool(repository_ctx, vc_path, "cl.exe").replace("\\", "/")
- msvc_ml_path = find_msvc_tool(repository_ctx, vc_path, "ml64.exe").replace("\\", "/")
- msvc_link_path = find_msvc_tool(repository_ctx, vc_path, "link.exe").replace("\\", "/")
- msvc_lib_path = find_msvc_tool(repository_ctx, vc_path, "lib.exe").replace("\\", "/")
- escaped_cxx_include_directories = []
- compilation_mode_content = ""
-
- if _is_use_msvc_wrapper(repository_ctx):
- if _is_support_whole_archive(repository_ctx, vc_path):
- support_whole_archive = "True"
- else:
- support_whole_archive = "False"
- nvcc_tmp_dir_name = escaped_tmp_dir + "\\\\nvcc_inter_files_tmp_dir"
- # Make sure nvcc.exe is in PATH
- cuda_path = _find_cuda(repository_ctx)
- if cuda_path:
- escaped_paths = escape_string(cuda_path.replace("\\", "\\\\") + "/bin;") + escaped_paths
- escaped_compute_capabilities = _escaped_cuda_compute_capabilities(repository_ctx)
- repository_ctx.template(
- "wrapper/bin/pydir/msvc_tools.py",
- paths["@bazel_tools//tools/cpp:wrapper/bin/pydir/msvc_tools.py.tpl"],
- {
- "%{lib_tool}": escape_string(msvc_lib_path),
- "%{support_whole_archive}": support_whole_archive,
- "%{cuda_compute_capabilities}": ", ".join(
- ["\"%s\"" % c for c in escaped_compute_capabilities]),
- "%{nvcc_tmp_dir_name}": nvcc_tmp_dir_name,
- })
- # nvcc will generate some source files under %{nvcc_tmp_dir_name}
- # The generated files are guranteed to have unique name, so they can share the same tmp directory
- escaped_cxx_include_directories += [ "cxx_builtin_include_directory: \"%s\"" % nvcc_tmp_dir_name ]
- msvc_wrapper = repository_ctx.path(
- paths["@bazel_tools//tools/cpp:CROSSTOOL"]).dirname.get_child(
- "wrapper").get_child("bin")
- for f in ["msvc_cl.bat", "msvc_link.bat", "msvc_nop.bat"]:
- repository_ctx.symlink(msvc_wrapper.get_child(f), "wrapper/bin/" + f)
- msvc_wrapper = msvc_wrapper.get_child("pydir")
- for f in ["msvc_cl.py", "msvc_link.py"]:
- repository_ctx.symlink(msvc_wrapper.get_child(f), "wrapper/bin/pydir/" + f)
- python_binary = _find_python(repository_ctx)
- repository_ctx.template(
- "wrapper/bin/call_python.bat",
- paths["@bazel_tools//tools/cpp:wrapper/bin/call_python.bat.tpl"],
- {"%{python_binary}": escape_string(python_binary)})
- msvc_cl_path = "wrapper/bin/msvc_cl.bat"
- msvc_link_path = "wrapper/bin/msvc_link.bat"
- msvc_lib_path = "wrapper/bin/msvc_link.bat"
- compilation_mode_content = _get_compilation_mode_content()
-
- for path in escaped_include_paths.split(";"):
- if path:
- escaped_cxx_include_directories.append("cxx_builtin_include_directory: \"%s\"" % path)
-
- support_debug_fastlink = _is_support_debug_fastlink(repository_ctx, vc_path)
-
- repository_ctx.template(
- "CROSSTOOL",
- paths["@bazel_tools//tools/cpp:CROSSTOOL.tpl"],
- {
- "%{cpu}": "x64_windows",
- "%{default_toolchain_name}": "msvc_x64",
- "%{toolchain_name}": "msys_x64",
- "%{msvc_env_tmp}": escaped_tmp_dir,
- "%{msvc_env_path}": escaped_paths,
- "%{msvc_env_include}": escaped_include_paths,
- "%{msvc_env_lib}": escaped_lib_paths,
- "%{msvc_cl_path}": msvc_cl_path,
- "%{msvc_ml_path}": msvc_ml_path,
- "%{msvc_link_path}": msvc_link_path,
- "%{msvc_lib_path}": msvc_lib_path,
- "%{dbg_mode_debug}": "/DEBUG:FULL" if support_debug_fastlink else "/DEBUG",
- "%{fastbuild_mode_debug}": "/DEBUG:FASTLINK" if support_debug_fastlink else "/DEBUG",
- "%{compilation_mode_content}": compilation_mode_content,
- "%{content}": _get_escaped_windows_msys_crosstool_content(repository_ctx),
- "%{msys_x64_mingw_content}": _get_escaped_windows_msys_crosstool_content(repository_ctx, use_mingw = True),
- "%{opt_content}": "",
- "%{dbg_content}": "",
- "%{link_content}": "",
- "%{cxx_builtin_include_directory}": "\n".join(escaped_cxx_include_directories),
- "%{coverage}": "",
- })
+ },
+ )
diff --git a/tools/jdk/alias_rules.bzl b/tools/jdk/alias_rules.bzl
index 90847e964b..134c1e898f 100644
--- a/tools/jdk/alias_rules.bzl
+++ b/tools/jdk/alias_rules.bzl
@@ -13,10 +13,10 @@
# limitations under the License.
def _java_host_runtime_alias_impl(ctx):
- vars = ctx.attr._host_java_runtime[platform_common.TemplateVariableInfo]
- runtime_info = ctx.attr._host_java_runtime[java_common.JavaRuntimeInfo]
- runtime_toolchain = ctx.attr._host_java_runtime[platform_common.ToolchainInfo]
- return struct(providers=[vars, runtime_info, runtime_toolchain])
+ vars = ctx.attr._host_java_runtime[platform_common.TemplateVariableInfo]
+ runtime_info = ctx.attr._host_java_runtime[java_common.JavaRuntimeInfo]
+ runtime_toolchain = ctx.attr._host_java_runtime[platform_common.ToolchainInfo]
+ return struct(providers = [vars, runtime_info, runtime_toolchain])
java_host_runtime_alias = rule(
attrs = {
diff --git a/tools/jdk/default_java_toolchain.bzl b/tools/jdk/default_java_toolchain.bzl
index ca53e165c2..f4dc03577c 100644
--- a/tools/jdk/default_java_toolchain.bzl
+++ b/tools/jdk/default_java_toolchain.bzl
@@ -76,12 +76,12 @@ DEFAULT_TOOLCHAIN_CONFIGURATION = {
}
def default_java_toolchain(name, **kwargs):
- """Defines a java_toolchain with appropriate defaults for Bazel."""
+ """Defines a java_toolchain with appropriate defaults for Bazel."""
- toolchain_args = dict(DEFAULT_TOOLCHAIN_CONFIGURATION)
- toolchain_args.update(kwargs)
+ toolchain_args = dict(DEFAULT_TOOLCHAIN_CONFIGURATION)
+ toolchain_args.update(kwargs)
- native.java_toolchain(
- name = name,
- **toolchain_args
- )
+ native.java_toolchain(
+ name = name,
+ **toolchain_args
+ )
diff --git a/tools/osx/alias_rules.bzl b/tools/osx/alias_rules.bzl
index 43acbb1559..c1a5d27a53 100644
--- a/tools/osx/alias_rules.bzl
+++ b/tools/osx/alias_rules.bzl
@@ -13,8 +13,9 @@
# limitations under the License.
"""Rule that stubs out the xcode_config_alias rule if it is not supported."""
+
def xcode_config_alias(name):
- if hasattr(native, "xcode_config_alias"):
- native.xcode_config_alias(name=name)
- else:
- native.filegroup(name=name)
+ if hasattr(native, "xcode_config_alias"):
+ native.xcode_config_alias(name = name)
+ else:
+ native.filegroup(name = name)
diff --git a/tools/osx/xcode_configure.bzl b/tools/osx/xcode_configure.bzl
index 960754bb0a..44a273b980 100644
--- a/tools/osx/xcode_configure.bzl
+++ b/tools/osx/xcode_configure.bzl
@@ -17,221 +17,239 @@
installed on the local host.
"""
-
def _search_string(fullstring, prefix, suffix):
- """Returns the substring between two given substrings of a larger string.
-
- Args:
- fullstring: The larger string to search.
- prefix: The substring that should occur directly before the returned string.
- suffix: The substring that should occur direclty after the returned string.
- Returns:
- A string occurring in fullstring exactly prefixed by prefix, and exactly
- terminated by suffix. For example, ("hello goodbye", "lo ", " bye") will
- return "good". If there is no such string, returns the empty string.
- """
-
- prefix_index = fullstring.find(prefix)
- if (prefix_index < 0):
- return ""
- result_start_index = prefix_index + len(prefix)
- suffix_index = fullstring.find(suffix, result_start_index)
- if (suffix_index < 0):
- return ""
- return fullstring[result_start_index:suffix_index]
-
+ """Returns the substring between two given substrings of a larger string.
+
+ Args:
+ fullstring: The larger string to search.
+ prefix: The substring that should occur directly before the returned string.
+        suffix: The substring that should occur directly after the returned string.
+ Returns:
+ A string occurring in fullstring exactly prefixed by prefix, and exactly
+ terminated by suffix. For example, ("hello goodbye", "lo ", " bye") will
+ return "good". If there is no such string, returns the empty string.
+ """
+
+ prefix_index = fullstring.find(prefix)
+ if (prefix_index < 0):
+ return ""
+ result_start_index = prefix_index + len(prefix)
+ suffix_index = fullstring.find(suffix, result_start_index)
+ if (suffix_index < 0):
+ return ""
+ return fullstring[result_start_index:suffix_index]
def _search_sdk_output(output, sdkname):
- """Returns the sdk version given xcodebuild stdout and an sdkname."""
- return _search_string(output, "(%s" % sdkname, ")")
-
+ """Returns the sdk version given xcodebuild stdout and an sdkname."""
+ return _search_string(output, "(%s" % sdkname, ")")
def _xcode_version_output(repository_ctx, name, version, aliases, developer_dir):
- """Returns a string containing an xcode_version build target."""
- build_contents = ""
- decorated_aliases = []
- error_msg = ""
- for alias in aliases:
- decorated_aliases.append("'%s'" % alias)
- xcodebuild_result = repository_ctx.execute(["xcrun", "xcodebuild", "-version", "-sdk"], 30,
- {"DEVELOPER_DIR": developer_dir})
- if (xcodebuild_result.return_code != 0):
- error_msg = (
- "Invoking xcodebuild failed, developer dir: {devdir} ," +
- "return code {code}, stderr: {err}, stdout: {out}").format(
- devdir=developer_dir,
- code=xcodebuild_result.return_code,
- err=xcodebuild_result.stderr,
- out=xcodebuild_result.stdout)
- ios_sdk_version = _search_sdk_output(xcodebuild_result.stdout, "iphoneos")
- tvos_sdk_version = _search_sdk_output(xcodebuild_result.stdout, "appletvos")
- macos_sdk_version = _search_sdk_output(xcodebuild_result.stdout, "macosx")
- watchos_sdk_version = _search_sdk_output(xcodebuild_result.stdout, "watchos")
- build_contents += "xcode_version(\n name = '%s'," % name
- build_contents += "\n version = '%s'," % version
- if aliases:
- build_contents += "\n aliases = [%s]," % " ,".join(decorated_aliases)
- if ios_sdk_version:
- build_contents += "\n default_ios_sdk_version = '%s'," % ios_sdk_version
- if tvos_sdk_version:
- build_contents += "\n default_tvos_sdk_version = '%s'," % tvos_sdk_version
- if macos_sdk_version:
- build_contents += "\n default_macos_sdk_version = '%s'," % macos_sdk_version
- if watchos_sdk_version:
- build_contents += "\n default_watchos_sdk_version = '%s'," % watchos_sdk_version
- build_contents += "\n)\n"
- if error_msg:
- build_contents += "\n# Error: " + error_msg.replace("\n", " ") + "\n"
- print(error_msg)
- return build_contents
-
+ """Returns a string containing an xcode_version build target."""
+ build_contents = ""
+ decorated_aliases = []
+ error_msg = ""
+ for alias in aliases:
+ decorated_aliases.append("'%s'" % alias)
+ xcodebuild_result = repository_ctx.execute(
+ ["xcrun", "xcodebuild", "-version", "-sdk"],
+ 30,
+ {"DEVELOPER_DIR": developer_dir},
+ )
+ if (xcodebuild_result.return_code != 0):
+ error_msg = (
+ "Invoking xcodebuild failed, developer dir: {devdir} ," +
+ "return code {code}, stderr: {err}, stdout: {out}"
+ ).format(
+ devdir = developer_dir,
+ code = xcodebuild_result.return_code,
+ err = xcodebuild_result.stderr,
+ out = xcodebuild_result.stdout,
+ )
+ ios_sdk_version = _search_sdk_output(xcodebuild_result.stdout, "iphoneos")
+ tvos_sdk_version = _search_sdk_output(xcodebuild_result.stdout, "appletvos")
+ macos_sdk_version = _search_sdk_output(xcodebuild_result.stdout, "macosx")
+ watchos_sdk_version = _search_sdk_output(xcodebuild_result.stdout, "watchos")
+ build_contents += "xcode_version(\n name = '%s'," % name
+ build_contents += "\n version = '%s'," % version
+ if aliases:
+ build_contents += "\n aliases = [%s]," % " ,".join(decorated_aliases)
+ if ios_sdk_version:
+ build_contents += "\n default_ios_sdk_version = '%s'," % ios_sdk_version
+ if tvos_sdk_version:
+ build_contents += "\n default_tvos_sdk_version = '%s'," % tvos_sdk_version
+ if macos_sdk_version:
+ build_contents += "\n default_macos_sdk_version = '%s'," % macos_sdk_version
+ if watchos_sdk_version:
+ build_contents += "\n default_watchos_sdk_version = '%s'," % watchos_sdk_version
+ build_contents += "\n)\n"
+ if error_msg:
+ build_contents += "\n# Error: " + error_msg.replace("\n", " ") + "\n"
+ print(error_msg)
+ return build_contents
VERSION_CONFIG_STUB = "xcode_config(name = 'host_xcodes')"
-
def run_xcode_locator(repository_ctx, xcode_locator_src_label):
- """Generates xcode-locator from source and runs it.
-
- Builds xcode-locator in the current repository directory.
- Returns the standard output of running xcode-locator with -v, which will
- return information about locally installed Xcode toolchains and the versions
- they are associated with.
-
- This should only be invoked on a darwin OS, as xcode-locator cannot be built
- otherwise.
-
- Args:
- repository_ctx: The repository context.
- xcode_locator_src_label: The label of the source file for xcode-locator.
- Returns:
- A 2-tuple containing:
- output: A list representing installed xcode toolchain information. Each
- element of the list is a struct containing information for one installed
- toolchain. This is an empty list if there was an error building or
- running xcode-locator.
- err: An error string describing the error that occurred when attempting
- to build and run xcode-locator, or None if the run was successful.
- """
- xcodeloc_src_path = str(repository_ctx.path(xcode_locator_src_label))
- xcrun_result = repository_ctx.execute(["env", "-i", "xcrun", "clang", "-fobjc-arc", "-framework",
- "CoreServices", "-framework", "Foundation", "-o",
- "xcode-locator-bin", xcodeloc_src_path], 30)
-
- if (xcrun_result.return_code != 0):
- suggestion = ""
- if "Agreeing to the Xcode/iOS license" in xcrun_result.stderr:
- suggestion = ("(You may need to sign the xcode license." +
- " Try running 'sudo xcodebuild -license')")
- error_msg = (
- "Generating xcode-locator-bin failed. {suggestion} " +
- "return code {code}, stderr: {err}, stdout: {out}").format(
- suggestion=suggestion,
- code=xcrun_result.return_code,
- err=xcrun_result.stderr,
- out=xcrun_result.stdout)
- return ([], error_msg.replace("\n", " "))
-
- xcode_locator_result = repository_ctx.execute(["./xcode-locator-bin", "-v"], 30)
- if (xcode_locator_result.return_code != 0):
- error_msg = (
- "Invoking xcode-locator failed, " +
- "return code {code}, stderr: {err}, stdout: {out}").format(
- code=xcode_locator_result.return_code,
- err=xcode_locator_result.stderr,
- out=xcode_locator_result.stdout)
- return ([], error_msg.replace("\n", " "))
- xcode_toolchains = []
- # xcode_dump is comprised of newlines with different installed xcode versions,
- # each line of the form <version>:<comma_separated_aliases>:<developer_dir>.
- xcode_dump = xcode_locator_result.stdout
- for xcodeversion in xcode_dump.split("\n"):
- if ":" in xcodeversion:
- infosplit = xcodeversion.split(":")
- toolchain = struct(
- version = infosplit[0],
- aliases = infosplit[1].split(","),
- developer_dir = infosplit[2]
- )
- xcode_toolchains.append(toolchain)
- return (xcode_toolchains, None)
-
+ """Generates xcode-locator from source and runs it.
+
+ Builds xcode-locator in the current repository directory.
+ Returns the standard output of running xcode-locator with -v, which will
+ return information about locally installed Xcode toolchains and the versions
+ they are associated with.
+
+ This should only be invoked on a darwin OS, as xcode-locator cannot be built
+ otherwise.
+
+ Args:
+ repository_ctx: The repository context.
+ xcode_locator_src_label: The label of the source file for xcode-locator.
+ Returns:
+ A 2-tuple containing:
+ output: A list representing installed xcode toolchain information. Each
+ element of the list is a struct containing information for one installed
+ toolchain. This is an empty list if there was an error building or
+ running xcode-locator.
+ err: An error string describing the error that occurred when attempting
+ to build and run xcode-locator, or None if the run was successful.
+ """
+ xcodeloc_src_path = str(repository_ctx.path(xcode_locator_src_label))
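+    # Compile the locator with a scrubbed environment ("env -i") so stray user
+    # variables cannot affect the clang invocation.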
+ xcrun_result = repository_ctx.execute([
+ "env",
+ "-i",
+ "xcrun",
+ "clang",
+ "-fobjc-arc",
+ "-framework",
+ "CoreServices",
+ "-framework",
+ "Foundation",
+ "-o",
+ "xcode-locator-bin",
+ xcodeloc_src_path,
+ ], 30)
+
+ if (xcrun_result.return_code != 0):
+ suggestion = ""
+ if "Agreeing to the Xcode/iOS license" in xcrun_result.stderr:
+ suggestion = ("(You may need to sign the xcode license." +
+ " Try running 'sudo xcodebuild -license')")
+ error_msg = (
+ "Generating xcode-locator-bin failed. {suggestion} " +
+ "return code {code}, stderr: {err}, stdout: {out}"
+ ).format(
+ suggestion = suggestion,
+ code = xcrun_result.return_code,
+ err = xcrun_result.stderr,
+ out = xcrun_result.stdout,
+ )
+ return ([], error_msg.replace("\n", " "))
+
+ xcode_locator_result = repository_ctx.execute(["./xcode-locator-bin", "-v"], 30)
+ if (xcode_locator_result.return_code != 0):
+ error_msg = (
+ "Invoking xcode-locator failed, " +
+ "return code {code}, stderr: {err}, stdout: {out}"
+ ).format(
+ code = xcode_locator_result.return_code,
+ err = xcode_locator_result.stderr,
+ out = xcode_locator_result.stdout,
+ )
+ return ([], error_msg.replace("\n", " "))
+ xcode_toolchains = []
+
+    # xcode_dump contains one line per installed Xcode version, each of the
+    # form <version>:<comma_separated_aliases>:<developer_dir>.
+ xcode_dump = xcode_locator_result.stdout
+ for xcodeversion in xcode_dump.split("\n"):
+ if ":" in xcodeversion:
+ infosplit = xcodeversion.split(":")
+ toolchain = struct(
+ version = infosplit[0],
+ aliases = infosplit[1].split(","),
+ developer_dir = infosplit[2],
+ )
+ xcode_toolchains.append(toolchain)
+ return (xcode_toolchains, None)
def _darwin_build_file(repository_ctx):
- """Evaluates local system state to create xcode_config and xcode_version targets."""
- xcodebuild_result = repository_ctx.execute(["env", "-i", "xcrun", "xcodebuild", "-version"], 30)
- # "xcodebuild -version" failing may be indicative of no versions of xcode
- # installed, which is an acceptable machine configuration to have for using
- # bazel. Thus no print warning should be emitted here.
- if (xcodebuild_result.return_code != 0):
- error_msg = (
- "Running xcodebuild -version failed, " +
- "return code {code}, stderr: {err}, stdout: {out}").format(
- code=xcodebuild_result.return_code,
- err=xcodebuild_result.stderr,
- out=xcodebuild_result.stdout)
- return VERSION_CONFIG_STUB + "\n# Error: " + error_msg.replace("\n", " ") + "\n"
-
- (toolchains, xcodeloc_err) = run_xcode_locator(repository_ctx,
- Label(repository_ctx.attr.xcode_locator))
-
- if xcodeloc_err:
- return VERSION_CONFIG_STUB + "\n# Error: " + xcodeloc_err + "\n"
-
- default_xcode_version = _search_string(xcodebuild_result.stdout, "Xcode ", "\n")
- default_xcode_target = ""
- target_names = []
- buildcontents = ""
-
- for toolchain in toolchains:
- version = toolchain.version
- aliases = toolchain.aliases
- developer_dir = toolchain.developer_dir
- target_name = "version%s" % version.replace(".", "_")
- buildcontents += _xcode_version_output(repository_ctx, target_name, version, aliases, developer_dir)
- target_names.append("':%s'" % target_name)
- if (version == default_xcode_version or default_xcode_version in aliases):
- default_xcode_target = target_name
- buildcontents += "xcode_config(name = 'host_xcodes',"
- if target_names:
- buildcontents += "\n versions = [%s]," % ", ".join(target_names)
- if default_xcode_target:
- buildcontents += "\n default = ':%s'," % default_xcode_target
- buildcontents += "\n)\n"
- return buildcontents
-
+ """Evaluates local system state to create xcode_config and xcode_version targets."""
+ xcodebuild_result = repository_ctx.execute(["env", "-i", "xcrun", "xcodebuild", "-version"], 30)
+
+ # "xcodebuild -version" failing may be indicative of no versions of xcode
+ # installed, which is an acceptable machine configuration to have for using
+ # bazel. Thus no print warning should be emitted here.
+ if (xcodebuild_result.return_code != 0):
+ error_msg = (
+ "Running xcodebuild -version failed, " +
+ "return code {code}, stderr: {err}, stdout: {out}"
+ ).format(
+ code = xcodebuild_result.return_code,
+ err = xcodebuild_result.stderr,
+ out = xcodebuild_result.stdout,
+ )
+ return VERSION_CONFIG_STUB + "\n# Error: " + error_msg.replace("\n", " ") + "\n"
+
+ (toolchains, xcodeloc_err) = run_xcode_locator(
+ repository_ctx,
+ Label(repository_ctx.attr.xcode_locator),
+ )
+
+ if xcodeloc_err:
+ return VERSION_CONFIG_STUB + "\n# Error: " + xcodeloc_err + "\n"
+
+ default_xcode_version = _search_string(xcodebuild_result.stdout, "Xcode ", "\n")
+ default_xcode_target = ""
+ target_names = []
+ buildcontents = ""
+
+ for toolchain in toolchains:
+ version = toolchain.version
+ aliases = toolchain.aliases
+ developer_dir = toolchain.developer_dir
+ target_name = "version%s" % version.replace(".", "_")
+ buildcontents += _xcode_version_output(repository_ctx, target_name, version, aliases, developer_dir)
+ target_names.append("':%s'" % target_name)
+ if (version == default_xcode_version or default_xcode_version in aliases):
+ default_xcode_target = target_name
+ buildcontents += "xcode_config(name = 'host_xcodes',"
+ if target_names:
+ buildcontents += "\n versions = [%s]," % ", ".join(target_names)
+ if default_xcode_target:
+ buildcontents += "\n default = ':%s'," % default_xcode_target
+ buildcontents += "\n)\n"
+ return buildcontents
def _impl(repository_ctx):
- """Implementation for the local_config_xcode repository rule.
+ """Implementation for the local_config_xcode repository rule.
- Generates a BUILD file containing a root xcode_config target named 'host_xcodes',
- which points to an xcode_version target for each version of xcode installed on
- the local host machine. If no versions of xcode are present on the machine
- (for instance, if this is a non-darwin OS), creates a stub target.
+ Generates a BUILD file containing a root xcode_config target named 'host_xcodes',
+ which points to an xcode_version target for each version of xcode installed on
+ the local host machine. If no versions of xcode are present on the machine
+ (for instance, if this is a non-darwin OS), creates a stub target.
- Args:
- repository_ctx: The repository context.
- """
+ Args:
+ repository_ctx: The repository context.
+ """
- os_name = repository_ctx.os.name.lower()
- build_contents = "package(default_visibility = ['//visibility:public'])\n\n"
- if (os_name.startswith("mac os")):
- build_contents += _darwin_build_file(repository_ctx)
- else:
- build_contents += VERSION_CONFIG_STUB
- repository_ctx.file("BUILD", build_contents)
+ os_name = repository_ctx.os.name.lower()
+ build_contents = "package(default_visibility = ['//visibility:public'])\n\n"
+ if (os_name.startswith("mac os")):
+ build_contents += _darwin_build_file(repository_ctx)
+ else:
+ build_contents += VERSION_CONFIG_STUB
+ repository_ctx.file("BUILD", build_contents)
xcode_autoconf = repository_rule(
- implementation=_impl,
- local=True,
- attrs={
+ implementation = _impl,
+ local = True,
+ attrs = {
"xcode_locator": attr.string(),
- }
+ },
)
-
def xcode_configure(xcode_locator_label):
- """Generates a repository containing host xcode version information."""
- xcode_autoconf(
- name="local_config_xcode",
- xcode_locator=xcode_locator_label
- )
+ """Generates a repository containing host xcode version information."""
+ xcode_autoconf(
+ name = "local_config_xcode",
+ xcode_locator = xcode_locator_label,
+ )
diff --git a/tools/osx/xcode_version_flag.bzl b/tools/osx/xcode_version_flag.bzl
index f8924409db..7f48da470d 100644
--- a/tools/osx/xcode_version_flag.bzl
+++ b/tools/osx/xcode_version_flag.bzl
@@ -15,106 +15,129 @@
"""Rules that allows select() to differentiate between Apple OS versions."""
def _strip_version(version):
- """Strip trailing characters that aren't digits or '.' from version names.
+ """Strip trailing characters that aren't digits or '.' from version names.
- Some OS versions look like "9.0gm", which is not useful for select()
- statements. Thus, we strip the trailing "gm" part.
+ Some OS versions look like "9.0gm", which is not useful for select()
+ statements. Thus, we strip the trailing "gm" part.
- Args:
- version: the version string
+ Args:
+ version: the version string
- Returns:
- The version with trailing letters stripped.
- """
- result = ""
- for ch in str(version):
- if not ch.isdigit() and ch != ".":
- break
+ Returns:
+ The version with trailing letters stripped.
+ """
+ result = ""
+ for ch in str(version):
+ if not ch.isdigit() and ch != ".":
+ break
- result += ch
-
- return result
+ result += ch
+ return result
def _xcode_version_flag_impl(ctx):
- """A rule that allows select() to differentiate between Xcode versions."""
- xcode_config = ctx.attr._xcode_config[apple_common.XcodeVersionConfig]
- return struct(providers = [
- config_common.FeatureFlagInfo(value = _strip_version(
- xcode_config.xcode_version()))])
-
+ """A rule that allows select() to differentiate between Xcode versions."""
+ xcode_config = ctx.attr._xcode_config[apple_common.XcodeVersionConfig]
+ return struct(providers = [
+ config_common.FeatureFlagInfo(value = _strip_version(
+ xcode_config.xcode_version(),
+ )),
+ ])
def _ios_sdk_version_flag_impl(ctx):
- """A rule that allows select() to select based on the iOS SDK version."""
- xcode_config = ctx.attr._xcode_config[apple_common.XcodeVersionConfig]
-
- return struct(providers = [
- config_common.FeatureFlagInfo(value = _strip_version(
- xcode_config.sdk_version_for_platform(
- apple_common.platform.ios_device)))])
+ """A rule that allows select() to select based on the iOS SDK version."""
+ xcode_config = ctx.attr._xcode_config[apple_common.XcodeVersionConfig]
+ return struct(providers = [
+ config_common.FeatureFlagInfo(value = _strip_version(
+ xcode_config.sdk_version_for_platform(
+ apple_common.platform.ios_device,
+ ),
+ )),
+ ])
def _tvos_sdk_version_flag_impl(ctx):
- """A rule that allows select() to select based on the tvOS SDK version."""
- xcode_config = ctx.attr._xcode_config[apple_common.XcodeVersionConfig]
-
- return struct(providers = [
- config_common.FeatureFlagInfo(value = _strip_version(
- xcode_config.sdk_version_for_platform(
- apple_common.platform.tvos_device)))])
+ """A rule that allows select() to select based on the tvOS SDK version."""
+ xcode_config = ctx.attr._xcode_config[apple_common.XcodeVersionConfig]
+ return struct(providers = [
+ config_common.FeatureFlagInfo(value = _strip_version(
+ xcode_config.sdk_version_for_platform(
+ apple_common.platform.tvos_device,
+ ),
+ )),
+ ])
def _watchos_sdk_version_flag_impl(ctx):
- """A rule that allows select() to select based on the watchOS SDK version."""
- xcode_config = ctx.attr._xcode_config[apple_common.XcodeVersionConfig]
-
- return struct(providers = [
- config_common.FeatureFlagInfo(value = _strip_version(
- xcode_config.sdk_version_for_platform(
- apple_common.platform.watchos_device)))])
+ """A rule that allows select() to select based on the watchOS SDK version."""
+ xcode_config = ctx.attr._xcode_config[apple_common.XcodeVersionConfig]
+ return struct(providers = [
+ config_common.FeatureFlagInfo(value = _strip_version(
+ xcode_config.sdk_version_for_platform(
+ apple_common.platform.watchos_device,
+ ),
+ )),
+ ])
def _macos_sdk_version_flag_impl(ctx):
- """A rule that allows select() to select based on the macOS SDK version."""
- xcode_config = ctx.attr._xcode_config[apple_common.XcodeVersionConfig]
-
- return struct(providers = [
- config_common.FeatureFlagInfo(value = _strip_version(
- xcode_config.sdk_version_for_platform(
- apple_common.platform.macos)))])
+ """A rule that allows select() to select based on the macOS SDK version."""
+ xcode_config = ctx.attr._xcode_config[apple_common.XcodeVersionConfig]
+ return struct(providers = [
+ config_common.FeatureFlagInfo(value = _strip_version(
+ xcode_config.sdk_version_for_platform(
+ apple_common.platform.macos,
+ ),
+ )),
+ ])
xcode_version_flag = rule(
implementation = _xcode_version_flag_impl,
attrs = {
- "_xcode_config": attr.label(default=configuration_field(
- fragment="apple", name="xcode_config_label")),
- })
+ "_xcode_config": attr.label(default = configuration_field(
+ fragment = "apple",
+ name = "xcode_config_label",
+ )),
+ },
+)
ios_sdk_version_flag = rule(
implementation = _ios_sdk_version_flag_impl,
attrs = {
- "_xcode_config": attr.label(default=configuration_field(
- fragment="apple", name="xcode_config_label")),
- })
+ "_xcode_config": attr.label(default = configuration_field(
+ fragment = "apple",
+ name = "xcode_config_label",
+ )),
+ },
+)
tvos_sdk_version_flag = rule(
implementation = _tvos_sdk_version_flag_impl,
attrs = {
- "_xcode_config": attr.label(default=configuration_field(
- fragment="apple", name="xcode_config_label")),
- })
+ "_xcode_config": attr.label(default = configuration_field(
+ fragment = "apple",
+ name = "xcode_config_label",
+ )),
+ },
+)
watchos_sdk_version_flag = rule(
implementation = _watchos_sdk_version_flag_impl,
attrs = {
- "_xcode_config": attr.label(default=configuration_field(
- fragment="apple", name="xcode_config_label")),
- })
+ "_xcode_config": attr.label(default = configuration_field(
+ fragment = "apple",
+ name = "xcode_config_label",
+ )),
+ },
+)
macos_sdk_version_flag = rule(
implementation = _macos_sdk_version_flag_impl,
attrs = {
- "_xcode_config": attr.label(default=configuration_field(
- fragment="apple", name="xcode_config_label")),
- })
+ "_xcode_config": attr.label(default = configuration_field(
+ fragment = "apple",
+ name = "xcode_config_label",
+ )),
+ },
+)
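
Taken together, these rules expose stripped version strings (e.g. "9.0gm" becomes "9.0") as FeatureFlagInfo values, which is what lets an ordinary config_setting match on them. Below is a minimal BUILD-file sketch of the intended consumption; the target names and the "9.2" value are illustrative, not taken from this diff.

load("@bazel_tools//tools/osx:xcode_version_flag.bzl", "xcode_version_flag")

# Exposes the current (stripped) Xcode version as a feature flag.
xcode_version_flag(name = "xcode_version")

# Matches when the stripped Xcode version is exactly "9.2".
config_setting(
    name = "xcode_9_2",
    flag_values = {":xcode_version": "9.2"},
)

cc_library(
    name = "lib",
    srcs = ["lib.cc"],
    copts = select({
        ":xcode_9_2": ["-DHAS_XCODE_9_2"],
        "//conditions:default": [],
    }),
)
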
diff --git a/tools/sh/sh_configure.bzl b/tools/sh/sh_configure.bzl
index f7c8899420..5ca9e050d9 100644
--- a/tools/sh/sh_configure.bzl
+++ b/tools/sh/sh_configure.bzl
@@ -14,42 +14,42 @@
"""Configure the shell toolchain on the local machine."""
def _is_windows(repository_ctx):
- """Returns true if the host OS is Windows."""
- return repository_ctx.os.name.startswith("windows")
+ """Returns true if the host OS is Windows."""
+ return repository_ctx.os.name.startswith("windows")
def _sh_config_impl(repository_ctx):
- """sh_config rule implementation.
+ """sh_config rule implementation.
- Detects the path of the shell interpreter on the local machine and
- stores it in a sh_toolchain rule.
+ Detects the path of the shell interpreter on the local machine and
+ stores it in a sh_toolchain rule.
- Args:
- repository_ctx: the repository rule context object
- """
- sh_path = repository_ctx.os.environ.get("BAZEL_SH")
- if not sh_path:
- if _is_windows(repository_ctx):
- sh_path = repository_ctx.which("bash.exe")
- if sh_path:
- # When the Windows Subsystem for Linux is installed there's a
- # bash.exe under %WINDIR%\system32\bash.exe that launches Ubuntu
- # Bash which cannot run native Windows programs so it's not what
- # we want.
- windir = repository_ctx.os.environ.get("WINDIR")
- if windir and sh_path.startswith(windir):
- sh_path = None
- else:
- sh_path = repository_ctx.which("bash")
- if not sh_path:
- sh_path = repository_ctx.which("sh")
+ Args:
+ repository_ctx: the repository rule context object
+ """
+ sh_path = repository_ctx.os.environ.get("BAZEL_SH")
+ if not sh_path:
+ if _is_windows(repository_ctx):
+ sh_path = repository_ctx.which("bash.exe")
+ if sh_path:
+            # When the Windows Subsystem for Linux is installed, there's a
+            # bash.exe under %WINDIR%\system32\bash.exe that launches Ubuntu
+            # Bash, which cannot run native Windows programs, so it's not
+            # what we want.
+ windir = repository_ctx.os.environ.get("WINDIR")
+ if windir and sh_path.startswith(windir):
+ sh_path = None
+ else:
+ sh_path = repository_ctx.which("bash")
+ if not sh_path:
+ sh_path = repository_ctx.which("sh")
- if not sh_path:
- sh_path = ""
+ if not sh_path:
+ sh_path = ""
- if sh_path and _is_windows(repository_ctx):
- sh_path = sh_path.replace("\\", "/")
+ if sh_path and _is_windows(repository_ctx):
+ sh_path = sh_path.replace("\\", "/")
- repository_ctx.file("BUILD", """
+ repository_ctx.file("BUILD", """
load("@bazel_tools//tools/sh:sh_toolchain.bzl", "sh_toolchain")
sh_toolchain(
@@ -75,6 +75,6 @@ sh_config = repository_rule(
)
def sh_configure():
- """Detect the local shell interpreter and register its toolchain."""
- sh_config(name = "local_config_sh")
- native.register_toolchains("@local_config_sh//:local_sh_toolchain")
+ """Detect the local shell interpreter and register its toolchain."""
+ sh_config(name = "local_config_sh")
+ native.register_toolchains("@local_config_sh//:local_sh_toolchain")
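
The registration path for this toolchain is short enough to sketch end to end. The generated @local_config_sh repository's BUILD file is cut off in this hunk after the sh_toolchain load, so assuming it pairs the sh_toolchain target with a toolchain() registration named local_sh_toolchain (only that registered label is visible above), a WORKSPACE needs just:

# WORKSPACE sketch. sh_configure() is the one-call entry point: it creates
# @local_config_sh by probing BAZEL_SH, then bash/bash.exe, then sh, and
# registers @local_config_sh//:local_sh_toolchain.
load("@bazel_tools//tools/sh:sh_configure.bzl", "sh_configure")

sh_configure()

Consumers then resolve the interpreter path through the toolchain's path attribute instead of hardcoding /bin/bash.
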
diff --git a/tools/sh/sh_toolchain.bzl b/tools/sh/sh_toolchain.bzl
index ddd7de7980..2a3370469c 100644
--- a/tools/sh/sh_toolchain.bzl
+++ b/tools/sh/sh_toolchain.bzl
@@ -14,8 +14,8 @@
"""Define a toolchain rule for the shell."""
def _sh_toolchain_impl(ctx):
- """sh_toolchain rule implementation."""
- return [platform_common.ToolchainInfo(path = ctx.attr.path)]
+ """sh_toolchain rule implementation."""
+ return [platform_common.ToolchainInfo(path = ctx.attr.path)]
sh_toolchain = rule(
attrs = {