author    Guangda Lai <laigd@google.com>  2018-01-25 23:59:19 -0800
committer TensorFlower Gardener <gardener@tensorflow.org>  2018-01-26 00:02:44 -0800
commit    76f6938bafeb81a4ca41b8dac2b9c83e1286fa95 (patch)
tree      75dd6285db559b19d7a0449065973780a748ef7c /third_party
parent    ffda6079ed619df8fd3edb4db71ffc7d005c2430 (diff)
Set up TensorRT configurations for external use, and add a test.
PiperOrigin-RevId: 183347199
Diffstat (limited to 'third_party')
-rw-r--r--  third_party/gpus/cuda_configure.bzl          104
-rw-r--r--  third_party/tensorrt/BUILD                     0
-rw-r--r--  third_party/tensorrt/BUILD.tpl                67
-rw-r--r--  third_party/tensorrt/build_defs.bzl.tpl        7
-rw-r--r--  third_party/tensorrt/tensorrt_configure.bzl  224
5 files changed, 355 insertions, 47 deletions
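
For context, the repository rule added here is consumed from a WORKSPACE file. A minimal sketch, following the usage documented at the bottom of tensorrt_configure.bzl (the load path is an assumption based on where the file lives in this change):

```python
# Hypothetical WORKSPACE sketch; the load path mirrors the location of
# tensorrt_configure.bzl in this commit.
load("//third_party/tensorrt:tensorrt_configure.bzl", "tensorrt_configure")

# Reads TENSORRT_INSTALL_PATH and TF_TENSORRT_VERSION from the environment;
# falls back to a dummy repository when TENSORRT_INSTALL_PATH is unset.
tensorrt_configure(name = "local_config_tensorrt")
```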
diff --git a/third_party/gpus/cuda_configure.bzl b/third_party/gpus/cuda_configure.bzl
index 2727fa5efe..8e1dd8a54f 100644
--- a/third_party/gpus/cuda_configure.bzl
+++ b/third_party/gpus/cuda_configure.bzl
@@ -236,7 +236,7 @@ def _cudnn_install_basedir(repository_ctx):
return cudnn_install_path
-def _matches_version(environ_version, detected_version):
+def matches_version(environ_version, detected_version):
"""Checks whether the user-specified version matches the detected version.
This function performs a weak matching so that if the user specifies only the
@@ -317,7 +317,7 @@ def _cuda_version(repository_ctx, cuda_toolkit_path, cpu_value):
environ_version = ""
if _TF_CUDA_VERSION in repository_ctx.os.environ:
environ_version = repository_ctx.os.environ[_TF_CUDA_VERSION].strip()
- if environ_version and not _matches_version(environ_version, full_version):
+ if environ_version and not matches_version(environ_version, full_version):
auto_configure_fail(
("CUDA version detected from nvcc (%s) does not match " +
"TF_CUDA_VERSION (%s)") % (full_version, environ_version))
@@ -338,35 +338,49 @@ _DEFINE_CUDNN_MINOR = "#define CUDNN_MINOR"
_DEFINE_CUDNN_PATCHLEVEL = "#define CUDNN_PATCHLEVEL"
-def _find_cuda_define(repository_ctx, cudnn_header_dir, define):
- """Returns the value of a #define in cudnn.h
+def find_cuda_define(repository_ctx, header_dir, header_file, define):
+ """Returns the value of a #define in a header file.
- Greps through cudnn.h and returns the value of the specified #define. If the
- #define is not found, then raise an error.
+ Greps through a header file and returns the value of the specified #define.
+ If the #define is not found, an error is raised.
Args:
repository_ctx: The repository context.
- cudnn_header_dir: The directory containing the cuDNN header.
+ header_dir: The directory containing the header file.
+ header_file: The header file name.
define: The #define to search for.
Returns:
- The value of the #define found in cudnn.h.
+ The value of the #define found in the header.
"""
- # Confirm location of cudnn.h and grep for the line defining CUDNN_MAJOR.
- cudnn_h_path = repository_ctx.path("%s/cudnn.h" % cudnn_header_dir)
- if not cudnn_h_path.exists:
- auto_configure_fail("Cannot find cudnn.h at %s" % str(cudnn_h_path))
- result = repository_ctx.execute(["grep", "--color=never", "-E", define, str(cudnn_h_path)])
+ # Confirm location of the header and grep for the line defining the macro.
+ h_path = repository_ctx.path("%s/%s" % (header_dir, header_file))
+ if not h_path.exists:
+ auto_configure_fail("Cannot find %s at %s" % (header_file, str(h_path)))
+ result = repository_ctx.execute(
+ # Grep one extra line, since some #defines are split across two lines.
+ ["grep", "--color=never", "-A1", "-E", define, str(h_path)])
if result.stderr:
- auto_configure_fail("Error reading %s: %s" %
- (result.stderr, str(cudnn_h_path)))
+ auto_configure_fail("Error reading %s: %s" % (str(h_path), result.stderr))
- # Parse the cuDNN major version from the line defining CUDNN_MAJOR
- lines = result.stdout.splitlines()
- if len(lines) == 0 or lines[0].find(define) == -1:
+ # Parse the version from the line defining the macro.
+ if result.stdout.find(define) == -1:
auto_configure_fail("Cannot find line containing '%s' in %s" %
- (define, str(cudnn_h_path)))
- return lines[0].replace(define, "").strip()
+ (define, h_path))
+ version = result.stdout
+ # Remove the newline and any '\' line-continuation character.
+ version = version.replace("\\", " ")
+ version = version.replace("\n", " ")
+ version = version.replace(define, "").lstrip()
+ # Remove the code after the version number.
+ version_end = version.find(" ")
+ if version_end != -1:
+ if version_end == 0:
+ auto_configure_fail(
+ "Cannot extract the version from line containing '%s' in %s" %
+ (define, str(h_path)))
+ version = version[:version_end].strip()
+ return version
def _cudnn_version(repository_ctx, cudnn_install_basedir, cpu_value):
@@ -382,12 +396,12 @@ def _cudnn_version(repository_ctx, cudnn_install_basedir, cpu_value):
"""
cudnn_header_dir = _find_cudnn_header_dir(repository_ctx,
cudnn_install_basedir)
- major_version = _find_cuda_define(repository_ctx, cudnn_header_dir,
- _DEFINE_CUDNN_MAJOR)
- minor_version = _find_cuda_define(repository_ctx, cudnn_header_dir,
- _DEFINE_CUDNN_MINOR)
- patch_version = _find_cuda_define(repository_ctx, cudnn_header_dir,
- _DEFINE_CUDNN_PATCHLEVEL)
+ major_version = find_cuda_define(
+ repository_ctx, cudnn_header_dir, "cudnn.h", _DEFINE_CUDNN_MAJOR)
+ minor_version = find_cuda_define(
+ repository_ctx, cudnn_header_dir, "cudnn.h", _DEFINE_CUDNN_MINOR)
+ patch_version = find_cuda_define(
+ repository_ctx, cudnn_header_dir, "cudnn.h", _DEFINE_CUDNN_PATCHLEVEL)
full_version = "%s.%s.%s" % (major_version, minor_version, patch_version)
# Check whether TF_CUDNN_VERSION was set by the user and fail if it does not
@@ -395,7 +409,7 @@ def _cudnn_version(repository_ctx, cudnn_install_basedir, cpu_value):
environ_version = ""
if _TF_CUDNN_VERSION in repository_ctx.os.environ:
environ_version = repository_ctx.os.environ[_TF_CUDNN_VERSION].strip()
- if environ_version and not _matches_version(environ_version, full_version):
+ if environ_version and not matches_version(environ_version, full_version):
cudnn_h_path = repository_ctx.path("%s/include/cudnn.h" %
cudnn_install_basedir)
auto_configure_fail(
@@ -427,7 +441,7 @@ def _compute_capabilities(repository_ctx):
return capabilities
-def _cpu_value(repository_ctx):
+def get_cpu_value(repository_ctx):
"""Returns the name of the host operating system.
Args:
@@ -447,7 +461,7 @@ def _cpu_value(repository_ctx):
def _is_windows(repository_ctx):
"""Returns true if the host operating system is windows."""
- return _cpu_value(repository_ctx) == "Windows"
+ return get_cpu_value(repository_ctx) == "Windows"
def _lib_name(lib, cpu_value, version="", static=False):
"""Constructs the platform-specific name of a library.
@@ -582,11 +596,8 @@ def _find_libs(repository_ctx, cuda_config):
cuda_config: The CUDA config as returned by _get_cuda_config
Returns:
- Map of library names to structs of filename and path as returned by
- _find_cuda_lib and _find_cupti_lib.
+ Map of library names to structs of filename and path.
"""
- cudnn_version = cuda_config.cudnn_version
- cudnn_ext = ".%s" % cudnn_version if cudnn_version else ""
cpu_value = cuda_config.cpu_value
return {
"cuda": _find_cuda_lib("cuda", repository_ctx, cpu_value, cuda_config.cuda_toolkit_path),
@@ -611,7 +622,7 @@ def _find_libs(repository_ctx, cuda_config):
"cudnn": _find_cuda_lib(
"cudnn", repository_ctx, cpu_value, cuda_config.cudnn_install_basedir,
cuda_config.cudnn_version),
- "cupti": _find_cupti_lib(repository_ctx, cuda_config),
+ "cupti": _find_cupti_lib(repository_ctx, cuda_config)
}
@@ -654,7 +665,7 @@ def _get_cuda_config(repository_ctx):
compute_capabilities: A list of the system's CUDA compute capabilities.
cpu_value: The name of the host operating system.
"""
- cpu_value = _cpu_value(repository_ctx)
+ cpu_value = get_cpu_value(repository_ctx)
cuda_toolkit_path = _cuda_toolkit_path(repository_ctx)
cuda_version = _cuda_version(repository_ctx, cuda_toolkit_path, cpu_value)
cudnn_install_basedir = _cudnn_install_basedir(repository_ctx)
@@ -712,13 +723,13 @@ error_gpu_disabled()
def _create_dummy_repository(repository_ctx):
- cpu_value = _cpu_value(repository_ctx)
+ cpu_value = get_cpu_value(repository_ctx)
# Set up BUILD file for cuda/.
_tpl(repository_ctx, "cuda:build_defs.bzl",
{
"%{cuda_is_configured}": "False",
- "%{cuda_extra_copts}": "[]"
+ "%{cuda_extra_copts}": "[]",
})
_tpl(repository_ctx, "cuda:BUILD",
{
@@ -805,8 +816,8 @@ def _norm_path(path):
return path
-def _symlink_genrule_for_dir(repository_ctx, src_dir, dest_dir, genrule_name,
- src_files = [], dest_files = []):
+def symlink_genrule_for_dir(repository_ctx, src_dir, dest_dir, genrule_name,
+ src_files = [], dest_files = []):
"""Returns a genrule to symlink(or copy if on Windows) a set of files.
If src_dir is passed, files will be read from the given directory; otherwise
@@ -913,11 +924,11 @@ def _create_local_cuda_repository(repository_ctx):
# cuda_toolkit_path
cuda_toolkit_path = cuda_config.cuda_toolkit_path
cuda_include_path = cuda_toolkit_path + "/include"
- genrules = [_symlink_genrule_for_dir(repository_ctx,
+ genrules = [symlink_genrule_for_dir(repository_ctx,
cuda_include_path, "cuda/include", "cuda-include")]
- genrules.append(_symlink_genrule_for_dir(repository_ctx,
+ genrules.append(symlink_genrule_for_dir(repository_ctx,
cuda_toolkit_path + "/nvvm", "cuda/nvvm", "cuda-nvvm"))
- genrules.append(_symlink_genrule_for_dir(repository_ctx,
+ genrules.append(symlink_genrule_for_dir(repository_ctx,
cuda_toolkit_path + "/extras/CUPTI/include",
"cuda/extras/CUPTI/include", "cuda-extras"))
@@ -927,15 +938,15 @@ def _create_local_cuda_repository(repository_ctx):
for lib in cuda_libs.values():
cuda_lib_src.append(lib.path)
cuda_lib_dest.append("cuda/lib/" + lib.file_name)
- genrules.append(_symlink_genrule_for_dir(repository_ctx, None, "", "cuda-lib",
- cuda_lib_src, cuda_lib_dest))
+ genrules.append(symlink_genrule_for_dir(repository_ctx, None, "", "cuda-lib",
+ cuda_lib_src, cuda_lib_dest))
- # Set up the symbolic links for cudnn if cudnn was was not installed to
+ # Set up the symbolic links for cudnn if cudnn was not installed to
# CUDA_TOOLKIT_PATH.
included_files = _read_dir(repository_ctx, cuda_include_path).replace(
cuda_include_path, '').splitlines()
if '/cudnn.h' not in included_files:
- genrules.append(_symlink_genrule_for_dir(repository_ctx, None,
+ genrules.append(symlink_genrule_for_dir(repository_ctx, None,
"cuda/include/", "cudnn-include", [cudnn_header_dir + "/cudnn.h"],
["cudnn.h"]))
else:
@@ -952,7 +963,6 @@ def _create_local_cuda_repository(repository_ctx):
"%{cuda_is_configured}": "True",
"%{cuda_extra_copts}": _compute_cuda_extra_copts(
repository_ctx, cuda_config.compute_capabilities),
-
})
_tpl(repository_ctx, "cuda:BUILD",
{
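
The reworked find_cuda_define above greps one extra line (`-A1`) so it can handle a #define whose value continues on the next line behind a backslash. A plain-Python sketch of the same parsing logic, assuming grep has already returned the matching line plus one trailing line (function name is illustrative):

```python
def parse_define(grep_output, define):
    # grep_output: stdout of `grep -A1 -E <define> <header>`.
    if grep_output.find(define) == -1:
        raise ValueError("Cannot find line containing '%s'" % define)
    # Fold line continuations and newlines into spaces, then strip the
    # macro name itself.
    version = grep_output.replace("\\", " ").replace("\n", " ")
    version = version.replace(define, "").lstrip()
    # Keep only the first token; anything after it is trailing code
    # picked up by -A1.
    end = version.find(" ")
    return version[:end].strip() if end != -1 else version

# A define split across two lines parses like a one-line define:
parse_define("#define CUDNN_MAJOR \\\n7", "#define CUDNN_MAJOR")  # -> "7"
parse_define("#define CUDNN_MAJOR 7\n#define CUDNN_MINOR 0",
             "#define CUDNN_MAJOR")  # -> "7"
```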
diff --git a/third_party/tensorrt/BUILD b/third_party/tensorrt/BUILD
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/tensorrt/BUILD
diff --git a/third_party/tensorrt/BUILD.tpl b/third_party/tensorrt/BUILD.tpl
new file mode 100644
index 0000000000..feaeb0bea6
--- /dev/null
+++ b/third_party/tensorrt/BUILD.tpl
@@ -0,0 +1,67 @@
+# NVIDIA TensorRT
+# A high-performance deep learning inference optimizer and runtime.
+
+licenses(["notice"])
+
+load("@local_config_cuda//cuda:build_defs.bzl", "cuda_default_copts")
+
+package(default_visibility = ["//visibility:public"])
+
+cc_library(
+ name = "tensorrt_headers",
+ hdrs = [%{tensorrt_headers}],
+ includes = [
+ "include",
+ ],
+ visibility = ["//visibility:public"],
+)
+
+cc_library(
+ name = "nv_infer",
+ srcs = [%{nv_infer}],
+ data = [%{nv_infer}],
+ includes = [
+ "include",
+ ],
+ copts = cuda_default_copts(),
+ deps = [
+ "@local_config_cuda//cuda:cuda",
+ ":tensorrt_headers",
+ ],
+ linkstatic = 1,
+ visibility = ["//visibility:public"],
+)
+
+cc_library(
+ name = "nv_infer_plugin",
+ srcs = [%{nv_infer_plugin}],
+ data = [%{nv_infer_plugin}],
+ includes = [
+ "include",
+ ],
+ copts = cuda_default_copts(),
+ deps = [
+ "@local_config_cuda//cuda:cuda",
+ ":nv_infer",
+ ":tensorrt_headers",
+ ],
+ linkstatic = 1,
+ visibility = ["//visibility:public"],
+)
+
+cc_library(
+ name = "nv_parsers",
+ srcs = [%{nv_parsers}],
+ data = [%{nv_parsers}],
+ includes = [
+ "include",
+ ],
+ copts = cuda_default_copts(),
+ deps = [
+ ":tensorrt_headers",
+ ],
+ linkstatic = 1,
+ visibility = ["//visibility:public"],
+)
+
+%{tensorrt_genrules}
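
The %{...} placeholders in this template are filled in by _tensorrt_configure_impl further down. An illustrative substitution map, assuming a hypothetical TensorRT 4 install (the ".4" suffixes are an assumption; real values come from the detected library SONAMEs):

```python
# Illustrative only; real values are derived from the detected SONAMEs
# and the generated symlink genrules.
substitutions = {
    "%{tensorrt_headers}": '":tensorrt_include"',
    "%{nv_infer}": '"tensorrt/lib/libnvinfer.so.4"',
    "%{nv_infer_plugin}": '"tensorrt/lib/libnvinfer_plugin.so.4"',
    "%{nv_parsers}": '"tensorrt/lib/libnvparsers.so.4"',
    "%{tensorrt_genrules}": "...",  # symlink genrules, elided here
}
```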
diff --git a/third_party/tensorrt/build_defs.bzl.tpl b/third_party/tensorrt/build_defs.bzl.tpl
new file mode 100644
index 0000000000..0dc3a7ba2d
--- /dev/null
+++ b/third_party/tensorrt/build_defs.bzl.tpl
@@ -0,0 +1,7 @@
+# Build configurations for TensorRT.
+
+def if_tensorrt(if_true, if_false=[]):
+ """Tests whether TensorRT was enabled during the configure process."""
+ if %{tensorrt_is_configured}:
+ return if_true
+ return if_false
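
A build target would typically gate TensorRT-specific options and dependencies on this macro. A hedged sketch of a consumer (the target name, source file, and copt are illustrative, not part of this change):

```python
load("@local_config_tensorrt//:build_defs.bzl", "if_tensorrt")

cc_library(
    name = "trt_engine_op",  # hypothetical target
    srcs = ["trt_engine_op.cc"],  # hypothetical source
    copts = if_tensorrt(["-DGOOGLE_TENSORRT=1"]),  # assumed flag
    deps = if_tensorrt(["@local_config_tensorrt//:nv_infer"]),
)
```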
diff --git a/third_party/tensorrt/tensorrt_configure.bzl b/third_party/tensorrt/tensorrt_configure.bzl
new file mode 100644
index 0000000000..8aa0f28f39
--- /dev/null
+++ b/third_party/tensorrt/tensorrt_configure.bzl
@@ -0,0 +1,224 @@
+# -*- Python -*-
+"""Repository rule for TensorRT configuration.
+
+`tensorrt_configure` depends on the following environment variables:
+
+ * `TF_TENSORRT_VERSION`: The TensorRT libnvinfer version.
+ * `TENSORRT_INSTALL_PATH`: The installation path of the TensorRT library.
+"""
+
+load(
+ "//third_party/gpus:cuda_configure.bzl",
+ "auto_configure_fail",
+ "get_cpu_value",
+ "find_cuda_define",
+ "matches_version",
+ "symlink_genrule_for_dir",
+)
+
+_TENSORRT_INSTALL_PATH = "TENSORRT_INSTALL_PATH"
+_TF_TENSORRT_VERSION = "TF_TENSORRT_VERSION"
+
+_TF_TENSORRT_LIBS = ["nvinfer", "nvinfer_plugin", "nvparsers"]
+_TF_TENSORRT_HEADERS = [
+ "NvInfer.h", "NvInferPlugin.h", "NvCaffeParser.h", "NvUffParser.h",
+ "NvUtils.h"
+]
+
+_DEFINE_TENSORRT_SONAME_MAJOR = "#define NV_TENSORRT_SONAME_MAJOR"
+_DEFINE_TENSORRT_SONAME_MINOR = "#define NV_TENSORRT_SONAME_MINOR"
+_DEFINE_TENSORRT_SONAME_PATCH = "#define NV_TENSORRT_SONAME_PATCH"
+
+
+def _headers_exist(repository_ctx, path):
+ """Returns whether all TensorRT header files could be found in 'path'.
+
+ Args:
+ repository_ctx: The repository context.
+ path: The TensorRT include path to check.
+
+ Returns:
+ True if all TensorRT header files can be found in the path.
+ """
+ for h in _TF_TENSORRT_HEADERS:
+ if not repository_ctx.path("%s/%s" % (path, h)).exists:
+ return False
+ return True
+
+
+def _find_trt_header_dir(repository_ctx, trt_install_path):
+ """Returns the path to the directory containing headers of TensorRT.
+
+ Args:
+ repository_ctx: The repository context.
+ trt_install_path: The TensorRT library install directory.
+
+ Returns:
+ The path of the directory containing the TensorRT header.
+ """
+ if trt_install_path == "/usr/lib/x86_64-linux-gnu":
+ path = "/usr/include/x86_64-linux-gnu"
+ if _headers_exist(repository_ctx, path):
+ return path
+ path = str(repository_ctx.path("%s/../include" % trt_install_path).realpath)
+ if _headers_exist(repository_ctx, path):
+ return path
+ auto_configure_fail(
+ "Cannot find NvInfer.h with TensorRT install path %s" % trt_install_path)
+
+
+def _trt_lib_version(repository_ctx, trt_install_path):
+ """Detects the library (e.g. libnvinfer) version of TensorRT.
+
+ Args:
+ repository_ctx: The repository context.
+ trt_install_path: The TensorRT library install directory.
+
+ Returns:
+ A string containing the library version of TensorRT.
+ """
+ trt_header_dir = _find_trt_header_dir(repository_ctx, trt_install_path)
+ major_version = find_cuda_define(repository_ctx, trt_header_dir, "NvInfer.h",
+ _DEFINE_TENSORRT_SONAME_MAJOR)
+ minor_version = find_cuda_define(repository_ctx, trt_header_dir, "NvInfer.h",
+ _DEFINE_TENSORRT_SONAME_MINOR)
+ patch_version = find_cuda_define(repository_ctx, trt_header_dir, "NvInfer.h",
+ _DEFINE_TENSORRT_SONAME_PATCH)
+ full_version = "%s.%s.%s" % (major_version, minor_version, patch_version)
+ environ_version = repository_ctx.os.environ[_TF_TENSORRT_VERSION].strip()
+ if not matches_version(environ_version, full_version):
+ auto_configure_fail(
+ ("TensorRT library version detected from %s/%s (%s) does not match " +
+ "TF_TENSORRT_VERSION (%s). To fix this rerun configure again.") %
+ (trt_header_dir, "NvInfer.h", full_version, environ_version))
+ return environ_version
+
+
+def _find_trt_libs(repository_ctx, trt_install_path, trt_lib_version):
+ """Finds the given TensorRT library on the system.
+
+ Adapted from code contributed by Sami Kama (https://github.com/samikama).
+
+ Args:
+ repository_ctx: The repository context.
+ trt_install_path: The TensorRT library installation directory.
+ trt_lib_version: The version of TensorRT library files as returned
+ by _trt_lib_version.
+
+ Returns:
+ Map of library names to structs with the following fields:
+ src_file_path: The full path to the library found on the system.
+ dst_file_name: The basename of the target library.
+ """
+ objdump = repository_ctx.which("objdump")
+ result = {}
+ for lib in _TF_TENSORRT_LIBS:
+ dst_file_name = "lib%s.so.%s" % (lib, trt_lib_version)
+ src_file_path = repository_ctx.path("%s/%s" % (trt_install_path,
+ dst_file_name))
+ if not src_file_path.exists:
+ auto_configure_fail(
+ "Cannot find TensorRT library %s" % str(src_file_path))
+ if objdump != None:
+ objdump_out = repository_ctx.execute([objdump, "-p", str(src_file_path)])
+ for line in objdump_out.stdout.splitlines():
+ if "SONAME" in line:
+ dst_file_name = line.strip().split(" ")[-1]
+ result.update({
+ lib:
+ struct(
+ dst_file_name=dst_file_name,
+ src_file_path=str(src_file_path.realpath))
+ })
+ return result
+
+
+def _tpl(repository_ctx, tpl, substitutions):
+ repository_ctx.template(tpl, Label("//third_party/tensorrt:%s.tpl" % tpl),
+ substitutions)
+
+
+def _create_dummy_repository(repository_ctx):
+ """Create a dummy TensorRT repository."""
+ _tpl(repository_ctx, "build_defs.bzl", {"%{tensorrt_is_configured}": "False"})
+ substitutions = {
+ "%{tensorrt_genrules}": "",
+ "%{tensorrt_headers}": "",
+ }
+ for lib in _TF_TENSORRT_LIBS:
+ k = "%%{%s}" % lib.replace("nv", "nv_")
+ substitutions.update({k: ""})
+ _tpl(repository_ctx, "BUILD", substitutions)
+
+
+def _tensorrt_configure_impl(repository_ctx):
+ """Implementation of the tensorrt_configure repository rule."""
+ if _TENSORRT_INSTALL_PATH not in repository_ctx.os.environ:
+ _create_dummy_repository(repository_ctx)
+ return
+
+ if (get_cpu_value(repository_ctx) != "Linux"):
+ auto_configure_fail("TensorRT is supported only on Linux.")
+ if _TF_TENSORRT_VERSION not in repository_ctx.os.environ:
+ auto_configure_fail("TensorRT library (libnvinfer) version is not set.")
+ trt_install_path = repository_ctx.os.environ[_TENSORRT_INSTALL_PATH].strip()
+ if not repository_ctx.path(trt_install_path).exists:
+ auto_configure_fail(
+ "Cannot find TensorRT install path %s." % trt_install_path)
+
+ # Set up the symbolic links for the library files.
+ trt_lib_version = _trt_lib_version(repository_ctx, trt_install_path)
+ trt_libs = _find_trt_libs(repository_ctx, trt_install_path, trt_lib_version)
+ trt_lib_src = []
+ trt_lib_dest = []
+ for lib in trt_libs.values():
+ trt_lib_src.append(lib.src_file_path)
+ trt_lib_dest.append(lib.dst_file_name)
+ genrules = [
+ symlink_genrule_for_dir(repository_ctx, None, "tensorrt/lib/",
+ "tensorrt_lib", trt_lib_src, trt_lib_dest)
+ ]
+
+ # Set up the symbolic links for the header files.
+ trt_header_dir = _find_trt_header_dir(repository_ctx, trt_install_path)
+ src_files = [
+ "%s/%s" % (trt_header_dir, header) for header in _TF_TENSORRT_HEADERS
+ ]
+ dest_files = _TF_TENSORRT_HEADERS
+ genrules.append(
+ symlink_genrule_for_dir(repository_ctx, None, "tensorrt/include/",
+ "tensorrt_include", src_files, dest_files))
+
+ # Set up config file.
+ _tpl(repository_ctx, "build_defs.bzl", {"%{tensorrt_is_configured}": "True"})
+
+ # Set up BUILD file.
+ substitutions = {
+ "%{tensorrt_genrules}": "\n".join(genrules),
+ "%{tensorrt_headers}": '":tensorrt_include"',
+ }
+ for lib in _TF_TENSORRT_LIBS:
+ k = "%%{%s}" % lib.replace("nv", "nv_")
+ v = '"tensorrt/lib/%s"' % trt_libs[lib].dst_file_name
+ substitutions.update({k: v})
+ _tpl(repository_ctx, "BUILD", substitutions)
+
+
+tensorrt_configure = repository_rule(
+ implementation=_tensorrt_configure_impl,
+ environ=[
+ _TENSORRT_INSTALL_PATH,
+ _TF_TENSORRT_VERSION,
+ ],
+)
+"""Detects and configures the local CUDA toolchain.
+
+Add the following to your WORKSPACE file:
+
+```python
+tensorrt_configure(name = "local_config_tensorrt")
+```
+
+Args:
+ name: A unique name for this workspace rule.
+"""