From c63d21b0bfc534b6377b332e9d2ba2abbdb7e0eb Mon Sep 17 00:00:00 2001 From: "A. Unique TensorFlower" Date: Tue, 25 Sep 2018 22:57:54 -0700 Subject: Adds a build flag to enable MKL (mkl_enabled=true). PiperOrigin-RevId: 214557082 --- third_party/mkl/BUILD | 23 ++++++++++++++------- third_party/mkl/build_defs.bzl | 41 ++++++++++++++++++++++++++++---------- third_party/mkl_dnn/BUILD | 6 +++--- third_party/mkl_dnn/build_defs.bzl | 2 +- 4 files changed, 50 insertions(+), 22 deletions(-) (limited to 'third_party') diff --git a/third_party/mkl/BUILD b/third_party/mkl/BUILD index efff7fd51b..15a3e5cfa7 100644 --- a/third_party/mkl/BUILD +++ b/third_party/mkl/BUILD @@ -1,26 +1,26 @@ licenses(["notice"]) # 3-Clause BSD config_setting( - name = "using_mkl", + name = "build_with_mkl", define_values = { - "using_mkl": "true", + "build_with_mkl": "true", }, visibility = ["//visibility:public"], ) config_setting( - name = "using_mkl_ml_only", + name = "build_with_mkl_ml_only", define_values = { - "using_mkl": "true", - "using_mkl_ml_only": "true", + "build_with_mkl": "true", + "build_with_mkl_ml_only": "true", }, visibility = ["//visibility:public"], ) config_setting( - name = "using_mkl_lnx_x64", + name = "build_with_mkl_lnx_x64", define_values = { - "using_mkl": "true", + "build_with_mkl": "true", }, values = { "cpu": "k8", @@ -28,6 +28,15 @@ config_setting( visibility = ["//visibility:public"], ) +config_setting( + name = "enable_mkl", + define_values = { + "enable_mkl": "true", + "build_with_mkl": "true", + }, + visibility = ["//visibility:public"], +) + load( "//third_party/mkl:build_defs.bzl", "if_mkl", diff --git a/third_party/mkl/build_defs.bzl b/third_party/mkl/build_defs.bzl index b645c0fc5c..bb798e715a 100644 --- a/third_party/mkl/build_defs.bzl +++ b/third_party/mkl/build_defs.bzl @@ -1,9 +1,11 @@ # -*- Python -*- """Skylark macros for MKL. -if_mkl is a conditional to check if MKL is enabled or not. -if_mkl_ml is a conditional to check if MKL-ML is enabled. + +if_mkl is a conditional to check if we are building with MKL. +if_mkl_ml is a conditional to check if we are building with MKL-ML. if_mkl_ml_only is a conditional to check for MKL-ML-only (no MKL-DNN) mode. if_mkl_lnx_x64 is a conditional to check for MKL +if_enable_mkl is a conditional to check if building with MKL and MKL is enabled. mkl_repository is a repository rule for creating MKL repository rule that can be pointed to either a local folder, or download it from the internet. @@ -24,7 +26,7 @@ def if_mkl(if_true, if_false = []): a select evaluating to either if_true or if_false as appropriate. """ return select({ - str(Label("//third_party/mkl:using_mkl")): if_true, + str(Label("//third_party/mkl:build_with_mkl")): if_true, "//conditions:default": if_false, }) @@ -40,8 +42,8 @@ def if_mkl_ml(if_true, if_false = []): a select evaluating to either if_true or if_false as appropriate. """ return select({ - str(Label("//third_party/mkl_dnn:using_mkl_dnn_only")): if_false, - str(Label("//third_party/mkl:using_mkl")): if_true, + str(Label("//third_party/mkl_dnn:build_with_mkl_dnn_only")): if_false, + str(Label("//third_party/mkl:build_with_mkl")): if_true, "//conditions:default": if_false, }) @@ -56,12 +58,12 @@ def if_mkl_ml_only(if_true, if_false = []): a select evaluating to either if_true or if_false as appropriate. 
""" return select({ - str(Label("//third_party/mkl:using_mkl_ml_only")): if_true, + str(Label("//third_party/mkl:build_with_mkl_ml_only")): if_true, "//conditions:default": if_false, }) def if_mkl_lnx_x64(if_true, if_false = []): - """Shorthand to select() on if MKL is on and the target is Linux x86-64. + """Shorthand to select() if building with MKL and the target is Linux x86-64. Args: if_true: expression to evaluate if building with MKL is enabled and the @@ -73,7 +75,24 @@ def if_mkl_lnx_x64(if_true, if_false = []): a select evaluating to either if_true or if_false as appropriate. """ return select({ - str(Label("//third_party/mkl:using_mkl_lnx_x64")): if_true, + str(Label("//third_party/mkl:build_with_mkl_lnx_x64")): if_true, + "//conditions:default": if_false, + }) + +def if_enable_mkl(if_true, if_false = []): + """Shorthand to select() if we are building with MKL and MKL is enabled. + + This is only effective when built with MKL. + + Args: + if_true: expression to evaluate if building with MKL and MKL is enabled + if_false: expression to evaluate if building without MKL or MKL is not enabled. + + Returns: + A select evaluating to either if_true or if_false as appropriate. + """ + return select({ + "//third_party/mkl:enable_mkl": if_true, "//conditions:default": if_false, }) @@ -87,9 +106,9 @@ def mkl_deps(): inclusion in the deps attribute of rules. """ return select({ - str(Label("//third_party/mkl_dnn:using_mkl_dnn_only")): ["@mkl_dnn"], - str(Label("//third_party/mkl:using_mkl_ml_only")): ["//third_party/mkl:intel_binary_blob"], - str(Label("//third_party/mkl:using_mkl")): [ + str(Label("//third_party/mkl_dnn:build_with_mkl_dnn_only")): ["@mkl_dnn"], + str(Label("//third_party/mkl:build_with_mkl_ml_only")): ["//third_party/mkl:intel_binary_blob"], + str(Label("//third_party/mkl:build_with_mkl")): [ "//third_party/mkl:intel_binary_blob", "@mkl_dnn", ], diff --git a/third_party/mkl_dnn/BUILD b/third_party/mkl_dnn/BUILD index 3e567fa9fc..58ecda55e6 100644 --- a/third_party/mkl_dnn/BUILD +++ b/third_party/mkl_dnn/BUILD @@ -3,10 +3,10 @@ licenses(["notice"]) exports_files(["LICENSE"]) config_setting( - name = "using_mkl_dnn_only", + name = "build_with_mkl_dnn_only", define_values = { - "using_mkl": "true", - "using_mkl_dnn_only": "true", + "build_with_mkl": "true", + "build_with_mkl_dnn_only": "true", }, visibility = ["//visibility:public"], ) diff --git a/third_party/mkl_dnn/build_defs.bzl b/third_party/mkl_dnn/build_defs.bzl index 7ce2a7d9b0..6388f31971 100644 --- a/third_party/mkl_dnn/build_defs.bzl +++ b/third_party/mkl_dnn/build_defs.bzl @@ -8,6 +8,6 @@ def if_mkl_open_source_only(if_true, if_false = []): """ return select({ - str(Label("//third_party/mkl_dnn:using_mkl_dnn_only")): if_true, + str(Label("//third_party/mkl_dnn:build_with_mkl_dnn_only")): if_true, "//conditions:default": if_false, }) -- cgit v1.2.3 From ffa90fc521c6051addd50236872a4afaa45e0a49 Mon Sep 17 00:00:00 2001 From: Gunhan Gulsoy Date: Wed, 26 Sep 2018 01:38:55 -0700 Subject: Fixes for building with CUDA on ppc64le. 
PiperOrigin-RevId: 214569615 --- configure.py | 188 +++++++++++++++++++----------------- third_party/gpus/cuda_configure.bzl | 2 + 2 files changed, 101 insertions(+), 89 deletions(-) (limited to 'third_party') diff --git a/configure.py b/configure.py index f0b9fada5e..1064f6a9d4 100644 --- a/configure.py +++ b/configure.py @@ -41,7 +41,6 @@ _DEFAULT_CUDA_PATH = '/usr/local/cuda' _DEFAULT_CUDA_PATH_LINUX = '/opt/cuda' _DEFAULT_CUDA_PATH_WIN = ('C:/Program Files/NVIDIA GPU Computing ' 'Toolkit/CUDA/v%s' % _DEFAULT_CUDA_VERSION) -_DEFAULT_TENSORRT_PATH_LINUX = '/usr/lib/%s-linux-gnu' % platform.machine() _TF_OPENCL_VERSION = '1.2' _DEFAULT_COMPUTECPP_TOOLKIT_PATH = '/usr/local/computecpp' _DEFAULT_TRISYCL_INCLUDE_DIR = '/usr/local/triSYCL/include' @@ -54,6 +53,11 @@ _TF_BAZELRC_FILENAME = '.tf_configure.bazelrc' _TF_BAZELRC = os.path.join(_TF_WORKSPACE_ROOT, _TF_BAZELRC_FILENAME) _TF_WORKSPACE = os.path.join(_TF_WORKSPACE_ROOT, 'WORKSPACE') +if platform.machine() == 'ppc64le': + _DEFAULT_TENSORRT_PATH_LINUX = '/usr/lib/powerpc64le-linux-gnu/' +else: + _DEFAULT_TENSORRT_PATH_LINUX = '/usr/lib/%s-linux-gnu' % platform.machine() + class UserInputError(Exception): pass @@ -153,14 +157,18 @@ def get_python_path(environ_cp, python_bin_path): if environ_cp.get('PYTHONPATH'): python_paths = environ_cp.get('PYTHONPATH').split(':') try: - library_paths = run_shell( - [python_bin_path, '-c', - 'import site; print("\\n".join(site.getsitepackages()))']).split('\n') + library_paths = run_shell([ + python_bin_path, '-c', + 'import site; print("\\n".join(site.getsitepackages()))' + ]).split('\n') except subprocess.CalledProcessError: - library_paths = [run_shell( - [python_bin_path, '-c', - 'from distutils.sysconfig import get_python_lib;' - 'print(get_python_lib())'])] + library_paths = [ + run_shell([ + python_bin_path, '-c', + 'from distutils.sysconfig import get_python_lib;' + 'print(get_python_lib())' + ]) + ] all_paths = set(python_paths + library_paths) @@ -187,8 +195,7 @@ def setup_python(environ_cp): environ_cp, 'PYTHON_BIN_PATH', ask_python_bin_path, default_python_bin_path) # Check if the path is valid - if os.path.isfile(python_bin_path) and os.access( - python_bin_path, os.X_OK): + if os.path.isfile(python_bin_path) and os.access(python_bin_path, os.X_OK): break elif not os.path.exists(python_bin_path): print('Invalid python path: %s cannot be found.' % python_bin_path) @@ -230,8 +237,9 @@ def setup_python(environ_cp): environ_cp['PYTHON_BIN_PATH'] = python_bin_path # Write tools/python_bin_path.sh - with open(os.path.join( - _TF_WORKSPACE_ROOT, 'tools', 'python_bin_path.sh'), 'w') as f: + with open( + os.path.join(_TF_WORKSPACE_ROOT, 'tools', 'python_bin_path.sh'), + 'w') as f: f.write('export PYTHON_BIN_PATH="%s"' % python_bin_path) @@ -250,7 +258,7 @@ def reset_tf_configure_bazelrc(workspace_path): continue f.write('%s\n' % l) if is_windows(): - tf_bazelrc_path = _TF_BAZELRC.replace("\\", "/") + tf_bazelrc_path = _TF_BAZELRC.replace('\\', '/') else: tf_bazelrc_path = _TF_BAZELRC f.write('import %s\n' % tf_bazelrc_path) @@ -261,8 +269,8 @@ def cleanup_makefile(): These files could interfere with Bazel parsing. 
""" - makefile_download_dir = os.path.join( - _TF_WORKSPACE_ROOT, 'tensorflow', 'contrib', 'makefile', 'downloads') + makefile_download_dir = os.path.join(_TF_WORKSPACE_ROOT, 'tensorflow', + 'contrib', 'makefile', 'downloads') if os.path.isdir(makefile_download_dir): for root, _, filenames in os.walk(makefile_download_dir): for f in filenames: @@ -330,9 +338,8 @@ def get_var(environ_cp, 'Environment variable %s must be set as a boolean indicator.\n' 'The following are accepted as TRUE : %s.\n' 'The following are accepted as FALSE: %s.\n' - 'Current value is %s.' % ( - var_name, ', '.join(true_strings), ', '.join(false_strings), - var)) + 'Current value is %s.' % (var_name, ', '.join(true_strings), + ', '.join(false_strings), var)) while var is None: user_input_origin = get_input(question) @@ -355,8 +362,12 @@ def get_var(environ_cp, return var -def set_build_var(environ_cp, var_name, query_item, option_name, - enabled_by_default, bazel_config_name=None): +def set_build_var(environ_cp, + var_name, + query_item, + option_name, + enabled_by_default, + bazel_config_name=None): """Set if query_item will be enabled for the build. Ask user if query_item will be enabled. Default is used if no input is given. @@ -379,8 +390,8 @@ def set_build_var(environ_cp, var_name, query_item, option_name, elif bazel_config_name is not None: # TODO(mikecase): Migrate all users of configure.py to use --config Bazel # options and not to set build configs through environment variables. - write_to_bazelrc('build:%s --define %s=true' - % (bazel_config_name, option_name)) + write_to_bazelrc( + 'build:%s --define %s=true' % (bazel_config_name, option_name)) def set_action_env_var(environ_cp, @@ -447,7 +458,8 @@ def check_bazel_version(min_version): if which('bazel') is None: print('Cannot find bazel. Please install bazel.') sys.exit(0) - curr_version = run_shell(['bazel', '--batch', '--bazelrc=/dev/null', 'version']) + curr_version = run_shell( + ['bazel', '--batch', '--bazelrc=/dev/null', 'version']) for line in curr_version.split('\n'): if 'Build label: ' in line: @@ -499,6 +511,7 @@ def set_cc_opt_flags(environ_cp): write_to_bazelrc('build:opt --host_copt=-march=native') write_to_bazelrc('build:opt --define with_default_optimizations=true') + def set_tf_cuda_clang(environ_cp): """set TF_CUDA_CLANG action_env. @@ -581,16 +594,14 @@ def set_clang_cuda_compiler_path(environ_cp): clang_cuda_compiler_path) -def prompt_loop_or_load_from_env( - environ_cp, - var_name, - var_default, - ask_for_var, - check_success, - error_msg, - suppress_default_error=False, - n_ask_attempts=_DEFAULT_PROMPT_ASK_ATTEMPTS -): +def prompt_loop_or_load_from_env(environ_cp, + var_name, + var_default, + ask_for_var, + check_success, + error_msg, + suppress_default_error=False, + n_ask_attempts=_DEFAULT_PROMPT_ASK_ATTEMPTS): """Loop over user prompts for an ENV param until receiving a valid response. For the env param var_name, read from the environment or verify user input @@ -629,9 +640,7 @@ def prompt_loop_or_load_from_env( ) for _ in range(n_ask_attempts): - val = get_from_env_or_user_or_default(environ_cp, - var_name, - full_query, + val = get_from_env_or_user_or_default(environ_cp, var_name, full_query, default) if check_success(val): break @@ -639,9 +648,9 @@ def prompt_loop_or_load_from_env( print(error_msg % val) environ_cp[var_name] = '' else: - raise UserInputError('Invalid %s setting was provided %d times in a row. ' - 'Assuming to be a scripting mistake.' 
% - (var_name, n_ask_attempts)) + raise UserInputError( + 'Invalid %s setting was provided %d times in a row. ' + 'Assuming to be a scripting mistake.' % (var_name, n_ask_attempts)) environ_cp[var_name] = val return val @@ -650,8 +659,8 @@ def prompt_loop_or_load_from_env( def create_android_ndk_rule(environ_cp): """Set ANDROID_NDK_HOME and write Android NDK WORKSPACE rule.""" if is_windows() or is_cygwin(): - default_ndk_path = cygpath('%s/Android/Sdk/ndk-bundle' % - environ_cp['APPDATA']) + default_ndk_path = cygpath( + '%s/Android/Sdk/ndk-bundle' % environ_cp['APPDATA']) elif is_macos(): default_ndk_path = '%s/library/Android/Sdk/ndk-bundle' % environ_cp['HOME'] else: @@ -668,8 +677,7 @@ def create_android_ndk_rule(environ_cp): ask_for_var='Please specify the home path of the Android NDK to use.', check_success=valid_ndk_path, error_msg=('The path %s or its child file "source.properties" ' - 'does not exist.') - ) + 'does not exist.')) write_action_env_to_bazelrc('ANDROID_NDK_HOME', android_ndk_home_path) write_action_env_to_bazelrc('ANDROID_NDK_API_LEVEL', check_ndk_level(android_ndk_home_path)) @@ -703,9 +711,9 @@ def create_android_sdk_rule(environ_cp): api_levels = [x.replace('android-', '') for x in api_levels] def valid_api_level(api_level): - return os.path.exists(os.path.join(android_sdk_home_path, - 'platforms', - 'android-' + api_level)) + return os.path.exists( + os.path.join(android_sdk_home_path, 'platforms', + 'android-' + api_level)) android_api_level = prompt_loop_or_load_from_env( environ_cp, @@ -720,9 +728,8 @@ def create_android_sdk_rule(environ_cp): versions = sorted(os.listdir(build_tools)) def valid_build_tools(version): - return os.path.exists(os.path.join(android_sdk_home_path, - 'build-tools', - version)) + return os.path.exists( + os.path.join(android_sdk_home_path, 'build-tools', version)) android_build_tools_version = prompt_loop_or_load_from_env( environ_cp, @@ -736,10 +743,8 @@ def create_android_sdk_rule(environ_cp): write_action_env_to_bazelrc('ANDROID_BUILD_TOOLS_VERSION', android_build_tools_version) - write_action_env_to_bazelrc('ANDROID_SDK_API_LEVEL', - android_api_level) - write_action_env_to_bazelrc('ANDROID_SDK_HOME', - android_sdk_home_path) + write_action_env_to_bazelrc('ANDROID_SDK_API_LEVEL', android_api_level) + write_action_env_to_bazelrc('ANDROID_SDK_HOME', android_sdk_home_path) def check_ndk_level(android_ndk_home_path): @@ -798,6 +803,7 @@ def reformat_version_sequence(version_str, sequence_count): Args: version_str: String, the version string. sequence_count: int, an integer. + Returns: string, reformatted version string. 
""" @@ -841,12 +847,19 @@ def set_tf_cuda_version(environ_cp): if is_windows(): cuda_rt_lib_paths = ['lib/x64/cudart.lib'] elif is_linux(): - cuda_rt_lib_paths = ['%s/libcudart.so.%s' % (x, tf_cuda_version) - for x in ['lib64', 'lib/x86_64-linux-gnu']] + cuda_rt_lib_paths = [ + '%s/libcudart.so.%s' % (x, tf_cuda_version) for x in [ + 'lib64', + 'lib/powerpc64le-linux-gnu', + 'lib/x86_64-linux-gnu', + ] + ] elif is_macos(): cuda_rt_lib_paths = ['lib/libcudart.%s.dylib' % tf_cuda_version] - cuda_toolkit_paths_full = [os.path.join(cuda_toolkit_path, x) for x in cuda_rt_lib_paths] + cuda_toolkit_paths_full = [ + os.path.join(cuda_toolkit_path, x) for x in cuda_rt_lib_paths + ] if any([os.path.exists(x) for x in cuda_toolkit_paths_full]): break @@ -919,8 +932,8 @@ def set_tf_cudnn_version(environ_cp): cudnn_path_from_ldconfig) if cudnn_path_from_ldconfig: cudnn_path_from_ldconfig = cudnn_path_from_ldconfig.group(1) - if os.path.exists('%s.%s' % (cudnn_path_from_ldconfig, - tf_cudnn_version)): + if os.path.exists( + '%s.%s' % (cudnn_path_from_ldconfig, tf_cudnn_version)): cudnn_install_path = os.path.dirname(cudnn_path_from_ldconfig) break @@ -1166,6 +1179,7 @@ def get_native_cuda_compute_capabilities(environ_cp): Args: environ_cp: copy of the os.environ. + Returns: string of native cuda compute capabilities, separated by comma. """ @@ -1290,8 +1304,7 @@ def set_computecpp_toolkit_path(environ_cp): else: sycl_rt_lib_path = '' - sycl_rt_lib_path_full = os.path.join(toolkit_path, - sycl_rt_lib_path) + sycl_rt_lib_path_full = os.path.join(toolkit_path, sycl_rt_lib_path) exists = os.path.exists(sycl_rt_lib_path_full) if not exists: print('Invalid SYCL %s library path. %s cannot be found' % @@ -1319,8 +1332,8 @@ def set_trisycl_include_dir(environ_cp): ask_trisycl_include_dir = ('Please specify the location of the triSYCL ' 'include directory. (Use --config=sycl_trisycl ' 'when building with Bazel) ' - '[Default is %s]: ' - ) % (_DEFAULT_TRISYCL_INCLUDE_DIR) + '[Default is %s]: ') % ( + _DEFAULT_TRISYCL_INCLUDE_DIR) while True: trisycl_include_dir = get_from_env_or_user_or_default( @@ -1329,13 +1342,12 @@ def set_trisycl_include_dir(environ_cp): if os.path.exists(trisycl_include_dir): break - print('Invalid triSYCL include directory, %s cannot be found' - % (trisycl_include_dir)) + print('Invalid triSYCL include directory, %s cannot be found' % + (trisycl_include_dir)) # Set TRISYCL_INCLUDE_DIR environ_cp['TRISYCL_INCLUDE_DIR'] = trisycl_include_dir - write_action_env_to_bazelrc('TRISYCL_INCLUDE_DIR', - trisycl_include_dir) + write_action_env_to_bazelrc('TRISYCL_INCLUDE_DIR', trisycl_include_dir) def set_mpi_home(environ_cp): @@ -1345,8 +1357,9 @@ def set_mpi_home(environ_cp): default_mpi_home = os.path.dirname(os.path.dirname(default_mpi_home)) def valid_mpi_path(mpi_home): - exists = (os.path.exists(os.path.join(mpi_home, 'include')) and - os.path.exists(os.path.join(mpi_home, 'lib'))) + exists = ( + os.path.exists(os.path.join(mpi_home, 'include')) and + os.path.exists(os.path.join(mpi_home, 'lib'))) if not exists: print('Invalid path to the MPI Toolkit. 
%s or %s cannot be found' % (os.path.join(mpi_home, 'include'), @@ -1434,11 +1447,9 @@ def set_windows_build_flags(environ_cp): if get_var( environ_cp, 'TF_OVERRIDE_EIGEN_STRONG_INLINE', 'Eigen strong inline', - True, - ('Would you like to override eigen strong inline for some C++ ' - 'compilation to reduce the compilation time?'), - 'Eigen strong inline overridden.', - 'Not overriding eigen strong inline, ' + True, ('Would you like to override eigen strong inline for some C++ ' + 'compilation to reduce the compilation time?'), + 'Eigen strong inline overridden.', 'Not overriding eigen strong inline, ' 'some compilations could take more than 20 mins.'): # Due to a known MSVC compiler issue # https://github.com/tensorflow/tensorflow/issues/10521 @@ -1455,10 +1466,11 @@ def config_info_line(name, help_text): def main(): parser = argparse.ArgumentParser() - parser.add_argument("--workspace", - type=str, - default=_TF_WORKSPACE_ROOT, - help="The absolute path to your active Bazel workspace.") + parser.add_argument( + '--workspace', + type=str, + default=_TF_WORKSPACE_ROOT, + help='The absolute path to your active Bazel workspace.') args = parser.parse_args() # Make a copy of os.environ to be clear when functions and getting and setting @@ -1500,7 +1512,7 @@ def main(): # runtime to allow the Tensorflow testcases which compare numpy # results to Tensorflow results to succeed. if is_ppc64le(): - write_action_env_to_bazelrc("OMP_NUM_THREADS", 1) + write_action_env_to_bazelrc('OMP_NUM_THREADS', 1) set_build_var(environ_cp, 'TF_NEED_JEMALLOC', 'jemalloc as malloc', 'with_jemalloc', True) @@ -1514,12 +1526,12 @@ def main(): 'with_kafka_support', True, 'kafka') set_build_var(environ_cp, 'TF_ENABLE_XLA', 'XLA JIT', 'with_xla_support', False, 'xla') - set_build_var(environ_cp, 'TF_NEED_GDR', 'GDR', 'with_gdr_support', - False, 'gdr') + set_build_var(environ_cp, 'TF_NEED_GDR', 'GDR', 'with_gdr_support', False, + 'gdr') set_build_var(environ_cp, 'TF_NEED_VERBS', 'VERBS', 'with_verbs_support', False, 'verbs') - set_build_var(environ_cp, 'TF_NEED_NGRAPH', 'nGraph', - 'with_ngraph_support', False, 'ngraph') + set_build_var(environ_cp, 'TF_NEED_NGRAPH', 'nGraph', 'with_ngraph_support', + False, 'ngraph') set_action_env_var(environ_cp, 'TF_NEED_OPENCL_SYCL', 'OpenCL SYCL', False) if environ_cp.get('TF_NEED_OPENCL_SYCL') == '1': @@ -1585,13 +1597,10 @@ def main(): # Add a config option to build TensorFlow 2.0 API. 
write_to_bazelrc('build:v2 --define=tf_api_version=2') - if get_var( - environ_cp, 'TF_SET_ANDROID_WORKSPACE', 'android workspace', - False, - ('Would you like to interactively configure ./WORKSPACE for ' - 'Android builds?'), - 'Searching for NDK and SDK installations.', - 'Not configuring the WORKSPACE for Android builds.'): + if get_var(environ_cp, 'TF_SET_ANDROID_WORKSPACE', 'android workspace', False, + ('Would you like to interactively configure ./WORKSPACE for ' + 'Android builds?'), 'Searching for NDK and SDK installations.', + 'Not configuring the WORKSPACE for Android builds.'): create_android_ndk_rule(environ_cp) create_android_sdk_rule(environ_cp) @@ -1605,5 +1614,6 @@ def main(): config_info_line('mkl', 'Build with MKL support.') config_info_line('monolithic', 'Config for mostly static monolithic build.') + if __name__ == '__main__': main() diff --git a/third_party/gpus/cuda_configure.bzl b/third_party/gpus/cuda_configure.bzl index 5648b1525a..f5fdd3a75e 100644 --- a/third_party/gpus/cuda_configure.bzl +++ b/third_party/gpus/cuda_configure.bzl @@ -48,6 +48,7 @@ _DEFAULT_CUDA_COMPUTE_CAPABILITIES = ["3.5", "5.2"] CUDA_LIB_PATHS = [ "lib64/", "lib64/stubs/", + "lib/powerpc64le-linux-gnu/", "lib/x86_64-linux-gnu/", "lib/x64/", "lib/", @@ -70,6 +71,7 @@ CUPTI_HEADER_PATHS = [ # the other CUDA libraries but rather in a special extras/CUPTI directory. CUPTI_LIB_PATHS = [ "extras/CUPTI/lib64/", + "lib/powerpc64le-linux-gnu/", "lib/x86_64-linux-gnu/", "lib64/", "extras/CUPTI/libx64/", -- cgit v1.2.3